Home | History | Annotate | Download | only in bots
      1 // Copyright 2016 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 package main
      6 
      7 /*
      8 	Generate the tasks.json file.
      9 */
     10 
     11 import (
     12 	"encoding/json"
     13 	"flag"
     14 	"fmt"
     15 	"io/ioutil"
     16 	"os"
     17 	"path"
     18 	"path/filepath"
     19 	"regexp"
     20 	"runtime"
     21 	"sort"
     22 	"strconv"
     23 	"strings"
     24 	"time"
     25 
     26 	"github.com/skia-dev/glog"
     27 	"go.skia.org/infra/go/sklog"
     28 	"go.skia.org/infra/go/util"
     29 	"go.skia.org/infra/task_scheduler/go/specs"
     30 )
     31 
const (
	// Names of the Housekeeper-PerCommit tasks which bundle/isolate inputs
	// shared by other tasks. These names double as task names in the
	// generated tasks.json.
	BUNDLE_RECIPES_NAME         = "Housekeeper-PerCommit-BundleRecipes"
	ISOLATE_SKIMAGE_NAME        = "Housekeeper-PerCommit-IsolateSkImage"
	ISOLATE_SKP_NAME            = "Housekeeper-PerCommit-IsolateSKP"
	ISOLATE_SVG_NAME            = "Housekeeper-PerCommit-IsolateSVG"
	ISOLATE_NDK_LINUX_NAME      = "Housekeeper-PerCommit-IsolateAndroidNDKLinux"
	ISOLATE_SDK_LINUX_NAME      = "Housekeeper-PerCommit-IsolateAndroidSDKLinux"
	ISOLATE_WIN_TOOLCHAIN_NAME  = "Housekeeper-PerCommit-IsolateWinToolchain"
	ISOLATE_WIN_VULKAN_SDK_NAME = "Housekeeper-PerCommit-IsolateWinVulkanSDK"

	// Default Swarming "os" dimension values used when mapping the short
	// OS names from job names (see defaultSwarmDimensions).
	DEFAULT_OS_DEBIAN    = "Debian-9.4"
	DEFAULT_OS_LINUX_GCE = DEFAULT_OS_DEBIAN
	DEFAULT_OS_MAC       = "Mac-10.13.3"
	DEFAULT_OS_UBUNTU    = "Ubuntu-14.04"
	DEFAULT_OS_WIN       = "Windows-2016Server-14393"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"
)
     51 
     52 var (
     53 	// "Constants"
     54 
     55 	// Top-level list of all jobs to run at each commit; loaded from
     56 	// jobs.json.
     57 	JOBS []string
     58 
     59 	// General configuration information.
     60 	CONFIG struct {
     61 		GsBucketCoverage string   `json:"gs_bucket_coverage"`
     62 		GsBucketGm       string   `json:"gs_bucket_gm"`
     63 		GsBucketNano     string   `json:"gs_bucket_nano"`
     64 		GsBucketCalm     string   `json:"gs_bucket_calm"`
     65 		NoUpload         []string `json:"no_upload"`
     66 		Pool             string   `json:"pool"`
     67 	}
     68 
     69 	// alternateSwarmDimensions can be set in an init function to override the default swarming bot
     70 	// dimensions for the given task.
     71 	alternateSwarmDimensions func(parts map[string]string) []string
     72 
     73 	// internalHardwareLabelFn can be set in an init function to provide an
     74 	// internal_hardware_label variable to the recipe.
     75 	internalHardwareLabelFn func(parts map[string]string) *int
     76 
     77 	// Defines the structure of job names.
     78 	jobNameSchema *JobNameSchema
     79 
     80 	// Git 2.13.
     81 	cipdGit1 = &specs.CipdPackage{
     82 		Name:    fmt.Sprintf("infra/git/${platform}"),
     83 		Path:    "git",
     84 		Version: fmt.Sprintf("version:2.13.0.chromium9"),
     85 	}
     86 	cipdGit2 = &specs.CipdPackage{
     87 		Name:    fmt.Sprintf("infra/tools/git/${platform}"),
     88 		Path:    "git",
     89 		Version: fmt.Sprintf("git_revision:a78b5f3658c0578a017db48df97d20ac09822bcd"),
     90 	}
     91 
     92 	// Flags.
     93 	builderNameSchemaFile = flag.String("builder_name_schema", "", "Path to the builder_name_schema.json file. If not specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json from this repo.")
     94 	assetsDir             = flag.String("assets_dir", "", "Directory containing assets.")
     95 	cfgFile               = flag.String("cfg_file", "", "JSON file containing general configuration information.")
     96 	jobsFile              = flag.String("jobs", "", "JSON file containing jobs to run.")
     97 )
     98 
     99 // internalHardwareLabel returns the internal ID for the bot, if any.
    100 func internalHardwareLabel(parts map[string]string) *int {
    101 	if internalHardwareLabelFn != nil {
    102 		return internalHardwareLabelFn(parts)
    103 	}
    104 	return nil
    105 }
    106 
    107 // linuxGceDimensions are the Swarming dimensions for Linux GCE
    108 // instances.
    109 func linuxGceDimensions() []string {
    110 	return []string{
    111 		// Specify CPU to avoid running builds on bots with a more unique CPU.
    112 		"cpu:x86-64-Haswell_GCE",
    113 		"gpu:none",
    114 		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
    115 		fmt.Sprintf("pool:%s", CONFIG.Pool),
    116 	}
    117 }
    118 
    119 // deriveCompileTaskName returns the name of a compile task based on the given
    120 // job name.
    121 func deriveCompileTaskName(jobName string, parts map[string]string) string {
    122 	if strings.Contains(jobName, "Bookmaker") {
    123 		return "Build-Debian9-GCC-x86_64-Release"
    124 	} else if parts["role"] == "Housekeeper" {
    125 		return "Build-Debian9-GCC-x86_64-Release-Shared"
    126 	} else if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
    127 		task_os := parts["os"]
    128 		ec := []string{}
    129 		if val := parts["extra_config"]; val != "" {
    130 			ec = strings.Split(val, "_")
    131 			ignore := []string{"Skpbench", "AbandonGpuContext", "PreAbandonGpuContext", "Valgrind", "ReleaseAndAbandonGpuContext", "CCPR", "FSAA", "FAAA", "FDAA", "NativeFonts", "GDI", "NoGPUThreads"}
    132 			keep := make([]string, 0, len(ec))
    133 			for _, part := range ec {
    134 				if !util.In(part, ignore) {
    135 					keep = append(keep, part)
    136 				}
    137 			}
    138 			ec = keep
    139 		}
    140 		if task_os == "Android" {
    141 			if !util.In("Android", ec) {
    142 				ec = append([]string{"Android"}, ec...)
    143 			}
    144 			task_os = "Debian9"
    145 		} else if task_os == "Chromecast" {
    146 			task_os = "Debian9"
    147 			ec = append([]string{"Chromecast"}, ec...)
    148 		} else if strings.Contains(task_os, "ChromeOS") {
    149 			ec = append([]string{"Chromebook", "GLES"}, ec...)
    150 			task_os = "Debian9"
    151 		} else if task_os == "iOS" {
    152 			ec = append([]string{task_os}, ec...)
    153 			task_os = "Mac"
    154 		} else if strings.Contains(task_os, "Win") {
    155 			task_os = "Win"
    156 		} else if strings.Contains(task_os, "Ubuntu") || strings.Contains(task_os, "Debian") {
    157 			task_os = "Debian9"
    158 		}
    159 		jobNameMap := map[string]string{
    160 			"role":          "Build",
    161 			"os":            task_os,
    162 			"compiler":      parts["compiler"],
    163 			"target_arch":   parts["arch"],
    164 			"configuration": parts["configuration"],
    165 		}
    166 		if len(ec) > 0 {
    167 			jobNameMap["extra_config"] = strings.Join(ec, "_")
    168 		}
    169 		name, err := jobNameSchema.MakeJobName(jobNameMap)
    170 		if err != nil {
    171 			glog.Fatal(err)
    172 		}
    173 		return name
    174 	} else {
    175 		return jobName
    176 	}
    177 }
    178 
    179 // swarmDimensions generates swarming bot dimensions for the given task.
    180 func swarmDimensions(parts map[string]string) []string {
    181 	if alternateSwarmDimensions != nil {
    182 		return alternateSwarmDimensions(parts)
    183 	}
    184 	return defaultSwarmDimensions(parts)
    185 }
    186 
// defaultSwarmDimensions generates default swarming bot dimensions for the given task.
// Test/Perf/Calmbench tasks get device/CPU/GPU dimensions derived from the
// job name parts; all other roles run on generic CPU machines. Unknown
// entries in any of the lookup tables are fatal.
func defaultSwarmDimensions(parts map[string]string) []string {
	d := map[string]string{
		"pool": CONFIG.Pool,
	}
	if os, ok := parts["os"]; ok {
		// Map the short OS name used in job names to the full Swarming
		// "os" dimension value.
		d["os"], ok = map[string]string{
			"Android":    "Android",
			"Chromecast": "Android",
			"ChromeOS":   "ChromeOS",
			"Debian9":    DEFAULT_OS_DEBIAN,
			"Mac":        DEFAULT_OS_MAC,
			"Ubuntu14":   DEFAULT_OS_UBUNTU,
			"Ubuntu16":   "Ubuntu-16.10",
			"Ubuntu17":   "Ubuntu-17.04",
			"Win":        DEFAULT_OS_WIN,
			"Win10":      "Windows-10-16299.248",
			"Win2k8":     "Windows-2008ServerR2-SP1",
			"Win2016":    DEFAULT_OS_WIN,
			"Win7":       "Windows-7-SP1",
			"Win8":       "Windows-8.1-SP0",
			"iOS":        "iOS-10.3.1",
		}[os]
		if !ok {
			glog.Fatalf("Entry %q not found in OS mapping.", os)
		}
		if os == "Win10" && parts["model"] == "Golo" {
			// Golo/MTV lab bots have Windows 10 version 1703, whereas Skolo bots have Windows 10 version
			// 1709.
			d["os"] = "Windows-10-15063"
		}
	} else {
		d["os"] = DEFAULT_OS_DEBIAN
	}
	if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
		if strings.Contains(parts["os"], "Android") || strings.Contains(parts["os"], "Chromecast") {
			// For Android, the device type is a better dimension
			// than CPU or GPU.
			// Maps model name -> {device_type, device_os}.
			deviceInfo, ok := map[string][]string{
				"AndroidOne":      {"sprout", "MOB30Q"},
				"Chorizo":         {"chorizo", "1.30_109591"},
				"GalaxyS6":        {"zerofltetmo", "NRD90M_G920TUVU5FQK1"},
				"GalaxyS7_G930A":  {"heroqlteatt", "NRD90M_G930AUCS4BQC2"},
				"GalaxyS7_G930FD": {"herolte", "NRD90M_G930FXXU1DQAS"},
				"MotoG4":          {"athene", "NPJ25.93-14"},
				"NVIDIA_Shield":   {"foster", "NRD90M_1915764_848"},
				"Nexus5":          {"hammerhead", "M4B30Z_3437181"},
				"Nexus5x":         {"bullhead", "OPR6.170623.023"},
				"Nexus7":          {"grouper", "LMY47V_1836172"}, // 2012 Nexus 7
				"NexusPlayer":     {"fugu", "OPR6.170623.021"},
				"Pixel":           {"sailfish", "OPR3.170623.008"},
				"Pixel2XL":        {"taimen", "OPD1.170816.023"},
				"PixelC":          {"dragon", "OPR1.170623.034"},
				"PixelXL":         {"marlin", "OPR3.170623.008"},
			}[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in Android mapping.", parts["model"])
			}
			d["device_type"] = deviceInfo[0]
			d["device_os"] = deviceInfo[1]
			// TODO(kjlubick): Remove the python dimension after we have removed the
			// Nexus5x devices from the local lab (on Monday, Dec 11, 2017 should be fine).
			d["python"] = "2.7.9" // This indicates a RPI, e.g. in Skolo.  Golo is 2.7.12
			if parts["model"] == "Nexus5x" {
				d["python"] = "2.7.12"
			}
		} else if strings.Contains(parts["os"], "iOS") {
			// iOS devices are selected by hardware model.
			device, ok := map[string]string{
				"iPadMini4": "iPad5,1",
				"iPhone6":   "iPhone7,2",
				"iPhone7":   "iPhone9,1",
				"iPadPro":   "iPad6,3",
			}[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in iOS mapping.", parts["model"])
			}
			d["device"] = device
		} else if parts["cpu_or_gpu"] == "CPU" {
			// CPU tasks: select the bot by exact CPU, keyed first on the
			// CPU feature level (AVX/AVX2/AVX512), then on machine model.
			modelMapping, ok := map[string]map[string]string{
				"AVX": {
					"MacMini7.1": "x86-64-E5-2697_v2",
					"Golo":       "x86-64-E5-2670",
				},
				"AVX2": {
					"GCE":       "x86-64-Haswell_GCE",
					"NUC5i7RYH": "x86-64-i7-5557U",
				},
				"AVX512": {
					"GCE": "x86-64-Skylake_GCE",
				},
			}[parts["cpu_or_gpu_value"]]
			if !ok {
				glog.Fatalf("Entry %q not found in CPU mapping.", parts["cpu_or_gpu_value"])
			}
			cpu, ok := modelMapping[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in %q model mapping.", parts["model"], parts["cpu_or_gpu_value"])
			}
			d["cpu"] = cpu
			if parts["model"] == "GCE" && d["os"] == DEFAULT_OS_DEBIAN {
				d["os"] = DEFAULT_OS_LINUX_GCE
			}
			if parts["model"] == "GCE" && d["os"] == DEFAULT_OS_WIN {
				// Use normal-size machines for Test and Perf tasks on Win GCE.
				d["machine_type"] = "n1-standard-16"
			}
		} else {
			// GPU tasks: select the bot by GPU (vendor:device, optionally
			// with a driver version), per OS family.
			if strings.Contains(parts["os"], "Win") {
				gpu, ok := map[string]string{
					"GT610":         "10de:104a-22.21.13.8205",
					"GTX1070":       "10de:1ba1-23.21.13.8813",
					"GTX660":        "10de:11c0-23.21.13.8813",
					"GTX960":        "10de:1401-23.21.13.8813",
					"IntelHD530":    "8086:1912-21.20.16.4590",
					"IntelHD4400":   "8086:0a16-20.19.15.4703",
					"IntelHD4600":   "8086:0412-20.19.15.4703",
					"IntelIris540":  "8086:1926-21.20.16.4839",
					"IntelIris6100": "8086:162b-20.19.15.4703",
					"RadeonHD7770":  "1002:683d-22.19.165.512",
					"RadeonR9M470X": "1002:6646-22.19.165.512",
					"QuadroP400":    "10de:1cb3-22.21.13.8205",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Win GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu

				// Specify cpu dimension for NUCs and ShuttleCs. We temporarily have two
				// types of machines with a GTX960.
				cpu, ok := map[string]string{
					"NUC6i7KYK": "x86-64-i7-6770HQ",
					"ShuttleC":  "x86-64-i7-6700K",
				}[parts["model"]]
				if ok {
					d["cpu"] = cpu
				}
			} else if strings.Contains(parts["os"], "Ubuntu") || strings.Contains(parts["os"], "Debian") {
				gpu, ok := map[string]string{
					// Intel drivers come from CIPD, so no need to specify the version here.
					"IntelBayTrail": "8086:0f31",
					"IntelHD2000":   "8086:0102",
					"IntelHD405":    "8086:22b1",
					"IntelIris640":  "8086:5926",
					"QuadroP400":    "10de:1cb3-384.59",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Ubuntu GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
			} else if strings.Contains(parts["os"], "Mac") {
				gpu, ok := map[string]string{
					"IntelIris5100": "8086:0a2e",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Mac GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
				// Yuck. We have two different types of MacMini7,1 with the same GPU but different CPUs.
				if parts["cpu_or_gpu_value"] == "IntelIris5100" {
					// Run all tasks on Golo machines for now.
					d["cpu"] = "x86-64-i7-4578U"
				}
			} else if strings.Contains(parts["os"], "ChromeOS") {
				// ChromeOS bots are selected by GPU plus OS release version.
				version, ok := map[string]string{
					"MaliT604":           "9901.12.0",
					"MaliT764":           "10172.0.0",
					"MaliT860":           "10172.0.0",
					"PowerVRGX6250":      "10176.5.0",
					"TegraK1":            "10172.0.0",
					"IntelHDGraphics615": "10032.17.0",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in ChromeOS GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = parts["cpu_or_gpu_value"]
				d["release_version"] = version
			} else {
				glog.Fatalf("Unknown GPU mapping for OS %q.", parts["os"])
			}
		}
	} else {
		// Non-Test/Perf/Calmbench roles (e.g. Build, Housekeeper) run on
		// GPU-less machines.
		d["gpu"] = "none"
		if d["os"] == DEFAULT_OS_DEBIAN {
			return linuxGceDimensions()
		} else if d["os"] == DEFAULT_OS_WIN {
			// Windows CPU bots.
			d["cpu"] = "x86-64-Haswell_GCE"
			// Use many-core machines for Build tasks on Win GCE, except for Goma.
			if strings.Contains(parts["extra_config"], "Goma") {
				d["machine_type"] = "n1-standard-16"
			} else {
				d["machine_type"] = "n1-highcpu-64"
			}
		} else if d["os"] == DEFAULT_OS_MAC {
			// Mac CPU bots.
			d["cpu"] = "x86-64-E5-2697_v2"
		}
	}

	// Flatten the dimension map into sorted "key:value" strings.
	rv := make([]string, 0, len(d))
	for k, v := range d {
		rv = append(rv, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(rv)
	return rv
}
    393 
    394 // relpath returns the relative path to the given file from the config file.
    395 func relpath(f string) string {
    396 	_, filename, _, _ := runtime.Caller(0)
    397 	dir := path.Dir(filename)
    398 	rel := dir
    399 	if *cfgFile != "" {
    400 		rel = path.Dir(*cfgFile)
    401 	}
    402 	rv, err := filepath.Rel(rel, path.Join(dir, f))
    403 	if err != nil {
    404 		sklog.Fatal(err)
    405 	}
    406 	return rv
    407 }
    408 
// bundleRecipes generates the task to bundle and isolate the recipes.
// Returns the task name so that jobs can add it as a dependency.
func bundleRecipes(b *specs.TasksCfgBuilder) string {
	b.MustAddTask(BUNDLE_RECIPES_NAME, &specs.TaskSpec{
		// Both Git CIPD packages install into the "git" path.
		CipdPackages: []*specs.CipdPackage{cipdGit1, cipdGit2},
		Dimensions:   linuxGceDimensions(),
		ExtraArgs: []string{
			"--workdir", "../../..", "bundle_recipes",
			fmt.Sprintf("buildername=%s", BUNDLE_RECIPES_NAME),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
		},
		Isolate:  relpath("bundle_recipes.isolate"),
		Priority: 0.7,
	})
	return BUNDLE_RECIPES_NAME
}
    424 
    425 // useBundledRecipes returns true iff the given bot should use bundled recipes
    426 // instead of syncing recipe DEPS itself.
    427 func useBundledRecipes(parts map[string]string) bool {
    428 	// Use bundled recipes for all test/perf tasks.
    429 	return true
    430 }
    431 
// isolateAssetCfg describes how to isolate a single CIPD asset: the isolate
// file used to isolate it and the name of the CIPD package containing it.
type isolateAssetCfg struct {
	isolateFile string
	cipdPkg     string
}
    436 
// ISOLATE_ASSET_MAPPING maps each Isolate-* task name to the isolate file
// and CIPD package used to isolate that asset (see isolateCIPDAsset).
var ISOLATE_ASSET_MAPPING = map[string]isolateAssetCfg{
	ISOLATE_SKIMAGE_NAME: {
		isolateFile: "isolate_skimage.isolate",
		cipdPkg:     "skimage",
	},
	ISOLATE_SKP_NAME: {
		isolateFile: "isolate_skp.isolate",
		cipdPkg:     "skp",
	},
	ISOLATE_SVG_NAME: {
		isolateFile: "isolate_svg.isolate",
		cipdPkg:     "svg",
	},
	ISOLATE_NDK_LINUX_NAME: {
		isolateFile: "isolate_ndk_linux.isolate",
		cipdPkg:     "android_ndk_linux",
	},
	ISOLATE_SDK_LINUX_NAME: {
		isolateFile: "isolate_android_sdk_linux.isolate",
		cipdPkg:     "android_sdk_linux",
	},
	ISOLATE_WIN_TOOLCHAIN_NAME: {
		isolateFile: "isolate_win_toolchain.isolate",
		cipdPkg:     "win_toolchain",
	},
	ISOLATE_WIN_VULKAN_SDK_NAME: {
		isolateFile: "isolate_win_vulkan_sdk.isolate",
		cipdPkg:     "win_vulkan_sdk",
	},
}
    467 
    468 // bundleRecipes generates the task to bundle and isolate the recipes.
    469 func isolateCIPDAsset(b *specs.TasksCfgBuilder, name string) string {
    470 	b.MustAddTask(name, &specs.TaskSpec{
    471 		CipdPackages: []*specs.CipdPackage{
    472 			b.MustGetCipdPackageFromAsset(ISOLATE_ASSET_MAPPING[name].cipdPkg),
    473 		},
    474 		Dimensions: linuxGceDimensions(),
    475 		Isolate:    relpath(ISOLATE_ASSET_MAPPING[name].isolateFile),
    476 		Priority:   0.7,
    477 	})
    478 	return name
    479 }
    480 
    481 // getIsolatedCIPDDeps returns the slice of Isolate_* tasks a given task needs.
    482 // This allows us to  save time on I/O bound bots, like the RPIs.
    483 func getIsolatedCIPDDeps(parts map[string]string) []string {
    484 	deps := []string{}
    485 	// Only do this on the RPIs for now. Other, faster machines shouldn't see much
    486 	// benefit and we don't need the extra complexity, for now
    487 	rpiOS := []string{"Android", "ChromeOS", "iOS"}
    488 
    489 	if o := parts["os"]; strings.Contains(o, "Chromecast") {
    490 		// Chromecasts don't have enough disk space to fit all of the content,
    491 		// so we do a subset of the skps.
    492 		deps = append(deps, ISOLATE_SKP_NAME)
    493 	} else if e := parts["extra_config"]; strings.Contains(e, "Skpbench") {
    494 		// Skpbench only needs skps
    495 		deps = append(deps, ISOLATE_SKP_NAME)
    496 	} else if util.In(o, rpiOS) {
    497 		deps = append(deps, ISOLATE_SKP_NAME)
    498 		deps = append(deps, ISOLATE_SVG_NAME)
    499 		deps = append(deps, ISOLATE_SKIMAGE_NAME)
    500 	}
    501 
    502 	return deps
    503 }
    504 
// compile generates a compile task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
// The task's CIPD packages and task dependencies are chosen from the build
// name (toolchains, SDKs, GPU libraries); for Release Android builds it also
// adds an upload task and returns that task's name instead.
func compile(b *specs.TasksCfgBuilder, name string, parts map[string]string) string {
	// Collect the necessary CIPD packages.
	pkgs := []*specs.CipdPackage{}
	deps := []string{}

	// Android bots require a toolchain.
	if strings.Contains(name, "Android") {
		if parts["extra_config"] == "Android_Framework" {
			// Do not need a toolchain when building the
			// Android Framework.
		} else if strings.Contains(name, "Mac") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("android_ndk_darwin"))
		} else if strings.Contains(name, "Win") {
			// NOTE(review): the short "n" path presumably works around
			// Windows path-length limits — confirm before changing.
			pkg := b.MustGetCipdPackageFromAsset("android_ndk_windows")
			pkg.Path = "n"
			pkgs = append(pkgs, pkg)
		} else {
			// Linux builds get the NDK via an isolated task rather than a
			// direct CIPD package.
			deps = append(deps, isolateCIPDAsset(b, ISOLATE_NDK_LINUX_NAME))
			if strings.Contains(name, "SKQP") {
				// SKQP builds additionally need the Android SDK.
				deps = append(deps, isolateCIPDAsset(b, ISOLATE_SDK_LINUX_NAME))
			}
		}
	} else if strings.Contains(name, "Chromecast") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("cast_toolchain"))
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
	} else if strings.Contains(name, "Chromebook") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		if parts["target_arch"] == "x86_64" {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("chromebook_x86_64_gles"))
		} else if parts["target_arch"] == "arm" {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("armhf_sysroot"))
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
		}
	} else if strings.Contains(name, "Debian") {
		if strings.Contains(name, "Clang") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
		if strings.Contains(name, "EMCC") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("emscripten_sdk"))
		}
	} else if strings.Contains(name, "Win") {
		deps = append(deps, isolateCIPDAsset(b, ISOLATE_WIN_TOOLCHAIN_NAME))
		if strings.Contains(name, "Clang") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_win"))
		}
		if strings.Contains(name, "Vulkan") {
			deps = append(deps, isolateCIPDAsset(b, ISOLATE_WIN_VULKAN_SDK_NAME))
		}
	}

	dimensions := swarmDimensions(parts)

	// Add the task.
	b.MustAddTask(name, &specs.TaskSpec{
		CipdPackages: pkgs,
		Dimensions:   dimensions,
		Dependencies: deps,
		ExtraArgs: []string{
			// Recipe invocation; the specs.PLACEHOLDER_* values are
			// substituted by the task scheduler at run time.
			"--workdir", "../../..", "compile",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		Isolate:  relpath("compile_skia.isolate"),
		Priority: 0.8,
	})
	// All compile tasks are runnable as their own Job. Assert that the Job
	// is listed in JOBS.
	if !util.In(name, JOBS) {
		glog.Fatalf("Job %q is missing from the JOBS list!", name)
	}

	// Upload the skiaserve binary only for Linux Android compile bots.
	// See skbug.com/7399 for context.
	if parts["configuration"] == "Release" &&
		parts["extra_config"] == "Android" &&
		!strings.Contains(parts["os"], "Win") &&
		!strings.Contains(parts["os"], "Mac") {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		b.MustAddTask(uploadName, &specs.TaskSpec{
			Dependencies: []string{name},
			Dimensions:   linuxGceDimensions(),
			ExtraArgs: []string{
				"--workdir", "../../..", "upload_skiaserve",
				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
				fmt.Sprintf("buildername=%s", name),
				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
				fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
			},
			// We're using the same isolate as upload_dm_results
			Isolate:  relpath("upload_dm_results.isolate"),
			Priority: 0.8,
		})
		return uploadName
	}

	return name
}
    617 
// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
// task in the generated chain of tasks, which the Job should add as a
// dependency. The task runs the recreate_skps recipe on a Linux GCE bot with
// extended execution and I/O timeouts.
func recreateSKPs(b *specs.TasksCfgBuilder, name string) string {
	b.MustAddTask(name, &specs.TaskSpec{
		// The recipe requires the Go toolchain from CIPD.
		CipdPackages:     []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("go")},
		Dimensions:       linuxGceDimensions(),
		ExecutionTimeout: 4 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", "recreate_skps",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout: 40 * time.Minute,
		Isolate:   relpath("compile_skia.isolate"),
		Priority:  0.8,
	})
	return name
}
    643 
// updateMetaConfig generates a UpdateMetaConfig task. Returns the name of the
// last task in the generated chain of tasks, which the Job should add as a
// dependency. The task runs the update_meta_config recipe on a Linux GCE bot.
func updateMetaConfig(b *specs.TasksCfgBuilder, name string) string {
	b.MustAddTask(name, &specs.TaskSpec{
		// No CIPD packages required.
		CipdPackages: []*specs.CipdPackage{},
		Dimensions:   linuxGceDimensions(),
		ExtraArgs: []string{
			"--workdir", "../../..", "update_meta_config",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		Isolate:  relpath("meta_config.isolate"),
		Priority: 0.8,
	})
	return name
}
    667 
// ctSKPs generates a CT SKPs task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
// Unlike most tasks, it runs in the dedicated "SkiaCT" pool with a 24-hour
// execution timeout.
func ctSKPs(b *specs.TasksCfgBuilder, name string) string {
	b.MustAddTask(name, &specs.TaskSpec{
		CipdPackages: []*specs.CipdPackage{},
		// Note: not linuxGceDimensions(); this task uses its own pool.
		Dimensions: []string{
			"pool:SkiaCT",
			fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
		},
		ExecutionTimeout: 24 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", "ct_skps",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout: time.Hour,
		Isolate:   relpath("ct_skps_skia.isolate"),
		Priority:  0.8,
	})
	return name
}
    695 
// checkGeneratedFiles verifies that no generated SKSL files have been edited
// by hand. Returns the task name, which the Job should add as a dependency.
func checkGeneratedFiles(b *specs.TasksCfgBuilder, name string) string {
	b.MustAddTask(name, &specs.TaskSpec{
		// No CIPD packages required.
		CipdPackages: []*specs.CipdPackage{},
		Dimensions:   linuxGceDimensions(),
		ExtraArgs: []string{
			"--workdir", "../../..", "check_generated_files",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		Isolate:  relpath("compile_skia.isolate"),
		Priority: 0.8,
	})
	return name
}
    718 
// housekeeper generates a Housekeeper task. Returns the name of the last task
// in the generated chain of tasks, which the Job should add as a dependency.
// The task depends on the given compile task and runs on a Linux GCE bot.
func housekeeper(b *specs.TasksCfgBuilder, name, compileTaskName string) string {
	b.MustAddTask(name, &specs.TaskSpec{
		// The recipe requires the Go toolchain from CIPD.
		CipdPackages: []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("go")},
		Dependencies: []string{compileTaskName},
		Dimensions:   linuxGceDimensions(),
		ExtraArgs: []string{
			"--workdir", "../../..", "housekeeper",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		Isolate:  relpath("housekeeper_skia.isolate"),
		Priority: 0.8,
	})
	return name
}
    742 
    743 // bookmaker generates a Bookmaker task. Returns the name of the last task
    744 // in the generated chain of tasks, which the Job should add as a dependency.
    745 func bookmaker(b *specs.TasksCfgBuilder, name, compileTaskName string) string {
    746 	b.MustAddTask(name, &specs.TaskSpec{
    747 		CipdPackages: []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("go")},
    748 		Dependencies: []string{compileTaskName},
    749 		Dimensions:   linuxGceDimensions(),
    750 		ExtraArgs: []string{
    751 			"--workdir", "../../..", "bookmaker",
    752 			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
    753 			fmt.Sprintf("buildername=%s", name),
    754 			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
    755 			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
    756 			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
    757 			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
    758 			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
    759 			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
    760 		},
    761 		Isolate:          relpath("compile_skia.isolate"),
    762 		Priority:         0.8,
    763 		ExecutionTimeout: 2 * time.Hour,
    764 		IoTimeout:        2 * time.Hour,
    765 	})
    766 	return name
    767 }
    768 
    769 // androidFrameworkCompile generates an Android Framework Compile task. Returns
    770 // the name of the last task in the generated chain of tasks, which the Job
    771 // should add as a dependency.
    772 func androidFrameworkCompile(b *specs.TasksCfgBuilder, name string) string {
    773 	b.MustAddTask(name, &specs.TaskSpec{
    774 		Dimensions: linuxGceDimensions(),
    775 		ExtraArgs: []string{
    776 			"--workdir", "../../..", "android_compile",
    777 			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
    778 			fmt.Sprintf("buildername=%s", name),
    779 			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
    780 			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
    781 			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
    782 			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
    783 			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
    784 			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
    785 		},
    786 		Isolate:  relpath("compile_skia.isolate"),
    787 		Priority: 0.8,
    788 	})
    789 	return name
    790 }
    791 
    792 // infra generates an infra_tests task. Returns the name of the last task in the
    793 // generated chain of tasks, which the Job should add as a dependency.
    794 func infra(b *specs.TasksCfgBuilder, name string) string {
    795 	b.MustAddTask(name, &specs.TaskSpec{
    796 		CipdPackages: []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("go")},
    797 		Dimensions:   linuxGceDimensions(),
    798 		ExtraArgs: []string{
    799 			"--workdir", "../../..", "infra",
    800 			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
    801 			fmt.Sprintf("buildername=%s", name),
    802 			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
    803 			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
    804 			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
    805 			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
    806 			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
    807 			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
    808 		},
    809 		Isolate:  relpath("infra_skia.isolate"),
    810 		Priority: 0.8,
    811 	})
    812 	return name
    813 }
    814 
    815 func getParentRevisionName(compileTaskName string, parts map[string]string) string {
    816 	if parts["extra_config"] == "" {
    817 		return compileTaskName + "-ParentRevision"
    818 	} else {
    819 		return compileTaskName + "_ParentRevision"
    820 	}
    821 }
    822 
    823 // calmbench generates a calmbench task. Returns the name of the last task in the
    824 // generated chain of tasks, which the Job should add as a dependency.
    825 func calmbench(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, compileParentName string) string {
    826 	s := &specs.TaskSpec{
    827 		Dependencies: []string{compileTaskName, compileParentName},
    828 		CipdPackages: []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("clang_linux")},
    829 		Dimensions:   swarmDimensions(parts),
    830 		ExtraArgs: []string{
    831 			"--workdir", "../../..", "calmbench",
    832 			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
    833 			fmt.Sprintf("buildername=%s", name),
    834 			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
    835 			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
    836 			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
    837 			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
    838 			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
    839 			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
    840 		},
    841 		Isolate:  relpath("calmbench.isolate"),
    842 		Priority: 0.8,
    843 	}
    844 
    845 	s.Dependencies = append(s.Dependencies, ISOLATE_SKP_NAME, ISOLATE_SVG_NAME)
    846 
    847 	b.MustAddTask(name, s)
    848 
    849 	// Upload results if necessary.
    850 	if strings.Contains(name, "Release") && doUpload(name) {
    851 		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
    852 		b.MustAddTask(uploadName, &specs.TaskSpec{
    853 			Dependencies: []string{name},
    854 			Dimensions:   linuxGceDimensions(),
    855 			ExtraArgs: []string{
    856 				"--workdir", "../../..", "upload_calmbench_results",
    857 				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
    858 				fmt.Sprintf("buildername=%s", name),
    859 				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
    860 				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
    861 				fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
    862 				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
    863 				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
    864 				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
    865 				fmt.Sprintf("gs_bucket=%s", CONFIG.GsBucketCalm),
    866 			},
    867 			// We're using the same isolate as upload_nano_results
    868 			Isolate:  relpath("upload_nano_results.isolate"),
    869 			Priority: 0.8,
    870 		})
    871 		return uploadName
    872 	}
    873 
    874 	return name
    875 }
    876 
    877 // doUpload indicates whether the given Job should upload its results.
    878 func doUpload(name string) bool {
    879 	for _, s := range CONFIG.NoUpload {
    880 		m, err := regexp.MatchString(s, name)
    881 		if err != nil {
    882 			glog.Fatal(err)
    883 		}
    884 		if m {
    885 			return false
    886 		}
    887 	}
    888 	return true
    889 }
    890 
// test generates a Test task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func test(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Every Test task depends on its compile task; Android ASAN bots also
	// need the Android NDK, delivered via an isolate task.
	deps := []string{compileTaskName}
	if strings.Contains(name, "Android_ASAN") {
		deps = append(deps, isolateCIPDAsset(b, ISOLATE_NDK_LINUX_NAME))
	}

	// Baseline spec; several fields are adjusted below for special configs.
	s := &specs.TaskSpec{
		CipdPackages:     pkgs,
		Dependencies:     deps,
		Dimensions:       swarmDimensions(parts),
		ExecutionTimeout: 4 * time.Hour,
		Expiration:       20 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", "test",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildbucket_build_id=%s", specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout:   40 * time.Minute,
		Isolate:     relpath("test_skia.isolate"),
		MaxAttempts: 1,
		Priority:    0.8,
	}
	// Bots that use bundled recipes depend on the bundle task and use an
	// OS-specific isolate variant.
	if useBundledRecipes(parts) {
		s.Dependencies = append(s.Dependencies, BUNDLE_RECIPES_NAME)
		if strings.Contains(parts["os"], "Win") {
			s.Isolate = relpath("test_skia_bundled_win.isolate")
		} else {
			s.Isolate = relpath("test_skia_bundled_unix.isolate")
		}
	}
	// Test inputs delivered as isolated CIPD assets rather than CipdPackages.
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		s.Dependencies = append(s.Dependencies, deps...)
	}
	// Timeout/package tweaks for known-slow configurations. The order
	// matters: Valgrind takes precedence over MSAN, which takes precedence
	// over x86/Debug.
	if strings.Contains(parts["extra_config"], "Valgrind") {
		s.ExecutionTimeout = 9 * time.Hour
		s.Expiration = 48 * time.Hour
		s.IoTimeout = time.Hour
		s.CipdPackages = append(s.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		s.Dimensions = append(s.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		s.ExecutionTimeout = 9 * time.Hour
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		s.ExecutionTimeout = 6 * time.Hour
	}
	// Internal-lab devices are selected via an extra recipe property.
	iid := internalHardwareLabel(parts)
	if iid != nil {
		s.ExtraArgs = append(s.ExtraArgs, fmt.Sprintf("internal_hardware_label=%d", *iid))
	}
	b.MustAddTask(name, s)

	// Upload results if necessary. TODO(kjlubick): If we do coverage analysis at the same
	// time as normal tests (which would be nice), cfg.json needs to have Coverage removed.
	if doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		b.MustAddTask(uploadName, &specs.TaskSpec{
			Dependencies: []string{name},
			Dimensions:   linuxGceDimensions(),
			ExtraArgs: []string{
				"--workdir", "../../..", "upload_dm_results",
				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
				fmt.Sprintf("buildername=%s", name),
				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
				fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
				fmt.Sprintf("gs_bucket=%s", CONFIG.GsBucketGm),
			},
			Isolate:  relpath("upload_dm_results.isolate"),
			Priority: 0.8,
		})
		// The Job should depend on the upload task, not the Test task itself.
		return uploadName
	}

	return name
}
    978 
    979 func coverage(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
    980 	shards := 1
    981 	deps := []string{}
    982 
    983 	tf := parts["test_filter"]
    984 	if strings.Contains(tf, "Shard") {
    985 		// Expected Shard_NN
    986 		shardstr := strings.Split(tf, "_")[1]
    987 		var err error
    988 		shards, err = strconv.Atoi(shardstr)
    989 		if err != nil {
    990 			glog.Fatalf("Expected int for number of shards %q in %s: %s", shardstr, name, err)
    991 		}
    992 	}
    993 	for i := 0; i < shards; i++ {
    994 		n := strings.Replace(name, tf, fmt.Sprintf("shard_%02d_%02d", i, shards), 1)
    995 		s := &specs.TaskSpec{
    996 			CipdPackages:     pkgs,
    997 			Dependencies:     []string{compileTaskName},
    998 			Dimensions:       swarmDimensions(parts),
    999 			ExecutionTimeout: 4 * time.Hour,
   1000 			Expiration:       20 * time.Hour,
   1001 			ExtraArgs: []string{
   1002 				"--workdir", "../../..", "test",
   1003 				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
   1004 				fmt.Sprintf("buildername=%s", n),
   1005 				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
   1006 				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
   1007 				fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
   1008 				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
   1009 				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
   1010 				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
   1011 			},
   1012 			IoTimeout:   40 * time.Minute,
   1013 			Isolate:     relpath("test_skia.isolate"),
   1014 			MaxAttempts: 1,
   1015 			Priority:    0.8,
   1016 		}
   1017 		if useBundledRecipes(parts) {
   1018 			s.Dependencies = append(s.Dependencies, BUNDLE_RECIPES_NAME)
   1019 			if strings.Contains(parts["os"], "Win") {
   1020 				s.Isolate = relpath("test_skia_bundled_win.isolate")
   1021 			} else {
   1022 				s.Isolate = relpath("test_skia_bundled_unix.isolate")
   1023 			}
   1024 		}
   1025 		if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
   1026 			s.Dependencies = append(s.Dependencies, deps...)
   1027 		}
   1028 		b.MustAddTask(n, s)
   1029 		deps = append(deps, n)
   1030 	}
   1031 
   1032 	uploadName := fmt.Sprintf("%s%s%s", "Upload", jobNameSchema.Sep, name)
   1033 	// We need clang_linux to get access to the llvm-profdata and llvm-cov binaries
   1034 	// which are used to deal with the raw coverage data output by the Test step.
   1035 	pkgs = append([]*specs.CipdPackage{}, b.MustGetCipdPackageFromAsset("clang_linux"))
   1036 	deps = append(deps, compileTaskName)
   1037 
   1038 	b.MustAddTask(uploadName, &specs.TaskSpec{
   1039 		// A dependency on compileTaskName makes the TaskScheduler link the
   1040 		// isolated output of the compile step to the input of the upload step,
   1041 		// which gives us access to the instrumented binary. The binary is
   1042 		// needed to figure out symbol names and line numbers.
   1043 		Dependencies: deps,
   1044 		Dimensions:   linuxGceDimensions(),
   1045 		CipdPackages: pkgs,
   1046 		ExtraArgs: []string{
   1047 			"--workdir", "../../..", "upload_coverage_results",
   1048 			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
   1049 			fmt.Sprintf("buildername=%s", name),
   1050 			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
   1051 			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
   1052 			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
   1053 			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
   1054 			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
   1055 			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
   1056 			fmt.Sprintf("gs_bucket=%s", CONFIG.GsBucketCoverage),
   1057 		},
   1058 		Isolate:  relpath("upload_coverage_results.isolate"),
   1059 		Priority: 0.8,
   1060 	})
   1061 	return uploadName
   1062 }
   1063 
// perf generates a Perf task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func perf(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Skpbench bots run a different recipe and isolate; bundled-recipe bots
	// use OS-specific isolate variants of whichever recipe applies.
	recipe := "perf"
	isolate := relpath("perf_skia.isolate")
	if strings.Contains(parts["extra_config"], "Skpbench") {
		recipe = "skpbench"
		isolate = relpath("skpbench_skia.isolate")
		if useBundledRecipes(parts) {
			if strings.Contains(parts["os"], "Win") {
				isolate = relpath("skpbench_skia_bundled_win.isolate")
			} else {
				isolate = relpath("skpbench_skia_bundled_unix.isolate")
			}
		}
	} else if useBundledRecipes(parts) {
		if strings.Contains(parts["os"], "Win") {
			isolate = relpath("perf_skia_bundled_win.isolate")
		} else {
			isolate = relpath("perf_skia_bundled_unix.isolate")
		}
	}
	// Baseline spec; several fields are adjusted below for special configs.
	s := &specs.TaskSpec{
		CipdPackages:     pkgs,
		Dependencies:     []string{compileTaskName},
		Dimensions:       swarmDimensions(parts),
		ExecutionTimeout: 4 * time.Hour,
		Expiration:       20 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", recipe,
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout:   40 * time.Minute,
		Isolate:     isolate,
		MaxAttempts: 1,
		Priority:    0.8,
	}
	if useBundledRecipes(parts) {
		s.Dependencies = append(s.Dependencies, BUNDLE_RECIPES_NAME)
	}
	// Benchmark inputs delivered as isolated CIPD assets.
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		s.Dependencies = append(s.Dependencies, deps...)
	}

	// Timeout/package tweaks for known-slow configurations. The order
	// matters: Valgrind takes precedence over MSAN, which takes precedence
	// over x86/Debug.
	if strings.Contains(parts["extra_config"], "Valgrind") {
		s.ExecutionTimeout = 9 * time.Hour
		s.Expiration = 48 * time.Hour
		s.IoTimeout = time.Hour
		s.CipdPackages = append(s.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		s.Dimensions = append(s.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		s.ExecutionTimeout = 9 * time.Hour
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		s.ExecutionTimeout = 6 * time.Hour
	}
	// Internal-lab devices are selected via an extra recipe property.
	iid := internalHardwareLabel(parts)
	if iid != nil {
		s.ExtraArgs = append(s.ExtraArgs, fmt.Sprintf("internal_hardware_label=%d", *iid))
	}
	b.MustAddTask(name, s)

	// Upload results if necessary.
	if strings.Contains(name, "Release") && doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		b.MustAddTask(uploadName, &specs.TaskSpec{
			Dependencies: []string{name},
			Dimensions:   linuxGceDimensions(),
			ExtraArgs: []string{
				"--workdir", "../../..", "upload_nano_results",
				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
				fmt.Sprintf("buildername=%s", name),
				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
				fmt.Sprintf("patch_repo=%s", specs.PLACEHOLDER_PATCH_REPO),
				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
				fmt.Sprintf("gs_bucket=%s", CONFIG.GsBucketNano),
			},
			Isolate:  relpath("upload_nano_results.isolate"),
			Priority: 0.8,
		})
		// The Job should depend on the upload task, not the Perf task itself.
		return uploadName
	}
	return name
}
   1158 
// process generates tasks and jobs for the given job name.
func process(b *specs.TasksCfgBuilder, name string) {
	// deps accumulates the names of the tasks the Job will depend on.
	deps := []string{}

	// Bundle Recipes.
	if name == BUNDLE_RECIPES_NAME {
		deps = append(deps, bundleRecipes(b))
	}

	// Isolate CIPD assets.
	if _, ok := ISOLATE_ASSET_MAPPING[name]; ok {
		deps = append(deps, isolateCIPDAsset(b, name))
	}

	// Split the job name into its schema-defined parts (role, os, etc.).
	parts, err := jobNameSchema.ParseJobName(name)
	if err != nil {
		glog.Fatal(err)
	}

	// RecreateSKPs.
	if strings.Contains(name, "RecreateSKPs") {
		deps = append(deps, recreateSKPs(b, name))
	}

	// UpdateMetaConfig bot.
	if strings.Contains(name, "UpdateMetaConfig") {
		deps = append(deps, updateMetaConfig(b, name))
	}

	// CT bots.
	if strings.Contains(name, "-CT_") {
		deps = append(deps, ctSKPs(b, name))
	}

	// Infra tests.
	if name == "Housekeeper-PerCommit-InfraTests" {
		deps = append(deps, infra(b, name))
	}

	// Compile bots.
	if parts["role"] == "Build" {
		if parts["extra_config"] == "Android_Framework" {
			// Android Framework compile tasks use a different recipe.
			deps = append(deps, androidFrameworkCompile(b, name))
		} else {
			deps = append(deps, compile(b, name, parts))
		}
	}

	// Most remaining bots need a compile task.
	compileTaskName := deriveCompileTaskName(name, parts)
	compileTaskParts, err := jobNameSchema.ParseJobName(compileTaskName)
	if err != nil {
		glog.Fatal(err)
	}
	// Calmbench bots additionally compile at the parent revision so results
	// can be compared.
	compileParentName := getParentRevisionName(compileTaskName, compileTaskParts)
	compileParentParts, err := jobNameSchema.ParseJobName(compileParentName)
	if err != nil {
		glog.Fatal(err)
	}

	// These bots do not need a compile task. Any job not in this exclusion
	// list gets its compile task(s) added here.
	if parts["role"] != "Build" &&
		name != "Housekeeper-PerCommit-BundleRecipes" &&
		name != "Housekeeper-PerCommit-InfraTests" &&
		name != "Housekeeper-PerCommit-CheckGeneratedFiles" &&
		!strings.Contains(name, "Android_Framework") &&
		!strings.Contains(name, "RecreateSKPs") &&
		!strings.Contains(name, "UpdateMetaConfig") &&
		!strings.Contains(name, "-CT_") &&
		!strings.Contains(name, "Housekeeper-PerCommit-Isolate") {
		compile(b, compileTaskName, compileTaskParts)
		if parts["role"] == "Calmbench" {
			compile(b, compileParentName, compileParentParts)
		}
	}

	// Housekeepers.
	if name == "Housekeeper-PerCommit" {
		deps = append(deps, housekeeper(b, name, compileTaskName))
	}
	if name == "Housekeeper-PerCommit-CheckGeneratedFiles" {
		deps = append(deps, checkGeneratedFiles(b, name))
	}
	if strings.Contains(name, "Bookmaker") {
		deps = append(deps, bookmaker(b, name, compileTaskName))
	}

	// Common assets needed by the remaining bots.

	pkgs := []*specs.CipdPackage{}

	// If the assets are not delivered as isolated CIPD deps, fall back to
	// plain CIPD packages.
	if deps := getIsolatedCIPDDeps(parts); len(deps) == 0 {
		pkgs = []*specs.CipdPackage{
			b.MustGetCipdPackageFromAsset("skimage"),
			b.MustGetCipdPackageFromAsset("skp"),
			b.MustGetCipdPackageFromAsset("svg"),
		}
	}

	// Linux-only extras: sanitizer toolchain, Vulkan SDK, and Intel Vulkan
	// drivers (release or debug to match the build configuration).
	if strings.Contains(name, "Ubuntu") || strings.Contains(name, "Debian") {
		if strings.Contains(name, "SAN") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
		if strings.Contains(name, "Intel") && strings.Contains(name, "GPU") {
			if strings.Contains(name, "Release") {
				pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_intel_driver_release"))
			} else {
				pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_intel_driver_debug"))
			}
		}
	}

	// Test bots.

	if parts["role"] == "Test" {
		if strings.Contains(parts["extra_config"], "Coverage") {
			deps = append(deps, coverage(b, name, parts, compileTaskName, pkgs))
		} else if !strings.Contains(name, "-CT_") {
			deps = append(deps, test(b, name, parts, compileTaskName, pkgs))
		}

	}

	// Perf bots.
	if parts["role"] == "Perf" && !strings.Contains(name, "-CT_") {
		deps = append(deps, perf(b, name, parts, compileTaskName, pkgs))
	}

	// Calmbench bots.
	if parts["role"] == "Calmbench" {
		deps = append(deps, calmbench(b, name, parts, compileTaskName, compileParentName))
	}

	// Add the Job spec. Trigger defaults to every branch; nightly/weekly/
	// master-only jobs are special-cased by name below.
	j := &specs.JobSpec{
		Priority:  0.8,
		TaskSpecs: deps,
		Trigger:   specs.TRIGGER_ANY_BRANCH,
	}
	if strings.Contains(name, "-Nightly-") {
		j.Trigger = specs.TRIGGER_NIGHTLY
	} else if strings.Contains(name, "-Weekly-") || strings.Contains(name, "CT_DM_1m_SKPs") {
		j.Trigger = specs.TRIGGER_WEEKLY
	} else if strings.Contains(name, "Flutter") || strings.Contains(name, "PDFium") || strings.Contains(name, "CommandBuffer") {
		j.Trigger = specs.TRIGGER_MASTER_ONLY
	}
	b.MustAddJob(name, j)
}
   1311 
   1312 func loadJson(flag *string, defaultFlag string, val interface{}) {
   1313 	if *flag == "" {
   1314 		*flag = defaultFlag
   1315 	}
   1316 	b, err := ioutil.ReadFile(*flag)
   1317 	if err != nil {
   1318 		glog.Fatal(err)
   1319 	}
   1320 	if err := json.Unmarshal(b, val); err != nil {
   1321 		glog.Fatal(err)
   1322 	}
   1323 }
   1324 
// Regenerate the tasks.json file.
func main() {
	// NOTE(review): MustNewTasksCfgBuilder appears to handle flag parsing and
	// checkout discovery internally — confirm against the specs package.
	b := specs.MustNewTasksCfgBuilder()
	b.SetAssetsDir(*assetsDir)
	infraBots := path.Join(b.CheckoutRoot(), "infra", "bots")

	// Load the jobs from a JSON file.
	loadJson(jobsFile, path.Join(infraBots, "jobs.json"), &JOBS)

	// Load general config information from a JSON file.
	loadJson(cfgFile, path.Join(infraBots, "cfg.json"), &CONFIG)

	// Create the JobNameSchema, defaulting to the schema file checked in
	// under the builder_name_schema recipe module.
	if *builderNameSchemaFile == "" {
		*builderNameSchemaFile = path.Join(b.CheckoutRoot(), "infra", "bots", "recipe_modules", "builder_name_schema", "builder_name_schema.json")
	}
	schema, err := NewJobNameSchema(*builderNameSchemaFile)
	if err != nil {
		glog.Fatal(err)
	}
	jobNameSchema = schema

	// Create Tasks and Jobs.
	for _, name := range JOBS {
		process(b, name)
	}

	b.MustFinish()
}
   1354 
   1355 // TODO(borenet): The below really belongs in its own file, probably next to the
   1356 // builder_name_schema.json file.
   1357 
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
	// Schema maps each role (the first segment of a Job name) to the
	// ordered list of keys for that role's remaining name segments.
	Schema map[string][]string `json:"builder_name_schema"`

	// Sep is the string which separates the segments of a Job name.
	Sep string `json:"builder_name_sep"`
}
   1364 
   1365 // NewJobNameSchema returns a JobNameSchema instance based on the given JSON
   1366 // file.
   1367 func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
   1368 	var rv JobNameSchema
   1369 	f, err := os.Open(jsonFile)
   1370 	if err != nil {
   1371 		return nil, err
   1372 	}
   1373 	defer util.Close(f)
   1374 	if err := json.NewDecoder(f).Decode(&rv); err != nil {
   1375 		return nil, err
   1376 	}
   1377 	return &rv, nil
   1378 }
   1379 
   1380 // ParseJobName splits the given Job name into its component parts, according
   1381 // to the schema.
   1382 func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
   1383 	split := strings.Split(n, s.Sep)
   1384 	if len(split) < 2 {
   1385 		return nil, fmt.Errorf("Invalid job name: %q", n)
   1386 	}
   1387 	role := split[0]
   1388 	split = split[1:]
   1389 	keys, ok := s.Schema[role]
   1390 	if !ok {
   1391 		return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
   1392 	}
   1393 	extraConfig := ""
   1394 	if len(split) == len(keys)+1 {
   1395 		extraConfig = split[len(split)-1]
   1396 		split = split[:len(split)-1]
   1397 	}
   1398 	if len(split) != len(keys) {
   1399 		return nil, fmt.Errorf("Invalid job name; %q has incorrect number of parts.", n)
   1400 	}
   1401 	rv := make(map[string]string, len(keys)+2)
   1402 	rv["role"] = role
   1403 	if extraConfig != "" {
   1404 		rv["extra_config"] = extraConfig
   1405 	}
   1406 	for i, k := range keys {
   1407 		rv[k] = split[i]
   1408 	}
   1409 	return rv, nil
   1410 }
   1411 
   1412 // MakeJobName assembles the given parts of a Job name, according to the schema.
   1413 func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
   1414 	role, ok := parts["role"]
   1415 	if !ok {
   1416 		return "", fmt.Errorf("Invalid job parts; jobs must have a role.")
   1417 	}
   1418 	keys, ok := s.Schema[role]
   1419 	if !ok {
   1420 		return "", fmt.Errorf("Invalid job parts; unknown role %q", role)
   1421 	}
   1422 	rvParts := make([]string, 0, len(parts))
   1423 	rvParts = append(rvParts, role)
   1424 	for _, k := range keys {
   1425 		v, ok := parts[k]
   1426 		if !ok {
   1427 			return "", fmt.Errorf("Invalid job parts; missing %q", k)
   1428 		}
   1429 		rvParts = append(rvParts, v)
   1430 	}
   1431 	if _, ok := parts["extra_config"]; ok {
   1432 		rvParts = append(rvParts, parts["extra_config"])
   1433 	}
   1434 	return strings.Join(rvParts, s.Sep), nil
   1435 }
   1436