• Home
  • History
  • Annotate
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Copyright 2016 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5package main
6
7/*
8	Generate the tasks.json file.
9*/
10
11import (
12	"encoding/json"
13	"flag"
14	"fmt"
15	"io/ioutil"
16	"os"
17	"path"
18	"regexp"
19	"sort"
20	"strings"
21	"time"
22
23	"github.com/skia-dev/glog"
24	"go.skia.org/infra/go/util"
25	"go.skia.org/infra/task_scheduler/go/specs"
26)
27
const (
	// DEFAULT_OS is the Swarming OS dimension used when a job's parts do
	// not specify an OS.
	DEFAULT_OS       = DEFAULT_OS_LINUX
	DEFAULT_OS_LINUX = "Ubuntu-14.04"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"
)
35
var (
	// "Constants"

	// Top-level list of all jobs to run at each commit; loaded from
	// jobs.json.
	JOBS []string

	// Mapping of human-friendly Android device names to a pair of {device_type, device_os}.
	ANDROID_MAPPING map[string][]string

	// General configuration information; loaded from cfg.json.
	CONFIG struct {
		GsBucketGm   string   `json:"gs_bucket_gm"`
		GsBucketNano string   `json:"gs_bucket_nano"`
		NoUpload     []string `json:"no_upload"`
		Pool         string   `json:"pool"`
	}

	// Mapping of human-friendly GPU names to PCI IDs.
	GPU_MAPPING map[string]string

	// Defines the structure of job names; initialized in main().
	jobNameSchema *JobNameSchema

	// Flags.
	androidMapFile        = flag.String("android_map", "", "JSON file containing a mapping of human-friendly Android device names to a pair of {device_type, device_os}.")
	builderNameSchemaFile = flag.String("builder_name_schema", "", "Path to the builder_name_schema.json file. If not specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json from this repo.")
	assetsDir             = flag.String("assets_dir", "", "Directory containing assets.")
	cfgFile               = flag.String("cfg_file", "", "JSON file containing general configuration information.")
	gpuMapFile            = flag.String("gpu_map", "", "JSON file containing a mapping of human-friendly GPU names to PCI IDs.")
	jobsFile              = flag.String("jobs", "", "JSON file containing jobs to run.")
)
68
69// linuxGceDimensions are the Swarming dimensions for Linux GCE
70// instances.
71func linuxGceDimensions() []string {
72	return []string{
73		"cpu:x86-64-avx2",
74		"gpu:none",
75		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX),
76		fmt.Sprintf("pool:%s", CONFIG.Pool),
77	}
78}
79
80// deriveCompileTaskName returns the name of a compile task based on the given
81// job name.
82func deriveCompileTaskName(jobName string, parts map[string]string) string {
83	if parts["role"] == "Housekeeper" {
84		return "Build-Ubuntu-GCC-x86_64-Release-Shared"
85	} else if parts["role"] == "Test" || parts["role"] == "Perf" {
86		task_os := parts["os"]
87		ec := parts["extra_config"]
88		ec = strings.TrimSuffix(ec, "_Skpbench")
89		ec = strings.TrimSuffix(ec, "_AbandonGpuContext")
90		ec = strings.TrimSuffix(ec, "_PreAbandonGpuContext")
91		if ec == "Valgrind" {
92			// skia:6267
93			ec = ""
94		}
95		if task_os == "Android" {
96			if ec == "Vulkan" {
97				ec = "Android_Vulkan"
98			}
99			task_os = "Ubuntu"
100		} else if task_os == "Chromecast" {
101			task_os = "Ubuntu"
102			ec = "Chromecast"
103		} else if task_os == "iOS" {
104			ec = task_os
105			task_os = "Mac"
106		} else if strings.Contains(task_os, "Win") {
107			task_os = "Win"
108		} else if strings.Contains(task_os, "Ubuntu") {
109			task_os = "Ubuntu"
110		}
111		jobNameMap := map[string]string{
112			"role":          "Build",
113			"os":            task_os,
114			"compiler":      parts["compiler"],
115			"target_arch":   parts["arch"],
116			"configuration": parts["configuration"],
117		}
118		if ec != "" {
119			jobNameMap["extra_config"] = ec
120		}
121		name, err := jobNameSchema.MakeJobName(jobNameMap)
122		if err != nil {
123			glog.Fatal(err)
124		}
125		return name
126	} else {
127		return jobName
128	}
129}
130
// swarmDimensions generates swarming bot dimensions for the given task,
// returned as sorted "key:value" strings. Test/Perf tasks are targeted by
// device/CPU/GPU; all other tasks run on CPU-only bots.
func swarmDimensions(parts map[string]string) []string {
	// Accumulate dimensions as a key/value map; flattened at the end.
	d := map[string]string{
		"pool": CONFIG.Pool,
	}
	if os, ok := parts["os"]; ok {
		// Map the human-friendly OS name onto the concrete Swarming
		// OS dimension value. An unknown OS yields an empty string.
		d["os"] = map[string]string{
			"Android":    "Android",
			"Chromecast": "Android",
			"Mac":        "Mac-10.11",
			"Ubuntu":     DEFAULT_OS_LINUX,
			"Ubuntu16":   "Ubuntu-16.10",
			"Win":        "Windows-2008ServerR2-SP1",
			"Win10":      "Windows-10-14393",
			"Win2k8":     "Windows-2008ServerR2-SP1",
			"Win8":       "Windows-8.1-SP0",
			"iOS":        "iOS-9.3.1",
		}[os]
		// Chrome Golo has a different Windows image.
		if parts["model"] == "Golo" && os == "Win10" {
			d["os"] = "Windows-10-10586"
		}
	} else {
		d["os"] = DEFAULT_OS
	}
	if parts["role"] == "Test" || parts["role"] == "Perf" {
		if strings.Contains(parts["os"], "Android") || strings.Contains(parts["os"], "Chromecast") {
			// For Android, the device type is a better dimension
			// than CPU or GPU.
			deviceInfo, ok := ANDROID_MAPPING[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in Android mapping: %v", parts["model"], ANDROID_MAPPING)
			}
			d["device_type"] = deviceInfo[0]
			d["device_os"] = deviceInfo[1]
		} else if strings.Contains(parts["os"], "iOS") {
			// iOS bots are targeted by device model rather than
			// CPU/GPU. An unknown model yields an empty string.
			d["device"] = map[string]string{
				"iPadMini4": "iPad5,1",
			}[parts["model"]]
		} else if parts["cpu_or_gpu"] == "CPU" {
			// CPU tasks: pin the CPU dimension and exclude GPU bots.
			d["gpu"] = "none"
			d["cpu"] = map[string]string{
				"AVX":  "x86-64",
				"AVX2": "x86-64-avx2",
				"SSE4": "x86-64",
			}[parts["cpu_or_gpu_value"]]
			if strings.Contains(parts["os"], "Win") && parts["cpu_or_gpu_value"] == "AVX2" {
				// AVX2 is not correctly detected on Windows. Fall back on other
				// dimensions to ensure that we correctly target machines which we know
				// have AVX2 support.
				d["cpu"] = "x86-64"
				d["os"] = "Windows-2008ServerR2-SP1"
			}
		} else {
			// GPU tasks: map the human-friendly GPU name onto its PCI ID.
			gpu, ok := GPU_MAPPING[parts["cpu_or_gpu_value"]]
			if !ok {
				glog.Fatalf("Entry %q not found in GPU mapping: %v", parts["cpu_or_gpu_value"], GPU_MAPPING)
			}
			d["gpu"] = gpu

			// Hack: Specify machine_type dimension for NUCs and ShuttleCs. We
			// temporarily have two types of machines with a GTX960. The only way to
			// distinguish these bots is by machine_type.
			machine_type, ok := map[string]string{
				"NUC6i7KYK": "n1-highcpu-8",
				"ShuttleC":  "n1-standard-8",
			}[parts["model"]]
			if ok {
				d["machine_type"] = machine_type
			}
		}
	} else {
		// Non-Test/Perf tasks never need a GPU; Linux ones run on GCE.
		d["gpu"] = "none"
		if d["os"] == DEFAULT_OS_LINUX {
			return linuxGceDimensions()
		}
	}

	// Flatten the map into sorted "key:value" strings.
	rv := make([]string, 0, len(d))
	for k, v := range d {
		rv = append(rv, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(rv)
	return rv
}
216
// compile generates a compile task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func compile(b *specs.TasksCfgBuilder, name string, parts map[string]string) string {
	// Collect the necessary CIPD packages.
	pkgs := []*specs.CipdPackage{}

	// Android bots require a toolchain.
	if strings.Contains(name, "Android") {
		if strings.Contains(name, "Mac") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("android_ndk_darwin"))
		} else if strings.Contains(name, "Win") {
			pkg := b.MustGetCipdPackageFromAsset("android_ndk_windows")
			// NOTE(review): the short install path presumably works
			// around Windows path-length limits — confirm.
			pkg.Path = "n"
			pkgs = append(pkgs, pkg)
		} else {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("android_ndk_linux"))
		}
	} else if strings.Contains(name, "Chromecast") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("cast_toolchain"))
	} else if strings.Contains(name, "Ubuntu") {
		if strings.Contains(name, "Clang") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
	} else if strings.Contains(name, "Win") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("win_toolchain"))
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("win_vulkan_sdk"))
		}
	}

	// TODO(stephana): Remove this once all Mac machines are on the same
	// OS version again. Move the call to swarmDimensions back to the
	// creation of the TaskSpec struct below.
	dimensions := swarmDimensions(parts)
	if strings.Contains(name, "Mac") {
		// Force Mac compile bots onto the newer OS image.
		for idx, dim := range dimensions {
			if strings.HasPrefix(dim, "os") {
				dimensions[idx] = "os:Mac-10.12"
				break
			}
		}
	}

	// Add the task.
	b.MustAddTask(name, &specs.TaskSpec{
		CipdPackages: pkgs,
		Dimensions:   dimensions,
		ExtraArgs: []string{
			"--workdir", "../../..", "swarm_compile",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			"mastername=fake-master",
			"buildnumber=2",
			"slavename=fake-buildslave",
			"nobuildbot=True",
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		Isolate:  "compile_skia.isolate",
		Priority: 0.8,
	})
	// All compile tasks are runnable as their own Job. Assert that the Job
	// is listed in JOBS.
	if !util.In(name, JOBS) {
		glog.Fatalf("Job %q is missing from the JOBS list!", name)
	}
	return name
}
291
292// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
293// task in the generated chain of tasks, which the Job should add as a
294// dependency.
295func recreateSKPs(b *specs.TasksCfgBuilder, name string) string {
296	b.MustAddTask(name, &specs.TaskSpec{
297		CipdPackages:     []*specs.CipdPackage{},
298		Dimensions:       linuxGceDimensions(),
299		ExecutionTimeout: 4 * time.Hour,
300		ExtraArgs: []string{
301			"--workdir", "../../..", "swarm_RecreateSKPs",
302			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
303			fmt.Sprintf("buildername=%s", name),
304			"mastername=fake-master",
305			"buildnumber=2",
306			"slavename=fake-buildslave",
307			"nobuildbot=True",
308			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
309			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
310			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
311			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
312			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
313		},
314		IoTimeout: 40 * time.Minute,
315		Isolate:   "compile_skia.isolate",
316		Priority:  0.8,
317	})
318	return name
319}
320
321// ctSKPs generates a CT SKPs task. Returns the name of the last task in the
322// generated chain of tasks, which the Job should add as a dependency.
323func ctSKPs(b *specs.TasksCfgBuilder, name string) string {
324	b.MustAddTask(name, &specs.TaskSpec{
325		CipdPackages:     []*specs.CipdPackage{},
326		Dimensions:       []string{"pool:SkiaCT"},
327		ExecutionTimeout: 24 * time.Hour,
328		ExtraArgs: []string{
329			"--workdir", "../../..", "swarm_ct_skps",
330			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
331			fmt.Sprintf("buildername=%s", name),
332			"mastername=fake-master",
333			"buildnumber=2",
334			"slavename=fake-buildslave",
335			"nobuildbot=True",
336			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
337			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
338			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
339			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
340			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
341		},
342		IoTimeout: time.Hour,
343		Isolate:   "ct_skps_skia.isolate",
344		Priority:  0.8,
345	})
346	return name
347}
348
349// housekeeper generates a Housekeeper task. Returns the name of the last task
350// in the generated chain of tasks, which the Job should add as a dependency.
351func housekeeper(b *specs.TasksCfgBuilder, name, compileTaskName string) string {
352	b.MustAddTask(name, &specs.TaskSpec{
353		CipdPackages: []*specs.CipdPackage{b.MustGetCipdPackageFromAsset("go")},
354		Dependencies: []string{compileTaskName},
355		Dimensions:   linuxGceDimensions(),
356		ExtraArgs: []string{
357			"--workdir", "../../..", "swarm_housekeeper",
358			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
359			fmt.Sprintf("buildername=%s", name),
360			"mastername=fake-master",
361			"buildnumber=2",
362			"slavename=fake-buildslave",
363			"nobuildbot=True",
364			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
365			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
366			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
367			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
368			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
369		},
370		Isolate:  "housekeeper_skia.isolate",
371		Priority: 0.8,
372	})
373	return name
374}
375
376// infra generates an infra_tests task. Returns the name of the last task in the
377// generated chain of tasks, which the Job should add as a dependency.
378func infra(b *specs.TasksCfgBuilder, name string) string {
379	b.MustAddTask(name, &specs.TaskSpec{
380		CipdPackages: []*specs.CipdPackage{},
381		Dimensions:   linuxGceDimensions(),
382		ExtraArgs: []string{
383			"--workdir", "../../..", "swarm_infra",
384			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
385			fmt.Sprintf("buildername=%s", name),
386			"mastername=fake-master",
387			"buildnumber=2",
388			"slavename=fake-buildslave",
389			"nobuildbot=True",
390			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
391			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
392			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
393			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
394			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
395		},
396		Isolate:  "infra_skia.isolate",
397		Priority: 0.8,
398	})
399	return name
400}
401
402// doUpload indicates whether the given Job should upload its results.
403func doUpload(name string) bool {
404	for _, s := range CONFIG.NoUpload {
405		m, err := regexp.MatchString(s, name)
406		if err != nil {
407			glog.Fatal(err)
408		}
409		if m {
410			return false
411		}
412	}
413	return true
414}
415
// test generates a Test task, plus an Upload task for its results when
// applicable. Returns the name of the last task in the generated chain of
// tasks, which the Job should add as a dependency.
func test(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	s := &specs.TaskSpec{
		CipdPackages:     pkgs,
		Dependencies:     []string{compileTaskName},
		Dimensions:       swarmDimensions(parts),
		ExecutionTimeout: 4 * time.Hour,
		Expiration:       20 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", "swarm_test",
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			"mastername=fake-master",
			"buildnumber=2",
			"slavename=fake-buildslave",
			"nobuildbot=True",
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout:   40 * time.Minute,
		Isolate:     "test_skia.isolate",
		MaxAttempts: 1,
		Priority:    0.8,
	}
	// Valgrind and MSAN runs are much slower; give them longer timeouts.
	if strings.Contains(parts["extra_config"], "Valgrind") {
		s.ExecutionTimeout = 9 * time.Hour
		s.Expiration = 48 * time.Hour
		s.IoTimeout = time.Hour
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		s.ExecutionTimeout = 9 * time.Hour
	}
	b.MustAddTask(name, s)

	// Upload results if necessary.
	if doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		// The upload task depends on the test task and runs on a Linux
		// GCE bot, uploading DM results to the GM bucket.
		b.MustAddTask(uploadName, &specs.TaskSpec{
			Dependencies: []string{name},
			Dimensions:   linuxGceDimensions(),
			ExtraArgs: []string{
				"--workdir", "../../..", "upload_dm_results",
				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
				fmt.Sprintf("buildername=%s", name),
				"mastername=fake-master",
				"buildnumber=2",
				"slavename=fake-buildslave",
				"nobuildbot=True",
				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
				fmt.Sprintf("gs_bucket=%s", CONFIG.GsBucketGm),
			},
			Isolate:  "upload_dm_results.isolate",
			Priority: 0.8,
		})
		return uploadName
	}
	return name
}
481
// perf generates a Perf task, plus an Upload task for its results when
// applicable. Returns the name of the last task in the generated chain of
// tasks, which the Job should add as a dependency.
func perf(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Skpbench jobs use a dedicated recipe and isolate.
	recipe := "swarm_perf"
	isolate := "perf_skia.isolate"
	if strings.Contains(parts["extra_config"], "Skpbench") {
		recipe = "swarm_skpbench"
		isolate = "skpbench_skia.isolate"
	}
	s := &specs.TaskSpec{
		CipdPackages:     pkgs,
		Dependencies:     []string{compileTaskName},
		Dimensions:       swarmDimensions(parts),
		ExecutionTimeout: 4 * time.Hour,
		Expiration:       20 * time.Hour,
		ExtraArgs: []string{
			"--workdir", "../../..", recipe,
			fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
			fmt.Sprintf("buildername=%s", name),
			"mastername=fake-master",
			"buildnumber=2",
			"slavename=fake-buildslave",
			"nobuildbot=True",
			fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
			fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
			fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
			fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
			fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
		},
		IoTimeout:   40 * time.Minute,
		Isolate:     isolate,
		MaxAttempts: 1,
		Priority:    0.8,
	}
	// Valgrind and MSAN runs are much slower; give them longer timeouts.
	if strings.Contains(parts["extra_config"], "Valgrind") {
		s.ExecutionTimeout = 9 * time.Hour
		s.Expiration = 48 * time.Hour
		s.IoTimeout = time.Hour
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		s.ExecutionTimeout = 9 * time.Hour
	}
	b.MustAddTask(name, s)

	// Upload results if necessary. Only Release results are uploaded.
	if strings.Contains(name, "Release") && doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		// The upload task depends on the perf task and runs on a Linux
		// GCE bot, uploading nanobench results to the nano bucket.
		b.MustAddTask(uploadName, &specs.TaskSpec{
			Dependencies: []string{name},
			Dimensions:   linuxGceDimensions(),
			ExtraArgs: []string{
				"--workdir", "../../..", "upload_nano_results",
				fmt.Sprintf("repository=%s", specs.PLACEHOLDER_REPO),
				fmt.Sprintf("buildername=%s", name),
				"mastername=fake-master",
				"buildnumber=2",
				"slavename=fake-buildslave",
				"nobuildbot=True",
				fmt.Sprintf("swarm_out_dir=%s", specs.PLACEHOLDER_ISOLATED_OUTDIR),
				fmt.Sprintf("revision=%s", specs.PLACEHOLDER_REVISION),
				fmt.Sprintf("patch_storage=%s", specs.PLACEHOLDER_PATCH_STORAGE),
				fmt.Sprintf("patch_issue=%s", specs.PLACEHOLDER_ISSUE),
				fmt.Sprintf("patch_set=%s", specs.PLACEHOLDER_PATCHSET),
				fmt.Sprintf("gs_bucket=%s", CONFIG.GsBucketNano),
			},
			Isolate:  "upload_nano_results.isolate",
			Priority: 0.8,
		})
		return uploadName
	}
	return name
}
553
// process generates tasks and jobs for the given job name and registers them
// with the builder. Exits the program via glog.Fatal if the name cannot be
// parsed against the job name schema.
func process(b *specs.TasksCfgBuilder, name string) {
	// Names of the tasks the Job depends on directly.
	deps := []string{}

	parts, err := jobNameSchema.ParseJobName(name)
	if err != nil {
		glog.Fatal(err)
	}

	// RecreateSKPs.
	if strings.Contains(name, "RecreateSKPs") {
		deps = append(deps, recreateSKPs(b, name))
	}

	// CT bots.
	if strings.Contains(name, "-CT_") {
		deps = append(deps, ctSKPs(b, name))
	}

	// Infra tests.
	if name == "Housekeeper-PerCommit-InfraTests" {
		deps = append(deps, infra(b, name))
	}

	// Compile bots.
	if parts["role"] == "Build" {
		deps = append(deps, compile(b, name, parts))
	}

	// Most remaining bots need a compile task.
	compileTaskName := deriveCompileTaskName(name, parts)
	compileTaskParts, err := jobNameSchema.ParseJobName(compileTaskName)
	if err != nil {
		glog.Fatal(err)
	}
	// These bots do not need a compile task.
	if parts["role"] != "Build" &&
		name != "Housekeeper-PerCommit-InfraTests" &&
		!strings.Contains(name, "RecreateSKPs") &&
		!strings.Contains(name, "-CT_") {
		compile(b, compileTaskName, compileTaskParts)
	}

	// Housekeeper.
	if name == "Housekeeper-PerCommit" {
		deps = append(deps, housekeeper(b, name, compileTaskName))
	}

	// Common assets needed by the remaining bots.
	pkgs := []*specs.CipdPackage{
		b.MustGetCipdPackageFromAsset("skimage"),
		b.MustGetCipdPackageFromAsset("skp"),
		b.MustGetCipdPackageFromAsset("svg"),
	}
	if strings.Contains(name, "Chromecast") {
		// Chromecasts don't have enough disk space to fit all of the content,
		// so we do a subset of the skps.
		pkgs = []*specs.CipdPackage{
			b.MustGetCipdPackageFromAsset("skp"),
		}
	}
	if strings.Contains(name, "Ubuntu") && strings.Contains(name, "SAN") {
		// Sanitizer bots need the Clang toolchain at runtime.
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
	}
	if strings.Contains(name, "Ubuntu16") {
		// Ubuntu 16 bots need the Vulkan SDK and the Intel driver
		// matching their build configuration.
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
		if strings.Contains(name, "Release") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_intel_driver_release"))
		} else {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_intel_driver_debug"))
		}
	}
	// Skpbench only needs skps
	if strings.Contains(name, "Skpbench") {
		pkgs = []*specs.CipdPackage{
			b.MustGetCipdPackageFromAsset("skp"),
		}
	}

	// Test bots.
	if parts["role"] == "Test" && !strings.Contains(name, "-CT_") {
		deps = append(deps, test(b, name, parts, compileTaskName, pkgs))
	}

	// Perf bots.
	if parts["role"] == "Perf" && !strings.Contains(name, "-CT_") {
		deps = append(deps, perf(b, name, parts, compileTaskName, pkgs))
	}

	// Add the Job spec.
	j := &specs.JobSpec{
		Priority:  0.8,
		TaskSpecs: deps,
	}
	// A few jobs run on a schedule rather than at every commit.
	if name == "Housekeeper-Nightly-RecreateSKPs_Canary" {
		j.Trigger = "nightly"
	}
	if name == "Housekeeper-Weekly-RecreateSKPs" {
		j.Trigger = "weekly"
	}
	if name == "Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug-CT_DM_1m_SKPs" {
		j.Trigger = "weekly"
	}
	b.MustAddJob(name, j)
}
661
662func loadJson(flag *string, defaultFlag string, val interface{}) {
663	if *flag == "" {
664		*flag = defaultFlag
665	}
666	b, err := ioutil.ReadFile(*flag)
667	if err != nil {
668		glog.Fatal(err)
669	}
670	if err := json.Unmarshal(b, val); err != nil {
671		glog.Fatal(err)
672	}
673}
674
// Regenerate the tasks.json file.
func main() {
	b := specs.MustNewTasksCfgBuilder()
	b.SetAssetsDir(*assetsDir)
	// Default locations for the input JSON files live under infra/bots.
	infraBots := path.Join(b.CheckoutRoot(), "infra", "bots")

	// Load the jobs from a JSON file.
	loadJson(jobsFile, path.Join(infraBots, "jobs.json"), &JOBS)

	// Load the GPU mapping from a JSON file.
	loadJson(gpuMapFile, path.Join(infraBots, "gpu_map.json"), &GPU_MAPPING)

	// Load the Android device mapping from a JSON file.
	loadJson(androidMapFile, path.Join(infraBots, "android_map.json"), &ANDROID_MAPPING)

	// Load general config information from a JSON file.
	loadJson(cfgFile, path.Join(infraBots, "cfg.json"), &CONFIG)

	// Create the JobNameSchema.
	if *builderNameSchemaFile == "" {
		*builderNameSchemaFile = path.Join(b.CheckoutRoot(), "infra", "bots", "recipe_modules", "builder_name_schema", "builder_name_schema.json")
	}
	schema, err := NewJobNameSchema(*builderNameSchemaFile)
	if err != nil {
		glog.Fatal(err)
	}
	jobNameSchema = schema

	// Create Tasks and Jobs.
	for _, name := range JOBS {
		process(b, name)
	}

	b.MustFinish()
}
710
711// TODO(borenet): The below really belongs in its own file, probably next to the
712// builder_name_schema.json file.
713
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
	// Schema maps a role name to the ordered list of name-part keys
	// which follow the role in a job name.
	Schema map[string][]string `json:"builder_name_schema"`
	// Sep is the separator between job name parts.
	Sep string `json:"builder_name_sep"`
}
720
721// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
722// file.
723func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
724	var rv JobNameSchema
725	f, err := os.Open(jsonFile)
726	if err != nil {
727		return nil, err
728	}
729	defer util.Close(f)
730	if err := json.NewDecoder(f).Decode(&rv); err != nil {
731		return nil, err
732	}
733	return &rv, nil
734}
735
736// ParseJobName splits the given Job name into its component parts, according
737// to the schema.
738func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
739	split := strings.Split(n, s.Sep)
740	if len(split) < 2 {
741		return nil, fmt.Errorf("Invalid job name: %q", n)
742	}
743	role := split[0]
744	split = split[1:]
745	keys, ok := s.Schema[role]
746	if !ok {
747		return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
748	}
749	extraConfig := ""
750	if len(split) == len(keys)+1 {
751		extraConfig = split[len(split)-1]
752		split = split[:len(split)-1]
753	}
754	if len(split) != len(keys) {
755		return nil, fmt.Errorf("Invalid job name; %q has incorrect number of parts.", n)
756	}
757	rv := make(map[string]string, len(keys)+2)
758	rv["role"] = role
759	if extraConfig != "" {
760		rv["extra_config"] = extraConfig
761	}
762	for i, k := range keys {
763		rv[k] = split[i]
764	}
765	return rv, nil
766}
767
768// MakeJobName assembles the given parts of a Job name, according to the schema.
769func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
770	role, ok := parts["role"]
771	if !ok {
772		return "", fmt.Errorf("Invalid job parts; jobs must have a role.")
773	}
774	keys, ok := s.Schema[role]
775	if !ok {
776		return "", fmt.Errorf("Invalid job parts; unknown role %q", role)
777	}
778	rvParts := make([]string, 0, len(parts))
779	rvParts = append(rvParts, role)
780	for _, k := range keys {
781		v, ok := parts[k]
782		if !ok {
783			return "", fmt.Errorf("Invalid job parts; missing %q", k)
784		}
785		rvParts = append(rvParts, v)
786	}
787	if _, ok := parts["extra_config"]; ok {
788		rvParts = append(rvParts, parts["extra_config"])
789	}
790	return strings.Join(rvParts, s.Sep), nil
791}
792