
Searched refs:jobs (Results 1 – 25 of 213) sorted by relevance


/external/eclipse-basebuilder/basebuilder-3.6.2/org.eclipse.releng.basebuilder/plugins/
org.eclipse.core.jobs_3.5.1.R36x_v20100824.jar
... .jobs org.eclipse.core.internal.jobs.Deadlock extends java.lang.Object { private
/external/v8/tools/testrunner/objects/
peer.py
30 def __init__(self, address, jobs, rel_perf, pubkey): argument
32 self.jobs = jobs # integer: number of CPUs
44 (self.address, self.jobs, self.relative_performance,
74 return [self.address, self.jobs, self.relative_performance]
/external/glide/library/src/main/java/com/bumptech/glide/load/engine/
Engine.java
30 private final Map<Key, EngineJob> jobs; field in Engine
63 Map<Key, EngineJob> jobs, EngineKeyFactory keyFactory, in Engine() argument
79 if (jobs == null) { in Engine()
80 jobs = new HashMap<Key, EngineJob>(); in Engine()
82 this.jobs = jobs; in Engine()
177 EngineJob current = jobs.get(key); in load()
190 jobs.put(key, engineJob); in load()
237 jobs.remove(key); in onEngineJobComplete()
242 EngineJob current = jobs.get(key); in onEngineJobCancelled()
244 jobs.remove(key); in onEngineJobCancelled()
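
The Engine.java hits above show a common pattern: in-flight work is deduplicated through a map keyed by the request, and entries are dropped when a job completes or is cancelled. A minimal Python sketch of the same idea (names here are illustrative, not Glide's API):

    class JobRegistry:
        """Hypothetical in-flight job registry keyed by request."""
        def __init__(self):
            self.jobs = {}               # key -> running job

        def load(self, key, start_job):
            current = self.jobs.get(key)
            if current is not None:
                return current           # reuse the job already running for this key
            job = start_job()
            self.jobs[key] = job
            return job

        def on_job_complete(self, key):
            self.jobs.pop(key, None)

        def on_job_cancelled(self, key):
            self.jobs.pop(key, None)
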
/external/v8/tools/testrunner/server/
presence_handler.py
60 jobs = data[1]
64 response = [STARTUP_RESPONSE, self.server.daemon.jobs,
69 p = peer.Peer(self.client_address[0], jobs, relative_perf,
75 jobs = data[1]
78 p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
117 request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
/external/llvm/utils/lit/lit/
run.py
188 def execute_tests(self, display, jobs, max_time=None, argument
213 if jobs != 1 and use_processes and multiprocessing:
218 consumer = MultiprocessResultsConsumer(self, display, jobs)
232 provider = TestProvider(self.tests, jobs, queue_impl, canceled_flag)
249 if jobs == 1:
253 self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
264 def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs): argument
268 for i in range(jobs)]
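
run.py above picks a serial path when jobs == 1 and a multiprocessing-backed provider/consumer pair otherwise. A much-simplified Python sketch of that dispatch decision (not lit's actual classes):

    import multiprocessing

    def execute_tests(tests, run_one, jobs=1):
        # serial path when only one job is requested
        if jobs == 1:
            return [run_one(test) for test in tests]
        # otherwise fan the tests out over a pool of worker processes
        with multiprocessing.Pool(processes=jobs) as pool:
            return pool.map(run_one, tests)
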
/external/llvm/utils/
llvm-compilers-check
255 def __init__(self, work_queue, jobs, argument
260 self.jobs = jobs
405 llvm=dict(debug=["-j" + str(self.jobs)],
406 release=["-j" + str(self.jobs)],
407 paranoid=["-j" + str(self.jobs)]),
408 dragonegg=dict(debug=["-j" + str(self.jobs)],
409 release=["-j" + str(self.jobs)],
410 paranoid=["-j" + str(self.jobs)]))
597 jobs = options.jobs // options.threads variable
598 if jobs == 0:
[all …]
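
llvm-compilers-check divides the requested --jobs count by the number of builder threads and passes each builder's share to make as -j. The body of the zero check is cut off above; the sketch below assumes it rounds the share back up to one job, which is only an assumption:

    def per_builder_jobs(total_jobs, threads):
        jobs = total_jobs // threads
        # assumption: a zero share is bumped back to one job per builder
        return max(jobs, 1)

    # e.g. --jobs=8 over 3 builder threads gives each build "-j2"
    make_flags = ["-j" + str(per_builder_jobs(8, 3))]
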
/external/fonttools/Lib/fontTools/
ttx.py
264 jobs = []
288 jobs.append((action, input, output))
289 return jobs, options
292 def process(jobs, options): argument
293 for action, input, output in jobs:
307 jobs, options = parseOptions(args)
309 process(jobs, options)
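
ttx.py queues (action, input, output) tuples while parsing options and then replays them in process(). The same shape in a short sketch (the copy action is a stand-in, not fontTools code):

    def copy_file(input_path, output_path):
        # stand-in action: just copy the input to the output
        with open(input_path, "rb") as src, open(output_path, "wb") as dst:
            dst.write(src.read())

    def parse_options(paths):
        # queue one (action, input, output) job per input path
        return [(copy_file, path, path + ".out") for path in paths]

    def process(jobs):
        for action, input_path, output_path in jobs:
            action(input_path, output_path)
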
/external/llvm/lib/Fuzzer/
FuzzerFlags.def
42 FUZZER_FLAG_INT(jobs, 0, "Number of jobs to run. If jobs >= 1 we spawn"
43 " this number of jobs in separate worker processes"
46 "Number of simultaneous worker processes to run the jobs.")
FuzzerDriver.cpp
194 if (Flags.workers > 0 && Flags.jobs > 0) in FuzzerDriver()
195 return RunInMultipleProcesses(argc, argv, Flags.workers, Flags.jobs); in FuzzerDriver()
/external/smali/smali/src/main/java/org/jf/smali/
main.java
109 int jobs = -1; in main() local
149 jobs = Integer.parseInt(commandLine.getOptionValue("j")); in main()
184 if (jobs <= 0) { in main()
185 jobs = Runtime.getRuntime().availableProcessors(); in main()
186 if (jobs > 6) { in main()
187 jobs = 6; in main()
194 ExecutorService executor = Executors.newFixedThreadPool(jobs); in main()
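
smali's main() falls back to the number of available processors when -j is missing or non-positive, caps the value at 6, and sizes a fixed thread pool with it. The same logic translated to Python (a sketch, not the smali source):

    import os
    from concurrent.futures import ThreadPoolExecutor

    def make_executor(jobs=-1):
        if jobs <= 0:
            # default to the CPU count, capped at 6 as in the snippet above
            jobs = min(os.cpu_count() or 1, 6)
        return ThreadPoolExecutor(max_workers=jobs)
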
/external/vixl/tools/
lint.py
166 def LintFiles(files, lint_args = CPP_LINTER_RULES, jobs = 1, verbose = False, argument
169 pool = multiprocessing.Pool(jobs)
204 jobs = args.jobs, verbose = args.verbose)
presubmit.py
126 if args.jobs == 1:
129 (mode, std, args.simulator, args.jobs)
196 retcode = test.RunTests(manifest, jobs = args.jobs,
219 jobs = args.jobs, verbose = args.verbose,
test.py
184 def RunTests(manifest, jobs = 1, verbose = False, debugger = False, argument
209 pool = multiprocessing.Pool(jobs)
241 status = RunTests(manifest, jobs=args.jobs,
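
All three vixl scripts route their jobs argument into multiprocessing.Pool, so linting, presubmit checks and tests share one fan-out idiom. Roughly (the per-file check below is a placeholder):

    import multiprocessing

    def lint_one(path):
        # placeholder check: report the file's size as its "result"
        with open(path, "rb") as f:
            return path, len(f.read())

    def lint_files(files, jobs=1):
        pool = multiprocessing.Pool(jobs)
        try:
            return pool.map(lint_one, files)
        finally:
            pool.close()
            pool.join()
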
/external/smali/baksmali/src/main/java/org/jf/baksmali/
main.java
202 options.jobs = Integer.parseInt(commandLine.getOptionValue("j")); in main()
240 if (options.jobs <= 0) { in main()
241 options.jobs = Runtime.getRuntime().availableProcessors(); in main()
242 if (options.jobs > 6) { in main()
243 options.jobs = 6; in main()
baksmaliOptions.java
81 public int jobs = -1; field in baksmaliOptions
/external/v8/tools/testrunner/local/
execution.py
199 def Run(self, jobs): argument
201 self._RunInternal(jobs)
207 def _RunInternal(self, jobs): argument
208 pool = Pool(jobs)
/external/v8/tools/testrunner/network/
distro.py
51 total_power += p.jobs * p.relative_performance
53 p.needed_work = total_work * p.jobs * p.relative_performance / total_power
network_execution.py
114 def Run(self, jobs): argument
166 self._RunInternal(jobs)
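
distro.py above weights each peer by jobs * relative_performance and hands out work in proportion to that weight. For example, with total_work = 100, a 4-CPU peer at performance 1.0 and a 2-CPU peer at performance 2.0 both carry weight 4 and each receives 50. A small sketch of that split (Peer fields assumed from the hits above):

    from types import SimpleNamespace

    def assign_work(peers, total_work):
        # weight = jobs * relative_performance; share the work proportionally
        total_power = sum(p.jobs * p.relative_performance for p in peers)
        for p in peers:
            p.needed_work = total_work * p.jobs * p.relative_performance / total_power

    peers = [SimpleNamespace(jobs=4, relative_performance=1.0),
             SimpleNamespace(jobs=2, relative_performance=2.0)]
    assign_work(peers, 100)      # each peer ends up with needed_work == 50.0
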
/external/deqp/android/scripts/
common.py
147 jobs = []
151 jobs.append(job)
153 for job in jobs:
/external/libxml2/example/
gjobread.c
175 jobPtr jobs[500]; /* using dynamic alloc is left as an exercise */ member
268 ret->jobs[ret->nbJobs++] = curjob; in parseGjobFile()
285 for (i = 0; i < cur->nbJobs; i++) printJob(cur->jobs[i]); in handleGjob()
/external/fio/examples/
gfapi.fio
1 # Test opening a file from multiple jobs.
/external/mksh/
Makefrag.inc
5 SRCS= lalloc.c eval.c exec.c expr.c funcs.c histrap.c jobs.c lex.c main.c misc.c shf.c syn.c tree.…
6 …eval.c ../src/exec.c ../src/expr.c ../src/funcs.c ../src/histrap.c ../src/jobs.c ../src/lex.c ../s…
7 OBJS_BP= lalloc.o eval.o exec.o expr.o funcs.o histrap.o jobs.o lex.o main.o misc.o shf.o syn.o tr…
/external/okhttp/okhttp-tests/src/test/java/com/squareup/okhttp/internal/
DiskLruCacheTest.java
509 assertEquals(1, executor.jobs.size()); in shrinkMaxSizeEvicts()
660 while (executor.jobs.isEmpty()) { in rebuildJournalOnRepeatedReads()
667 while (executor.jobs.isEmpty()) { in rebuildJournalOnRepeatedEdits()
671 executor.jobs.removeFirst().run(); in rebuildJournalOnRepeatedEdits()
682 while (executor.jobs.isEmpty()) { in rebuildJournalOnRepeatedReadsWithOpenAndClose()
692 while (executor.jobs.isEmpty()) { in rebuildJournalOnRepeatedEditsWithOpenAndClose()
1324 final Deque<Runnable> jobs = new ArrayDeque<>(); field in DiskLruCacheTest.TestExecutor
1327 jobs.addLast(command); in execute()
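
The DiskLruCache tests above install a TestExecutor that only queues submitted Runnables in a deque, so each test drains and runs them at a point it controls. The same trick in Python (a hypothetical stand-in, not OkHttp code):

    from collections import deque

    class TestExecutor:
        """Captures submitted callables instead of running them immediately."""
        def __init__(self):
            self.jobs = deque()

        def execute(self, command):
            self.jobs.append(command)

        def run_first(self):
            # the test decides exactly when queued work runs
            self.jobs.popleft()()
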
/external/libxml2/result/HTML/
test2.html.sax
85 SAX.startElement(a, href='http://jobs.linuxtoday.com/')
86 SAX.characters(jobs, 4)
/external/llvm/docs/
LibFuzzer.rst
154 You may run ``N`` independent fuzzer jobs in parallel on ``M`` CPUs::
156 N=100; M=4; ./pcre_fuzzer ./CORPUS -jobs=$N -workers=$M
160 jobs will create a corpus with too many duplicates.
164 N=100; M=4; ./pcre_fuzzer ./CORPUS -jobs=$N -workers=$M -exit_on_first=1
214 # Run 20 independent fuzzer jobs.
215 ./a.out -jobs=20 -workers=20
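
In the libFuzzer flags above, -jobs is the total number of fuzzing runs to perform and -workers caps how many run at once, so -jobs=20 -workers=20 runs all twenty concurrently. A rough Python model of that scheduling (not libFuzzer's implementation; the binary and corpus paths are placeholders):

    import subprocess
    from concurrent.futures import ThreadPoolExecutor

    def run_fuzz_jobs(binary, corpus, jobs, workers):
        # launch `jobs` independent runs, at most `workers` of them at a time
        def one_run(_):
            return subprocess.call([binary, corpus])
        with ThreadPoolExecutor(max_workers=workers) as pool:
            return list(pool.map(one_run, range(jobs)))
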
