/external/ltp/testcases/kernel/io/direct_io/ |
dma_thread_diotest.c |
    134  static int workers;  (variable)
    231  workers = sysconf(_SC_NPROCESSORS_ONLN);
    232  if (workers > MAX_WORKERS)
    233  workers = MAX_WORKERS;
    260  for (j = 0; j < workers; j++) {
    270  for (j = 0; j < workers; j++) {
    277  worker[workers - 1].length =
    278  READSIZE - PAGE_SIZE * (workers - 1);
    288  for (j = 0; j < workers; j++) {
    298  for (j = 0; j < workers; j++)
    [all...]
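The dma_thread_diotest matches above show the test clamping its worker count to the online CPUs and slicing a READSIZE direct-I/O buffer into PAGE_SIZE pieces, with the last worker picking up the remainder. Below is a minimal standalone sketch of just that partitioning, not the LTP test itself: kPageSize/kReadSize/kMaxWorkers stand in for the test's PAGE_SIZE/READSIZE/MAX_WORKERS macros, and WorkerSlice is an invented helper type.

// Standalone sketch: divide a read buffer into one page-sized slice per
// worker; the last worker takes whatever is left over.
#include <unistd.h>

#include <algorithm>
#include <cstdio>

struct WorkerSlice { long offset; long length; };  // invented helper type

int main() {
    const long kPageSize = 4096;              // stands in for the test's PAGE_SIZE
    const long kReadSize = 1024 * kPageSize;  // stands in for READSIZE
    const int kMaxWorkers = 256;              // stands in for MAX_WORKERS

    int workers = static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
    workers = std::min(std::max(workers, 1), kMaxWorkers);

    WorkerSlice slice[kMaxWorkers];
    for (int j = 0; j < workers; j++) {
        slice[j].offset = j * kPageSize;
        slice[j].length = kPageSize;
    }
    // The last worker reads whatever is left of the buffer.
    slice[workers - 1].length = kReadSize - kPageSize * (workers - 1);

    for (int j = 0; j < workers; j++)
        std::printf("worker %d: offset=%ld length=%ld\n",
                    j, slice[j].offset, slice[j].length);
    return 0;
}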
/external/autotest/client/site_tests/power_HotCPUSuspend/ |
power_HotCPUSuspend.py |
    70   # create processs pool with enough workers to spin all CPUs
    73   workers = max(16, cpus * 2)
    74   pool = multiprocessing.Pool(workers)
    78   logging.info('starting %d workers', workers)
    79   results = [pool.apply_async(cpu_stress) for _ in xrange(workers)]
    96   # check workers: if computation completed, something is wrong
    103  # kill off the workers
    104  logging.info('killing %d workers', workers)
    [all...]
/external/boringssl/src/util/ |
generate-coverage.sh |
    39  go run "$SRC/util/all_tests.go" -build-dir "$BUILD" -callgrind -num-workers 16
    45  go test -shim-path "$BUILD/ssl/test/bssl_shim" -num-workers 1
|
/external/libvpx/libvpx/vp9/common/ |
vp9_thread_common.h | 53 int partial_frame, VPxWorker *workers,
|
vp9_thread_common.c |
    154  VPxWorker *workers, int nworkers,
    178  // workers on cores equal to the number of tile columns. Then if the decoder
    181  // then the number of workers used by the loopfilter should be revisited.
    183  VPxWorker *const worker = &workers[i];
    206  winterface->sync(&workers[i]);
    213  int partial_frame, VPxWorker *workers,
    230  workers, num_workers, lf_sync);
|
/external/fio/ |
workqueue.c |
    30   sw = &wq->workers[start];
    73   struct submit_worker *sw = &wq->workers[i];
    237  if (!wq->workers)
    241  sw = &wq->workers[i];
    252  sw = &wq->workers[i];
    263  sfree(wq->workers);
    264  wq->workers = NULL;
    273  struct submit_worker *sw = &wq->workers[index];
    325  wq->workers = smalloc(wq->max_workers * sizeof(struct submit_worker));
    326  if (!wq->workers)
    [all...]
workqueue.h | 55 struct submit_worker *workers; member in struct:workqueue
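The workqueue.c / workqueue.h matches show fio's shape for this: a workers array of struct submit_worker hanging off the workqueue, allocated up front to max_workers and torn down with the queue. Below is a rough, generic version of that allocate/index/free pattern; fio's own smalloc()/sfree() shared-memory allocator is replaced with calloc()/free(), and the struct fields are illustrative only, not fio's real layout.

#include <cstdlib>

// Illustrative only; not fio's real layout.
struct submit_worker { int index; /* thread handle, work list, ... */ };

struct workqueue {
    unsigned int max_workers;
    struct submit_worker *workers;
};

static int workqueue_init(struct workqueue *wq, unsigned int max_workers) {
    wq->max_workers = max_workers;
    wq->workers = static_cast<submit_worker *>(
        std::calloc(max_workers, sizeof(struct submit_worker)));
    if (!wq->workers)
        return -1;
    for (unsigned int i = 0; i < max_workers; i++)
        wq->workers[i].index = static_cast<int>(i);  // per-worker setup goes here
    return 0;
}

static void workqueue_exit(struct workqueue *wq) {
    if (!wq->workers)
        return;
    std::free(wq->workers);
    wq->workers = nullptr;
}

int main() {
    struct workqueue wq{};
    if (workqueue_init(&wq, 4) != 0)
        return 1;
    struct submit_worker *sw = &wq.workers[0];  // hand work to a specific worker
    (void)sw;
    workqueue_exit(&wq);
    return 0;
}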
|
/external/libvpx/libvpx/test/ |
vp9_thread_test.cc |
    102  VPxWorker workers[kNumWorkers];  (local)
    107  vpx_get_worker_interface()->init(&workers[n]);
    109  workers[n].hook = ThreadHook;
    110  workers[n].data1 = &hook_data[n];
    111  workers[n].data2 = &return_value[n];
    116  EXPECT_NE(vpx_get_worker_interface()->reset(&workers[n]), 0);
    121  Run(&workers[n]);
    125  vpx_get_worker_interface()->end(&workers[n]);
    197  // Note any worker that requires synchronization between other workers will
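The vp9_thread_test matches exercise libvpx's VPxWorker API: init the worker, point hook/data1/data2 at the job, reset() to allocate the thread (nonzero on success), launch it, then sync and end. A hedged sketch of one worker running one job follows; it assumes the vpx_util/vpx_thread.h header location and must be built inside a libvpx tree.

#include "vpx_util/vpx_thread.h"  // assumed header location for VPxWorker

#include <cstdio>

static int AddHook(void *in, void *out) {  // hook returns nonzero on success
    *static_cast<int *>(out) = *static_cast<int *>(in) + 1;
    return 1;
}

int main() {
    const VPxWorkerInterface *iface = vpx_get_worker_interface();
    VPxWorker worker;
    int input = 41, output = 0;

    iface->init(&worker);
    worker.hook = AddHook;
    worker.data1 = &input;
    worker.data2 = &output;
    if (!iface->reset(&worker)) return 1;  // spawns/allocates the worker thread
    iface->launch(&worker);                // run hook(data1, data2) asynchronously
    iface->sync(&worker);                  // wait for the job to finish
    iface->end(&worker);                   // join and clean up
    std::printf("output = %d\n", output);
    return 0;
}

For a single job on the calling thread, execute() can be used in place of launch()/sync(); the test's Run() helper wraps exactly that choice.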
|
/frameworks/native/libs/binder/tests/ |
binderThroughputTest.cpp |
    196  // If client/server pairs, then half the workers are
    203  vector<sp<IBinder> > workers;  (local)
    207  workers.push_back(serviceMgr->getService(generateServiceName(i)));
    215  int target = cs_pair ? num % server_count : rand() % workers.size();
    223  status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
    277  int workers,
    283  // Create all the workers and wait for them to spawn.
    284  for (int i = 0; i < workers; i++) {
    285  pipes.push_back(make_worker(i, iterations, workers, payload_size, cs_pair));
    289  // Run the workers and wait for completion
    334  int workers = 2;  (local)
    [all...]
schd-dbg.cpp |
    37   vector<sp<IBinder> > workers;  (variable)
    292  status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
    335  // If client/server pairs, then half the workers are
    342  workers.push_back(serviceMgr->getService(generateServiceName(i)));
    360  ASSERT(NO_ERROR == workers[target]->transact(BINDER_NOP, data, &reply));
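Both binder benchmarks above follow the same client-side pattern: collect the worker services into a vector<sp<IBinder>> by name, pick a target, and drive it with transact(). The sketch below reproduces only that lookup-and-transact step with libbinder; the service names and the transaction code are stand-ins (the tests use their own generateServiceName() helper and a private BINDER_NOP code), and it assumes the named services have already been registered by separate server processes.

#include <binder/IServiceManager.h>
#include <binder/Parcel.h>
#include <utils/Errors.h>
#include <utils/String16.h>

#include <cstdlib>
#include <string>
#include <vector>

using namespace android;

int main() {
    const int num_workers = 2;  // stand-in worker count
    std::vector<sp<IBinder>> workers;
    sp<IServiceManager> serviceMgr = defaultServiceManager();

    for (int i = 0; i < num_workers; i++) {
        // Stand-in for the tests' generateServiceName(i).
        std::string name = "binder_worker_" + std::to_string(i);
        sp<IBinder> w = serviceMgr->getService(String16(name.c_str()));
        if (w == nullptr)
            return 1;  // service not registered yet
        workers.push_back(w);
    }

    Parcel data, reply;
    int target = std::rand() % static_cast<int>(workers.size());
    // FIRST_CALL_TRANSACTION stands in for the tests' private BINDER_NOP code.
    status_t ret = workers[target]->transact(IBinder::FIRST_CALL_TRANSACTION,
                                             data, &reply);
    return ret == NO_ERROR ? 0 : 1;
}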
|
/external/jemalloc/msvc/projects/vc2015/test_threads/ |
test_threads.cpp |
    31  vector<thread> workers;  (local)
    41  workers.emplace_back([tid=i]() {
    76  for (thread& t : workers) {
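The jemalloc test above uses the plain standard-library idiom: a vector<thread>, one emplace_back per worker with the worker id captured into the lambda, then a join loop. A self-contained version of just that skeleton follows; the thread count is a placeholder, and the real test's worker body exercises the allocator.

#include <cstdio>
#include <thread>
#include <vector>

int main() {
    const int numThreads = 4;  // stand-in for the test's thread count
    std::vector<std::thread> workers;
    for (int i = 0; i < numThreads; i++) {
        workers.emplace_back([tid = i]() {
            // Per-thread work goes here; the real test hammers malloc/free.
            std::printf("worker %d running\n", tid);
        });
    }
    for (std::thread &t : workers) {
        t.join();
    }
    return 0;
}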
|
/system/libhwbinder/vts/performance/ |
Benchmark_throughput.cpp |
    207  vector<sp<IBenchmark>> workers;  (local)
    218  workers.push_back(service);
    235  Return<void> ret = workers[target]->sendVec(data_vec, [&](const auto &) {});
    308  // Num of workers.
    309  int workers = 2;  (local)
    327  workers = atoi(argv[i + 1]);
    344  services = workers;
    358  // Create workers (test clients).
    360  for (int i = 0; i < workers; i++) {
    363  // Wait untill all workers are ready
    [all...]
/libcore/ojluni/src/main/java/java/util/concurrent/ |
ThreadPoolExecutor.java |
    352  * The workerCount is the number of workers that have been
    358  * reported as the current size of the workers set.
    466  * Lock held on access to workers set and related bookkeeping.
    475  * ensuring workers set is stable while separately checking
    484  private final HashSet<Worker> workers = new HashSet<>();  (field in class:ThreadPoolExecutor)
    550  * Core pool size is the minimum number of workers to keep alive
    759  for (Worker w : workers)
    775  for (Worker w : workers)
    791  * enabled but there are still other workers. In this case, at
    795  * workers since shutdown began will also eventually exit
    [all...]
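The ThreadPoolExecutor comments describe the bookkeeping around its workers set: a separately maintained worker count, and a main lock that must be held whenever the set is read or modified, so the count and the set can transiently disagree. The following is a loose C++ analogue of that arrangement for illustration only, not the Java implementation; all class and member names are invented.

#include <atomic>
#include <mutex>
#include <unordered_set>

struct Worker { /* thread handle, current task, ... */ };

class PoolBookkeeping {
  public:
    Worker *addWorker() {
        Worker *w = new Worker();
        workerCount.fetch_add(1, std::memory_order_relaxed);  // counted first...
        std::lock_guard<std::mutex> lock(mainLock);           // ...then recorded
        workers.insert(w);
        return w;
    }
    void removeWorker(Worker *w) {
        {
            std::lock_guard<std::mutex> lock(mainLock);
            workers.erase(w);
        }
        workerCount.fetch_sub(1, std::memory_order_relaxed);
        delete w;
    }
    size_t reportedSize() {
        // The "current size" is reported from the set, under the lock.
        std::lock_guard<std::mutex> lock(mainLock);
        return workers.size();
    }
  private:
    std::atomic<int> workerCount{0};
    std::mutex mainLock;  // held on access to the workers set
    std::unordered_set<Worker *> workers;
};

int main() {
    PoolBookkeeping pool;
    Worker *w = pool.addWorker();
    pool.removeWorker(w);
    return pool.reportedSize() == 0 ? 0 : 1;
}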
/external/icu/android_icu4j/src/main/tests/android/icu/dev/test/util/ |
VersionInfoTest.java |
    368  GetInstanceWorker[] workers = new GetInstanceWorker[numThreads];  (local)
    371  // Create workers
    372  for (int i = 0; i < workers.length; i++) {
    373  workers[i] = new GetInstanceWorker(i, results[i]);
    376  // Start workers
    377  for (int i = 0; i < workers.length; i++) {
    378  workers[i].start();
    382  for (int i = 0; i < workers.length; i++) {
    384  workers[i].join();
|
/external/icu/icu4j/main/tests/core/src/com/ibm/icu/dev/test/util/ |
VersionInfoTest.java |
    367  GetInstanceWorker[] workers = new GetInstanceWorker[numThreads];  (local)
    370  // Create workers
    371  for (int i = 0; i < workers.length; i++) {
    372  workers[i] = new GetInstanceWorker(i, results[i]);
    375  // Start workers
    376  for (int i = 0; i < workers.length; i++) {
    377  workers[i].start();
    381  for (int i = 0; i < workers.length; i++) {
    383  workers[i].join();
|
/external/compiler-rt/lib/asan/tests/ |
asan_mac_test.cc |
    176  pthread_t workers[kNumWorkers], forker;  (local)
    178  PTHREAD_CREATE(&workers[i], 0, MallocIntrospectionLockWorker, 0);
    182  PTHREAD_JOIN(workers[i], 0);
|
/external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_cond_timedwait/ |
stress2.c |
    191  pthread_t workers[NCHILDREN * SCALABILITY_FACTOR + 2];  (member in struct:celldata)
    337  /* create the workers */
    340  pthread_create(&(cd->workers[i]), &ta, worker, arg);
    356  /* wait for the workers to be ready */
    387  ret = pthread_cancel(cd->workers[randval]);
    404  ret = pthread_join(cd->workers[i], &w_ret);
|
/external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_cond_wait/ |
stress.c |
    191  pthread_t workers[NCHILDREN * SCALABILITY_FACTOR + 2];  (member in struct:celldata)
    337  /* create the workers */
    340  pthread_create(&(cd->workers[i]), &ta, worker, arg);
    356  /* wait for the workers to be ready */
    387  ret = pthread_cancel(cd->workers[randval]);
    404  ret = pthread_join(cd->workers[i], &w_ret);
|
stress2.c |
    191  pthread_t workers[NCHILDREN * SCALABILITY_FACTOR + 2];  (member in struct:celldata)
    337  /* create the workers */
    340  pthread_create(&(cd->workers[i]), &ta, worker, arg);
    356  /* wait for the workers to be ready */
    387  ret = pthread_cancel(cd->workers[randval]);
    404  ret = pthread_join(cd->workers[i], &w_ret);
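The three pthread_cond stress tests above (one under pthread_cond_timedwait, two under pthread_cond_wait) share the same skeleton around their workers array: create the workers, cancel one picked at random, then join and inspect the return values. The condition-variable machinery is omitted in this trimmed-down sketch; the worker body and the worker count are placeholders, not the tests' code.

#include <pthread.h>

#include <cstdio>
#include <cstdlib>

const int kWorkers = 8;  // stand-in for NCHILDREN * SCALABILITY_FACTOR + 2

static void *worker(void *) {
    for (;;)
        pthread_testcancel();  // cancellation point; the real workers wait on a condvar
    return nullptr;
}

int main() {
    pthread_t workers[kWorkers];
    for (int i = 0; i < kWorkers; i++)
        pthread_create(&workers[i], nullptr, worker, nullptr);

    // Cancel one worker at random and verify it reports PTHREAD_CANCELED.
    int randval = std::rand() % kWorkers;
    pthread_cancel(workers[randval]);
    void *w_ret = nullptr;
    pthread_join(workers[randval], &w_ret);
    std::printf("worker %d cancelled: %s\n", randval,
                w_ret == PTHREAD_CANCELED ? "yes" : "no");

    // Tear down the remaining workers the same way.
    for (int i = 0; i < kWorkers; i++) {
        if (i == randval) continue;
        pthread_cancel(workers[i]);
        pthread_join(workers[i], &w_ret);
    }
    return 0;
}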
|
/external/llvm/lib/Fuzzer/ |
FuzzerDriver.cpp |
    224  if (FlagValue(S.c_str(), "jobs") || FlagValue(S.c_str(), "workers"))
    289  if (Flags.jobs > 0 && Flags.workers == 0) {
    290  Flags.workers = std::min(NumberOfCpuCores() / 2, Flags.jobs);
    291  if (Flags.workers > 1)
    292  Printf("Running %d workers\n", Flags.workers);
    295  if (Flags.workers > 0 && Flags.jobs > 0)
    296  return RunInMultipleProcesses(Args, Flags.workers, Flags.jobs);
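The FuzzerDriver logic above derives -workers when only -jobs is given: half the CPU cores, capped at the job count. A tiny standalone restatement of that rule follows, substituting std::thread::hardware_concurrency() for libFuzzer's internal NumberOfCpuCores() helper; the jobs value is a placeholder.

#include <algorithm>
#include <cstdio>
#include <thread>

int main() {
    int jobs = 8;     // stand-in for Flags.jobs
    int workers = 0;  // stand-in for Flags.workers (unset on the command line)

    if (jobs > 0 && workers == 0) {
        int cores = static_cast<int>(std::thread::hardware_concurrency());
        workers = std::min(cores / 2, jobs);
        if (workers > 1)
            std::printf("Running %d workers\n", workers);
    }
    return 0;
}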
|
/external/libvpx/libvpx/vp9/encoder/ |
vp9_ethread.c |
    87   CHECK_MEM_ERROR(cm, cpi->workers,
    88   vpx_malloc(allocated_workers * sizeof(*cpi->workers)));
    94   VPxWorker *const worker = &cpi->workers[i];
    137  VPxWorker *const worker = &cpi->workers[i];
    145  VPxWorker *const worker = &cpi->workers[i];
    159  VPxWorker *const worker = &cpi->workers[i];
    209  VPxWorker *const worker = &cpi->workers[i];
    643  VPxWorker *const worker = &cpi->workers[i];
|
/prebuilts/go/darwin-x86/src/runtime/ |
mgc.go |
    41   // workers started by the scheduler and by assists performed as
    60   // a. GC stops all workers, disables local work queue caches,
    62   // cache, and reenables workers.
    70   // b. Set gcphase to _GCmarktermination, and disable workers and
    251  // workers are allowed to blacken objects. This must only be set when
    289  // workers and are distinguished by gcMarkWorkerMode.
    362  // mark workers during this cycle. This is updated atomically
    378  // that assists and background mark workers started.
    382  // workers that need to be started. This is computed at the
    384  // dedicated mark workers get started
    [all...]
/prebuilts/go/linux-x86/src/runtime/ |
mgc.go |
    41   // workers started by the scheduler and by assists performed as
    60   // a. GC stops all workers, disables local work queue caches,
    62   // cache, and reenables workers.
    70   // b. Set gcphase to _GCmarktermination, and disable workers and
    251  // workers are allowed to blacken objects. This must only be set when
    289  // workers and are distinguished by gcMarkWorkerMode.
    362  // mark workers during this cycle. This is updated atomically
    378  // that assists and background mark workers started.
    382  // workers that need to be started. This is computed at the
    384  // dedicated mark workers get started
    [all...]
/external/autotest/server/cros/clique_lib/ |
clique_runner.py |
    161  def _are_all_conn_workers_healthy(workers, aps, assoc_params_list, job):
    162  """Returns if all the connection workers are working properly.
    167  @param workers: a list of conn_worker objects.
    172  @returns True if all the workers are healthy, False otherwise.
    175  for worker, ap, assoc_params in zip(workers, aps, assoc_params_list):
    210  connection workers.
    234  # Let's create generic connection workers and make them connect
    236  # these connection workers based on the role we want them to
    260  # Check if all our APs, DUTs and connection workers are in good
    288  raise error.TestError('Not all connection workers healthy.'
    [all...]
/external/webrtc/third_party/gtest-parallel/ |
gtest-parallel |
    254  parser.add_option('-w', '--workers', type='int',
    256  help='number of workers to spawn')
    391  workers = [start_daemon(worker) for i in range(options.workers)]
    393  [t.join() for t in workers]
|