    Searched refs: num_threads (Results 126 - 150 of 323)


  /external/clang/test/OpenMP/
distribute_parallel_for_simd_ast_print.cpp 85 #pragma omp distribute parallel for simd private(argc, b), firstprivate(c, d), lastprivate(d, f) collapse(N) schedule(static, N) if (parallel :argc) num_threads(N) default(shared) shared(e) reduction(+ : h) dist_schedule(static,N)
97 // CHECK: #pragma omp distribute parallel for simd private(argc,b) firstprivate(c,d) lastprivate(d,f) collapse(N) schedule(static, N) if(parallel: argc) num_threads(N) default(shared) shared(e) reduction(+: h) dist_schedule(static, N)
129 #pragma omp distribute parallel for simd private(argc, b), firstprivate(argv, c), lastprivate(d, f) collapse(2) schedule(auto) if (argc) num_threads(a) default(shared) shared(e) reduction(+ : h) dist_schedule(static, b)
133 // CHECK: #pragma omp distribute parallel for simd private(argc,b) firstprivate(argv,c) lastprivate(d,f) collapse(2) schedule(auto) if(argc) num_threads(a) default(shared) shared(e) reduction(+: h) dist_schedule(static, b)
parallel_for_ast_print.cpp 75 #pragma omp parallel for private(argc, b), firstprivate(c, d), lastprivate(d, f) collapse(N) schedule(static, N) ordered(N) if (parallel :argc) num_threads(N) default(shared) shared(e) reduction(+ : h)
87 // CHECK-NEXT: #pragma omp parallel for private(argc,b) firstprivate(c,d) lastprivate(d,f) collapse(N) schedule(static, N) ordered(N) if(parallel: argc) num_threads(N) default(shared) shared(e) reduction(+: h)
114 #pragma omp parallel for private(argc, b), firstprivate(argv, c), lastprivate(d, f) collapse(2) schedule(auto) ordered if (argc) num_threads(a) default(shared) shared(e) reduction(+ : h) linear(a:-5)
118 // CHECK-NEXT: #pragma omp parallel for private(argc,b) firstprivate(argv,c) lastprivate(d,f) collapse(2) schedule(auto) ordered if(argc) num_threads(a) default(shared) shared(e) reduction(+: h) linear(a: -5)
target_parallel_ast_print.cpp 40 #pragma omp target parallel default(none), private(argc,b) firstprivate(argv) shared (d) if (parallel:argc > 0) num_threads(C) proc_bind(master) reduction(+:c, arr1[argc]) reduction(max:e, arr[:C][0:10])
42 #pragma omp target parallel if (C) num_threads(s) proc_bind(close) reduction(^:e, f, arr[0:C][:argc]) reduction(&& : g)
75 // CHECK-NEXT: #pragma omp target parallel default(none) private(argc,b) firstprivate(argv) shared(d) if(parallel: argc > 0) num_threads(5) proc_bind(master) reduction(+: c,arr1[argc]) reduction(max: e,arr[:5][0:10])
77 // CHECK-NEXT: #pragma omp target parallel if(5) num_threads(s) proc_bind(close) reduction(^: e,f,arr[0:5][:argc]) reduction(&&: g)
107 // CHECK-NEXT: #pragma omp target parallel default(none) private(argc,b) firstprivate(argv) shared(d) if(parallel: argc > 0) num_threads(1) proc_bind(master) reduction(+: c,arr1[argc]) reduction(max: e,arr[:1][0:10])
109 // CHECK-NEXT: #pragma omp target parallel if(1) num_threads(s) proc_bind(close) reduction(^: e,f,arr[0:1][:argc]) reduction(&&: g)
139 // CHECK-NEXT: #pragma omp target parallel default(none) private(argc,b) firstprivate(argv) shared(d) if(parallel: argc > 0) num_threads(C) proc_bind(master) reduction(+: c,arr1[argc]) reduction(max: e,arr[:C][0:10])
141 // CHECK-NEXT: #pragma omp target parallel if(C) num_threads(s) proc_bind(close) reduction(^: e,f,arr[0:C][:argc]) reduction(&&: g)
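
  The clang tests above only check that the OpenMP pretty-printer reproduces each clause, num_threads included, exactly as written. As a reminder of what the clause actually requests at run time, here is a minimal, hypothetical sketch (not taken from those tests) of num_threads on a combined parallel-for directive; compile with -fopenmp:

    #include <omp.h>
    #include <cstdio>

    int main() {
      const int n = 4;  // requested team size; the runtime may still use fewer threads
      #pragma omp parallel for num_threads(n) schedule(static)
      for (int i = 0; i < 16; ++i) {
        std::printf("iteration %d ran on thread %d\n", i, omp_get_thread_num());
      }
      return 0;
    }
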
  /external/libchrome/base/threading/
simple_thread.cc 97 int num_threads)
99 num_threads_(num_threads),
simple_thread.h 165 DelegateSimpleThreadPool(const std::string& name_prefix, int num_threads);
  /external/tensorflow/tensorflow/compiler/xla/client/
client_library.cc 54 int num_threads) {
55 intra_op_parallelism_threads_ = num_threads;
  /external/tensorflow/tensorflow/contrib/factorization/kernels/
clustering_ops.cc 354 const int64 num_threads = worker_threads.num_threads; variable
376 (num_threads + 2) * kNearestNeighborsCentersMaxBlockSize) *
384 available_memory_budget / num_threads / bytes_per_row);
389 NextMultiple(num_threads, CeilOfRatio(num_points, rows_per_block));
410 const int64 units_per_thread = num_units / num_threads;
411 BlockingCounter counter(num_threads - 1);
412 for (int64 i = 1; i < num_threads; ++i) {
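
  clustering_ops.cc sizes its blocks from the worker pool's num_threads, hands blocks 1..num_threads-1 to the pool, keeps block 0 on the calling thread, and waits on a BlockingCounter. A rough standard-library sketch of that fan-out/join shape, with plain std::thread standing in for TensorFlow's thread pool and BlockingCounter, so an illustration rather than the kernel's actual code:

    #include <atomic>
    #include <cstdint>
    #include <thread>
    #include <vector>

    std::atomic<int64_t> g_sum{0};

    // Hypothetical per-block work: sum the indices in [begin, end).
    void ProcessBlock(int64_t begin, int64_t end) {
      int64_t local = 0;
      for (int64_t i = begin; i < end; ++i) local += i;
      g_sum += local;
    }

    void RunBlocks(int64_t num_units, int num_threads) {
      const int64_t units_per_thread = num_units / num_threads;
      std::vector<std::thread> workers;
      // Hand blocks 1..num_threads-1 to worker threads...
      for (int i = 1; i < num_threads; ++i) {
        const int64_t begin = i * units_per_thread;
        const int64_t end =
            (i + 1 == num_threads) ? num_units : begin + units_per_thread;
        workers.emplace_back(ProcessBlock, begin, end);
      }
      // ...and keep block 0 on the calling thread, as the kernel above does.
      ProcessBlock(0, units_per_thread);
      for (auto& t : workers) t.join();  // stands in for BlockingCounter::Wait()
    }

    int main() { RunBlocks(1000, 4); }
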
  /external/tensorflow/tensorflow/contrib/model_pruning/examples/cifar10/
cifar10_input.py 124 num_threads=num_preprocess_threads,
131 num_threads=num_preprocess_threads,
  /external/tensorflow/tensorflow/core/kernels/
bincount_op.cc 57 const int64 num_threads = thread_pool->NumThreads() + 1; local
60 TensorShape({num_threads, size}),
meta_support.cc 91 return tf_context->device()->tensorflow_cpu_worker_threads()->num_threads;
156 TensorflowGemmContext context(workers.num_threads, workers.workers);
169 TensorflowGemmContext context(workers.num_threads, workers.workers);
196 void SetNumThreads(int num_threads) { g_num_threads = num_threads; }
nn_ops_test.cc 107 CONV_OP op, int num_threads, int stride,
119 options.config.set_intra_op_parallelism_threads(num_threads);
502 int num_threads, int stride, Padding padding,
513 options.config.set_intra_op_parallelism_threads(num_threads);
737 thread::ThreadPool threadpool(Env::Default(), "test", num_threads); local
819 thread::ThreadPool threadpool(Env::Default(), "test", num_threads); local
911 thread::ThreadPool threadpool(Env::Default(), "test", num_threads); local
1015 thread::ThreadPool threadpool(Env::Default(), "test", num_threads); local
1195 thread::ThreadPool threadpool(Env::Default(), "test", num_threads); local
    [all...]
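
  nn_ops_test.cc threads num_threads into its benchmarks in two ways: as the session's intra-op pool size and as an explicit thread::ThreadPool. A sketch of the first pattern, assuming the TensorFlow C++ headers and library are available and omitting error handling:

    #include "tensorflow/core/public/session.h"
    #include "tensorflow/core/public/session_options.h"

    // Cap the intra-op thread pool at num_threads before creating the session.
    tensorflow::Session* MakeSession(int num_threads) {
      tensorflow::SessionOptions options;
      if (num_threads > 0) {
        options.config.set_intra_op_parallelism_threads(num_threads);
      }
      return tensorflow::NewSession(options);
    }
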
  /external/ImageMagick/MagickCore/
thread-private.h 33 num_threads((expression) == 0 ? 1 : \
  /external/tensorflow/tensorflow/contrib/data/python/ops/
dataset_ops.py 479 "num_threads", "output_buffer_size")
482 num_threads=None,
491 num_threads: (Optional.) Deprecated, use `num_parallel_calls` instead.
502 if num_threads is None and num_parallel_calls is None:
505 if num_threads is None:
512 num_threads))
  /external/tensorflow/tensorflow/contrib/slim/python/slim/data/
prefetch_queue_test.py 55 [counter, image, label], batch_size=batch_size, num_threads=1)
92 [counter, image, label], batch_size=batch_size, num_threads=4)
132 [counter, image, label], batch_size=batch_size, num_threads=4)
189 num_threads=1, dynamic_pad=True)
  /external/tensorflow/tensorflow/contrib/training/python/training/
sampling_ops.py 106 num_threads=prebatch_threads,
124 num_threads=queue_threads)
243 num_threads=threads_per_queue,
256 num_threads=threads_per_queue)
  /prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/x86_64-linux/include/c++/4.8/parallel/
par_loop.h 81 # pragma omp parallel num_threads(__num_threads)
find.h 115 # pragma omp parallel num_threads(__num_threads)
222 # pragma omp parallel shared(__result) num_threads(__num_threads)
346 # pragma omp parallel shared(__result) num_threads(__num_threads)
  /prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/include/c++/4.8.3/parallel/
par_loop.h 81 # pragma omp parallel num_threads(__num_threads)
find.h 115 # pragma omp parallel num_threads(__num_threads)
222 # pragma omp parallel shared(__result) num_threads(__num_threads)
346 # pragma omp parallel shared(__result) num_threads(__num_threads)
  /external/mesa3d/src/gallium/drivers/r600/
evergreen_compute.c 714 int num_threads; local
735 num_threads = 128;
739 num_threads = 128;
743 num_threads = 128;
748 num_threads = 128;
752 num_threads = 128;
756 num_threads = 128;
760 num_threads = 128;
764 num_threads = 128;
768 num_threads = 128
    [all...]
  /external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorReductionCuda.h 116 const Index num_threads = blockDim.x * gridDim.x; local
117 for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
206 const Index num_threads = blockDim.x * gridDim.x; local
208 for (Index i = thread_id; i < num_packets; i += num_threads) {
389 const Index num_threads = blockDim.x * gridDim.x; local
394 for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
460 const Index num_threads = blockDim.x * gridDim.x; local
466 for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
669 const Index num_threads = blockDim.x * gridDim.x; local
673 for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
    [all...]
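
  Each of these CUDA kernels computes num_threads = blockDim.x * gridDim.x and lets every thread walk the output with that stride (a grid-stride loop). The same interleaved partitioning, sketched on the host with std::thread purely to illustrate the indexing scheme, not the GPU code itself:

    #include <cstddef>
    #include <thread>
    #include <vector>

    void StridedFill(std::vector<float>& out, float value, int num_threads) {
      std::vector<std::thread> workers;
      for (int thread_id = 0; thread_id < num_threads; ++thread_id) {
        workers.emplace_back([&out, value, thread_id, num_threads] {
          // Thread thread_id touches elements thread_id, thread_id + num_threads, ...
          for (std::size_t i = thread_id; i < out.size(); i += num_threads) {
            out[i] = value;
          }
        });
      }
      for (auto& t : workers) t.join();
    }

    int main() {
      std::vector<float> out(1000);
      StridedFill(out, 1.0f, 4);
    }
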
  /external/eigen/Eigen/src/Core/products/
GeneralMatrixMatrix.h 215 void initParallelSession(Index num_threads) const
217 m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
303 gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
343 gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
351 computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
356 computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
363 void initParallel(Index rows, Index cols, Index depth, Index num_threads)
371 computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
  /external/tensorflow/tensorflow/tools/benchmark/
benchmark_model.cc 51 Status InitializeSession(int num_threads, const string& graph,
58 if (num_threads > 0) {
59 config.set_intra_op_parallelism_threads(num_threads);
357 int num_threads = -1; local
386 Flag("num_threads", &num_threads, "number of threads"),
452 LOG(INFO) << "Num threads: [" << num_threads << "]";
464 InitializeSession(num_threads, graph, &session, &graph_def);
  /external/tensorflow/tensorflow/contrib/tensor_forest/kernels/
model_ops.cc 206 int num_threads = worker_threads->num_threads; variable
219 Shard(num_threads, worker_threads->workers, num_data, costPerTraverse,
305 int num_threads = worker_threads->num_threads; variable
314 Shard(num_threads, worker_threads->workers, num_data, costPerTraverse,
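
  model_ops.cc reads num_threads from the device's CPU worker pool and delegates the split of [0, num_data) to Shard, with a per-element cost estimate guiding the range sizes. A hedged sketch of that call shape, assuming a TensorFlow OpKernelContext and the work_sharder header; the traversal body is a stand-in:

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/util/work_sharder.h"

    void TraverseAll(tensorflow::OpKernelContext* context, tensorflow::int64 num_data) {
      auto* worker_threads = context->device()->tensorflow_cpu_worker_threads();
      const int num_threads = worker_threads->num_threads;
      const tensorflow::int64 costPerTraverse = 500;  // rough cost per row
      auto traverse = [](tensorflow::int64 begin, tensorflow::int64 end) {
        // ... process rows [begin, end) ...
      };
      tensorflow::Shard(num_threads, worker_threads->workers, num_data,
                        costPerTraverse, traverse);
    }
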
  /device/linaro/bootloader/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/test/
test_hashlib.py 330 num_threads = 5
333 expected_hash = hashlib.sha1(data*num_threads).hexdigest()
343 for threadnum in xrange(num_threads):
