/external/tensorflow/tensorflow/core/util/tensor_bundle/ |
naming.cc |
  28 string DataFilename(StringPiece prefix, int32 shard_id, int32 num_shards) {
  29 DCHECK_GT(num_shards, 0);
  30 DCHECK_LT(shard_id, num_shards);
  33 shard_id, num_shards);
|
naming.h |
  24 // DataFilename(prefix, shard_id, num_shards): pathname of a data file.
  42 string DataFilename(StringPiece prefix, int32 shard_id, int32 num_shards);
|
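For reference, the checkpoint data files this helper names follow a `prefix.data-NNNNN-of-NNNNN` pattern, with shard_id constrained to [0, num_shards) by the DCHECKs above. A minimal Python sketch of the same scheme (the 5-digit zero padding is an assumption here, and `data_filename` is a hypothetical stand-in):

    def data_filename(prefix, shard_id, num_shards):
        # Mirrors the DCHECKs in naming.cc: shard_id must lie in [0, num_shards).
        assert num_shards > 0
        assert 0 <= shard_id < num_shards
        # Assumed 5-digit zero padding, e.g. "model.ckpt.data-00000-of-00001".
        return '%s.data-%05d-of-%05d' % (prefix, shard_id, num_shards)

    print(data_filename('model.ckpt', 0, 1))  # model.ckpt.data-00000-of-00001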
/external/tensorflow/tensorflow/contrib/boosted_trees/lib/utils/ |
parallel_for.cc |
  30 const int num_shards = std::max<int>(
  32 const int64 block_size = (batch_size + num_shards - 1) / num_shards;
|
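The `(batch_size + num_shards - 1) / num_shards` expression is integer ceiling division: every shard gets a full block except possibly the last. A small sketch of the resulting split, with `shard_ranges` a hypothetical helper name:

    def shard_ranges(batch_size, num_shards):
        # Ceiling division; the final block may be smaller than block_size.
        block_size = (batch_size + num_shards - 1) // num_shards
        return [(start, min(start + block_size, batch_size))
                for start in range(0, batch_size, block_size)]

    print(shard_ranges(10, 3))  # [(0, 4), (4, 8), (8, 10)]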
/external/tensorflow/tensorflow/contrib/linear_optimizer/python/ops/ |
sharded_mutable_dense_hashtable_test.py |
  32 for num_shards in [1, 3, 10]:
  43 num_shards=num_shards)
  55 for num_shards in [1, 3, 10]:
  68 num_shards=num_shards)
  85 num_shards = 2
  93 num_shards=num_shards)
  100 self.assertAllEqual(num_shards, len(keys_list) [all...]
sharded_mutable_dense_hashtable.py |
  40 The _ShardedMutableDenseHashTable keeps `num_shards` MutableDenseHashTable
  51 num_shards=1,
  57 for i in range(num_shards):
  64 name='%s-%d-of-%d' % (name, i + 1, num_shards)))
  109 num_shards = self._num_shards
  110 if num_shards == 1:
  116 num_shards)
  119 for i in range(num_shards)
  126 num_shards)
  134 num_shards = self._num_shard [all...]
sdca_ops.py |
  149 num_shards=self._num_table_shards(),
  176 num_shards = self._options.get('num_table_shards')
  177 return 1 if num_shards is None else num_shards
|
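The pattern above keeps `num_shards` underlying tables behind a single lookup interface. A toy sketch of the idea (the routing rule of key mod num_shards for integer keys is an assumption for illustration, and `ShardedDict` is hypothetical):

    class ShardedDict(object):
        """Toy stand-in for a sharded table: num_shards plain dicts,
        with integer keys routed by key % num_shards."""

        def __init__(self, num_shards=1):
            self._shards = [{} for _ in range(num_shards)]

        def insert(self, key, value):
            self._shards[key % len(self._shards)][key] = value

        def lookup(self, key, default=None):
            return self._shards[key % len(self._shards)].get(key, default)

    table = ShardedDict(num_shards=3)
    table.insert(7, 'a')
    print(table.lookup(7))  # a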
/external/tensorflow/tensorflow/core/util/ |
work_sharder_test.cc |
  33 int64 num_shards = 0;
  37 [=, &mu, &num_shards, &num_done_work, &work](int64 start, int64 limit) {
  42 ++num_shards;
  51 << num_shards;
|
work_sharder.cc |
  39 // We shard [0, total) into "num_shards" shards.
  40 // 1 <= num_shards <= num worker threads
  46 const int num_shards =
  56 const int64 block_size = (total + num_shards - 1) / num_shards;
|
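work_sharder.cc applies the same ceiling-division block size and hands each `[start, limit)` block to a worker callback, which is the contract the test above exercises. A sequential Python sketch of that contract (`shard` here is a hypothetical single-threaded analogue, not the threaded implementation):

    def shard(total, num_shards, work_fn):
        # Each callback sees a half-open [start, limit) range; the union of
        # all ranges covers [0, total) exactly once.
        assert total > 0 and num_shards > 0
        block_size = (total + num_shards - 1) // num_shards
        for start in range(0, total, block_size):
            work_fn(start, min(start + block_size, total))

    done = []
    shard(10, 4, lambda start, limit: done.extend(range(start, limit)))
    assert done == list(range(10))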
/external/tensorflow/tensorflow/contrib/tpu/python/tpu/ |
tpu_optimizer.py |
  76 num_shards = tpu_function.get_tpu_context().number_of_shards
  77 if num_shards is None:
  81 num_shards = 1
  82 if num_shards > 1 and self._reduction == losses.Reduction.MEAN:
  83 scale = 1.0 / num_shards
|
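The logic above implements a cross-shard mean: under `Reduction.MEAN`, each shard scales its gradient by `1/num_shards` before the cross-replica sum, so the summed result equals the average. A numpy sketch of that identity (plain `sum` stands in for the cross-replica sum):

    import numpy as np

    num_shards = 4
    per_shard_grads = [np.random.randn(3) for _ in range(num_shards)]

    # Scale by 1/num_shards on each shard, then sum across shards.
    scaled_sum = sum(g * (1.0 / num_shards) for g in per_shard_grads)

    assert np.allclose(scaled_sum, np.mean(per_shard_grads, axis=0))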
tpu_config.py |
  45 'num_shards',
  58 num_shards: (Deprecated, ignored by TPUEstimator).
  62 product(computation_shape) * num_shards.
  91 num_shards=None,
  101 # Check num_shards.
  102 if num_shards is not None:
  103 util_lib.check_positive_integer(num_shards, 'TPUConfig num_shards')
  129 num_shards=num_shards, [all...]
tpu.py |
  393 num_shards=1,
  404 `input_shard_axes`). Each input is split into `num_shards` pieces
  431 `num_shards`.
  432 num_shards: The number of shards.
  457 ValueError: If num_shards <= 0
  462 if num_shards <= 0:
  463 raise ValueError("num_shards must be a positive integer.")
  478 array_ops.split(x, num_shards, axis=axis)
  484 transposed_inputs = [[]] * num_shards
  493 # There must be at least one shard since num_shards > 0 [all...]
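The split-then-transpose step above turns a list of inputs into one input list per shard. A numpy sketch of that regrouping, assuming every shard axis divides evenly (as `array_ops.split` also requires); `split_inputs` is a hypothetical name:

    import numpy as np

    def split_inputs(inputs, num_shards, shard_axes):
        # Split each input into num_shards pieces along its shard axis,
        # then regroup so shard i gets the i-th piece of every input.
        split = [np.split(x, num_shards, axis=axis)
                 for x, axis in zip(inputs, shard_axes)]
        return [[pieces[i] for pieces in split] for i in range(num_shards)]

    a = np.arange(8).reshape(4, 2)
    b = np.arange(12).reshape(4, 3)
    shards = split_inputs([a, b], num_shards=2, shard_axes=[0, 0])
    print(shards[0][0].shape, shards[0][1].shape)  # (2, 2) (2, 3)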
/external/tensorflow/tensorflow/python/kernel_tests/ |
embedding_ops_test.py |
  131 def _EmbeddingParams(num_shards,
  141 for i in range(num_shards):
  142 shard_shape = [vocab_size // num_shards] + shape
  143 if i < vocab_size % num_shards:  # Excess goes evenly on the first shards
  161 def _EmbeddingParamsAsPartitionedVariable(num_shards,
  167 num_shards, vocab_size, dtype=dtype, shape=shape)
  174 max_partitions=num_shards, min_slice_size=1),
  181 num_shards,
  200 val = np.copy(params[_PName(i % num_shards) + ":0"][
  201 i // num_shards, :]) * weight_valu [all...]
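The test helper sizes its shards so the `vocab_size % num_shards` leftover rows go one apiece to the first shards. A sketch of that shape computation (`shard_shapes` is a hypothetical helper):

    def shard_shapes(vocab_size, num_shards, shape):
        # Excess rows go evenly on the first (vocab_size % num_shards) shards.
        return [[vocab_size // num_shards +
                 (1 if i < vocab_size % num_shards else 0)] + shape
                for i in range(num_shards)]

    print(shard_shapes(10, 3, [4]))  # [[4, 4], [3, 4], [3, 4]]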
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
embedding_ops_test.py |
  45 def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
  48 assert num_shards > 0
  49 assert num_shards <= vocab_size
  53 slicing=[num_shards, 1],
  154 embedding_weights = self._random_weights(num_shards=3)
  168 embedding_weights = self._random_weights(num_shards=3)
  228 embedding_weights = self._random_weights(num_shards=3)
  245 embedding_weights = self._random_weights(num_shards=3)
  264 def _random_weights(self, size=50, num_shards=1):
  266 assert num_shards > [all...]
/external/tensorflow/tensorflow/core/lib/core/ |
blocking_counter_test.cc |
  57 const int num_shards = num_threads * shards_per_thread;
  60 BlockingCounter bc(num_shards);
|
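A BlockingCounter initialized with `num_shards` lets the caller block until that many workers have each decremented it once. A minimal Python equivalent using a condition variable (this `BlockingCounter` class is a sketch, not the TensorFlow C++ implementation):

    import threading

    class BlockingCounter(object):
        def __init__(self, initial_count):
            self._count = initial_count
            self._cond = threading.Condition()

        def decrement_count(self):
            with self._cond:
                self._count -= 1
                if self._count == 0:
                    self._cond.notify_all()

        def wait(self):
            with self._cond:
                while self._count > 0:
                    self._cond.wait()

    num_shards = 4
    bc = BlockingCounter(num_shards)
    for _ in range(num_shards):
        threading.Thread(target=bc.decrement_count).start()
    bc.wait()  # Returns once all num_shards threads have decremented.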
/external/tensorflow/tensorflow/core/kernels/ |
topk_op_gpu.cu.cc |
  262 // mergeShards performs a top-k merge on `num_shards` many sorted streams that
  264 // |s_1 1st|s_2 1st|...s_{num_shards} 1st|s_1 2nd|s_2 2nd|...
  269 __device__ void mergeShards(int num_shards, int k,
  273 // If k < num_shards, we can use a min-heap with k elements to get the top k
  275 // If k > num_shards, we can initialize a min-heap with the top element from
  277 const int heap_size = k < num_shards ? k : num_shards;
  290 // Now perform top k with the remaining shards (if num_shards > heap_size).
  291 for (int shard = heap_size; shard < num_shards; shard++) {
  323 int next_shard_index = shard_index + num_shards; [all...]
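mergeShards combines `num_shards` independently sorted top-k streams into a single top-k result. The GPU kernel does this with the min-heap strategy described in its comments; as a simplified host-side analogue (not the device algorithm), the same result can be had by lazily merging the sorted streams and keeping the first k elements:

    import heapq

    def merge_shards(shards, k):
        # Each shard is sorted in descending order; lazily merge them and
        # keep only the k largest overall.
        merged = heapq.merge(*shards, reverse=True)
        return [v for _, v in zip(range(k), merged)]

    shards = [[9, 4, 1], [8, 7, 2], [6, 5, 3]]
    print(merge_shards(shards, 4))  # [9, 8, 7, 6]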
range_sampler.h |
  217 int32 num_shards, int32 shard);
  221 int32 num_shards, int32 shard);
|
candidate_sampler_ops.cc |
  171 int64 num_shards;
  172 OP_REQUIRES_OK(context, context->GetAttr("num_shards", &num_shards));
  179 num_shards, shard));
  182 num_reserved_ids, num_shards, shard));
|
range_sampler.cc |
  236 int32 num_shards, int32 shard)
  239 num_shards_(num_shards),
  252 int32 num_shards, int32 shard)
  255 num_shards_(num_shards),
|
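These constructors take a `(num_shards, shard)` pair so a sampler can own just one partition of the vocabulary. A sketch of what such a partition looks like, assuming a round-robin rule of id mod num_shards (an assumption for illustration; `shard_vocabulary` is hypothetical):

    def shard_vocabulary(weights, num_shards, shard):
        # Keep only the (id, weight) pairs owned by this shard, under the
        # assumed round-robin rule id % num_shards == shard.
        return [(word_id, w) for word_id, w in enumerate(weights)
                if word_id % num_shards == shard]

    weights = [0.5, 1.0, 2.0, 4.0, 8.0]
    print(shard_vocabulary(weights, num_shards=2, shard=1))  # [(1, 1.0), (3, 4.0)]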
/external/tensorflow/tensorflow/contrib/factorization/python/ops/ |
factorization_ops.py |
  304 def _shard_sizes(cls, dims, num_shards):
  305 """Helper function to split dims values into num_shards."""
  306 shard_size, residual = divmod(dims, num_shards)
  307 return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)
  310 def _create_factors(cls, rows, cols, num_shards, init, name):
  315 assert len(init) == num_shards
  318 elif num_shards == 1:
  321 sizes = cls._shard_sizes(rows, num_shards)
  322 assert len(sizes) == num_shards
  344 def _create_weights(cls, wt_init, num_wts, num_shards, name) [all...]
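`_shard_sizes` is the standard balanced split: divmod gives the base size plus a residual, and the first `residual` shards each get one extra element. It is small enough to verify standalone:

    def shard_sizes(dims, num_shards):
        shard_size, residual = divmod(dims, num_shards)
        return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)

    assert shard_sizes(10, 3) == [4, 3, 3]
    assert sum(shard_sizes(10, 3)) == 10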
/external/tensorflow/tensorflow/python/ops/ |
candidate_sampling_ops.py |
  210 num_shards=1,
  260 num_shards: A sampler can be used to sample from a subset of the original
  266 parameter (together with `num_shards`) indicates the particular partition
  289 num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard,
|
partitioned_variables.py |
  221 def fixed_size_partitioner(num_shards, axis=0):
  225 num_shards: `int`, number of shards to partition variable.
  234 partitions_list[axis] = min(num_shards, shape[axis].value)
|
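fixed_size_partitioner returns a callback that splits exactly one axis into at most `num_shards` pieces, never more pieces than that axis has elements. A sketch of the returned callback's behavior, assuming the shape arrives as a plain Python list rather than a TensorShape:

    def fixed_size_partitioner(num_shards, axis=0):
        def _partitioner(shape, dtype=None):
            # One entry per dimension; only `axis` is split, and it is
            # capped at the axis size.
            partitions = [1] * len(shape)
            partitions[axis] = min(num_shards, shape[axis])
            return partitions
        return _partitioner

    partitioner = fixed_size_partitioner(num_shards=4)
    print(partitioner([10, 20]))  # [4, 1]
    print(partitioner([2, 20]))   # [2, 1] -- capped by the axis size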
/external/tensorflow/tensorflow/contrib/linear_optimizer/python/kernel_tests/ |
sdca_ops_test.py |
  203 for num_shards in _SHARD_NUMBERS:
  210 num_table_shards=num_shards,
  251 for num_shards in _SHARD_NUMBERS:
  260 num_table_shards=num_shards,
  315 for num_shards in _SHARD_NUMBERS:
  322 num_table_shards=num_shards,
  372 for num_shards in _SHARD_NUMBERS:
  380 num_table_shards=num_shards,
  413 for num_shards in _SHARD_NUMBERS:
  420 num_table_shards=num_shards, [all...]
/external/tensorflow/tensorflow/contrib/all_reduce/python/ |
all_reduce_test.py |
  180 def _buildShuffle(self, num_workers, num_gpus, num_shards):
  183 for _ in range(num_shards)]
  187 def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):
  189 build_f = self._buildShuffle(num_workers, num_gpus, num_shards)
|
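In the shuffle all-reduce being tested, each worker's tensor is split into `num_shards` pieces, piece i from every worker is summed in one place, and the reduced pieces are concatenated and handed back to all workers. A numpy sketch of that dataflow (a simplified single-process model, not the distributed implementation):

    import numpy as np

    def shuffle_all_reduce(worker_tensors, num_shards):
        # Phase 1: piece i of every worker is gathered and summed in one place.
        pieces = [np.split(t, num_shards) for t in worker_tensors]
        reduced = [sum(p[i] for p in pieces) for i in range(num_shards)]
        # Phase 2: every worker receives the concatenated reduced pieces.
        result = np.concatenate(reduced)
        return [result.copy() for _ in worker_tensors]

    workers = [np.ones(6), 2 * np.ones(6), 3 * np.ones(6)]
    print(shuffle_all_reduce(workers, num_shards=2)[0])  # [6. 6. ... 6.]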
/external/autotest/server/cros/ap_configurators/ |
pyauto_utils.py |
  168 def Shard(ilist, shard_index, num_shards):
  174 num_shards: shard count
  176 chunk_size = len(ilist) / num_shards
  178 if shard_index == num_shards - 1:  # Exhaust the remainder in the last shard.
|
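Note that `len(ilist) / num_shards` relies on Python 2 integer division; under Python 3 it yields a float. A Python 3 version of the same sharding, with the remainder folded into the last shard:

    def shard(ilist, shard_index, num_shards):
        # Integer division ('//' under Python 3); the last shard absorbs
        # whatever the division leaves over.
        chunk_size = len(ilist) // num_shards
        if shard_index == num_shards - 1:
            return ilist[shard_index * chunk_size:]
        return ilist[shard_index * chunk_size:(shard_index + 1) * chunk_size]

    items = list(range(10))
    print(shard(items, 2, 3))  # [6, 7, 8, 9]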
/external/tensorflow/tensorflow/contrib/kfac/python/ops/ |
utils.py |
  346 num_shards = tpu_function.get_tpu_context().number_of_shards
  347 if num_shards is None:
  350 if num_shards == 1:
  352 return tpu_ops.cross_replica_sum(tensor / num_shards)
|
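Dividing by `num_shards` before the cross-replica sum turns the sum into a mean, the same trick seen in tpu_optimizer.py above; with a single shard the tensor is returned untouched. A numpy sketch of that behavior (plain `sum` stands in for `tpu_ops.cross_replica_sum`):

    import numpy as np

    def cross_replica_mean(per_replica_tensors):
        num_shards = len(per_replica_tensors)
        if num_shards == 1:
            return per_replica_tensors[0]
        return sum(t / num_shards for t in per_replica_tensors)

    replicas = [np.array([2.0, 4.0]), np.array([6.0, 8.0])]
    print(cross_replica_mean(replicas))  # [4. 6.]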