/external/tensorflow/tensorflow/core/util/tensor_bundle/
naming.cc |
  28 string DataFilename(StringPiece prefix, int32 shard_id, int32 num_shards) {
  30 DCHECK_LT(shard_id, num_shards);
  33 shard_id, num_shards);
naming.h |
  24 // DataFilename(prefix, shard_id, num_shards): pathname of a data file.
  42 string DataFilename(StringPiece prefix, int32 shard_id, int32 num_shards);
tensor_bundle.cc |
  457 // Populates dtype, shape, and slices. Intentionally leaving out shard_id and
  639 {DataFilename(prefix, to_merge_entry.shard_id(), num_shards),
  808 io::InputBuffer* buffered_file = data_[entry.shard_id()];
  812 DataFilename(prefix_, entry.shard_id(), num_shards_), &file));
  815 data_[entry.shard_id()] = buffered_file;
  [all...]
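
A minimal Python sketch of the naming scheme DataFilename implements (the
real implementation is the C++ in naming.cc; the zero-padded suffix matches
the familiar checkpoint file names such as model.ckpt.data-00000-of-00001):

    def data_filename(prefix, shard_id, num_shards):
        # Mirror of the DCHECK_LT(shard_id, num_shards) guard above.
        assert 0 <= shard_id < num_shards
        return "%s.data-%05d-of-%05d" % (prefix, shard_id, num_shards)

    # data_filename("model.ckpt", 0, 2) -> "model.ckpt.data-00000-of-00002"

The tensor_bundle.cc matches show the same function being used on the read
side: one InputBuffer is opened and cached per shard, keyed by entry.shard_id().
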
/external/tensorflow/tensorflow/contrib/factorization/python/ops/ |
gmm_ops.py |
  271 for shard_id, shard in enumerate(data):
  274 self._define_log_prob_operation(shard_id, shard)
  275 self._define_prior_log_prob_operation(shard_id)
  276 self._define_expectation_operation(shard_id)
  277 self._define_partial_maximization_operation(shard_id, shard)
  282 def _define_full_covariance_probs(self, shard_id, shard):
  288 shard_id: id of the current shard.
  300 self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
  303 def _define_diag_covariance_probs(self, shard_id, shard):
  307 shard_id: id of the current shard
  [all...]
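
The pattern here is to build one set of expectation/maximization ops per data
shard, keyed by shard_id, so per-shard results can be combined afterwards. A
toy sketch of that structure (the class name and the numpy stand-in math are
hypothetical; the real code builds TensorFlow ops for full or diagonal
covariances):

    import numpy as np

    class ShardedGmm:
        def __init__(self, data_shards):
            # One log-prob "op" per shard, indexed by shard_id.
            self._probs = {}
            for shard_id, shard in enumerate(data_shards):
                self._define_log_prob_operation(shard_id, shard)

        def _define_log_prob_operation(self, shard_id, shard):
            # Stand-in for the per-shard Gaussian log-probability ops.
            self._probs[shard_id] = -0.5 * np.sum(shard ** 2, axis=1)

    shards = [np.random.randn(4, 3) for _ in range(2)]
    print(sorted(ShardedGmm(shards)._probs))  # shard ids: [0, 1]
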
/external/autotest/scheduler/ |
rdb_hosts.py |
  116 self.shard_id = host.shard_id
scheduler_models_unittest.py |
  308 hqe.job.update_field('shard_id', 3)
  313 self.assertIsNone(hqe.job.shard_id)
  316 self.assertEquals(hqe.job.shard_id, 3)
rdb.py |
  466 [host.shard_id for host in hosts])
monitor_db.py |
  [all...]
scheduler_models.py |
  368 'leased', 'shard_id', 'lock_reason')
  652 # the other complete jobs, unless we first set shard_id to NULL
  690 if self.job.shard_id is not None:
  691 # If shard_id is None, the job will be synced back to the master
  692 self.job.update_field('shard_id', None)
  [all...]
/external/autotest/venv/lucifer/ |
handlers.py |
  102 if self._job.shard_id is not None:
  103 # If shard_id is None, the job will be synced back to the master
  104 self._job.shard_id = None
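
scheduler_models.py and lucifer/handlers.py apply the same rule: clear
shard_id before marking a sharded job complete, since per the quoted comments
a job is only synced back to the master once its shard_id is NULL. A condensed
sketch of that rule (finish_job is a hypothetical name; scheduler_models uses
update_field('shard_id', None) while handlers.py assigns the attribute
directly):

    def finish_job(job):
        if job.shard_id is not None:
            # With shard_id cleared, the job will be synced back to the
            # master instead of being skipped with the other complete jobs.
            job.shard_id = None
        job.complete = True
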
/external/tensorflow/tensorflow/core/kernels/ |
conv_grad_input_ops.cc |
  536 for (int shard_id = start; shard_id < limit; ++shard_id) {
  537 T* im2col_buf = col_buffer_data + shard_id * size_C;
  538 T* input_data = input_backprop_data + shard_id * input_offset;
  539 const T* out_data = out_backprop_data + shard_id * output_offset;
  [all...]
conv_grad_filter_ops.cc |
  465 for (int shard_id = start; shard_id < limit; ++shard_id) {
  466 const T* input_data_shard = input_data + shard_id * input_offset;
  467 T* col_data_shard = col_buffer_data + shard_id * size_A;
  [all...]
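
In both gradient kernels, shard_id selects a disjoint slice of each shared
buffer, which is what lets shards run on separate threads with no locking. A
numpy sketch of the same offset arithmetic (parameter names follow the C++
snippets; the actual im2col/GEMM work is reduced to a placeholder):

    import numpy as np

    def backprop_shards(col_buffer, input_backprop, out_backprop,
                        start, limit, size_c, input_offset, output_offset):
        for shard_id in range(start, limit):
            # Disjoint per-shard views, as in lines 536-539 and 465-467.
            im2col_buf = col_buffer[shard_id * size_c:(shard_id + 1) * size_c]
            inp = input_backprop[shard_id * input_offset:
                                 (shard_id + 1) * input_offset]
            out = out_backprop[shard_id * output_offset:
                               (shard_id + 1) * output_offset]
            # Placeholder for the real col2im(GEMM(...)) into this slice,
            # staged through im2col_buf.
            inp[:] = 0.0
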
/external/autotest/frontend/afe/ |
models.py | [all...] |
rpc_interface_unittest.py |
  210 # Setting a job to a complete status will set the shard_id to None in
  [all...]