/external/tensorflow/tensorflow/core/kernels/

collective_nccl_reducer.h
   29 Status InitializeCollectiveParams(CollectiveParams* col_params) override;
   36 CollectiveParams* col_params) override;

collective_nccl_reducer.cc
   31 Status NcclReducer::InitializeCollectiveParams(CollectiveParams* col_params) {
   32 if (col_params->instance.type != REDUCTION_COLLECTIVE ||
   33 col_params->instance.impl_details.collective_name != "NcclReduce") {
   35 col_params->instance.type, " expected ",
   37 col_params->instance.impl_details.collective_name,
   46 col_params_ = &col_ctx->col_params;
   53 CollectiveParams* col_params) {
   54 if (col_params->default_rank == 0 && col_params->group.num_tasks > 1) {
   55 col_params->instance.communicator_key
[all...]
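The guard at lines 31-33 is the common first step of an InitializeCollectiveParams override: reject a CollectiveParams that was not resolved to this implementation. A minimal standalone sketch of that check follows; the structs are simplified stand-ins for the TensorFlow types and the error text is illustrative, not the real message.

```cpp
// Sketch of the NcclReduce parameter guard. Stand-in types only; the real
// code returns a tensorflow::Status built from the fields shown above.
#include <iostream>
#include <string>

enum CollectiveType { REDUCTION_COLLECTIVE, BROADCAST_COLLECTIVE, GATHER_COLLECTIVE };

struct ImplDetails { std::string collective_name; };
struct InstanceParams { CollectiveType type; ImplDetails impl_details; };
struct CollectiveParams { InstanceParams instance; };

// Mirrors lines 31-33 of collective_nccl_reducer.cc: params must already
// name this implementation and carry the matching collective type.
bool ValidateNcclReduceParams(const CollectiveParams& p, std::string* err) {
  if (p.instance.type != REDUCTION_COLLECTIVE ||
      p.instance.impl_details.collective_name != "NcclReduce") {
    *err = "unexpected collective type/name for NcclReduce";
    return false;
  }
  return true;
}

int main() {
  CollectiveParams p;
  p.instance.type = REDUCTION_COLLECTIVE;
  p.instance.impl_details.collective_name = "NcclReduce";
  std::string err;
  std::cout << (ValidateNcclReduceParams(p, &err) ? "ok" : err) << "\n";
}
```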
/external/tensorflow/tensorflow/core/common_runtime/

ring_alg.cc
  108 Status GenerateSubdivsInCollectiveParams(CollectiveParams* col_params) {
  109 if (col_params->instance.shape.num_elements() == 0) {
  113 col_params->group.group_size / col_params->group.num_tasks;
  118 col_params->instance.impl_details.collective_name);
  125 const size_t tensor_size = col_params->instance.shape.num_elements() *
  126 DataTypeSize(col_params->instance.data_type);
  130 int num_chunks = col_params->group.group_size * num_subdivs;
  137 col_params->instance.impl_details.collective_name);
  142 col_params->instance.impl_details.subdiv_offsets.reserve(num_subdivs)
[all...]
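Lines 125-130 show the sizing arithmetic: the tensor's byte size (element count times DataTypeSize) determines how the ring is cut into group_size * num_subdivs chunks. The policy for picking num_subdivs itself is elided in this snippet, so the max-chunk-bytes loop below is an illustrative assumption, not the real heuristic.

```cpp
// Standalone sketch of the chunk-count arithmetic from
// GenerateSubdivsInCollectiveParams. Only the two labeled lines correspond
// to the source; the growth loop is an assumed stand-in policy.
#include <cstdint>
#include <iostream>

int64_t NumChunks(int64_t num_elements, int64_t dtype_size, int group_size,
                  int64_t max_chunk_bytes, int* num_subdivs_out) {
  const int64_t tensor_bytes = num_elements * dtype_size;  // lines 125-126
  // Assumed policy: grow num_subdivs until each chunk fits the target size.
  int num_subdivs = 1;
  while (tensor_bytes / (static_cast<int64_t>(group_size) * num_subdivs) >
         max_chunk_bytes) {
    ++num_subdivs;
  }
  *num_subdivs_out = num_subdivs;
  return static_cast<int64_t>(group_size) * num_subdivs;  // line 130
}

int main() {
  int num_subdivs = 0;
  // 1M float32 elements across 8 devices, ~1 MiB ceiling per chunk.
  int64_t chunks = NumChunks(1 << 20, 4, 8, 1 << 20, &num_subdivs);
  std::cout << "subdivs=" << num_subdivs << " chunks=" << chunks << "\n";
}
```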
hierarchical_tree_broadcaster.cc
   77 CollectiveParams* col_params) {
   78 CHECK_EQ(col_params->instance.type, BROADCAST_COLLECTIVE);
   79 CHECK_EQ(col_params->instance.impl_details.collective_name,
   82 col_params->instance.device_names[col_params->default_rank];
   87 << str_util::Join(col_params->instance.task_names, ", ");
   89 const string* prior_task_name = &col_params->instance.task_names[0];
   91 for (int di = 1; di < col_params->group.group_size; ++di) {
   92 if (col_params->instance.task_names[di] != *prior_task_name) {
   95 prior_task_name = &col_params->instance.task_names[di]
[all...]
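Lines 89-95 scan instance.task_names, which holds one entry per group member with devices grouped by task, and count the transitions to recover the number of distinct tasks. A standalone sketch of that scan, with illustrative task names:

```cpp
// Sketch of the task-counting scan from hierarchical_tree_broadcaster.cc.
// Assumes task_names is ordered so all devices of a task are contiguous,
// as the prior_task_name comparison in the source relies on.
#include <iostream>
#include <string>
#include <vector>

int CountTasks(const std::vector<std::string>& task_names) {
  if (task_names.empty()) return 0;
  int num_tasks = 1;
  const std::string* prior = &task_names[0];
  for (size_t di = 1; di < task_names.size(); ++di) {
    if (task_names[di] != *prior) {  // transition => new task
      ++num_tasks;
      prior = &task_names[di];
    }
  }
  return num_tasks;
}

int main() {
  std::vector<std::string> tasks = {"/job:worker/task:0", "/job:worker/task:0",
                                    "/job:worker/task:1", "/job:worker/task:1"};
  std::cout << CountTasks(tasks) << " tasks\n";  // prints "2 tasks"
}
```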
base_collective_executor.cc
  219 const CollectiveParams& col_params,
  240 const Tensor* input = (col_params.instance.type == REDUCTION_COLLECTIVE ||
  241 col_params.instance.type == GATHER_COLLECTIVE ||
  242 (col_params.instance.type == BROADCAST_COLLECTIVE &&
  243 col_params.is_source))
  247 Status status = CreateCollective(col_params, &col_impl);
  254 new CollectiveContext(this, dev_mgr_, ctx, CtxParams(ctx), col_params,
  284 const CollectiveParams& col_params,
  288 switch (col_params.instance.data_type) {
  290 if (col_params.group.device_type == DEVICE_GPU)
[all...]
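Lines 240-243 encode which participants read an input tensor: reductions, gathers, and the broadcast source do; broadcast receivers only produce output. A standalone sketch of that predicate, with the nested instance struct flattened into a stand-in:

```cpp
// Sketch of the input-selection rule from base_collective_executor.cc.
// CollectiveParams here is a flattened stand-in; the real field is
// col_params.instance.type.
#include <iostream>

enum CollectiveType { REDUCTION_COLLECTIVE, BROADCAST_COLLECTIVE, GATHER_COLLECTIVE };

struct CollectiveParams {
  CollectiveType type;
  bool is_source;  // only meaningful for broadcast participants
};

bool NeedsInputTensor(const CollectiveParams& p) {
  return p.type == REDUCTION_COLLECTIVE || p.type == GATHER_COLLECTIVE ||
         (p.type == BROADCAST_COLLECTIVE && p.is_source);
}

int main() {
  // Prints 0: a broadcast receiver has no input tensor to contribute.
  std::cout << NeedsInputTensor({BROADCAST_COLLECTIVE, /*is_source=*/false})
            << "\n";
}
```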
collective_util.cc
   54 string SubdivPermDebugString(const CollectiveParams& col_params) {
   56 col_params.instance.impl_details.subdiv_permutations;
   63 CHECK_GT(col_params.instance.device_names.size(), idx);
   64 strings::StrAppend(&buf, col_params.instance.device_names[idx], "\n");
   68 for (auto o : col_params.instance.impl_details.subdiv_offsets)
   71 for (auto d : col_params.subdiv_rank) strings::StrAppend(&buf, d, " ");
   72 if (col_params.instance.type == BROADCAST_COLLECTIVE) {
   74 for (auto src : col_params.instance.impl_details.subdiv_source_rank)
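SubdivPermDebugString walks each subdivision permutation and prints the device name at every rank (lines 63-64), bounds-checked by the CHECK_GT. A standalone sketch of that inner loop; std::string appends stand in for strings::StrAppend and the sample data is illustrative:

```cpp
// Sketch of the permutation dump from collective_util.cc. Only the
// per-subdiv device-name loop is reproduced; offsets/ranks are omitted.
#include <iostream>
#include <string>
#include <vector>

std::string SubdivPermDebugString(
    const std::vector<std::vector<int>>& subdiv_permutations,
    const std::vector<std::string>& device_names) {
  std::string buf;
  for (size_t sdi = 0; sdi < subdiv_permutations.size(); ++sdi) {
    buf += "subdiv " + std::to_string(sdi) + ":\n";
    for (int idx : subdiv_permutations[sdi]) {
      // at() throws on a bad index, standing in for the CHECK_GT bound check.
      buf += "  " + device_names.at(idx) + "\n";
    }
  }
  return buf;
}

int main() {
  std::cout << SubdivPermDebugString({{1, 0}},
                                     {"/device:GPU:0", "/device:GPU:1"});
}
```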
ring_gatherer.h
   36 Status InitializeCollectiveParams(CollectiveParams* col_params) override;

ring_reducer.h
   42 Status InitializeCollectiveParams(CollectiveParams* col_params) override;

base_collective_executor.h
  110 void ExecuteAsync(OpKernelContext* ctx, const CollectiveParams& col_params,
  148 void WaitForDependencies(const CollectiveParams& col_params) override;
  152 void Launched(const CollectiveParams& col_params) override;
  166 Status CreateCollective(const CollectiveParams& col_params,
  169 bool CheckDependencies(const CollectiveParams& col_params)

ring_gatherer.cc
   43 Status RingGatherer::InitializeCollectiveParams(CollectiveParams* col_params) {
   44 DCHECK_EQ(col_params->instance.type, GATHER_COLLECTIVE);
   45 DCHECK_EQ(col_params->instance.impl_details.collective_name, "RingGather");
   49 if (!col_params->instance.impl_details.subdiv_offsets.empty() &&
   50 (col_params->instance.impl_details.subdiv_offsets.size() > 1 ||
   51 col_params->instance.impl_details.subdiv_offsets[0] != 0)) {
   55 if (col_params->instance.impl_details.subdiv_offsets.empty()) {
   56 col_params->instance.impl_details.subdiv_offsets.push_back(0);
   58 return RingAlg::InitializeCollectiveParams(col_params);
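Lines 49-56 show RingGather's constraint that it supports exactly one subdivision: any subdiv_offsets other than an empty list or a single 0 are rejected, and an empty list is defaulted to {0} before delegating to RingAlg. A standalone sketch of that normalization, with a bool standing in for Status:

```cpp
// Sketch of the subdiv_offsets normalization from ring_gatherer.cc.
// The real code returns an error Status where this returns false.
#include <iostream>
#include <vector>

bool NormalizeGatherSubdivOffsets(std::vector<int>* subdiv_offsets) {
  // Reject anything other than "empty" or "exactly one offset of 0".
  if (!subdiv_offsets->empty() &&
      (subdiv_offsets->size() > 1 || (*subdiv_offsets)[0] != 0)) {
    return false;
  }
  // Default an empty list to the single permitted subdivision.
  if (subdiv_offsets->empty()) subdiv_offsets->push_back(0);
  return true;
}

int main() {
  std::vector<int> offsets;  // empty: allowed, defaulted to {0}
  std::cout << NormalizeGatherSubdivOffsets(&offsets)
            << " size=" << offsets.size() << "\n";  // prints "1 size=1"
}
```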
collective_util.h
   33 string SubdivPermDebugString(const CollectiveParams& col_params);

hierarchical_tree_broadcaster.h
   38 Status InitializeCollectiveParams(CollectiveParams* col_params) override;

ring_reducer.cc
   46 Status RingReducer::InitializeCollectiveParams(CollectiveParams* col_params) {
   48 CHECK_EQ(col_params->instance.type, REDUCTION_COLLECTIVE);
   49 CHECK_EQ(col_params->instance.impl_details.collective_name, "RingReduce");
   50 return RingAlg::InitializeCollectiveParams(col_params);

ring_alg.h
   38 Status InitializeCollectiveParams(CollectiveParams* col_params) override;

/external/tensorflow/tensorflow/core/framework/

collective.h
  135 // in 'col_params'.
  262 const CollectiveParams& col_params,
  288 virtual void WaitForDependencies(const CollectiveParams& col_params) {}
  292 virtual void Launched(const CollectiveParams& col_params) {}
  343 const CollectiveParams& col_params, const string& exec_key,
  352 const CollectiveParams& col_params;  member in class:tensorflow::CollectiveContext
  370 // Initializes the portions of `col_params` specific to this
  375 // `col_params` passed in and should not manipulate any data members. However
  378 virtual Status InitializeCollectiveParams(CollectiveParams* col_params) = 0;
  392 CollectiveParams* col_params) = 0
[all...]
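The comments at lines 370-375 state the contract behind the pure-virtual InitializeCollectiveParams at line 378: an implementation initializes only the portions of the col_params it is handed and keeps no state of its own. A standalone sketch of one conforming implementation; all types are simplified stand-ins for the TensorFlow ones and MyRingCollective is hypothetical:

```cpp
// Sketch of the CollectiveImplementationInterface contract from
// collective.h: write into *col_params only, retain nothing.
#include <string>
#include <vector>

struct CollectiveParams {
  std::string collective_name;       // stands in for instance.impl_details
  std::vector<int> subdiv_offsets;
};

class CollectiveImplementationInterface {
 public:
  virtual ~CollectiveImplementationInterface() = default;
  // Per the header docs: initialize the implementation-specific portions of
  // `col_params`; do not keep the pointer or touch member state.
  virtual bool InitializeCollectiveParams(CollectiveParams* col_params) = 0;
};

class MyRingCollective : public CollectiveImplementationInterface {
 public:
  bool InitializeCollectiveParams(CollectiveParams* col_params) override {
    if (col_params->collective_name != "MyRing") return false;
    if (col_params->subdiv_offsets.empty())
      col_params->subdiv_offsets.push_back(0);
    return true;  // the real interface returns Status
  }
};

int main() {
  CollectiveParams p{"MyRing", {}};
  MyRingCollective impl;
  return impl.InitializeCollectiveParams(&p) ? 0 : 1;
}
```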
collective.cc
  160 const CollectiveParams& col_params,
  167 col_params(col_params),
  173 device_name(col_params.instance.device_names[col_params.default_rank]) {}
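Line 173 shows how CollectiveContext resolves its own device name in the member-initializer list: index instance.device_names by the participant's default_rank. A standalone sketch of that pattern with stand-in structs:

```cpp
// Sketch of the CollectiveContext initializer pattern from collective.cc.
// The nesting is flattened: device_names stands in for instance.device_names.
#include <iostream>
#include <string>
#include <vector>

struct CollectiveParams {
  std::vector<std::string> device_names;
  int default_rank;
};

struct CollectiveContext {
  const CollectiveParams& col_params;
  std::string device_name;
  explicit CollectiveContext(const CollectiveParams& params)
      : col_params(params),
        // This participant's device is the one at its own rank.
        device_name(params.device_names[params.default_rank]) {}
};

int main() {
  CollectiveParams p{{"/device:GPU:0", "/device:GPU:1"}, 1};
  CollectiveContext ctx(p);
  std::cout << ctx.device_name << "\n";  // prints "/device:GPU:1"
}
```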