/external/tensorflow/tensorflow/compiler/xla/service/
hlo_sharding_test.cc
    44  HloSharding sharding = HloSharding::Replicate();
    45  EXPECT_TRUE(sharding.IsReplicated());
    46  EXPECT_TRUE(sharding.IsTileMaximal());
    47  EXPECT_TRUE(sharding.UsesDevice(0));
    48  EXPECT_TRUE(sharding.UsesDevice(65535));
    51  EXPECT_EQ(other, sharding);
    53  EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
    55  EXPECT_IS_NOT_OK(sharding.UniqueDevice());
    59  HloSharding sharding = HloSharding::AssignDevice(5);
    60  EXPECT_FALSE(sharding.IsReplicated())
    84  HloSharding sharding =
    93  HloSharding sharding =
   102  HloSharding sharding =
   112  HloSharding sharding =
   121  HloSharding sharding =
  [all...]
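
These hits exercise the core HloSharding predicates. A minimal, self-contained sketch of the same API; the include path is assumed, and CHECK-style assertions stand in for the gtest macros used in the test:

    #include "tensorflow/compiler/xla/service/hlo_sharding.h"

    namespace xla {

    void ShardingPredicateSketch() {
      // Replicated: every device holds the full value, so any device
      // ordinal "is used" and there is no unique device.
      HloSharding replicated = HloSharding::Replicate();
      CHECK(replicated.IsReplicated());
      CHECK(replicated.IsTileMaximal());       // Replicated implies tile-maximal.
      CHECK(replicated.UsesDevice(0));
      CHECK(!replicated.UniqueDevice().ok());  // No single device to report.

      // Maximal: the entire op is pinned to one device.
      HloSharding maximal = HloSharding::AssignDevice(5);
      CHECK(!maximal.IsReplicated());
      CHECK_EQ(maximal.UniqueDevice().ValueOrDie(), 5);
    }

    }  // namespace xla
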
hlo_tfgraph_builder.cc
   106  instruction->sharding().HasUniqueDevice()) {
   108  "dev", instruction->sharding().UniqueDevice().ConsumeValueOrDie());
   219  instruction->sharding().HasUniqueDevice()) {
   220  TF_ASSIGN_OR_RETURN(int64 device, instruction->sharding().UniqueDevice());
user_computation_test.cc
   230  OpSharding sharding;
   231  sharding.set_type(OpSharding::Type::OpSharding_Type_MAXIMAL);
   232  sharding.add_tile_assignment_dimensions(1);
   233  sharding.add_tile_assignment_devices(kDevice);
   235  TF_EXPECT_OK(computation.SetOpSharding(b_handle, sharding));
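
The hits above spell out how a maximal (single-device) OpSharding proto is built by hand. The same construction as a self-contained sketch; the helper name and device parameter are illustrative:

    #include <cstdint>
    #include "tensorflow/compiler/xla/xla_data.pb.h"

    xla::OpSharding MakeMaximalSharding(int64_t device) {
      xla::OpSharding sharding;
      sharding.set_type(xla::OpSharding::Type::OpSharding_Type_MAXIMAL);
      // A maximal sharding still carries a degenerate one-element tile
      // assignment whose single entry names the target device.
      sharding.add_tile_assignment_dimensions(1);
      sharding.add_tile_assignment_devices(device);
      return sharding;
    }
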
batchnorm_expander.cc
   290  batch_norm->sharding().GetAsShapeTree(batch_norm->shape()).element({0});
   298  tuple->set_sharding(batch_norm->sharding());
   390  inst->set_sharding(batch_norm->sharding());
   395  shifted_normalized->set_sharding(batch_norm->sharding());
   584  batch_norm->sharding().GetAsShapeTree(batch_norm->shape()).element({0});
   592  tuple->set_sharding(batch_norm->sharding());
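
Batch-norm ops return a tuple, so the expander treats their sharding as a tuple sharding: GetAsShapeTree() expands it over the output shape, element({0}) recovers the sharding of the data output, and the replacement instructions are annotated accordingly. A sketch of that pattern, with illustrative instruction names:

    // `batch_norm` produces a tuple; view its sharding as a per-element tree.
    const HloSharding& data_sharding =
        batch_norm->sharding().GetAsShapeTree(batch_norm->shape()).element({0});
    // Instructions replacing tuple element 0 inherit element 0's sharding...
    normalized->set_sharding(data_sharding);
    // ...while the rebuilt tuple keeps the original tuple sharding.
    tuple->set_sharding(batch_norm->sharding());
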
hlo_sharding.cc
   152  "UniqueDevice() called on sharding that executes on multiple devices");
   166  StrCat("Sharding is tuple-shaped but validation shape is not."));
   178  " leaf elements, but this sharding contains ",
   190  &status, StrCat("Note: While validating sharding tuple element ",
   204  &status, StrCat("Note: While validating sharding ", ToString(),
   214  StrCat("Validation shape is a tuple but sharding is not."));
   249  "Tile rank is different to the input rank. sharding=", ToString(),
   258  "Tile shape is the same as the input shape. If a replicated sharding "
   296  TF_ASSIGN_OR_RETURN(HloSharding sharding,
   298  tuple_shardings.push_back(sharding);
  [all...]
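
These error strings all come out of HloSharding::Validate(), which checks a sharding against the shape it will be attached to (tuple vs. non-tuple agreement, tile rank, tile shape). A sketch of calling it; the second argument is assumed to be a device count, matching the truncated test hit in hlo_sharding_test.cc above:

    HloSharding sharding = HloSharding::AssignDevice(5);
    Shape shape = ShapeUtil::MakeShape(F32, {4});
    tensorflow::Status status = sharding.Validate(shape, /*num_devices=*/8);
    if (!status.ok()) {
      // On failure the status carries one of the messages quoted above.
      LOG(ERROR) << "Bad sharding " << sharding.ToString() << ": "
                 << status.error_message();
    }
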
user_computation.h
   295  const OpSharding& sharding);
hlo_instruction.h
   921  const HloSharding& sharding() const {
  [all...]
hlo_instruction.cc
   690  broadcast->set_sharding(operand->sharding());
   715  reshaped_operand->set_sharding(operand->sharding());
   722  broadcast->set_sharding(operand->sharding());
  [all...]
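
These hits are the propagation rule for implicit broadcasts: when an operand is reshaped or broadcast on the caller's behalf, the derived instruction inherits the operand's sharding. A sketch under assumed names (`computation`, `broadcast_shape`):

    auto broadcast = computation->AddInstruction(
        HloInstruction::CreateBroadcast(broadcast_shape, operand,
                                        /*broadcast_dimensions=*/{}));
    if (operand->has_sharding()) {
      broadcast->set_sharding(operand->sharding());
    }
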
hlo_graph_dumper.cc
   419  // When coloring by sharding information, we track the sharding string
   853  string shard_str = instr->sharding().ToString();
  [all...]
user_computation.cc
  [all...]
service.cc
  [all...]
/external/tensorflow/tensorflow/compiler/tf2xla/
sharding_util_test.cc
    26  [](tensorflow::gtl::optional<xla::OpSharding> sharding) -> int64 {
    27  if (sharding.has_value() &&
    28  sharding.value().type() ==
    30  return sharding.value().tile_assignment_devices(0);
sharding_util.cc
    35  xla::OpSharding sharding;
    37  if (!sharding.ParseFromString(value)) {
    42  return tensorflow::gtl::optional<xla::OpSharding>(sharding);
    84  TF_ASSIGN_OR_RETURN(tensorflow::gtl::optional<xla::OpSharding> sharding,
    86  return ParseShardingFromDevice(device_name, num_cores_per_replica, sharding);
    95  TF_ASSIGN_OR_RETURN(tensorflow::gtl::optional<xla::OpSharding> sharding,
    97  return ParseShardingFromDevice(device_name, num_cores_per_replica, sharding);
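
The parse path at lines 35-42 decodes a serialized xla::OpSharding proto from a string attribute, and an absent annotation yields an empty optional rather than an error. A sketch of that shape; the wrapper's name, signature, and error handling are illustrative, only ParseFromString is the real proto API:

    #include <string>
    #include "tensorflow/compiler/xla/xla_data.pb.h"

    bool ParseShardingAttr(const std::string& value,
                           tensorflow::gtl::optional<xla::OpSharding>* out) {
      if (value.empty()) {
        // No sharding requested: not an error, just "unconstrained".
        *out = tensorflow::gtl::optional<xla::OpSharding>();
        return true;
      }
      xla::OpSharding sharding;
      if (!sharding.ParseFromString(value)) {
        return false;  // Not a valid serialized xla::OpSharding proto.
      }
      *out = tensorflow::gtl::optional<xla::OpSharding>(sharding);
      return true;
    }
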
tf2xla_util.cc
   263  tensorflow::gtl::optional<xla::OpSharding> sharding,
   267  if (sharding.has_value()) {
   268  TF_RET_CHECK(sharding.value().type() ==
   270  const int core_annotation = sharding.value().tile_assignment_devices(0);
xla_compiler.cc
   401  // Since we can't change the sharding metadata of <value> at this point,
   402  // create a tuple/get-tuple-element combination so that sharding
   505  auto sharding,
   507  if (sharding.has_value()) {
   508  TF_RET_CHECK(sharding.value().type() ==
   510  const int core = sharding.value().tile_assignment_devices(0);
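
sharding_util_test.cc, tf2xla_util.cc, and xla_compiler.cc all consume a sharding the same way: only maximal shardings are accepted at these points, and the single tile_assignment_devices entry names the core. A sketch of that extraction; the function name and the -1 "unconstrained" convention are assumptions:

    int CoreFromSharding(
        const tensorflow::gtl::optional<xla::OpSharding>& sharding) {
      if (!sharding.has_value()) {
        return -1;  // No annotation: leave placement unconstrained.
      }
      CHECK(sharding.value().type() ==
            xla::OpSharding::Type::OpSharding_Type_MAXIMAL);
      return sharding.value().tile_assignment_devices(0);
    }
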
/external/tensorflow/tensorflow/compiler/xla/tools/parser/
hlo_parser_test.cc
   190  %v1 = f32[4]{0} parameter(0), sharding={maximal device=1}
   191  %v2 = f32[4]{0} parameter(1), sharding={maximal device=1}
   192  %greater-than = pred[4]{0} greater-than(f32[4]{0} %v1, f32[4]{0} %v2), sharding={replicated}
   193  ROOT %select = f32[4]{0} select(pred[4]{0} %greater-than, f32[4]{0} %v1, f32[4]{0} %v2), sharding={}
   231  ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3), sharding={{replicated}, {maximal device=0}, {replicated}}
   267  %recv = (f32[], u32[]) recv(), channel_id=15, sharding={maximal device=1}
   268  ROOT %recv-done = f32[] recv-done((f32[], u32[]) %recv), channel_id=15, sharding={maximal device=1}
   269  %constant = f32[] constant(2.1), sharding={maximal device=0}
   270  %send = (f32[], u32[]) send(f32[] %constant), channel_id=16, sharding={maximal device=0}, control-predecessors={%recv}
   271  %send-done = () send-done((f32[], u32[]) %send), channel_id=16, sharding={maximal device=0 [all...]
hlo_parser.cc
   170  bool ParseSharding(OpSharding* sharding);
   171  bool ParseSingleSharding(OpSharding* sharding, bool lbrace_pre_lexed);
   432  optional<OpSharding> sharding;
   433  attrs["sharding"] = {/*required=*/false, AttrTy::kSharding, &sharding};
  [all...]
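
Taken together, the test hits cover the textual sharding grammar this parser accepts: {replicated}, {maximal device=N}, {} (empty), and the nested tuple form. The forms collected into one hypothetical HLO text module for reference:

    const char* const kShardedModule = R"(
    HloModule sharding_forms

    ENTRY %entry (v1: f32[4], v2: f32[4]) -> f32[4] {
      %v1 = f32[4]{0} parameter(0), sharding={maximal device=1}
      %v2 = f32[4]{0} parameter(1), sharding={replicated}
      ROOT %add = f32[4]{0} add(f32[4]{0} %v1, f32[4]{0} %v2), sharding={}
    }
    )";
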
/tools/tradefederation/core/src/com/android/tradefed/config/
IGlobalConfiguration.java
   233  /** Sets the {@link IShardHelper} to be used when sharding a configuration. */
   234  public void setShardingStrategy(IShardHelper sharding);
GlobalConfiguration.java
   511  public void setShardingStrategy(IShardHelper sharding) {
   512  setConfigurationObjectNoThrow(SHARDING_STRATEGY_TYPE_NAME, sharding);
/external/tensorflow/tensorflow/compiler/xla/client/
computation_builder.h
    77  void SetSharding(const OpSharding& sharding) { sharding_ = sharding; }
    79  // Clears the sharding. Ops will be sharded according to the default placement
    84  const tensorflow::gtl::optional<OpSharding>& sharding() const {
  [all...]
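
SetSharding() acts as builder-scoped state: every op built until the sharding is cleared picks up the annotation. A usage sketch, assuming the ClearSharding() counterpart implied by the comment at line 79 and reusing the MakeMaximalSharding() helper sketched after user_computation_test.cc above:

    xla::ComputationBuilder builder(client, "sharded");  // `client` assumed.
    xla::OpSharding to_device0 = MakeMaximalSharding(0);

    builder.SetSharding(to_device0);
    auto pinned = builder.ConstantR0<float>(1.0f);  // Built with the sharding.
    builder.ClearSharding();                        // Later ops: default placement.
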
/tools/tradefederation/core/src/com/android/tradefed/invoker/
TestInvocation.java
   650  mStatus = "sharding";
   651  boolean sharding = invocationPath.shardConfig(config, context, rescheduler);
   652  if (sharding) {
/external/tensorflow/tensorflow/compiler/xla/
xla_data.proto
   933  // This sharding is replicated across all devices (implies maximal,
   936  // This sharding is maximal - one device runs the entire operation.
   938  // This sharding is a tuple - only the tuple_shardings field is valid.
   957  // applied, this is inferred from the instruction this sharding gets attached
   965  OpSharding sharding = 40;
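
The proto comments name the sharding kinds seen throughout this listing: replicated, maximal, and tuple (per-element shardings for tuple-shaped values). A sketch of building the tuple form in C++; enum spelling follows the generated proto code, and the element shardings reuse the maximal helper sketched earlier:

    xla::OpSharding tuple_sharding;
    tuple_sharding.set_type(xla::OpSharding::Type::OpSharding_Type_TUPLE);
    // One entry per tuple element, in order; only tuple_shardings is
    // meaningful when the type is TUPLE.
    *tuple_sharding.add_tuple_shardings() = MakeMaximalSharding(0);
    *tuple_sharding.add_tuple_shardings() = MakeMaximalSharding(1);
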
/build/soong/third_party/zip/
reader.go
   333  // an old zip32 file that was sharding inputs into the largest chunks
/prebuilts/go/darwin-x86/src/archive/zip/
reader.go
   410  // an old zip32 file that was sharding inputs into the largest chunks
/prebuilts/go/linux-x86/src/archive/zip/
reader.go
   410  // an old zip32 file that was sharding inputs into the largest chunks