/external/tensorflow/tensorflow/compiler/xla/service/gpu/
cudnn_batchnorm_rewriter.cc
   62: if (ShapeUtil::ElementsIn(batch_norm->operand(0)->shape()) == 0) {
   98: if (ShapeUtil::ElementsIn(batch_norm->operand(0)->shape()) == 0) {
  167: if (ShapeUtil::ElementsIn(batch_norm->operand(0)->shape()) == 0) {
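The guard that recurs at lines 62, 98, and 167 skips the rewrite when the batch-norm operand holds no data. A minimal standalone sketch of that check, assuming only the standard XLA headers (the helper name is invented for illustration):

    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // True when an array shape holds no elements (e.g. F32[0, 9]); the
    // rewriter bails out early rather than emit a cuDNN call for it.
    bool OperandIsEmpty(const Shape& operand_shape) {
      return ShapeUtil::ElementsIn(operand_shape) == 0;
    }

    }  // namespace xla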
cusolver_rewriter.cc
  117: Shape call_shape = ShapeUtil::MakeTupleShape(
  119: ShapeUtil::MakeShape(operand->shape().element_type(), {workspace_size}),
  120: ShapeUtil::MakeShape(S32, {batch_size})});
ir_emitter_unnested.cc
  272: ? ShapeUtil::GetSubshape(hlo->shape(), {0})
  274: int64 num_elements = ShapeUtil::ElementsIn(element_shape);
  304: ShapeUtil::ForEachSubshape(s, [&](const Shape& sub_shape,
  306: if (sub_shape.IsArray() && !IsInt32(ShapeUtil::ElementsIn(sub_shape))) {
  510: /*mem_size=*/ShapeUtil::ByteSizeOf(shape), custom_call));
  560: /*mem_size=*/ShapeUtil::ByteSizeOf(hlo->operand(1)->shape()), hlo));
  690: CHECK(ShapeUtil::Compatible(copy->operand(0)->shape(), copy->shape()));
  [all...]
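The ir_emitter_unnested.cc matches show ShapeUtil::ForEachSubshape walking a possibly nested tuple shape and rejecting arrays whose element count does not fit in 32 bits. A hedged sketch of the same traversal pattern, with the helper name invented here:

    #include <cstdint>
    #include <limits>

    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Illustrative helper: true iff every array subshape (tuples are walked
    // recursively) has at most INT32_MAX elements.
    bool AllSubshapesFitInInt32(const Shape& shape) {
      bool ok = true;
      ShapeUtil::ForEachSubshape(
          shape, [&](const Shape& sub_shape, const ShapeIndex& /*index*/) {
            if (sub_shape.IsArray() &&
                ShapeUtil::ElementsIn(sub_shape) >
                    std::numeric_limits<int32_t>::max()) {
              ok = false;
            }
          });
      return ok;
    }

    }  // namespace xla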
cudnn_conv_padding_legalization.cc
  329: ShapeUtil::MakeTupleShape(
  330: {new_backward_conv_shape, ShapeUtil::MakeShape(U8, {0})}),
  374: CHECK(ShapeUtil::Compatible(slice_shape, backward_conv_shape))
  375: << ShapeUtil::HumanString(slice_shape) << " vs "
  376: << ShapeUtil::HumanString(backward_conv_shape);
cudnn_conv_runner.cc
  133: VLOG(3) << "input shape: " << ShapeUtil::HumanStringWithLayout(input_shape);
  134: VLOG(3) << "filter shape: " << ShapeUtil::HumanStringWithLayout(filter_shape);
  135: VLOG(3) << "Output shape: " << ShapeUtil::HumanStringWithLayout(output_shape);
  150: << ShapeUtil::HumanString(output_shape);
  429: LOG(FATAL) << ShapeUtil::HumanString(*params.output_shape);
hlo_to_ir_bindings.cc
   96: ShapeUtil::ForEachSubshape(
  157: ShapeUtil::GetSubshape(hlo.shape(), shape_index), module_);
  221: ShapeUtil::GetSubshape(hlo.shape(), shape_index));
/external/tensorflow/tensorflow/compiler/xla/service/
hlo_value.cc
   44: return ShapeUtil::GetSubshape(instruction->shape(), index);
  259: DCHECK(ShapeUtil::Compatible(inputs[0]->shape(), inputs[i]->shape()));
  284: StrCat("InstructionValueSet(", ShapeUtil::HumanString(shape()), ")\n");
hlo_dataflow_analysis_test.cc
   95: const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
   96: const Shape vector_shape_ = ShapeUtil::MakeShape(F32, {42});
   97: const Shape tuple_shape_ = ShapeUtil::MakeTupleShape(
   98: {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})});
  483: ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  586: ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  660: ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  [all...]
hlo_evaluator_test.cc
  391: Shape shape = ShapeUtil::MakeShape(S64, {2, 2});
  420: ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
  425: Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
  486: Shape shape = ShapeUtil::MakeShape(S64, {4, 2});
  508: Shape shape = ShapeUtil::MakeShape(S64, {2});
  583: Shape shape = ShapeUtil::MakeShape(S32, {5, 2});
  608: Shape shape = ShapeUtil::MakeShape(F32, {8, 5, 1, 1});
  652: Shape shape = ShapeUtil::MakeShape(F32, {1, 5});
  697: Shape shape = ShapeUtil::MakeShape(F32, {0, 9});
  734: Shape shape = ShapeUtil::MakeShape(F32, {4, 2})
  [all...]
batchnorm_expander.cc
   84: Shape shape = ShapeUtil::MakeShape(primitive_type, {});
  125: ShapeUtil::MakeShape(U32, {}), operand, i));
  127: ShapeUtil::MakeShape(U32, {}), HloOpcode::kMultiply,
  132: ShapeUtil::MakeShape(operand->shape().element_type(), {}),
  314: if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
  418: if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
  596: if (ShapeUtil::Equal(inst->shape(), activation_shape)) {
hlo_graph_dumper.cc
  123: if (ShapeUtil::HasPrimitiveType(instr->shape(), OPAQUE) ||
  124: ShapeUtil::HasPrimitiveType(instr->shape(), TOKEN)) {
  127: return ShapeUtil::ElementsInRecursive(instr->shape()) < 4096;
  823: if (ShapeUtil::IsZeroElementArray(shape)) {
  824: return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape()));
  850: ShapeUtil::HumanString(constant->shape()));
  [all...]
hlo_module.cc
  288: TF_RET_CHECK(ShapeUtil::Compatible(expected_program_shape.parameters(i),
  292: << ShapeUtil::HumanStringWithLayout(
  294: << ", actual: " << ShapeUtil::HumanStringWithLayout(parameter_shape);
  299: ShapeUtil::Compatible(expected_program_shape.result(), result_shape))
  302: << ShapeUtil::HumanStringWithLayout(expected_program_shape.result())
  303: << ", actual: " << ShapeUtil::HumanStringWithLayout(result_shape);
service.cc
  218: TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(client_shape));
  219: if (!ShapeUtil::Compatible(client_shape, result_shape)) {
  223: ShapeUtil::HumanStringWithLayout(client_shape),
  224: ShapeUtil::HumanString(result_shape));
  279: if (!ShapeUtil::Compatible(*argument_shapes[i],
  284: i, ShapeUtil::HumanString(program_shape.parameters(i)),
  285: ShapeUtil::HumanString(*argument_shapes[i]));
  [all...]
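Both hlo_module.cc and service.cc pair ShapeUtil::Compatible with the HumanString helpers to report shape mismatches. A small sketch of that validation idiom, assuming absl is available (the message text and function name are illustrative only):

    #include <string>

    #include "absl/strings/str_format.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Returns an empty string on success, otherwise a diagnostic mirroring the
    // Compatible/HumanString pattern in the matches above.
    std::string CheckShapesCompatible(const Shape& client_shape,
                                      const Shape& result_shape) {
      if (!ShapeUtil::Compatible(client_shape, result_shape)) {
        return absl::StrFormat("shape mismatch: requested %s, computed %s",
                               ShapeUtil::HumanStringWithLayout(client_shape),
                               ShapeUtil::HumanString(result_shape));
      }
      return "";
    }

    }  // namespace xla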
/external/tensorflow/tensorflow/compiler/xla/tests/
client_library_test_base.cc
  198: auto layout = ShapeUtil::MakeShapeWithLayout(
  205: ShapeUtil::HumanStringWithLayout(layout)));
  230: ShapeUtil::HumanStringWithLayout(literal.shape()));
  244: ShapeUtil::HumanStringWithLayout(literal_relayout.shape()));
  317: if (ShapeUtil::ElementIsFloating(expected.shape()) ||
  318: ShapeUtil::ElementIsComplex(expected.shape())) {
  331: ShapeUtil::ForEachMutableSubshape(
  388: ShapeUtil::ForEachMutableSubshape(
  530: auto shape = ShapeUtil::MakeShape(use_bfloat16_ ? BF16 : F32, {});
  543: auto shape = ShapeUtil::MakeShape(use_bfloat16_ ? BF16 : F32, {})
  [all...]
matrix_ops_simple_test.cc
   80: Parameter(&builder, 0, ShapeUtil::MakeShapeWithType<T>({}), "x_value");
  197: ShapeUtil::MakeShape(prim_type, {lhs.height(), lhs.width()});
  199: ShapeUtil::MakeShape(prim_type, {rhs.height(), rhs.width()});
reduce_test.cc
   85: CHECK(ShapeUtil::Equal(
   87: ShapeUtil::MakeShape(F32, {/*z=*/4, /*y=*/2, /*x=*/3})))
   95: const Shape input_shape = ShapeUtil::MakeShape(F32, {element_count});
  122: const Shape input_shape = ShapeUtil::MakeShape(S32, {element_count});
  160: const Shape input_shape = ShapeUtil::MakeShape(U8, {rows, cols});
  205: const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
  232: const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
  287: const Shape input_shape = ShapeUtil::MakeShape(
  442: const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
  472: const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols})
  [all...]
test_utils.cc
  122: Shape floating_point_shape = ShapeUtil::ChangeElementType(
  174: if (no_duplicates && ShapeUtil::ElementsIn(literal->shape()) <
  267: ShapeUtil::HumanString(shape));
  340: ShapeUtil::HumanString(shape));
  460: ShapeUtil::GetDimension(indexed_shape, operand - first_index) -
  461: ShapeUtil::GetDimension(slice_shape,
/external/tensorflow/tensorflow/compiler/tf2xla/
xla_compiler_test.cc
  326: xla::ShapeUtil::MakeShapeWithLayout(xla::S32, {2, 3}, {0, 1});
  329: xla::ShapeUtil::MakeTupleShape({transposed, transposed}));
  368: xla::ShapeUtil::MakeShapeWithLayout(xla::S32, {2, 3}, {1, 0});
  371: xla::ShapeUtil::MakeTupleShape({transposed, transposed}));
  [all...]
/external/tensorflow/tensorflow/compiler/xla/tools/
replay_computation.cc
  126: LOG(ERROR) << " " << ShapeUtil::HumanString(Shape(infeed.shape())) << " "
  167: << ShapeUtil::HumanString(*xfeed_shape);
  257: << ShapeUtil::HumanStringWithLayout(*outfeed_shape);
  401: ShapeUtil::HumanString(result.shape()).c_str(),
  409: ShapeUtil::HumanString(Shape(snapshot.result().shape())).c_str(),
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
dot_op_emitter.cc
  241: int64 size_bytes = m * n * ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
  380: if (ShapeUtil::IsScalar(lhs_shape) || ShapeUtil::IsScalar(rhs_shape)) {
  382: TF_RET_CHECK(ShapeUtil::IsScalar(lhs_shape) &&
  383: ShapeUtil::IsScalar(rhs_shape));
  495: if (ShapeUtil::ElementIsComplex(lhs_shape)) {
  556: if (ShapeUtil::ElementIsComplex(lhs_array_.GetShape())) {
  773: if (ShapeUtil::IsZeroElementArray(dot_info.lhs_shape) ||
  774: ShapeUtil::IsZeroElementArray(dot_info.rhs_shape)) {
  [all...]
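dot_op_emitter.cc branches on scalar, complex, and zero-element operands before choosing an emission strategy. A sketch of those ShapeUtil predicates in isolation (the enum and function are invented for illustration):

    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    enum class DotOperandKind { kZeroElement, kScalar, kComplex, kGeneral };

    // Loosely mirrors the checks in the matches above; the ordering here is
    // only for illustration.
    DotOperandKind ClassifyDotOperand(const Shape& shape) {
      if (ShapeUtil::IsZeroElementArray(shape)) return DotOperandKind::kZeroElement;
      if (ShapeUtil::IsScalar(shape)) return DotOperandKind::kScalar;
      if (ShapeUtil::ElementIsComplex(shape)) return DotOperandKind::kComplex;
      return DotOperandKind::kGeneral;
    }

    }  // namespace xla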
/external/tensorflow/tensorflow/compiler/xla/
shape_tree.h
  100: ShapeTree() : ShapeTree(ShapeUtil::MakeNil()) {}
  118: // given index (see ShapeUtil::GetSubshape for how indexes are defined).
  244: // index : the index of the element in the shape. See ShapeUtil::GetSubshape
  311: // ShapeUtil::GetSubshape).
  399: int64 count = ShapeUtil::TupleElementCount(shape);
  411: const int64 size = ShapeUtil::TupleElementCount(shape);
  447: const int64 size = ShapeUtil::TupleElementCount(shape);
  646: CHECK(ShapeUtil::Compatible(
  647: ShapeUtil::GetSubshape(shape(), target_base_index),
  648: ShapeUtil::GetSubshape(other.shape(), source_base_index)))
  [all...]
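shape_tree.h defines ShapeTree<T>, one T per subshape of a given shape, addressed by the same ShapeIndex convention as ShapeUtil::GetSubshape. A minimal usage sketch (shape and values are arbitrary):

    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    namespace xla {

    void ShapeTreeExample() {
      // A tuple shape: (f32[2,3], (s32[4], f32[])).
      Shape shape = ShapeUtil::MakeTupleShape(
          {ShapeUtil::MakeShape(F32, {2, 3}),
           ShapeUtil::MakeTupleShape(
               {ShapeUtil::MakeShape(S32, {4}), ShapeUtil::MakeShape(F32, {})})});

      // One int per subshape, all initialized to 0.
      ShapeTree<int> tree(shape, 0);

      // ShapeIndex {1, 0} addresses s32[4], exactly as it would in
      // ShapeUtil::GetSubshape(shape, {1, 0}).
      *tree.mutable_element({1, 0}) = 42;
      CHECK_EQ(tree.element({1, 0}), 42);
    }

    }  // namespace xla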
literal_util.h
  315: Literal literal(ShapeUtil::MakeShape(
  324: ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<NativeT>(),
  334: Literal literal(ShapeUtil::MakeShapeWithLayout(
  420: Literal literal(ShapeUtil::MakeShapeWithSparseLayout(
  438: Literal literal(ShapeUtil::MakeShapeWithLayout(
  554: Literal literal(ShapeUtil::MakeShapeWithDescendingLayout(
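literal_util.h builds Literals over shapes constructed by ShapeUtil, including layout-carrying variants. A hedged sketch of the plain and layout-explicit paths (values are arbitrary):

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/literal_util.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    void LiteralExamples() {
      // Convenience path: a 2x2 f32 literal with the default layout.
      Literal a = LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}});

      // Explicit shape with a column-major ({0, 1}) layout, then an element write.
      Literal b(ShapeUtil::MakeShapeWithLayout(F32, {2, 3}, {0, 1}));
      b.Set<float>({0, 0}, 42.0f);
    }

    }  // namespace xla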
shape_util.h
   47: // ShapeUtil::GetSubshape and other interfaces. Shapes are recursive data
  171: class ShapeUtil {
  261: return ShapeUtil::SameElementType(a, b);
  393: return ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<T>(),
  598: // ShapeUtil::ReshapeIsBitcast(input_shape, output_shape_with_layout) returns
  637: // Simple ergonomic wrapper around ShapeUtil::ForEachIndexWithStatus.
  648: return ShapeUtil::ForEachIndexWithStatus(
  728: if (ShapeUtil::IsZeroElementArray(shape)) {
  [all...]
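shape_util.h is the hub all of these call sites point back to: shape construction, comparison, traversal, and printing. A compact sketch touching the functions that recur throughout this listing, assuming only the standard XLA headers:

    #include <iostream>

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    namespace xla {

    void ShapeUtilTour() {
      // Construction.
      Shape matrix = ShapeUtil::MakeShape(F32, {8, 5});
      Shape tuple =
          ShapeUtil::MakeTupleShape({matrix, ShapeUtil::MakeShape(S32, {})});

      // Inspection and printing.
      std::cout << ShapeUtil::HumanString(tuple) << "\n";  // (f32[8,5], s32[])
      std::cout << ShapeUtil::ElementsIn(matrix) << "\n";  // 40
      std::cout << ShapeUtil::ByteSizeOf(matrix) << "\n";  // 160 for f32

      // Subshape addressing and compatibility.
      const Shape& first = ShapeUtil::GetSubshape(tuple, {0});
      CHECK(ShapeUtil::Compatible(first, matrix));
    }

    }  // namespace xla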
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
   75: ShapeUtil::HumanStringWithLayout(
   77: ShapeUtil::HumanStringWithLayout(arguments[i]->on_host_shape()));
  332: ::xla::ShapeUtil::HumanString(shape))));
/external/tensorflow/tensorflow/compiler/xla/service/cpu/tests/
cpu_fusion_test.cc
  153: auto cshape = ShapeUtil::MakeShape(F32, {6});
  158: Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  172: ShapeUtil::MakeShape(F32, {6, 1}), concatenate)),