    Searched refs:ShapeIndex (Results 26 - 50 of 98)


  /external/tensorflow/tensorflow/compiler/xla/service/
hlo_value.h 37 // HloPosition specifies a ShapeIndex within the output of a specific
41 ShapeIndex index;
72 ShapeIndex operand_index;
104 HloValue(Id id, HloInstruction* instruction, const ShapeIndex& index,
130 const ShapeIndex& defining_index() const { return defining_position().index; }
132 const ShapeIndex& index() const override { return defining_index(); }
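
The hlo_value.h matches above show that an HloValue records its defining position as a ShapeIndex. A minimal, hedged sketch of reading that index; `value` is assumed to be an existing xla::HloValue obtained elsewhere (for example from dataflow analysis) and only accessors listed above are used:

    // Hedged sketch: `value` is an existing xla::HloValue; only defining_index()
    // (shown above) and the std::vector-style empty() accessor are used.
    const xla::ShapeIndex& where = value.defining_index();
    if (where.empty()) {
      // Defined at the root of the defining instruction's output shape.
    } else {
      // Defined inside a (possibly nested) tuple element of that output.
    }
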
dynamic_dimension_inference.cc 80 HloInstruction* operand, ShapeIndex index, int64 dimension,
100 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension,
112 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension,
115 ShapeIndex new_index =
125 hlo, [&](HloInstruction*, ShapeIndex index, int64 dimension,
135 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension,
145 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension,
168 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension,
202 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension,
253 hlo, [&](HloInstruction* operand, ShapeIndex index, int64 dimension
    [all...]
shaped_buffer.h 78 const se::DeviceMemoryBase& buffer(const ShapeIndex& index) const {
83 void set_buffer(const se::DeviceMemoryBase& buffer, const ShapeIndex& index) {
163 void set_buffer(OwningDeviceMemory buffer, const ShapeIndex& index) {
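
A short, hedged usage sketch for the buffer()/set_buffer() accessors matched above; `result` is assumed to be an already populated xla::ShapedBuffer, which this page does not show being created:

    // Hedged sketch: `result` is an existing, populated xla::ShapedBuffer.
    // An empty ShapeIndex ({}) addresses the root buffer; {0} addresses the
    // first tuple element, matching the accessors shown above.
    const se::DeviceMemoryBase& root = result.buffer(/*index=*/{});
    const se::DeviceMemoryBase& elem0 = result.buffer(/*index=*/{0});
    (void)root;
    (void)elem0;
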
hlo_dataflow_analysis.cc 79 const ShapeIndex& index) const {
88 const HloInstruction* instruction, const ShapeIndex& index) const {
94 const HloInstruction* instruction, const ShapeIndex& index) {
100 const ShapeIndex& index,
156 const ShapeIndex& index,
198 const ShapeIndex& index = pair.first;
288 const HloInstruction* instruction, const ShapeIndex& index) const {
293 const ShapeIndex& index) {
323 const ShapeIndex& operand_index = pair.first;
326 ShapeIndex index = {0}
    [all...]
allocation_tracker.cc 78 std::vector<ShapeIndex> shape_indices;
81 [&](const Shape& /*subshape*/, const ShapeIndex& index) {
86 for (const ShapeIndex& index : shape_indices) {
111 std::vector<ShapeIndex> shape_indices;
114 [&shape_indices](const Shape& /*subshape*/, const ShapeIndex& index) {
117 for (const ShapeIndex& index : shape_indices) {
buffer_assignment.h 131 const ShapeIndex& param_shape_index() const {
281 ShapeIndex param_shape_index,
323 ShapeIndex param_shape_index_;
378 const HloInstruction* hlo, const ShapeIndex& shape_index) const;
383 const HloInstruction* instruction, const ShapeIndex& index) const;
388 const ShapeIndex& index) const;
398 const HloInstruction* instruction, const ShapeIndex& index) const;
411 const HloInstruction* instruction, const ShapeIndex& index) const {
420 const ShapeIndex& shape_index_a,
422 const ShapeIndex& shape_index_b) const
    [all...]
shaped_buffer_test.cc 121 [&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
130 xla::ShapeIndex subtree_index = {1};
133 output.buffers().ForEachElement([&](const xla::ShapeIndex& sub_index,
135 xla::ShapeIndex orig_index = subtree_index;
142 [&](const xla::ShapeIndex& index, const se::DeviceMemoryBase& buffer) {
logical_buffer_analysis.cc 85 const ShapeIndex& index) const {
90 const ShapeIndex& index) {
104 [this, hlo_instruction](const Shape& shape, const ShapeIndex& index) {
hlo_value.cc 71 const ShapeIndex& index, bool is_phi)
113 // ShapeIndex in the given operand. Generally, instructions which pass through
116 bool MayUseOperandValue(int64 operand_number, const ShapeIndex& index,
263 const ShapeIndex& index = pair.first;
285 ForEachElement([&out](const ShapeIndex& index, const HloValueSet& value_set) {
bfloat16_propagation.cc 53 root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
101 inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
178 const Shape& subshape, const ShapeIndex& index) {
207 const ShapeIndex& index) const {
281 ShapeIndex use_output_index{use.operand_number};
290 ShapeIndex use_output_index;
342 const ShapeIndex& index) {
378 [hlo, this](const Shape& /* subshape */, const ShapeIndex& index) {
418 const ShapeIndex& index) {
458 const ShapeIndex& index)
    [all...]
dynamic_parameter_binding.cc 69 ShapeIndex dynamic_param_index(binding.dynamic_param_index().begin(),
72 ShapeIndex target_param_index(binding.target_param_index().begin(),
generic_transfer_manager.cc 85 [&](const Shape& subshape, const ShapeIndex& index) -> Status {
122 [&](const Shape& device_subshape, const ShapeIndex& index) -> Status {
  /external/tensorflow/tensorflow/compiler/xla/
shape_util.h 48 // structures (trees) and ShapeIndex defines a path through the tree where each
49 // element of ShapeIndex indexes into a tuple (or nested tuple) within the
63 // ShapeIndex is a trivial wrapper around std::vector with a minimum number of
65 class ShapeIndex {
67 ShapeIndex() = default;
68 ShapeIndex(std::initializer_list<int64> init) : indices_(init) {}
70 ShapeIndex(InputIt start, InputIt end) : indices_(start, end) {}
95 bool operator==(const ShapeIndex& other) const {
98 bool operator!=(const ShapeIndex& other) const { return !(*this == other); }
99 bool operator<(const ShapeIndex& other) const
    [all...]
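
Given the class definition excerpted above (default constructor, initializer_list constructor, InputIt constructor, and comparison operators), a minimal, hedged sketch of how a ShapeIndex is built and compared; begin()/end() are assumed from the std::vector-style interface the header comment describes:

    // Hedged sketch based on the constructors/operators listed above.
    // Assumes the usual XLA headers (shape_util.h) are included.
    xla::ShapeIndex root;                                 // {} -- the shape itself
    xla::ShapeIndex nested = {1, 0};                      // element 0 of tuple element 1
    xla::ShapeIndex copy(nested.begin(), nested.end());   // InputIt constructor
    CHECK(nested == copy);
    CHECK(nested != root);
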
shape_tree_test.cc 65 int_tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
74 [&num_nodes](const ShapeIndex& /*index*/, bool data) {
91 tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
99 [&num_nodes](const ShapeIndex& /*index*/, int* data) {
107 tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
164 [&sum](const ShapeIndex& /*index*/, int data) { sum += data; });
176 [](const ShapeIndex& /*index*/, int* data) { *data = 0; });
383 t.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
452 std::vector<ShapeIndex> v;
456 EXPECT_EQ(v, (std::vector<ShapeIndex>{{},
    [all...]
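
The shape_tree_test.cc matches above all use the same iteration pattern; a hedged sketch of that pattern, assuming `shape` is an existing xla::Shape (its construction is not part of these results) and that shape_tree.h is included:

    // Hedged sketch of the ForEachElement pattern used in the tests above.
    xla::ShapeTree<int> tree(shape, /*init_value=*/1);
    int sum = 0;
    tree.ForEachElement(
        [&sum](const xla::ShapeIndex& /*index*/, int data) { sum += data; });
    // With init_value 1, `sum` now equals the number of nodes in `shape`'s tuple tree.
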
literal.cc 165 [&](const ShapeIndex& index, Piece* piece) {
190 [&](const ShapeIndex& index, Piece* piece) {
199 const ShapeIndex& shape_index) const {
204 const ShapeIndex& shape_index) {
308 [&](const ShapeIndex& index, Piece* piece) {
346 [&](const ShapeIndex& index, Piece* dest_piece) {
347 ShapeIndex src_index = {i};
428 const ShapeIndex& dest_shape_index,
429 const ShapeIndex& src_shape_index) {
441 [&](const ShapeIndex& index, Piece* piece)
    [all...]
shape_util.cc 54 string ShapeIndex::ToString() const { return ShapeIndexView(*this).ToString(); }
68 std::ostream& operator<<(std::ostream& out, const ShapeIndex& shape_index) {
354 const ShapeIndex& index) { ++n; });
818 bool ShapeUtil::IsLeafIndex(const Shape& shape, const ShapeIndex& index) {
836 ForEachSubshape(shape, [&](const Shape& sub_shape, const ShapeIndex& index) {
860 ShapeIndex* index) {
877 ShapeIndex* index) {
894 ShapeIndex index;
897 [&func](const Shape& subshape, const ShapeIndex& index) {
907 ShapeIndex index
    [all...]
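
Two of the shape_util.cc matches above, ForEachSubshape and IsLeafIndex, combine naturally; a hedged sketch of that combination, again assuming an existing xla::Shape named `shape`:

    // Hedged sketch combining ForEachSubshape and IsLeafIndex from the
    // matches above; `shape` is an existing xla::Shape.
    int64_t leaf_count = 0;
    xla::ShapeUtil::ForEachSubshape(
        shape, [&](const xla::Shape& /*subshape*/, const xla::ShapeIndex& index) {
          if (xla::ShapeUtil::IsLeafIndex(shape, index)) {
            ++leaf_count;
          }
        });
    // For a non-tuple shape this visits exactly one subshape: the root, index {}.
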
  /external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_transfer_manager.cc 60 shape, [&](const Shape& literal_subshape, const ShapeIndex& index) {
126 ShapeIndex* index) {
143 ShapeIndex index;
155 [&](const ShapeIndex& index,
hlo_to_ir_bindings.h 72 // output of "inst" at the given ShapeIndex.
89 const ShapeIndex& shape_index = {});
outfeed_thunk.cc 49 [&](const ShapeIndex& index, std::unique_ptr<OutfeedBuffer>* buffer) {
ir_emitter_unnested.cc 305 const ShapeIndex& /*index*/) {
708 absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
    [all...]
ir_emitter_unnested.h 212 absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
282 ShapeIndex output_shape_index);
336 HloInstruction* hlo, const ShapeIndex& index = {});
  /external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/
alias_analysis.h 41 const ShapeIndex& index = {});
dynamic_update_slice_util.h 54 ShapeIndex index;
alias_analysis.cc 35 const ShapeIndex& index) {
145 [&](const Shape& /*shape*/, const ShapeIndex& index) {
  /external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc 95 // of the host shape, meaning that for any valid ShapeIndex in the host shape
96 // that ShapeIndex is also valid in the device shape, but not vice versa. In
256 const xla::ShapeIndex& buffer_index) {
296 XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,
314 [](const xla::ShapeIndex& index,
321 [&](const xla::ShapeIndex& index, XRTBufferAllocationPtr* buffer) {
324 xla::ShapeIndex parent_index = subshape;
351 [&](const xla::ShapeIndex& index, const ExpandedTupleInput& element) {
409 [&](const xla::ShapeIndex& index, const ExpandedTupleInput& element) {
442 elements.ForEachElement([&](const xla::ShapeIndex& index
    [all...]
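
The xrt_state.cc comment above states that the device shape is a superset of the host shape: every ShapeIndex valid in the host shape is also valid in the device shape. A hedged sketch of how that invariant could be checked; ShapeUtil::IndexIsValid is an assumption here and does not appear among the matches on this page:

    // Hedged sketch of the host-shape/device-shape invariant described above.
    // ShapeUtil::IndexIsValid is assumed (not shown in these results);
    // `host_shape` and `device_shape` are existing xla::Shape values.
    xla::ShapeUtil::ForEachSubshape(
        host_shape,
        [&](const xla::Shape& /*subshape*/, const xla::ShapeIndex& index) {
          CHECK(xla::ShapeUtil::IndexIsValid(device_shape, index))
              << "host index " << index << " missing from device shape";
        });
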
