    Searched refs: new_shape (Results 1 - 25 of 70)


  /external/tensorflow/tensorflow/compiler/xla/service/cpu/
cpu_layout_assignment.cc 81 Shape new_shape(old_shape);
82 std::vector<int64> dimension_order(new_shape.dimensions_size());
84 *new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
85 return new_shape;
89 Shape new_shape(old_shape);
90 std::vector<int64> dimension_order(new_shape.dimensions_size());
92 *new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
93 return new_shape;
  /external/tensorflow/tensorflow/lite/
string_util_test.cc 57 auto new_shape = TfLiteIntArrayCreate(2); local
58 new_shape->data[0] = 2;
59 new_shape->data[1] = 1;
60 buf0.WriteToTensor(t0, new_shape);
147 auto new_shape = TfLiteIntArrayCreate(2); local
148 new_shape->data[0] = 1;
149 new_shape->data[1] = 2;
151 buf.WriteToTensor(t0, new_shape);
string_util.cc 106 TfLiteIntArray* new_shape) {
110 if (new_shape == nullptr) {
111 new_shape = TfLiteIntArrayCopy(tensor->dims);
115 TfLiteTensorReset(tensor->type, tensor->name, new_shape, tensor->params,
string_util.h 77 // Fill content into a string tensor, with the given new_shape. The new shape
79 // ownership of new_shape. If 'new_shape' is nullptr, keep the tensor's
81 void WriteToTensor(TfLiteTensor* tensor, TfLiteIntArray* new_shape);
  /external/tensorflow/tensorflow/compiler/tf2xla/kernels/
shape_op.cc 139 std::vector<int64> new_shape(existing_dims_size);
140 for (size_t i = 0; i < new_shape.size(); ++i) {
141 new_shape[i] = existing_dims[i];
152 new_shape.emplace(new_shape.begin() + dim, 1);
154 ctx->SetOutput(0, xla::Reshape(ctx->Input("input"), new_shape));
172 std::vector<int64> new_shape; variable
202 new_shape.push_back(existing_dim);
207 new_shape.push_back(existing_dim);
212 ctx->SetOutput(0, xla::Reshape(ctx->Input(0), new_shape));
    [all...]
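
The shape_op.cc snippet above lowers ExpandDims by copying the existing dimensions into new_shape and splicing a 1 in at the requested position before reshaping; the same pattern reappears in core/kernels/shape_ops.h further down. A minimal Python-level sketch of the equivalence, using the public tf API rather than the kernel itself:

    import tensorflow as tf

    x = tf.zeros([2, 3])
    dim = 1

    # tf.expand_dims inserts a length-1 axis at position `dim`...
    a = tf.expand_dims(x, axis=dim)      # shape [2, 1, 3]

    # ...which the lowering realizes as a reshape to the spliced shape,
    # mirroring new_shape.emplace(new_shape.begin() + dim, 1) above.
    new_shape = x.shape.as_list()
    new_shape.insert(dim, 1)
    b = tf.reshape(x, new_shape)         # shape [2, 1, 3]
    assert a.shape == b.shape
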
dynamic_stitch_op.cc 154 TensorShape new_shape; variable
156 new_shape.AddDim(indices[input_num].shape().dimensions(0));
159 new_shape.AddDim(data0_shape.dim_size(d));
163 if (new_shape == data_shapes[input_num]) {
166 input[input_num] = xla::Reshape(handle, new_shape.dim_sizes());
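
For context on the reshape above: DynamicStitch requires data[i].shape = indices[i].shape + constant_shape, and the lowering flattens each data input so its leading dimensions collapse to match the flattened indices. A small public-API example with made-up values:

    import tensorflow as tf

    indices = [tf.constant([0, 2]), tf.constant([1, 3])]
    data = [tf.ones([2, 4]), tf.zeros([2, 4])]   # leading dim matches indices
    stitched = tf.dynamic_stitch(indices, data)  # shape [4, 4]
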
  /external/tensorflow/tensorflow/compiler/xla/service/gpu/
variadic_op_splitter.cc 58 Shape new_shape = concat->shape(); local
66 new_shape.set_dimensions(concat->concatenate_dimension(),
69 new_shape, operands_span.subspan(offset, kMaxParameters)));
cudnn_conv_pad_for_tensor_cores.cc 46 const Shape& new_shape) {
57 if (shape.dimensions(dim) == new_shape.dimensions(dim)) {
60 CHECK_GT(new_shape.dimensions(dim), shape.dimensions(dim));
62 new_shape.dimensions(dim) - shape.dimensions(dim));
70 HloInstruction::CreatePad(new_shape, instr, zero, pad_config));
185 const Shape& new_shape) {
187 int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);
195 << ShapeUtil::HumanString(new_shape) << ", a size increase of "
cudnn_conv_rewriter.cc 472 Shape new_shape = rhs->shape(); local
480 int64 input_features = new_shape.dimensions(input_feature_dimension);
481 int64 output_features = new_shape.dimensions(output_feature_dimension);
482 new_shape.set_dimensions(input_feature_dimension,
484 new_shape.set_dimensions(output_feature_dimension,
487 rhs = c->AddInstruction(HloInstruction::CreateReshape(new_shape, rhs));
  /external/tensorflow/tensorflow/contrib/distributions/python/ops/
batch_reshape.py 160 new_shape = array_ops.concat(
167 return array_ops.reshape(x, new_shape)
248 new_shape = array_ops.concat(
253 result = array_ops.reshape(result, new_shape)
256 new_shape = static_sample_shape.concatenate(self.batch_shape)
257 result.set_shape(result.shape.merge_with(new_shape))
271 new_shape = array_ops.concat(
273 result = array_ops.reshape(fn(), new_shape)
373 def calculate_reshape(original_shape, new_shape, validate=False, name=None):
375 batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
    [all...]
shape.py 406 new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
407 x = array_ops.reshape(x, shape=new_shape)
464 new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
465 x = array_ops.reshape(x, shape=new_shape)
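
batch_reshape.py and shape.py above both assemble the target shape at run time by concatenating partial shapes, with -1 absorbing whatever sample dimensions are left over. A self-contained sketch of that pattern with toy shapes (public tf API):

    import tensorflow as tf

    x = tf.zeros([5, 4, 2, 3])        # sample=[5, 4], batch=[2], event=[3]
    batch_shape = tf.constant([2], dtype=tf.int32)
    event_shape = tf.constant([3], dtype=tf.int32)

    # Flatten the sample dimensions into a single leading -1 dimension.
    new_shape = tf.concat([[-1], batch_shape, event_shape], axis=0)
    y = tf.reshape(x, new_shape)      # shape [20, 2, 3]
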
  /external/tensorflow/tensorflow/python/ops/
random_grad.py 29 new_shape = array_ops.concat(
32 return array_ops.reshape(x, new_shape)
special_math_ops.py 384 new_shape = (
387 t0 = _reshape_if_necessary(t0, new_shape)
392 new_shape = (
395 t1 = _reshape_if_necessary(t1, new_shape)
420 def _reshape_if_necessary(tensor, new_shape):
422 # Accept None as an alias for -1 in new_shape.
423 new_shape = tuple(-1 if x is None else x for x in new_shape)
425 if (len(new_shape) == len(cur_shape) and
426 all(d0 == d1 or d1 == -1 for d0, d1 in zip(cur_shape, new_shape)))
    [all...]
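
The special_math_ops.py lines above belong to a small helper that treats None as an alias for -1 and skips the reshape when every dimension already matches. A self-contained re-sketch along those lines (anything beyond the quoted lines is an assumption):

    import tensorflow as tf

    def reshape_if_necessary(tensor, new_shape):
      # Accept None as an alias for -1 in new_shape.
      new_shape = tuple(-1 if d is None else d for d in new_shape)
      cur_shape = tensor.shape.as_list()
      if (len(new_shape) == len(cur_shape) and
          all(d0 == d1 or d1 == -1 for d0, d1 in zip(cur_shape, new_shape))):
        return tensor                  # already in the requested shape
      return tf.reshape(tensor, new_shape)

    t = tf.zeros([2, 3])
    assert reshape_if_necessary(t, (2, None)) is t        # no-op
    assert reshape_if_necessary(t, (3, 2)).shape == (3, 2)
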
  /external/tensorflow/tensorflow/core/kernels/
shape_ops.h 157 std::vector<int64> new_shape(existing_dims_size);
158 for (size_t i = 0; i < new_shape.size(); ++i) {
159 new_shape[i] = existing_dims[i];
170 new_shape.emplace(new_shape.begin() + dim, 1);
171 const TensorShape output_shape(new_shape);
202 std::vector<int64> new_shape; variable
233 new_shape.push_back(existing_dim);
238 new_shape.push_back(existing_dim);
243 const TensorShape output_shape(new_shape);
    [all...]
  /external/tensorflow/tensorflow/python/ops/parallel_for/
gradients.py 72 new_shape = array_ops.concat(
74 out = array_ops.reshape(out, new_shape)
141 new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
142 return array_ops.reshape(output, new_shape)
  /external/tensorflow/tensorflow/contrib/data/python/ops/
batching.py 204 original_shape.merge_with(new_shape)
205 for original_shape, new_shape in zip(flat_original_shapes,
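
The batching.py lines above validate a caller-supplied shape by merging it with the dataset's original shape; TensorShape.merge_with fills in unknown dimensions and raises if known dimensions conflict. A quick illustration:

    import tensorflow as tf

    original_shape = tf.TensorShape([None, 3])
    new_shape = tf.TensorShape([2, None])
    print(original_shape.merge_with(new_shape))   # (2, 3)
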
  /external/tensorflow/tensorflow/compiler/xla/service/
reshape_mover.cc 142 const Shape new_shape = local
150 HloInstruction::CreateReshape(new_shape, operand));
157 new_shape, operand, inverse_permutation));
164 operand->CloneWithNewOperands(new_shape, operand->operands()));
174 operand->CloneWithNewOperands(new_shape, operand->operands()));
hlo_element_type_converter.cc 196 Shape new_shape = GetConvertedTupleShape(hlo->shape(), eliminate_type_, local
200 hlo->CloneWithNewOperands(new_shape, new_operands, &context));
  /external/tensorflow/tensorflow/contrib/distributions/python/ops/bijectors/
chain.py 225 new_shape = input_shape
231 new_shape = func(new_shape)
232 return new_shape
reshape.py 282 new_shape = event_shape_out
284 new_shape = array_ops.concat(
287 return array_ops.reshape(x, new_shape)
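
The chain.py lines above thread an input shape through each bijector's shape-transforming function in turn. A generic, self-contained sketch of that fold (the lambdas are made-up stand-ins for real bijectors):

    from functools import reduce

    shape_fns = [
        lambda s: s + [1],                # e.g. an expand-dims-like bijector
        lambda s: [s[0] * s[1]] + s[2:],  # e.g. a flattening reshape
    ]
    input_shape = [4, 3]
    new_shape = reduce(lambda shape, fn: fn(shape), shape_fns, input_shape)
    print(new_shape)   # [12, 1]
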
  /external/tensorflow/tensorflow/python/kernel_tests/
sparse_ops_test.py 359 new_shape = np.array([3, 6, 7], dtype=np.int64)
360 sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
366 new_shape = np.array([3, 6, 7], dtype=np.int64)
367 sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
379 new_shape = np.array([3, 6, 7], dtype=np.int64)
380 sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
393 new_shape = np.array([3, 6, 7], dtype=np.int64)
394 sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
430 new_shape = np.array([3, 7], dtype=np.int64)
433 sparse_ops.sparse_reset_shape(sp_input, new_shape)
    [all...]
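
The sparse_ops_test.py cases above all reset a SparseTensor's dense shape to a larger bounding shape. Written against the public tf.sparse API (the values here are made up), that looks like:

    import numpy as np
    import tensorflow as tf

    sp_input = tf.sparse.SparseTensor(indices=[[0, 0], [1, 4]],
                                      values=[1.0, 2.0],
                                      dense_shape=[2, 5])
    # The new dense shape must cover every existing index.
    new_shape = np.array([3, 6], dtype=np.int64)
    sp_output = tf.sparse.reset_shape(sp_input, new_shape)
    print(sp_output.dense_shape.numpy())   # [3 6]
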
sparse_reshape_op_test.py 277 # Even if new_shape has no shape information, we know the ranks of
281 new_shape = array_ops.placeholder(dtypes.int64)
282 sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
298 new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]
304 new_dense = np.reshape(orig_dense, new_shape)
311 sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
316 self.assertAllEqual(output_val.dense_shape, new_shape)
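
The sparse_reshape_op_test.py lines exercise tf.sparse.reshape, which recomputes each nonzero's index for the new dense shape. A minimal example:

    import tensorflow as tf

    sp_input = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                                      values=[1.0, 2.0],
                                      dense_shape=[2, 3])
    sp_output = tf.sparse.reshape(sp_input, [3, 2])
    print(sp_output.dense_shape.numpy())   # [3 2]
    print(sp_output.indices.numpy())       # [[0 0], [2 1]] in row-major order
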
  /external/tensorflow/tensorflow/core/grappler/optimizers/
remapper.cc 645 NodeDef* new_shape = optimized_graph->add_node(); local
646 new_shape->set_name(AddPrefixToNodeName("NCHWShape", fused_node.name()));
647 new_shape->set_op("Const");
648 new_shape->set_device(fused_node.device());
649 *new_shape->add_input() = AsControlDependency(scale);
650 (*new_shape->mutable_attr())["dtype"].set_type(DT_INT32);
657 (*new_shape->mutable_attr())["value"].mutable_tensor());
665 *reshaped_scale->add_input() = new_shape->name();
676 *reshaped_offset->add_input() = new_shape->name();
687 *reshaped_mean->add_input() = new_shape->name()
    [all...]
  /external/tensorflow/tensorflow/core/framework/
shape_inference.h 247 ShapeHandle new_shape; local
248 if (!Merge(inputs_[idx], shape, &new_shape).ok()) return false;
249 inputs_[idx] = new_shape;
279 ShapeHandle new_shape; local
280 Relax(inputs_[idx], shape, &new_shape);
281 if (inputs_[idx].SameHandle(new_shape)) {
284 inputs_[idx] = new_shape;
    [all...]
  /external/tensorflow/tensorflow/compiler/xla/
shape_util.cc 764 Shape new_shape = original;
765 new_shape.set_element_type(type);
766 return new_shape;
932 Shape new_shape = shape;
933 new_shape.clear_dimensions();
935 new_shape.add_dimensions(dim);
938 new_shape.set_dynamic_dimension(permutation[i],
    [all...]

