/external/tensorflow/tensorflow/core/util/

bcast_test.cc
    16   #include "tensorflow/core/util/bcast.h"
    26   string BCast(const tensorflow::BCast::Vec& x, const tensorflow::BCast::Vec& y,
    28   tensorflow::BCast b(x, y, fewer_dims_optimization);
    48   EXPECT_EQ("invalid", BCast({5, 3, 2}, {3}, use_optimization));
    49   EXPECT_EQ("invalid", BCast({5, 3, 2}, {2, 2}, use_optimization));
    50   EXPECT_EQ("invalid", BCast({5, 3, 2}, {10, 1, 1}, use_optimization));
    52   BCast({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, use_optimization));
    58   EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2})
    [all...]

bcast.h
    28   // BCast is a helper for broadcasting binary tensor operation.
    43   // BCast takes the shape of two tensors and computes a few vectors of
    49   // BCast b(x.shape(), y.shape());
    69   class BCast {
    85   BCast(const Vec& x, const Vec& y, const bool fewer_dims_optimization = true);
    86   ~BCast() {}
    106  static TensorShape ToShape(const BCast::Vec& vec);
    110  const BCast::Vec& vec) {
    119  const BCast::Vec& vec) {
    136  TF_DISALLOW_COPY_AND_ASSIGN(BCast);
    [all...]

bcast.cc
    16   #include "tensorflow/core/util/bcast.h"
    22   void BCast::Reverse(Vec* shape) { std::reverse(shape->begin(), shape->end()); }
    24   BCast::BCast(const Vec& sx, const Vec& sy, const bool fewer_dims_optimization) {
    158  BCast::Vec BCast::FromShape(const TensorShape& shape) {
    160  BCast::Vec ret(N);
    167  TensorShape BCast::ToShape(const BCast::Vec& vec) {

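The bcast.h and bcast_test.cc excerpts above describe the core helper: BCast takes the shapes of two tensors, reports whether they are broadcast-compatible, and computes flattened reshape/broadcast vectors for each operand plus the broadcast output shape. A minimal usage sketch, assuming a build that links against TensorFlow core and using only the members visible in these excerpts (the concrete shapes are illustrative):

    #include <iostream>

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/util/bcast.h"

    int main() {
      // Illustrative operand shapes: {5, 3, 1} broadcast against {1, 3, 4}.
      tensorflow::TensorShape x_shape({5, 3, 1});
      tensorflow::TensorShape y_shape({1, 3, 4});

      tensorflow::BCast bcast(tensorflow::BCast::FromShape(x_shape),
                              tensorflow::BCast::FromShape(y_shape));
      if (!bcast.IsValid()) {
        std::cerr << "shapes are not broadcast-compatible\n";
        return 1;
      }

      // output_shape() is the broadcast result shape ({5, 3, 4} here);
      // x_reshape()/x_bcast() describe how to reshape and then tile x so it
      // lines up with that result (adjacent dimensions may be collapsed).
      for (auto d : bcast.output_shape()) std::cout << d << ' ';
      std::cout << '\n';
      for (auto d : bcast.x_reshape()) std::cout << d << ' ';
      std::cout << '\n';
      for (auto d : bcast.x_bcast()) std::cout << d << ' ';
      std::cout << '\n';
      return 0;
    }
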
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

cwise_ops.cc
    29   #include "tensorflow/core/util/bcast.h"
    43   BCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape),
    45   if (!bcast.IsValid()) {
    51   TensorShape bcast_shape = BCast::ToShape(bcast.output_shape());
    82   rhs_shape.dim_sizes(), bcast, extend_dimension);
    91   xla::XlaOp lhs, xla::XlaOp rhs, const BCast& broadcast_helper)
    [all...]

cwise_ops.h
    25   #include "tensorflow/core/util/bcast.h"
    61   const absl::Span<const int64>& rhs_shape, const BCast& broadcast_helper,
    70   xla::XlaOp lhs, xla::XlaOp rhs, const BCast& broadcast_helper);

bcast_ops.cc
    26   #include "tensorflow/core/util/bcast.h"
    42   absl::InlinedVector<BCast::Vec, 2> shapes;
    50   shapes.push_back(BCast::Vec(shape.begin(), shape.end()));
    52   BCast bcast(shapes[0], shapes[1]);
    53   OP_REQUIRES(ctx, bcast.IsValid(),
    58   const int64 len = bcast.output_shape().size();
    61   output.flat<int32>()(i) = static_cast<int32>(bcast.output_shape()[i]);
    91   absl::InlinedVector<BCast::Vec, 4> shapes;
    100  shapes.push_back(BCast::Vec(vec.begin(), vec.end()))
    [all...]

binary_ops.cc
    44   const BCast& broadcast_helper, \
    71   xla::XlaOp y, const BCast& broadcast_helper) {
    89   xla::XlaOp y, const BCast& broadcast_helper) {
    112  xla::XlaOp y, const BCast& broadcast_helper) {
    132  const BCast& broadcast_helper) {
    141  const BCast& broadcast_helper) {
    153  xla::XlaOp y, const BCast& broadcast_helper) {

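The bcast_ops.cc matches above show the BroadcastArgs-style flow in the XLA kernels: gather the two input shapes as BCast::Vec, run them through BCast, verify IsValid(), and emit output_shape() as an int32 vector. A hedged stand-alone sketch of that flow; the helper name ComputeBroadcastShape is invented for illustration, and only the BCast calls mirror the excerpts:

    #include <cstdint>
    #include <vector>

    #include "tensorflow/core/util/bcast.h"

    // Returns the broadcast output shape of s0 and s1 as int32 values,
    // or an empty vector if the shapes do not broadcast together.
    std::vector<int32_t> ComputeBroadcastShape(const tensorflow::BCast::Vec& s0,
                                               const tensorflow::BCast::Vec& s1) {
      tensorflow::BCast bcast(s0, s1);
      if (!bcast.IsValid()) return {};

      std::vector<int32_t> out;
      out.reserve(bcast.output_shape().size());
      for (const auto dim : bcast.output_shape()) {
        // Mirrors output.flat<int32>()(i) = static_cast<int32>(...) above.
        out.push_back(static_cast<int32_t>(dim));
      }
      return out;
    }
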
/external/tensorflow/tensorflow/core/kernels/

betainc_op.cc
    30   #include "tensorflow/core/util/bcast.h"
    83   auto merged_shape_vec = BCast::FromShape(merged_shape);
    84   BCast a_shaper(BCast::FromShape(a_shape), merged_shape_vec);
    85   BCast b_shaper(BCast::FromShape(b_shape), merged_shape_vec);
    86   BCast x_shaper(BCast::FromShape(x_shape), merged_shape_vec);
    97   functor.BCast(ctx->eigen_device<Device>(), a_value, \
    98   BCast::ToIndexArray<NDIM>(a_shaper.x_bcast()), b_value,
    [all...]

xent_op.cc
    27   #include "tensorflow/core/util/bcast.h"
    49   BCast bcast(BCast::FromShape(logits_in.shape()),
    50   BCast::FromShape(labels_in.shape()));
    52   OP_REQUIRES(context, bcast.IsValid(),
    57   shape_in = BCast::ToShape(bcast.output_shape());
    90   BCast::ToIndexArray<2>(bcast.x_bcast())
    [all...]

cwise_ops_common.cc
    58   bcast(BCast::FromShape(in0.shape()), BCast::FromShape(in1.shape())) {
    59   if (!bcast.IsValid()) {
    65   const TensorShape output_shape = BCast::ToShape(bcast.output_shape());
    72   ndims = static_cast<int>(bcast.x_reshape().size());

broadcast_to_op.cc
    28   #include "tensorflow/core/util/bcast.h"
    77   BCast bcast(BCast::FromShape(input_shape), BCast::FromShape(output_shape),
    79   OP_REQUIRES(ctx, bcast.IsValid(),
    85   input_tensor, input_shape, bcast);
    107  const TensorShape& input_shape, const BCast& bcast) const; \

broadcast_to_op.h
    26   #include "tensorflow/core/util/bcast.h"
    37   const typename Eigen::array<int, NDIMS> &bcast) const {
    38   To32Bit(out).device(device) = To32Bit(in).broadcast(bcast);
    45   const typename Eigen::array<Eigen::DenseIndex, NDIMS> &bcast) const {
    46   out.device(device) = in.broadcast(bcast);
    51   const Tensor &input_tensor, const BCast &bcast) const {
    57   device, output_tensor.template shaped<T, NDIMS>(bcast.result_shape()),
    58   input_tensor.template shaped<T, NDIMS>(bcast.x_reshape()),
    59   BCast::ToIndexArrayType<int, NDIMS>(bcast.x_bcast()))
    [all...]

bcast_ops.cc
    20   #include "tensorflow/core/util/bcast.h"
    34   gtl::InlinedVector<BCast::Vec, 4> shapes;
    40   BCast::Vec vec;
    46   BCast bcast(shapes[0], shapes[1]);
    47   OP_REQUIRES(ctx, bcast.IsValid(),
    51   Output(ctx, 0, bcast.output_shape());
    57   void Output(OpKernelContext* ctx, int idx, const BCast::Vec& v) {
    83   gtl::InlinedVector<BCast::Vec, 4> shapes;
    89   BCast::Vec vec
    [all...]

sparse_dense_binary_op_shared.cc
    46   #include "tensorflow/core/util/bcast.h"
    84   const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec));
    85   const auto rhs_dims = BCast::FromShape(dense_t->shape());
    86   BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims.
    126  .broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \

substr_op.cc
    33   #include "tensorflow/core/util/bcast.h"
    130  // Create BCast helper with shape of input and pos/len
    131  BCast bcast(BCast::FromShape(input_shape), BCast::FromShape(pos_shape));
    132  OP_REQUIRES(context, bcast.IsValid(),
    136  TensorShape output_shape = BCast::ToShape(bcast.result_shape());
    143  // Reshape tensors according to BCast result
    [all...]

cwise_ops_common.h
    37   #include "tensorflow/core/util/bcast.h"
    53   // Sets up bcast with the shape of in0 and in1, ensures that the bcast
    64   BCast bcast;                   member in struct:tensorflow::BinaryOpShared::BinaryOpState
    96   BCast* bcast = &state.bcast;   variable
    124  functor::BinaryFunctor<Device, Functor, 2>().BCast(
    125  eigen_device, out->shaped<Tout, 2>(bcast->result_shape())
    [all...]

betainc_op.h
    36   void BCast(const Device& d, typename TTypes<T, NDIM>::ConstTensor a,

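broadcast_to_op.h and cwise_ops_common.h above both bottom out in the same Eigen idiom: view each operand through the flattened dims BCast computed (x_reshape()/y_reshape()), tile it with the matching broadcast factors (x_bcast()/y_bcast()), and evaluate the elementwise expression at the result shape. A minimal Eigen-only sketch of that reshape-then-broadcast step for a broadcast add, with hard-coded dims standing in for the vectors BCast would supply:

    #include <iostream>

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      // Stand-ins for operands of shape {3, 1} and {1, 4}; BCast::x_bcast()
      // and BCast::y_bcast() would yield the tiling factors used below.
      Eigen::Tensor<float, 2> x(3, 1);
      Eigen::Tensor<float, 2> y(1, 4);
      x.setValues({{1.f}, {2.f}, {3.f}});
      y.setValues({{10.f, 20.f, 30.f, 40.f}});

      // Tile each operand up to the {3, 4} result shape, then add; this is
      // the same in.broadcast(bcast) call the kernels issue on the device.
      Eigen::array<Eigen::Index, 2> x_bcast{1, 4};
      Eigen::array<Eigen::Index, 2> y_bcast{3, 1};
      Eigen::Tensor<float, 2> out = x.broadcast(x_bcast) + y.broadcast(y_bcast);

      std::cout << out << "\n";  // 3x4 tensor of pairwise sums
      return 0;
    }
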
quantized_mul_op.cc
    30   #include "tensorflow/core/util/bcast.h"
    292  BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape()));
    293  if (!bcast.IsValid()) {
    301  0, BCast::ToShape(bcast.output_shape()), &z));
    316  const int ndims = bcast.x_reshape().size();
    355  LOG(INFO) << "bcast.x_reshape()=
    [all...]

quantized_add_op.cc
    31   #include "tensorflow/core/util/bcast.h"
    465  BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape()));
    466  if (!bcast.IsValid()) {
    474  0, BCast::ToShape(bcast.output_shape()), &z));
    498  const int ndims = bcast.x_reshape().size();
    547  LOG(INFO) << "bcast.x_reshape()=
    [all...]

cwise_ops_gpu_common.cu.h
    78   void BCast(const GPUDevice& d,

cwise_ops_sycl_common.h
    81   void BCast(const SYCLDevice& d,

cwise_ops.h
    [all...]

/external/tensorflow/tensorflow/core/grappler/utils/

symbolic_shapes.cc
    17   #include "tensorflow/core/util/bcast.h"
    23   BCast::Vec ShapeDims(const TensorShapeProto& shape) {
    24   BCast::Vec dims;
    99   BCast bcast(ShapeDims(left), ShapeDims(right),
    101  return bcast.IsValid();
    115  BCast bcast(ShapeDims(left), ShapeDims(right),
    117  if (!bcast.IsValid()) {
    122  for (const auto& dim : bcast.output_shape())
    [all...]

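symbolic_shapes.cc above uses BCast purely as a compatibility check: each TensorShapeProto is converted to a BCast::Vec, and the pair is either tested with IsValid() or walked via output_shape() when the broadcast result itself is needed. A small sketch of the validity-check half; the signature is simplified for illustration, and the truncated third constructor argument in the excerpt is assumed to be fewer_dims_optimization = false (only validity is consumed, so the flag does not change the answer):

    #include "tensorflow/core/util/bcast.h"

    // Returns true iff the two dimension vectors broadcast together,
    // mirroring the IsValid() check in symbolic_shapes.cc.
    bool ShapesBroadcastable(const tensorflow::BCast::Vec& left,
                             const tensorflow::BCast::Vec& right) {
      tensorflow::BCast bcast(left, right, /*fewer_dims_optimization=*/false);
      return bcast.IsValid();
    }
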
/external/tensorflow/tensorflow/core/grappler/optimizers/

constant_folding.cc
    54   #include "tensorflow/core/util/bcast.h"
    509  BCast::Vec* shape, int64* min_id) {
    565  BCast::Vec shape1;
    569  BCast::Vec shape2;
    587  // Beware: the reduction dimensions computed by the BCast class are valid iff
    617  BCast bcast(shape1, shape2);
    618  if (!bcast.IsValid()) {
    622  BCast::Vec reduce_dims[2];
    623  reduce_dims[0] = bcast.grad_x_reduce_idx()
    [all...]

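constant_folding.cc above consumes the other half of the helper: beyond the forward reshape/broadcast plan, BCast exposes grad_x_reduce_idx(), the output axes over which an incoming gradient must be summed to undo the broadcast of the first input, which is what folding BroadcastGradientArgs needs. A hedged sketch of reading those vectors; the shapes are made up, and grad_y_reduce_idx() is assumed to exist alongside the grad_x accessor visible in the excerpt:

    #include <iostream>

    #include "tensorflow/core/util/bcast.h"

    int main() {
      // Illustrative shapes: {2, 1, 5} and {2, 3, 1} broadcast to {2, 3, 5}.
      tensorflow::BCast::Vec shape1 = {2, 1, 5};
      tensorflow::BCast::Vec shape2 = {2, 3, 1};

      tensorflow::BCast bcast(shape1, shape2);
      if (!bcast.IsValid()) return 1;

      // Axes of the {2, 3, 5} output to sum over when back-propagating to
      // each input: axis 1 for shape1 and axis 2 for shape2 in this example.
      for (auto axis : bcast.grad_x_reduce_idx()) std::cout << axis << ' ';
      std::cout << '\n';
      for (auto axis : bcast.grad_y_reduce_idx()) std::cout << axis << ' ';
      std::cout << '\n';
      return 0;
    }
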
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/

AMDGPULibCalls.cpp
    620  auto *BCast = B.CreatePointerCast(PtrArg, PtrTy);
    624  Args.push_back(BCast);
    [all...]