/external/tensorflow/tensorflow/core/kernels/
inplace_ops.cc
    36   Tensor* output) {
    38   auto Toutput = output->flat_outer_dims<T>();
    47   Tensor* output) {
    48   CHECK_EQ(value.dtype(), output->dtype());
    52   return DoParallelConcatUpdate<CPUDevice, type>(d, value, loc, output);
    65   Tensor* output) {
    66   CHECK_EQ(value.dtype(), output->dtype());
    70   return DoParallelConcatUpdate<SyclDevice, type>(d, value, loc, output);
    110  Tensor output = value;  // This creates an alias intentionally.  variable
    113  ctx, ::tensorflow::functor::DoParallelConcat(d, update, loc_, &output));
    [all...]
matrix_diag_op.cc
    69   Tensor* output = nullptr;  variable
    70   OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
    72   auto output_reshaped = output->flat_inner_dims<T, 2>();
    106  Tensor* output = nullptr;  variable
    107  OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
    109  auto output_reshaped = output->flat_inner_dims<T, 3>();
    148  typename TTypes<T, 3>::Tensor output) {
    149  output.device(d) = output.constant(T());
    150  for (int64 r = 0; r < output.dimension(0); ++r)
    [all...]
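The matrix_diag matches show the two halves of the kernel: allocate an output of the right shape, then zero it (`output.constant(T())`) and copy each batch's diagonal vector onto the main diagonal. A minimal standalone sketch of that fill using plain std::vector rather than the TensorFlow tensor types; the function name and layout here are illustrative, not the kernel's:

#include <cstddef>
#include <iostream>
#include <vector>

// diag is laid out as [batch][n]; the result is [batch][n][n], row-major.
std::vector<double> MakeBatchedDiag(const std::vector<std::vector<double>>& diag) {
  const std::size_t batches = diag.size();
  const std::size_t n = batches ? diag[0].size() : 0;
  std::vector<double> out(batches * n * n, 0.0);  // zero-fill, like output.constant(T())
  for (std::size_t b = 0; b < batches; ++b) {
    for (std::size_t i = 0; i < n; ++i) {
      out[b * n * n + i * n + i] = diag[b][i];  // place diag[b][i] at (b, i, i)
    }
  }
  return out;
}

int main() {
  auto out = MakeBatchedDiag({{1.0, 2.0, 3.0}});
  for (std::size_t i = 0; i < 3; ++i) {
    for (std::size_t j = 0; j < 3; ++j) std::cout << out[i * 3 + j] << ' ';
    std::cout << '\n';
  }
}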
matrix_inverse_op.cc
    120  // Allocate output.
    121  Tensor* output;  variable
    124  {0}, 0, input.shape(), &output),
    164  auto output_reshaped = output->template flat_inner_dims<Scalar, 3>();
matrix_set_diag_op.cc
    83   Tensor* output = nullptr;  variable
    85   {0}, 0, input_shape, &output));
    86   auto output_reshaped = output->flat_inner_dims<T, 3>();
    120  typename TTypes<T, 3>::Tensor output) {
    121  if (input.data() != output.data()) {
    122  output.device(device) = input;
    124  auto compute_shard = [&output, &diag](int64 begin, int64 end) {
    127  output(batch, col, col) = diag(batch, col);
    133  int64 cost_per_batch = 10 * output.dimension(1);  // Heuristic.
    134  thread_pool->ParallelFor(output.dimension(0), cost_per_batch
    [all...]
matrix_triangular_solve_op.cc
    86   MatrixMap& output = outputs->at(0);  variable
    99   output.noalias() = triangle.adjoint().solve(rhs);
    101  output.noalias() = triangle.solve(rhs);
    106  output.noalias() = triangle.adjoint().solve(rhs);
    108  output.noalias() = triangle.solve(rhs);
    178  MatrixMap& output = outputs->at(0);  variable
    188  auto out_ptr = AsDeviceMemory(output.data());
    197  errors::Internal("Failed to copy rhs into output before solve"));
    201  // output = matrix \ rhs
    202  // where matrix, rhs and output are assumed to be in column major
    [all...]
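The matched lines select between `triangle.solve(rhs)` and `triangle.adjoint().solve(rhs)` depending on whether the system is solved with the matrix or its adjoint. A small Eigen sketch of the non-adjoint case, assuming a lower-triangular coefficient matrix; this is an illustration of the same triangular-solve idea, not the kernel's code path:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Lower-triangular system L * x = b.
  Eigen::Matrix3d L;
  L << 2, 0, 0,
       1, 3, 0,
       4, 5, 6;
  Eigen::Vector3d b(2, 7, 32);

  // Forward substitution via Eigen's triangular view; only the lower
  // triangle of L is read, mirroring the kernel's use of a triangular solver.
  Eigen::Vector3d x = L.triangularView<Eigen::Lower>().solve(b);
  std::cout << x.transpose() << "\n";  // expected: 1 2 3
}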
nth_element_op.cc
    90   T* output = output_tensor.flat<T>().data();  local
    98   auto SubNthElement = [&, input, output, last_dim, n](int start, int limit) {
    111  output[b] = buf[n];
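nth_element_op.cc partitions each row of the input so that `output[b]` receives the n-th smallest element of row b (the `buf[n]` after the partition). The same per-row logic with std::nth_element, sketched outside the kernel with illustrative names:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// For each row of a [rows x last_dim] matrix, write the n-th smallest
// element (0-based) into output[row], mirroring the SubNthElement shard.
void RowwiseNthElement(const std::vector<float>& input, std::size_t rows,
                       std::size_t last_dim, std::size_t n,
                       std::vector<float>* output) {
  std::vector<float> buf(last_dim);
  for (std::size_t b = 0; b < rows; ++b) {
    const float* row = input.data() + b * last_dim;
    std::copy(row, row + last_dim, buf.begin());   // work on a scratch copy
    std::nth_element(buf.begin(), buf.begin() + n, buf.end());
    (*output)[b] = buf[n];                         // output[b] = buf[n]
  }
}

int main() {
  std::vector<float> in = {5, 1, 4, 2, 3,   9, 7, 8, 6, 10};
  std::vector<float> out(2);
  RowwiseNthElement(in, 2, 5, 2, &out);
  std::cout << out[0] << " " << out[1] << "\n";  // prints: 3 8
}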
pack_op.cc
    80   Tensor output;  variable
    81   CHECK(output.CopyFrom(values[0], output_shape));
    82   c->set_output(0, output);
    86   // Allocate output
    87   Tensor* output;  variable
    88   OP_REQUIRES_OK(c, c->allocate_output(0, output_shape, &output));
    102  const int64 output_size = output->NumElements();
    105  output->shaped<T, 2>({before_dim, after_dim * axis_dim});
    117  ConcatGPU<T>(c, inputs_flat, output, &output_flat);
    170  .HostMemory("output")
    [all...]
pad_op.cc
    83   // Compute the shape of the output tensor, and allocate it.
    98   // If there is no padding to be done, forward the input to output.
    107  Tensor* output = nullptr;  variable
    108  OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
    113  Operate<0>(context, in0.tensor<T, 0>(), paddings, pad_value, output);
    118  Operate<1>(context, in0.flat<T>(), paddings, pad_value, output);
    121  Operate<2>(context, in0.tensor<T, 2>(), paddings, pad_value, output);
    124  Operate<3>(context, in0.tensor<T, 3>(), paddings, pad_value, output);
    127  Operate<4>(context, in0.tensor<T, 4>(), paddings, pad_value, output);
    130  Operate<5>(context, in0.tensor<T, 5>(), paddings, pad_value, output);
    [all...]
population_count_op.cc
    51   auto output = output_t->flat<uint8>();  variable
    54   popcnt(c, input, output);
    99   TTypes<uint8>::Flat output) {
    101  uint8* output_ptr = output.data();
    110  // (bitset.count() -> output). The .count() itself is relatively cheap.
    147  TTypes<uint8>::Flat output);  \
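The population-count kernel walks the flat input and writes one uint8 bit count per element; the comment at line 110 notes that `bitset.count()` is relatively cheap. A minimal standalone equivalent of that inner loop:

#include <bitset>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Count the set bits of each input word, one uint8 result per element.
std::vector<uint8_t> PopulationCount(const std::vector<uint64_t>& input) {
  std::vector<uint8_t> output(input.size());
  uint8_t* output_ptr = output.data();
  for (std::size_t i = 0; i < input.size(); ++i) {
    // bitset.count() -> output, as in the kernel's inner loop.
    output_ptr[i] = static_cast<uint8_t>(std::bitset<64>(input[i]).count());
  }
  return output;
}

int main() {
  for (uint8_t c : PopulationCount({0x0, 0xFF, 0xF0F0F0F0F0F0F0F0ull}))
    std::cout << int(c) << ' ';   // prints: 0 8 32
  std::cout << '\n';
}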
quantize_and_dequantize_op.cc
    57   Tensor* output = nullptr;  variable
    58   OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
    79   range_given_, &input_min_tensor, &input_max_tensor, output->flat<T>());
    107  Tensor* output = nullptr;  variable
    108  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
    138  range_given_, &input_min_tensor, &input_max_tensor, output->flat<T>());
    170  Tensor* output = nullptr;  variable
    171  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
    183  output->flat<T>());
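QuantizeAndDequantize allocates an output the same shape as the input and writes a fake-quantized copy: each value is mapped onto an integer grid determined by [input_min, input_max] and the bit width, rounded, then mapped back to float. A simplified sketch of that round trip; the uniform grid and clamping policy below are illustrative simplifications, not the op's exact rounding rules:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Fake-quantize: map x into num_bits levels over [min_range, max_range],
// round to the nearest level, and map back to float.
std::vector<float> QuantizeAndDequantize(const std::vector<float>& input,
                                         float min_range, float max_range,
                                         int num_bits) {
  const float levels = static_cast<float>((1 << num_bits) - 1);
  const float scale = (max_range - min_range) / levels;
  std::vector<float> output(input.size());
  for (std::size_t i = 0; i < input.size(); ++i) {
    float clamped = std::min(std::max(input[i], min_range), max_range);
    float q = std::round((clamped - min_range) / scale);  // integer grid index
    output[i] = q * scale + min_range;                    // back to float
  }
  return output;
}

int main() {
  for (float v : QuantizeAndDequantize({0.0f, 0.3f, 1.0f}, 0.0f, 1.0f, 2))
    std::cout << v << ' ';   // grid {0, 1/3, 2/3, 1} -> 0 0.333333 1
  std::cout << '\n';
}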
reference_gemm.h
    82   int32_t output = ((((total + offset_c) * mult_c) + rounding) >> shift_c);  local
    83   if (output > highest) {
    84   output = highest;
    86   if (output < lowest) {
    87   output = lowest;
    89   c[c_index] = static_cast<T3>(output);
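The reference_gemm.h fragment is the requantization step of a quantized matrix multiply: the 32-bit accumulator is offset, scaled, rounded with an arithmetic shift, and clamped to the output type's range. The same arithmetic as a standalone helper (parameter names follow the excerpt; the uint8 output type and rounding term are assumptions for the sketch):

#include <cstdint>
#include <iostream>
#include <limits>

// Requantize a 32-bit accumulator to uint8, mirroring the excerpt:
// output = (((total + offset_c) * mult_c) + rounding) >> shift_c, then clamp.
uint8_t RequantizeToUint8(std::int32_t total, std::int32_t offset_c,
                          std::int32_t mult_c, std::int32_t shift_c) {
  const std::int32_t rounding = (shift_c < 1) ? 0 : (1 << (shift_c - 1));
  const std::int32_t lowest = std::numeric_limits<uint8_t>::lowest();
  const std::int32_t highest = std::numeric_limits<uint8_t>::max();
  std::int32_t output = (((total + offset_c) * mult_c) + rounding) >> shift_c;
  if (output > highest) output = highest;   // clamp to [lowest, highest]
  if (output < lowest) output = lowest;
  return static_cast<uint8_t>(output);
}

int main() {
  // With offset 0, multiplier 1 and shift 0 this reduces to a pure clamp.
  std::cout << int(RequantizeToUint8(300, 0, 1, 0)) << ' '   // 255
            << int(RequantizeToUint8(-5, 0, 1, 0)) << ' '    // 0
            << int(RequantizeToUint8(42, 0, 1, 0)) << '\n';  // 42
}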
reverse_op_test.cc
    57   Tensor* output = GetOutput(0);  local
    60   test::ExpectTensorEqual<T>(expected, *output);
reverse_sequence_op.cc
    133  Tensor* output = nullptr;  variable
    135  context->allocate_output(0, input.shape(), &output));
    141  seq_dim_, seq_lens_t, output->tensor<T, NDIM>());  \
    189  typename TTypes<T, Dims>::Tensor output);  \
roll_op.cc
    40   const T* input, T* output, const gtl::ArraySlice<int>& threshold,
    42   auto work = [input, output, num_dims, &dim_size, &threshold, &dim_range](
    62   output[i + offset] = input[i];
    103  const T* input, T* output,
    107  auto work = [input, output, num_dims, &dim_size, &threshold, &dim_range, isd](
    125  T* out_ptr = &output[0];
    285  Tensor* output = NULL;  variable
    287  context->allocate_output(0, input.shape(), &output));
    289  auto output_flat = output->flat<T>().data();
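roll_op.cc copies each input element to `output[i + offset]`, where the offset flips sign once the index crosses a per-dimension threshold so the shift wraps around. For the one-dimensional case the wrap reduces to modular index arithmetic, sketched below with illustrative names:

#include <cstddef>
#include <iostream>
#include <vector>

// Roll a 1-D array right by `shift` positions with wrap-around,
// i.e. output[(i + shift) % n] = input[i].
std::vector<int> Roll1D(const std::vector<int>& input, std::size_t shift) {
  const std::size_t n = input.size();
  std::vector<int> output(n);
  for (std::size_t i = 0; i < n; ++i) {
    output[(i + shift) % n] = input[i];
  }
  return output;
}

int main() {
  for (int v : Roll1D({1, 2, 3, 4, 5}, 2)) std::cout << v << ' ';
  std::cout << '\n';  // prints: 4 5 1 2 3
}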
scan_ops.cc
    63   Tensor* output = nullptr;  variable
    64   OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &output));
    83   output->shaped<T, 3>(reduced_shape),
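scan_ops.cc reshapes both input and output to rank 3 ({outer, scan axis, inner}) and runs the cumulative reduction along the middle dimension. For a single row, a cumulative sum is just std::partial_sum; a minimal sketch of that core:

#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Cumulative sum along one axis, the core of a cumsum scan op.
  std::vector<int> input = {1, 2, 3, 4};
  std::vector<int> output(input.size());
  std::partial_sum(input.begin(), input.end(), output.begin());
  for (int v : output) std::cout << v << ' ';  // prints: 1 3 6 10
  std::cout << '\n';
}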
spectrogram_test.cc
    54   std::vector<std::vector<complex<double>>> output;  local
    55   sgram.ComputeComplexSpectrogram(input, &output);
    56   EXPECT_EQ(0, output.size());
    66   std::vector<std::vector<complex<double>>> output;  local
    67   sgram.ComputeComplexSpectrogram(input, &output);
    68   EXPECT_EQ(2, output.size());
    78   std::vector<std::vector<complex<double>>> output;  local
    79   sgram.ComputeComplexSpectrogram(input, &output);
    80   EXPECT_EQ(2, output.size());
    91   std::vector<std::vector<complex<double>>> output;  local
    107  std::vector<std::vector<complex<double>>> output;  local
    132  std::vector<std::vector<complex<double>>> output;  local
    151  std::vector<std::vector<complex<double>>> output;  local
    246  std::vector<std::vector<complex<double>>> output;  local
    268  std::vector<std::vector<complex<float>>> output;  local
    288  std::vector<std::vector<double>> output;  local
    310  std::vector<std::vector<float>> output;  local
    335  std::vector<std::vector<complex<double>>> output;  local
    [all...]
substr_op.cc
    54   // Allocate output
    57   context->allocate_output("output", input_tensor.shape(),
    59   auto output = output_tensor->flat<string>();  variable
    72   output(i) = in.substr(pos, len);
    86   output(i) = in.substr(pos, len);
    104  OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
    110  auto output = output_tensor->shaped<string, 1>(bcast.result_shape());  variable
    152  output(i) = in.substr(pos, len);
    159  auto output = output_tensor->shaped<string, 2>(bcast.result_shape());  variable
    204  output(i, j) = in.substr(pos, len)
    [all...]
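The substring kernel writes `output(i) = in.substr(pos, len)` for every element, with pos and len taken elementwise from broadcast position/length tensors in the broadcast cases. A minimal elementwise version over std::string; the clamping of pos so that substr cannot throw is illustrative and not necessarily the op's exact bounds policy:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Elementwise substring: output[i] = input[i].substr(pos[i], len[i]),
// clamping pos to the string length so std::out_of_range is never thrown.
std::vector<std::string> Substr(const std::vector<std::string>& input,
                                const std::vector<std::size_t>& pos,
                                const std::vector<std::size_t>& len) {
  std::vector<std::string> output(input.size());
  for (std::size_t i = 0; i < input.size(); ++i) {
    const std::string& in = input[i];
    std::size_t p = std::min(pos[i], in.size());
    output[i] = in.substr(p, len[i]);
  }
  return output;
}

int main() {
  auto out = Substr({"tensorflow", "output"}, {0, 3}, {6, 10});
  std::cout << out[0] << ' ' << out[1] << '\n';  // prints: tensor put
}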
transpose_op.cc
    58   Tensor* output = nullptr;  variable
    60   context->allocate_output(0, input.shape(), &output));
    61   auto Tout = output->vec<T>();
    129  // output = TransposeOp(T<any> input, T<int32> perm) takes a tensor
    133  // Specifically, the returned tensor output meets the following condition:
    134  // 1) output.dims() == input.dims();
    135  // 2) output.dim_size(i) == input.dim_size(perm[i]);
    136  // 3) output.tensor<T, N>(i_0, i_1, ..., i_N-1) ==
    191  Tensor output;  local
    192  OP_REQUIRES(ctx, output.CopyFrom(input, shape)
    198  Tensor* output = nullptr;  local
    [all...]
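The transpose comment spells out the contract: `output.dims() == input.dims()`, `output.dim_size(i) == input.dim_size(perm[i])`, and each output element is the input element with its indices permuted. A rank-2 sketch that satisfies those conditions for perm = {1, 0}, using row-major flat indexing rather than TensorFlow tensors:

#include <cstddef>
#include <iostream>
#include <vector>

// Transpose a row-major [rows x cols] matrix: output(j, i) = input(i, j),
// i.e. the rank-2 case of the perm = {1, 0} contract above.
std::vector<int> Transpose2D(const std::vector<int>& input,
                             std::size_t rows, std::size_t cols) {
  std::vector<int> output(input.size());
  for (std::size_t i = 0; i < rows; ++i)
    for (std::size_t j = 0; j < cols; ++j)
      output[j * rows + i] = input[i * cols + j];
  return output;
}

int main() {
  // 2 x 3 input -> 3 x 2 output.
  auto out = Transpose2D({1, 2, 3, 4, 5, 6}, 2, 3);
  for (std::size_t j = 0; j < 3; ++j)
    std::cout << out[j * 2] << ' ' << out[j * 2 + 1] << '\n';
}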
unpack_op.cc
    71   errors::InvalidArgument("output size must fit in Eigen DenseIndex"));
    85   Tensor output;  variable
    86   CHECK(output.CopyFrom(input.Slice(i, i + 1), output_shape));
    87   context->set_output(i, output);
    110  Tensor* output;  variable
    112  context->allocate_output(i, output_shape, &output));
    115  auto output_shaped = output->shaped<T, 3>({1, before_dim, after_dim});
    155  .HostMemory("output")
    161  .HostMemory("output")
    178  .HostMemory("output")
    [all...]
/external/tensorflow/tensorflow/core/platform/cloud/
gcs_dns_cache.cc
    104  std::vector<string> output;  local
    121  output.emplace_back(buf);
    131  return output;
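gcs_dns_cache.cc builds a std::vector<string> of resolved addresses by formatting each result into a small buffer and emplace_back-ing it. A hedged sketch of the same idea using plain POSIX getaddrinfo/inet_ntop; IPv6 and most error handling are omitted, and this is only an illustration, not the cached GCS resolver itself:

#include <arpa/inet.h>
#include <netdb.h>
#include <sys/socket.h>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Resolve a hostname to its IPv4 addresses, formatted as dotted-quad strings.
std::vector<std::string> ResolveIPv4(const std::string& hostname) {
  std::vector<std::string> output;
  addrinfo hints;
  std::memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;          // IPv4 only, for brevity
  hints.ai_socktype = SOCK_STREAM;
  addrinfo* result = nullptr;
  if (getaddrinfo(hostname.c_str(), nullptr, &hints, &result) != 0) {
    return output;                    // resolution failed; return empty list
  }
  for (addrinfo* rp = result; rp != nullptr; rp = rp->ai_next) {
    char buf[INET_ADDRSTRLEN];
    auto* addr = reinterpret_cast<sockaddr_in*>(rp->ai_addr);
    if (inet_ntop(AF_INET, &addr->sin_addr, buf, sizeof(buf)) != nullptr) {
      output.emplace_back(buf);       // collect each formatted address
    }
  }
  freeaddrinfo(result);
  return output;
}

int main() {
  for (const std::string& ip : ResolveIPv4("www.googleapis.com"))
    std::cout << ip << '\n';
}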
/external/tensorflow/tensorflow/java/src/main/java/org/tensorflow/
Operation.java
    22   * other Operations in the Graph) as input, and produces zero or more {@link Tensor}s as output.
    78   * output of the operation.
    82   * @return the size of the list of Tensors produced by this named output.
    83   * @throws IllegalArgumentException if this operation has no output with the provided name.
    99   * @return array of {@code Output}
    101  public Output<?>[] outputList(int idx, int length) {
    102  Output<?>[] outputs = new Output<?>[length];
    104  outputs[i] = output(idx + i);
    114  * operation.<Integer>output(0)
    120  public <T> Output<T> output(int idx) {  method in class:Operation
    [all...]
/external/tensorflow/tensorflow/python/framework/
cpp_shape_inference.cc
    135  // Convert output shapes.
    140  ProtoFromShapeHandle(c.output(i), &c, out.mutable_shape());
    192  std::vector<string> output;  local
    197  &output, &input_tensors_needed_out);
    203  output.push_back(input_tensors_needed_out);
    204  return output;
/external/v8/src/compiler/
instruction-scheduler.cc
    186  const InstructionOperand* output = instr->OutputAt(i);  local
    187  if (output->IsUnallocated()) {
    188  operands_map_[UnallocatedOperand::cast(output)->virtual_register()] =
    190  } else if (output->IsConstant()) {
    191  operands_map_[ConstantOperand::cast(output)->virtual_register()] =
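The scheduler records, for each instruction output operand, a mapping from the operand's virtual register to the node that produces it, so later instructions reading that register can pick up a dependency edge. A toy version of that bookkeeping with std::unordered_map; the Instr struct and names are stand-ins, not V8's types:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Toy dependency bookkeeping: remember which instruction last defined each
// virtual register, and link readers of that register to the definition.
struct Instr {
  std::string name;
  std::vector<std::int32_t> outputs;  // virtual registers written
  std::vector<std::int32_t> inputs;   // virtual registers read
};

int main() {
  std::vector<Instr> instrs = {
      {"load", {1}, {}},
      {"add", {2}, {1}},
      {"store", {}, {2}},
  };
  std::unordered_map<std::int32_t, std::size_t> operands_map;  // vreg -> producer index
  for (std::size_t i = 0; i < instrs.size(); ++i) {
    for (std::int32_t vreg : instrs[i].inputs) {
      auto it = operands_map.find(vreg);
      if (it != operands_map.end()) {
        std::cout << instrs[i].name << " depends on "
                  << instrs[it->second].name << " via v" << vreg << '\n';
      }
    }
    for (std::int32_t vreg : instrs[i].outputs) {
      operands_map[vreg] = i;  // this instruction now produces vreg
    }
  }
}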
/external/vogar/src/vogar/
Outcome.java
    35   private final String output;  field in class:Outcome
    41   this.output = sanitizeOutputLines(outputLines);
    48   this.output = sanitizeOutputLine(outputLine);
    55   this.output = sanitizeOutputLine(outputLine);
    62   this.output = sanitizeOutputLines(throwableToLines(throwable));
    91   return output;
    95   return Arrays.asList(output.split("\n"));
    163  && output.equals(outcome.output);
    172  hashCode = 37 * hashCode + output.hashCode()
    [all...]
/external/vogar/src/vogar/commands/
VmCommandBuilder.java
    46   private PrintStream output;  field in class:VmCommandBuilder
    113  public VmCommandBuilder output(PrintStream output) {  method in class:VmCommandBuilder
    114  this.output = output;
    159  // Only output this if there's something on the boot classpath,
    178  .tee(output)