/external/tensorflow/tensorflow/contrib/image/kernels/
adjust_hsv_in_yiq_op.cc
     40  Tensor* output = nullptr;  member in struct:tensorflow::AdjustHsvInYiqOpBase::ComputeOptions
     73  Tensor* output = nullptr;  variable
     75  context->allocate_output(0, input.shape(), &output));
     84  options.output = output;
    103  Tensor* output = options.output;  variable
    109  auto output_data = output->shaped<float, 2>({channel_count, kChannelSize});
    159  delta_h, scale_s, scale_v, options.output);
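The adjust_hsv_in_yiq hits above follow the standard TensorFlow CPU-kernel pattern: allocate an output with the input's shape, then view input and output as {pixel_count, channels} 2-D tensors. A minimal sketch of that pattern, assuming the usual OpKernel headers; the kernel class and the 3-channel reshape are illustrative, not the contrib op's actual code:

```cpp
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

// Hypothetical kernel: copies the input into an identically shaped output
// via a {channel_count, kChannelSize} 2-D view of both tensors.
class CopyAsRowsOp : public OpKernel {
 public:
  explicit CopyAsRowsOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    static constexpr int kChannelSize = 3;  // assumed 3-channel data
    OP_REQUIRES(context, input.NumElements() % kChannelSize == 0,
                errors::InvalidArgument("input must have 3 channels"));

    // Output has the same shape as the input.
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, input.shape(), &output));

    const int64 channel_count = input.NumElements() / kChannelSize;
    auto input_data = input.shaped<float, 2>({channel_count, kChannelSize});
    auto output_data = output->shaped<float, 2>({channel_count, kChannelSize});
    output_data = input_data;  // element-wise copy via Eigen assignment
  }
};

}  // namespace tensorflow
```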
|
/external/tensorflow/tensorflow/contrib/lite/kernels/
concatenation.cc
     63  // Output dimensions will match input dimensions, except 'axis', which
     88  TfLiteTensor* output = &context->tensors[node->outputs->data[0]];  local
     89  TF_LITE_ENSURE_EQ(context, output->type, input_type);
     91  TF_LITE_ENSURE_EQ(context, output->params.zero_point,
     93  TF_LITE_ENSURE_EQ(context, output->params.scale, t0->params.scale);
     96  return context->ResizeTensor(context, output, output_size);
    104  TfLiteTensor* output = &context->tensors[node->outputs->data[0]];  local
    105  if (axis < 0) axis += output->dims->size;
    114  RemapDim(NumDimensions(output), axis), all_inputs.data(), \
    115  all_inputs.dims(), node->inputs->size, GetTensorData<scalar>(output), \
    [all...]
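The comment at concatenation.cc:63 states the shape rule: the output matches the inputs on every dimension except 'axis', whose size is the sum of the inputs' sizes along that axis. A minimal sketch of a Prepare-time shape computation under that rule, assuming the contrib-era TfLiteContext/TfLiteIntArray API; it is not the kernel's verbatim code:

```cpp
#include "tensorflow/contrib/lite/context.h"

namespace {

TfLiteStatus PrepareConcat(TfLiteContext* context, TfLiteNode* node, int axis) {
  TfLiteTensor* t0 = &context->tensors[node->inputs->data[0]];
  TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
  TF_LITE_ENSURE_EQ(context, output->type, t0->type);

  // Every input must match t0 on all dimensions except `axis`; the output's
  // size along `axis` is the sum of the inputs' sizes along `axis`.
  int sum_axis = t0->dims->data[axis];
  for (int i = 1; i < node->inputs->size; ++i) {
    TfLiteTensor* t = &context->tensors[node->inputs->data[i]];
    TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size);
    for (int d = 0; d < t0->dims->size; ++d) {
      if (d == axis) {
        sum_axis += t->dims->data[d];
      } else {
        TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]);
      }
    }
  }

  TfLiteIntArray* output_size = TfLiteIntArrayCopy(t0->dims);
  output_size->data[axis] = sum_axis;
  // ResizeTensor takes ownership of output_size.
  return context->ResizeTensor(context, output, output_size);
}

}  // namespace
```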
div.cc
     46  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     54  TF_LITE_ENSURE_EQ(context, input1->type, output->type);
     55  TF_LITE_ENSURE_EQ(context, input2->type, output->type);
     58  return context->ResizeTensor(context, output, output_size);
     64  TfLiteTensor* input2, TfLiteTensor* output) {
     72  GetTensorData<float>(output), GetTensorDims(output))
     87  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     89  if (output->type == kTfLiteFloat32) {
     90  EvalDivFloat<kernel_type>(context, node, params, input1, input2, output);
    [all...]
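The div.cc hits show the usual Prepare/Eval split for an element-wise binary op: type checks and ResizeTensor in Prepare, then a typed Eval. A simplified float Eval sketch, assuming equal input shapes (no broadcasting) and using raw TfLiteTensor data pointers rather than the kernel's reference/optimized implementations:

```cpp
#include "tensorflow/contrib/lite/context.h"

namespace {

// Writes input1 / input2 element-wise into the already-resized output.
TfLiteStatus EvalDivFloatSimple(TfLiteContext* context,
                                const TfLiteTensor* input1,
                                const TfLiteTensor* input2,
                                TfLiteTensor* output) {
  TF_LITE_ENSURE_EQ(context, output->type, kTfLiteFloat32);
  const int num_elements = output->bytes / sizeof(float);
  const float* a = input1->data.f;
  const float* b = input2->data.f;
  float* out = output->data.f;
  for (int i = 0; i < num_elements; ++i) {
    out[i] = a[i] / b[i];
  }
  return kTfLiteOk;
}

}  // namespace
```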
gather.cc
     40  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     43  // Check that input and output types match.
     44  TF_LITE_ENSURE_EQ(context, input->type, output->type);
     80  return context->ResizeTensor(context, output, output_shape);
     86  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     92  GetTensorData<data_type>(output), GetTensorDims(output));
    113  buffer.WriteToTensor(output);
|
hashtable_lookup.cc
     25  // Output:
     26  // Output[0].dim[0] == Tensor[0].dim[0], num of lookups
     27  // Each item in output is a raw bytes copy of corresponding item in input.
     30  // Output[1].dim = { Tensor[0].dim[0] }, num of lookups
     84  TfLiteTensor* output = GetOutput(context, node, 0);  local
     85  TF_LITE_ENSURE_EQ(context, value->type, output->type);
     88  if (output->type != kTfLiteString) {
     94  status = context->ResizeTensor(context, output, outputSize);
    103  TfLiteTensor* output = GetOutput(context, node, 0);  local
    124  if (output->type == kTfLiteString)
    [all...]
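The comments at hashtable_lookup.cc:25-30 define the op's contract: one output row of raw bytes per lookup key, plus a second output of the same length flagging hits. A sketch of that contract using plain C++ containers instead of TfLiteTensors; the int32 key type, the zero-fill for misses, and the row_bytes parameter are assumptions of this illustration:

```cpp
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct LookupResult {
  std::vector<std::vector<uint8_t>> values;  // Output[0]: one row per lookup
  std::vector<uint8_t> hits;                 // Output[1]: 1 if key was found
};

LookupResult HashtableLookup(
    const std::vector<int32_t>& lookups,
    const std::unordered_map<int32_t, std::vector<uint8_t>>& table,
    std::size_t row_bytes) {
  LookupResult result;
  result.values.resize(lookups.size());
  result.hits.resize(lookups.size());
  for (std::size_t i = 0; i < lookups.size(); ++i) {
    auto it = table.find(lookups[i]);
    if (it != table.end()) {
      result.values[i] = it->second;  // raw bytes copy of the matching row
      result.hits[i] = 1;
    } else {
      result.values[i].assign(row_bytes, 0);  // missing keys yield zeros
      result.hits[i] = 0;
    }
  }
  return result;
}
```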
hashtable_lookup_test.cc
     80  TfLiteTensor* output = interpreter_->tensor(output_);  local
     81  int num = GetStringCount(output);
     84  auto ref = GetString(output, i);
|
pad.cc
     40  output = GetOutput(context, node, 0);
     45  TfLiteTensor* output;  member in struct:tflite::ops::builtin::pad::PadContext
     49  // Resizes output array based on the input size and padding size. This function
     59  // Determines the size of the output tensor.
     75  return context->ResizeTensor(context, op_context->output, output_size);
     83  TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
     88  // Exit early if paddings is a non-const tensor. Set output tensor to
     89  // dynamic so output size can be determined in Eval.
     91  SetTensorToDynamic(op_context.output);
    101  // Resize the output tensor if the output tensor is dynamic
    [all...]
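The pad.cc comments at lines 88-101 describe the dynamic-output pattern: if the shape-determining input (paddings) is not a constant tensor, Prepare marks the output dynamic and the resize is deferred to Eval. A sketch of that flow, assuming the contrib-era kernel_util.h helpers (IsConstantTensor, SetTensorToDynamic, IsDynamicTensor); the ResizeOutputTensor helper here is hypothetical:

```cpp
#include <cstdint>

#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"

namespace {

// Hypothetical resize helper: reads the {rank, 2} paddings tensor and grows
// each output dimension by before-padding + after-padding.
TfLiteStatus ResizeOutputTensor(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
  TfLiteTensor* paddings = &context->tensors[node->inputs->data[1]];
  TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
  const int32_t* pads = paddings->data.i32;
  TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
  for (int d = 0; d < input->dims->size; ++d) {
    output_size->data[d] += pads[2 * d] + pads[2 * d + 1];
  }
  return context->ResizeTensor(context, output, output_size);
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* paddings = &context->tensors[node->inputs->data[1]];
  TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
  if (!tflite::IsConstantTensor(paddings)) {
    // Output size can only be determined once paddings has a value, in Eval.
    tflite::SetTensorToDynamic(output);
    return kTfLiteOk;
  }
  return ResizeOutputTensor(context, node);
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
  if (tflite::IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, node));
  }
  // ... copy the input into the padded region of `output` here ...
  return kTfLiteOk;
}

}  // namespace
```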
resize_bilinear.cc
     40  TfLiteTensor* size, TfLiteTensor* output) {
     47  return context->ResizeTensor(context, output, output_size);
     56  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     67  output->type = kTfLiteFloat32;
     70  SetTensorToDynamic(output);
     73  return ResizeOutputTensor(context, input, size, output);
     82  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     85  if (IsDynamicTensor(output)) {
     87  ResizeOutputTensor(context, input, size, output));
     90  if (output->type == kTfLiteFloat32)
    [all...]
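resize_bilinear.cc's ResizeOutputTensor (lines 40-47) builds the output shape from the input and the 1-D size tensor. A sketch of that helper, assuming NHWC layout and an int32 size tensor; not the file's verbatim code:

```cpp
#include "tensorflow/contrib/lite/context.h"

namespace {

// Output keeps the input's batch and channel dims; height/width come from
// the 1-D `size` tensor of two int32 values.
TfLiteStatus ResizeOutputTensor(TfLiteContext* context, TfLiteTensor* input,
                                TfLiteTensor* size, TfLiteTensor* output) {
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
  output_size->data[0] = input->dims->data[0];  // batch
  output_size->data[1] = size->data.i32[0];     // new height
  output_size->data[2] = size->data.i32[1];     // new width
  output_size->data[3] = input->dims->data[3];  // channels
  return context->ResizeTensor(context, output, output_size);
}

}  // namespace
```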
space_to_depth.cc
     46  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     50  auto data_type = output->type;
     54  TF_LITE_ENSURE_EQ(context, input->type, output->type);
     71  return context->ResizeTensor(context, output, output_size);
     80  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     85  GetTensorData<scalar>(output), GetTensorDims(output))
|
split.cc
     64  TfLiteTensor* output = GetOutput(context, node, i);  local
     65  TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_dims));
     98  // When the 'axis' tensor is non-const we can't resize output tensors in
|
sub.cc
     46  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     54  TF_LITE_ENSURE_EQ(context, input1->type, output->type);
     55  TF_LITE_ENSURE_EQ(context, input2->type, output->type);
     58  return context->ResizeTensor(context, output, output_size);
     64  TfLiteTensor* input2, TfLiteTensor* output) {
     72  GetTensorData<float>(output), GetTensorDims(output))
     87  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);  local
     89  if (output->type == kTfLiteFloat32) {
     90  EvalSubFloat<kernel_type>(context, node, params, input1, input2, output);
    [all...]
transpose.cc
     38  output = GetOutput(context, node, 0);
     42  TfLiteTensor* output;  member in struct:tflite::ops::builtin::transpose::TransposeContext
     58  // Determine size of output tensor.
     65  return context->ResizeTensor(context, op_context->output, output_size);
     77  TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
     80  SetTensorToDynamic(op_context.output);
     90  // Resize the output tensor if the output tensor is dynamic.
     91  if (IsDynamicTensor(op_context.output)) {
    113  GetTensorData<scalar>(op_context.output), \
    [all...]
/external/tensorflow/tensorflow/contrib/periodic_resample/kernels/
periodic_resample_op.h
     41  // un-rasterize the output index
    169  // Create an output tensor and attach it to the current context
    173  auto output = output_tensor->flat<InputDataT>();  local
    181  // Fill output tensor with periodically resampled input tensor values
    184  output(output_index) = input(compute_input_index(
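The comment at periodic_resample_op.h:41, "un-rasterize the output index", refers to converting a flat row-major index back into per-dimension coordinates. A standalone sketch of that conversion:

```cpp
#include <cstdint>
#include <vector>

// For row-major layout, the last dimension varies fastest, so peel
// coordinates off from the last dimension to the first.
std::vector<int64_t> UnrasterizeIndex(int64_t flat_index,
                                      const std::vector<int64_t>& dims) {
  std::vector<int64_t> coords(dims.size());
  for (int d = static_cast<int>(dims.size()) - 1; d >= 0; --d) {
    coords[d] = flat_index % dims[d];
    flat_index /= dims[d];
  }
  return coords;
}

// Example: with dims = {2, 3, 4}, flat index 17 maps to {1, 1, 1},
// because 17 = 1*12 + 1*4 + 1.
```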
|
/external/tensorflow/tensorflow/contrib/reduce_slice_ops/kernels/
reduce_slice_ops.cc
     56  typename TTypes<T, 3>::Tensor output) { \
     58  Index dim1 = output.dimension(0); \
     59  Index dim2 = output.dimension(1); \
     60  Index dim3 = output.dimension(2); \
     75  output(x, y, z) = zero; \
     79  output(x, y, z) = reduceop(output(x, y, z), data(x, i, z)); \
    143  Tensor* output = nullptr;  variable
    144  OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
    148  output->flat_inner_outer_dims<T, 3>(axis - 1))
    [all...]
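The macro body at reduce_slice_ops.cc:56-79 reduces data(x, i, z) over a per-row index range into output(x, y, z), writing the reduction's identity ("zero") for empty ranges. A plain-C++ sketch of the same loop structure for a sum reduction, with flat row-major arrays standing in for the Eigen tensors:

```cpp
#include <cstdint>
#include <vector>

// data has shape {dim1, n, dim3}; starts/ends have shape {dim2};
// output has shape {dim1, dim2, dim3} and output(x, y, z) is the sum of
// data(x, i, z) for i in [starts[y], ends[y]).
void ReduceSliceSum(const std::vector<float>& data,
                    const std::vector<int64_t>& starts,
                    const std::vector<int64_t>& ends,
                    int64_t dim1, int64_t n, int64_t dim3,
                    std::vector<float>& output) {
  const int64_t dim2 = static_cast<int64_t>(starts.size());
  output.assign(dim1 * dim2 * dim3, 0.0f);  // identity element for sum
  for (int64_t x = 0; x < dim1; ++x) {
    for (int64_t y = 0; y < dim2; ++y) {
      for (int64_t z = 0; z < dim3; ++z) {
        float acc = 0.0f;
        for (int64_t i = starts[y]; i < ends[y]; ++i) {
          acc += data[(x * n + i) * dim3 + z];
        }
        output[(x * dim2 + y) * dim3 + z] = acc;
      }
    }
  }
}
```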
/external/tensorflow/tensorflow/contrib/tensorrt/
tensorrt_test.cc
     63  const char* kOutputTensor = "output";
     80  // Mark the output.
     81  auto output = layer->getOutput(0);  local
     82  output->setName(kOutputTensor);
     83  network->markOutput(*output);
     99  float* output) {
    102  // We have two bindings: input and output.
    114  // Copy the input to the GPU, execute the network, and copy the output back.
    122  ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float),
    145  float output;  local
    [all...]
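The tensorrt_test.cc comments at lines 102-114 describe the two-binding inference flow: copy the input to the GPU, execute the network, copy the output back. A sketch of that flow using only the CUDA runtime API; the TensorRT execute step is left as a callback, and the single-float buffers mirror the test's toy network rather than any real model:

```cpp
#include <cuda_runtime.h>

// Copies one float in, runs `execute` on the two device bindings, and copies
// one float back out. Error checking is omitted for brevity.
void RunSingleFloat(const float* input, float* output,
                    void (*execute)(void** buffers, cudaStream_t stream)) {
  // Two bindings: one input buffer and one output buffer on the device.
  void* buffers[2];
  cudaMalloc(&buffers[0], sizeof(float));
  cudaMalloc(&buffers[1], sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Copy the input to the GPU, run the network, copy the output back.
  cudaMemcpyAsync(buffers[0], input, sizeof(float),
                  cudaMemcpyHostToDevice, stream);
  execute(buffers, stream);  // e.g. the TensorRT execution context in the test
  cudaMemcpyAsync(output, buffers[1], sizeof(float),
                  cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(buffers[0]);
  cudaFree(buffers[1]);
}
```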
/external/tensorflow/tensorflow/core/grappler/costs/
op_level_cost_estimator_test.cc
    130  auto output = op_context.op_info.add_outputs();  local
    131  auto shape = output->mutable_shape();
    136  output->set_dtype(DT_FLOAT);
|
/external/tensorflow/tensorflow/core/kernels/
argmax_op.cc
     76  Tensor* output = nullptr;  variable
     77  OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
     83  output->tensor<Tout, NDIM - 1>()); \
    158  const int32 dimension, typename TTypes<Tout, Dims - 1>::Tensor output); \
    162  const int32 dimension, typename TTypes<Tout, Dims - 1>::Tensor output);
|
betainc_op.cc
     73  Tensor* output = nullptr;  variable
     74  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, merged_shape, &output));
     79  x.flat<T>(), output->flat<T>());
    101  output->shaped<T, NDIM>(a_shaper.y_reshape())); \
    134  typename TTypes<T, NDIM>::Tensor output); \
    143  typename TTypes<T, NDIM>::Tensor output); \
|
bincount_op.cc
     41  typename TTypes<T, 1>::Tensor& output) {
     42  int size = output.size();
     53  // Allocate partial output bin sums for each worker thread. Worker ids in
     82  output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dims);
    109  auto output = output_t->flat<T>();  variable
    111  ctx, arr, weights, output));
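bincount_op.cc:53 describes the multi-threaded strategy: each worker accumulates into its own row of partial bins, and the rows are summed into the final histogram. A sketch of that strategy with std::thread standing in for the kernel's Eigen thread pool; weights are omitted:

```cpp
#include <algorithm>
#include <cstdint>
#include <thread>
#include <vector>

std::vector<int32_t> Bincount(const std::vector<int32_t>& arr, int num_bins,
                              int num_workers) {
  // One row of partial bins per worker, so workers never write to shared bins.
  std::vector<std::vector<int32_t>> partial_bins(
      num_workers, std::vector<int32_t>(num_bins, 0));
  std::vector<std::thread> workers;
  const std::size_t chunk = (arr.size() + num_workers - 1) / num_workers;
  for (int w = 0; w < num_workers; ++w) {
    workers.emplace_back([&, w] {
      const std::size_t begin = w * chunk;
      const std::size_t end = std::min(arr.size(), begin + chunk);
      for (std::size_t i = begin; i < end; ++i) {
        if (arr[i] >= 0 && arr[i] < num_bins) ++partial_bins[w][arr[i]];
      }
    });
  }
  for (auto& t : workers) t.join();

  // Reduce the partial rows into the final output bins.
  std::vector<int32_t> output(num_bins, 0);
  for (int w = 0; w < num_workers; ++w)
    for (int b = 0; b < num_bins; ++b) output[b] += partial_bins[w][b];
  return output;
}
```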
|
concat_op_test.cc
    182  bfloat16* output = &result[j * dim2];  local
    187  memcpy(output, inputs[j], dim2 * sizeof(bfloat16));
    189  output += dim2 * kNumCopies;
    216  bfloat16* output = result;  local
    222  memcpy(output, inputs[j], dim2 * sizeof(bfloat16));
    224  output += dim2;
|
decode_bmp_op.cc
    132  Tensor* output = nullptr;  variable
    135  0, TensorShape({abs_height, width, channels_}), &output));
    139  Decode(bmp_pixels, row_size, output->flat<uint8>().data(), width,
    143  uint8* Decode(const uint8* input, const int row_size, uint8* const output,
    153  uint8* const output, const int width,
    171  output[dst_pos] = input[src_pos];
    175  output[dst_pos] = input[src_pos + 2];
    176  output[dst_pos + 1] = input[src_pos + 1];
    177  output[dst_pos + 2] = input[src_pos];
    181  output[dst_pos] = input[src_pos + 2]
    [all...]
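The writes at decode_bmp_op.cc:175-177 swizzle BMP's BGR byte order into RGB output. A short sketch of that swizzle, ignoring BMP row padding and bottom-up row order for brevity:

```cpp
#include <cstdint>

// Converts num_pixels 3-byte BGR pixels into RGB order.
void SwizzleBgrToRgb(const uint8_t* input, uint8_t* output, int num_pixels) {
  for (int p = 0; p < num_pixels; ++p) {
    const int src_pos = p * 3;
    const int dst_pos = p * 3;
    output[dst_pos]     = input[src_pos + 2];  // R: last byte of the BGR triple
    output[dst_pos + 1] = input[src_pos + 1];  // G: middle byte
    output[dst_pos + 2] = input[src_pos];      // B: first byte of the BGR triple
  }
}
```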
dequantize_op.cc
     71  Tensor* output = nullptr;  variable
     72  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
     79  float* out_ptr = output->flat<float>().data();
     92  min_range, max_range, output->flat<float>().data());
     96  output);
    115  float* out_ptr = output->flat<float>().data();
|
diag_op.cc
     98  Tensor* output = nullptr;  variable
     99  OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output));
    102  tensor.flat<T>().data(), output->flat<T>().data());
    110  // `output[i1,..., ik, i1,..., ik] = input[i1,..., ik]`,
    116  // Let new_index is the offset of output's pointer with coordinate
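The comment at diag_op.cc:110 gives the mapping output[i1,..., ik, i1,..., ik] = input[i1,..., ik]. Flattening the input to N elements, the non-zeros of the N*N output sit at offsets i*N + i. A small sketch of that mapping:

```cpp
#include <cstddef>
#include <vector>

// Builds the flattened diagonal tensor: an n*n buffer that is zero everywhere
// except at offsets i*n + i, which receive input[i].
std::vector<float> Diag(const std::vector<float>& input) {
  const std::size_t n = input.size();
  std::vector<float> output(n * n, 0.0f);  // start from all zeros
  for (std::size_t i = 0; i < n; ++i) {
    output[i * n + i] = input[i];  // diagonal offset of the flattened output
  }
  return output;
}
```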
|
draw_bounding_box_op.cc
     80  Tensor* output;  variable
     84  0, TensorShape({batch_size, height, width, depth}), &output));
     86  output->tensor<T, 4>() = images.tensor<T, 4>();
     87  auto canvas = output->tensor<T, 4>();
|
image_resizer_state.h
     16  // This is a helper struct to package up the input and output
     54  // height_scale and width_scale, and calculates the output size.
     84  errors::InvalidArgument("output dimensions must be positive"));
    107  // Calculates all the required variables, and allocates the output.
    115  &output));
    126  Tensor* output = nullptr;  member in struct:tensorflow::ImageResizerState
    151  // Allocate output and initialize to zeros.
    170  output = nullptr;
    175  &output));
    186  Tensor* output;  member in struct:tensorflow::ImageResizerGradientState
    [all...]
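image_resizer_state.h's comment at line 54 mentions computing height_scale and width_scale alongside the output size. A sketch of the usual TensorFlow scale convention (endpoints pinned when align_corners is set); treat the exact corner handling as an assumption of this sketch rather than a quote of the header:

```cpp
#include <cstdint>

// Scale that maps an output coordinate back to an input coordinate.
// With align_corners, the first and last samples of input and output coincide,
// so the scale is (in - 1) / (out - 1); otherwise it is the plain size ratio.
float CalculateResizeScale(int64_t in_size, int64_t out_size, bool align_corners) {
  return (align_corners && out_size > 1)
             ? (in_size - 1) / static_cast<float>(out_size - 1)
             : in_size / static_cast<float>(out_size);
}
```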