Home | Sort by relevance | Sort by last modified time
    Searched defs:output_max (Results 1 - 25 of 33) sorted by null

1 2

  /external/tensorflow/tensorflow/core/kernels/
quantized_reshape_op.cc 40 Tensor* output_max = nullptr; variable
41 OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max));
42 output_max->flat<float>()(0) = input_max_float;
quantize_op_test.cc 293 const float output_max = GetOutput(2)->flat<float>()(0); local
295 EXPECT_NEAR(255.0f, output_max, 1e-5f);
315 const float output_max = GetOutput(2)->flat<float>()(0); local
317 EXPECT_LT(0.0f, output_max);
337 const float output_max = GetOutput(2)->flat<float>()(0); local
339 EXPECT_NEAR(0.3f, output_max, 1e-5f);
359 const float output_max = GetOutput(2)->flat<float>()(0); local
361 EXPECT_NEAR(0.0f, output_max, 1e-5f);
quantized_activation_ops.cc 54 Tensor* output_max = nullptr; variable
55 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
56 output_max->flat<float>()(0) = max_input;
92 Tensor* output_max = nullptr; variable
93 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
94 output_max->flat<float>()(0) = max_input;
quantized_activation_ops_test.cc 63 const float output_max = GetOutput(2)->flat<float>()(0); local
65 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
94 const float output_max = GetOutput(2)->flat<float>()(0); local
96 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
quantized_pooling_ops.cc 97 Tensor* output_max = nullptr; variable
98 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
99 output_max->flat<float>()(0) = max_input;
121 Tensor* output_max = nullptr; variable
122 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
123 output_max->flat<float>()(0) = max_input;
requantize.cc 51 Tensor* output_max = nullptr; variable
52 OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max));
93 output_max->flat<float>().setConstant(requested_output_max_float);
quantize_down_and_shrink_range.cc 49 Tensor* output_max = nullptr; variable
50 OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max));
97 output_max->flat<float>().setConstant(actual_max_float);
quantized_bias_add_op.cc 88 Tensor* output_max = nullptr; variable
89 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
90 output_max->flat<float>()(0) = total_max;
quantized_bias_add_op_test.cc 84 const float output_max = GetOutput(2)->flat<float>()(0); local
86 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
166 const float output_max = GetOutput(2)->flat<float>()(0); local
168 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
quantized_pooling_ops_test.cc 77 const float output_max = GetOutput(2)->flat<float>()(0); local
79 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
122 const float output_max = GetOutput(2)->flat<float>()(0); local
124 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
requantization_range_op.cc 53 Tensor* output_max = nullptr; variable
54 OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_max));
69 output_max->flat<float>().setConstant(used_max_float);
mkl_quantized_concat_op_test.cc 151 const float output_max = GetOutput(2)->flat<float>()(0); local
153 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
225 const float output_max = GetOutput(2)->flat<float>()(0); local
227 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
mkl_quantized_pooling_ops_test.cc 126 const float output_max = GetOutput(2)->flat<float>()(0); local
128 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
194 const float output_max = GetOutput(2)->flat<float>()(0); local
196 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
mkl_requantization_range_per_channel_op.cc 103 Tensor* output_max = nullptr; variable
105 OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
107 output_max->flat<float>()(0) = out_min_max;
mkl_requantize_ops_test.cc 164 const float output_max = GetOutput(1)->flat<float>()(0); local
166 EXPECT_NEAR(14.8217, output_max, 0.002);
225 const float output_max = GetOutput(1)->flat<float>()(0); local
227 EXPECT_NEAR(6.0, output_max, 0.002); // Values are aligned with clip_value.
294 const float output_max = GetOutput(2)->flat<float>()(0); local
296 EXPECT_NEAR(range_op_output_max, output_max, 0.002);
quantized_conv_ops_test.cc 128 const float output_max = GetOutput(2)->flat<float>()(0); local
130 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
319 const float output_max = GetOutput(2)->flat<float>()(0); local
321 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
quantized_matmul_op_test.cc 352 const float output_max = GetOutput(2)->flat<float>()(0); local
354 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
mkl_quantized_conv_ops_test.cc 199 const float output_max = GetOutput(2)->flat<float>()(0); local
201 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
mkl_requantize_per_channel_op.cc 135 Tensor* output_max = nullptr; variable
139 ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
142 output_max->flat<float>()(0) = input_requested_max_float;
quantized_batch_norm_op_test.cc 128 const float output_max = GetOutput(2)->flat<float>()(0); local
130 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
236 const float output_max = GetOutput(2)->flat<float>()(0); local
238 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
quantized_concat_op_test.cc 112 const float output_max = GetOutput(2)->flat<float>()(0); local
114 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
178 const float output_max = GetOutput(2)->flat<float>()(0); local
180 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
240 const float output_max = GetOutput(2)->flat<float>()(0); local
242 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
quantized_batch_norm_op.cc 38 Tensor* output, float* output_min, float* output_max) {
50 *output_max = std::numeric_limits<float>::lowest();
79 *output_max = std::max(output_value, *output_max);
82 FloatToQuantized<T2>(output_value, *output_min, *output_max);
101 Tensor* output, float* output_min, float* output_max) {
116 *output_max = (1 << 20);
139 FloatToQuantized<T2>(scale_value, *output_min, *output_max);
141 FloatToQuantized<T2>(offset_value, *output_min, *output_max);
145 FloatToQuantized<T2>(1.0f, *output_min, *output_max);
211 float output_max; variable
    [all...]
  /external/tensorflow/tensorflow/lite/experimental/micro/kernels/
depthwise_conv_test.cc 117 float output_max, TfLiteFusedActivation activation, uint8_t* output_data) {
135 output_min, output_max),
235 const float output_max = 128.0f; local
289 F2Q(71, output_min, output_max),
290 F2Q(-34, output_min, output_max),
291 F2Q(99, output_min, output_max),
292 F2Q(-20, output_min, output_max),
293 F2Q(91, output_min, output_max),
294 F2Q(-26, output_min, output_max),
295 F2Q(127, output_min, output_max),
343 const float output_max = 128.0f; local
422 const float output_max = 128.0f; local
    [all...]
fully_connected_test.cc 110 float output_max, TfLiteFusedActivation activation, uint8_t* output_data) {
128 output_min, output_max),
273 const float output_max = 128.0f; local
321 F2Q(24, output_min, output_max),
322 F2Q(25, output_min, output_max),
323 F2Q(26, output_min, output_max),
324 F2Q(58, output_min, output_max),
325 F2Q(59, output_min, output_max),
326 F2Q(60, output_min, output_max),
329 output_min, output_max, // Output quantization range
344 const float output_max = 128.0f; local
415 const float output_max = 64.0f; local
512 const float output_max = 128.0f; local
583 const float output_max = 64.0f; local
    [all...]
softmax_test.cc 94 float output_min, float output_max,
107 output_min, output_max),
194 const float output_max = (255.0f / 256.0f); local
209 F2Q(0.011656231, output_min, output_max),
210 F2Q(0.031684921, output_min, output_max),
211 F2Q(0.086128544, output_min, output_max),
212 F2Q(0.234121657, output_min, output_max),
213 F2Q(0.636408647, output_min, output_max),
216 output_min, output_max, // Output quantized range.

Completed in 179 milliseconds

1 2