/external/tensorflow/tensorflow/core/kernels/ |
quantization_utils.cc |
   20  void GetOutputMinAndMaxForQuantizedAdd(float input_min, float input_max,
   36  std::max(input_max, std::max(-input_min, std::max(smaller_input_max,
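
The helper above picks a float output range wide enough to hold the sum of a quantized input and a quantized bias; line 36 suggests the range is driven by the largest absolute endpoint of either operand, but the full formula is cut off in this listing. Below is a minimal standalone sketch of one conservative choice (summing the endpoints and keeping zero representable), intended only to illustrate what such a helper has to produce, not the exact TensorFlow computation.

#include <algorithm>
#include <cstdio>

// Conservative output range for adding two values known to lie in
// [input_min, input_max] and [bias_min, bias_max]: the sum can never
// leave [input_min + bias_min, input_max + bias_max].
void OutputRangeForAdd(float input_min, float input_max, float bias_min,
                       float bias_max, float* output_min, float* output_max) {
  *output_min = input_min + bias_min;
  *output_max = input_max + bias_max;
  // Keep zero inside the range so it stays exactly representable.
  *output_min = std::min(*output_min, 0.0f);
  *output_max = std::max(*output_max, 0.0f);
}

int main() {
  float out_min = 0.0f, out_max = 0.0f;
  OutputRangeForAdd(-10.0f, 10.0f, -1.0f, 2.0f, &out_min, &out_max);
  std::printf("output range: [%f, %f]\n", out_min, out_max);
  return 0;
}

Summing the endpoints can waste precision when one operand's range is much smaller than the other's, which is consistent with line 36 looking at absolute endpoint magnitudes instead.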
|
quantized_activation_ops_test.cc |
   46  const float input_max = 127.0f; local
   52  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
   59  AddInputFromArray<float>(TensorShape({1}), {input_max});
   77  const float input_max = 127.0f; local
   83  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
   90  AddInputFromArray<float>(TensorShape({1}), {input_max});
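
These tests all build quint8 inputs with FloatTensorToQuantized<quint8>(input_float, input_min, input_max) and then feed the range in as two scalar tensors. A standalone sketch of the affine float-to-uint8 mapping such a helper performs; the function name and exact rounding here are illustrative, the real implementation lives in quantization_utils.h.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Map a float in [range_min, range_max] onto the uint8 codes [0, 255]
// with the usual affine (scale plus round-to-nearest) quantization.
// Assumes range_max > range_min.
uint8_t FloatToQuantizedUint8(float value, float range_min, float range_max) {
  const float scale = (range_max - range_min) / 255.0f;
  const float clamped = std::min(range_max, std::max(range_min, value));
  return static_cast<uint8_t>(std::lround((clamped - range_min) / scale));
}

int main() {
  const float input_min = -127.0f, input_max = 127.0f;
  const std::vector<float> input_float = {-127.0f, 0.0f, 63.5f, 127.0f};
  for (float v : input_float) {
    std::printf("%8.2f -> %3u\n", v, FloatToQuantizedUint8(v, input_min, input_max));
  }
  return 0;
}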
|
quantized_pooling_ops_test.cc |
   52  const float input_max = 255.0f; local
   62  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
   73  AddInputFromArray<float>(TensorShape({1}), {input_max});
   97  const float input_max = 255.0f; local
  107  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  118  AddInputFromArray<float>(TensorShape({1}), {input_max});
|
mkl_requantization_range_per_channel_op.cc |
   47  const Tensor& input_max = ctx->input(kInputMaxIndex); variable
   49  const size_t depth = input_max.NumElements();
   55  ctx, input_max.dim_size(0) == depth,
   56  errors::InvalidArgument("input_max has incorrect size, expected ",
   57  depth, " was ", input_max.dim_size(0)));
   60  const float* input_max_data = input_max.flat<float>().data();
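
The MKL requantization-range op receives input_min/input_max as per-channel tensors and validates that input_max has one element per channel before reading the raw data. A small sketch of the same depth check together with a symmetric per-channel bound (max of |min| and |max|); taking the symmetric bound is an assumption made for illustration, not necessarily what this op computes.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Given per-channel float ranges, produce a symmetric per-channel bound
// suitable for requantization. Returns false if the vectors disagree on depth.
bool PerChannelAbsRange(const std::vector<float>& input_min,
                        const std::vector<float>& input_max,
                        std::vector<float>* abs_range) {
  if (input_min.size() != input_max.size()) return false;  // depth check
  abs_range->resize(input_min.size());
  for (size_t c = 0; c < input_min.size(); ++c) {
    (*abs_range)[c] = std::max(std::fabs(input_min[c]), std::fabs(input_max[c]));
  }
  return true;
}

int main() {
  std::vector<float> mins = {-1.0f, -0.5f}, maxes = {2.0f, 0.25f}, out;
  if (PerChannelAbsRange(mins, maxes, &out)) {
    std::printf("per-channel bounds: %.2f %.2f\n", out[0], out[1]);
  }
  return 0;
}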
|
quantized_bias_add_op_test.cc |
   52  const float input_max = 60.0f; local
   59  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
   78  AddInputFromArray<float>(TensorShape({1}), {input_max});
  102  const float input_max = 2006.27f; local
  119  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  160  AddInputFromArray<float>(TensorShape({1}), {input_max});
|
quantized_bias_add_op.cc |
   42  const float input_max = context->input(3).flat<float>()(0); variable
   71  GetOutputMinAndMaxForQuantizedAdd(input_min, input_max, bias_min,
   75  bias_ui8_array.size(), input_min, input_max,
   81  input_max, bias, bias_min, bias_max, output, &total_min, &total_max);
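
This kernel reads the input range from scalar inputs, asks GetOutputMinAndMaxForQuantizedAdd for a total range, and then performs the add on quantized data. The sketch below shows the role the two ranges play using a float round trip (dequantize, add, requantize); the real op stays in the integer domain and uses the helper's range rather than the naive endpoint sum used here.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

float Dequantize(uint8_t q, float min, float max) {
  return min + q * (max - min) / 255.0f;
}

uint8_t Quantize(float v, float min, float max) {
  const float clamped = std::min(max, std::max(min, v));
  return static_cast<uint8_t>(std::lround((clamped - min) * 255.0f / (max - min)));
}

int main() {
  const float input_min = 0.0f, input_max = 60.0f;
  const float bias_min = -5.0f, bias_max = 5.0f;
  // Conservative output range: every possible sum fits.
  const float total_min = input_min + bias_min, total_max = input_max + bias_max;

  const std::vector<uint8_t> input = {0, 128, 255};
  const std::vector<uint8_t> bias = {0, 128, 255};
  for (size_t i = 0; i < input.size(); ++i) {
    const float sum = Dequantize(input[i], input_min, input_max) +
                      Dequantize(bias[i], bias_min, bias_max);
    std::printf("sum %.2f -> code %u in [%.1f, %.1f]\n", sum,
                Quantize(sum, total_min, total_max), total_min, total_max);
  }
  return 0;
}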
|
meta_support.h |
   75  // Take an array of numbers from the range [input_min, input_max] quantized
   80  float input_min, float input_max, float output_min,
   94  // [input_min, input_max], and [bias_min, bias_max] accordingly, as uint8
  100  float input_min, float input_max, float bias_min,
|
quantization_utils_test.cc |
   35  float input_max, float output_min, float output_max,
   43  QuantizedToFloat(values_quantized[value_index], input_min, input_max),
   55  input_max, output_min, output_max,
   59  *eigen_device, i_tensor, input_min, input_max, output_min, output_max,
   71  << ", input_max=" << input_max << ", output_min=" << output_min
   76  void TestRequantizeMany8To32Bit(float input_min, float input_max,
   85  QuantizedToFloat(values_quantized[value_index], input_min, input_max),
   96  input_max, output_min, output_max,
  107  << ", input_max=" << input_max << ", output_min=" << output_min
  231  const float input_max = ranges[range_index][1]; local
  283  const float input_max = 100.0f; local
  526  const float input_max = ranges[range_index][1]; local
  548  const float input_max = 0.641057f; local
  583  const float input_max = ranges[range_index][1]; local
  626  const float input_max = 255.0f; local
  656  const float input_max = 2400.0f; local
  679  const float input_max = 127.0f; local
  [all...]
mkl_quantized_pooling_ops_test.cc |
   82  const float input_max = 255.0f; local
   92  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  111  AddInputFromArray<float>(TensorShape({1}), {input_max});
  151  const float input_max = 255.0f; local
  161  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  179  AddInputFromArray<float>(TensorShape({1}), {input_max});
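
TestRequantizeMany8To32Bit above checks requantization from 8-bit codes in [input_min, input_max] to 32-bit codes in [output_min, output_max] against a float reference built with QuantizedToFloat. A standalone sketch of that reference path, assuming the usual "min maps to the lowest code" convention; the actual TensorFlow code layout for qint32 may differ in detail.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

// Reference requantization: interpret an 8-bit code in one float range and
// re-encode it as a 32-bit code in another float range, via float.
int32_t RequantizeViaFloat(uint8_t value, float input_min, float input_max,
                           float output_min, float output_max) {
  const float real = input_min + value * (input_max - input_min) / 255.0f;
  const double out_scale =
      (static_cast<double>(output_max) - output_min) / 4294967295.0;  // 2^32 - 1 codes
  const double clamped =
      std::min<double>(output_max, std::max<double>(output_min, real));
  // Codes are offset so they span the full signed 32-bit range.
  return static_cast<int32_t>(std::llround((clamped - output_min) / out_scale) +
                              std::numeric_limits<int32_t>::min());
}

int main() {
  std::printf("requantized code: %d\n",
              RequantizeViaFloat(255, 0.0f, 255.0f, 0.0f, 255.0f));
  return 0;
}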
|
quantized_batch_norm_op.cc |
   32  const float input_max, const Tensor& mean,
   57  QuantizedToFloat(input_flat(input_index), input_min, input_max);
   95  const float input_max, const Tensor& mean,
  151  input_max, *output_min, *output_max);
  177  const float input_max = context->input(2).flat<float>()(0); variable
  212  FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min,
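
quantized_batch_norm_op.cc evaluates the batch-norm arithmetic by converting each quantized input back to float with QuantizedToFloat and writing the result into a fixed output range. A per-element sketch of that reference computation; the real FixedPointBatchNorm also precomputes per-channel scales and requantizes the result, which is omitted here.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Batch-norm one dequantized value: (x - mean) / sqrt(var + eps) * gamma + beta.
float BatchNormValue(uint8_t q, float input_min, float input_max, float mean,
                     float variance, float gamma, float beta,
                     float variance_epsilon) {
  const float x = input_min + q * (input_max - input_min) / 255.0f;
  return (x - mean) / std::sqrt(variance + variance_epsilon) * gamma + beta;
}

int main() {
  const float out = BatchNormValue(/*q=*/200, /*input_min=*/-127.0f,
                                   /*input_max=*/127.0f, /*mean=*/10.0f,
                                   /*variance=*/25.0f, /*gamma=*/1.0f,
                                   /*beta=*/0.0f, /*variance_epsilon=*/0.001f);
  std::printf("normalized value: %f\n", out);
  return 0;
}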
|
quantized_batch_norm_op_test.cc |
   62  const float input_max = 127.0f; local
   72  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  101  AddInputFromArray<float>(TensorShape({1}), {input_max});
  159  const float input_max = 127.0f; local
  169  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  198  AddInputFromArray<float>(TensorShape({1}), {input_max});
|
quantized_concat_op.cc |
   42  const float input_max = (*input_min_and_max)[input_index].second; local
   43  if (input_min == output_min && input_max == output_max) {
   52  QuantizedToFloatStruct<T> q2f(input_min, input_max);
   88  const float input_max = input_maxes[i].flat<float>()(0); local
   89  input_mins_and_maxes->emplace_back(input_min, input_max);
   91  overall_max = std::max(overall_max, input_max);
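
The concat kernel collects each input's (min, max) pair, folds them into an overall output range (line 91), and skips requantization for inputs whose range already matches the output (line 43). A small sketch of the range-merging step; starting the merged range at zero, so that zero stays representable, is a common convention and an assumption here.

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Merge per-input float ranges into one range that can represent every input.
std::pair<float, float> OverallRange(
    const std::vector<std::pair<float, float>>& input_mins_and_maxes) {
  float overall_min = 0.0f, overall_max = 0.0f;
  for (const auto& mm : input_mins_and_maxes) {
    overall_min = std::min(overall_min, mm.first);
    overall_max = std::max(overall_max, mm.second);
  }
  return {overall_min, overall_max};
}

int main() {
  const auto range = OverallRange({{0.0f, 6.0f}, {-1.0f, 1.0f}, {0.0f, 255.0f}});
  std::printf("concat output range: [%.1f, %.1f]\n", range.first, range.second);
  return 0;
}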
|
quantize_and_dequantize_op.h |
  115  auto input_max = input_max_tensor->scalar<T>(); local
  118  input_max.device(d) = input.maximum();
  120  d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T));
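
Here the op computes max_range by reducing the input tensor (input.maximum()) and copying the scalar back to the host before quantizing. A CPU-only sketch of the same "derive the range from the data, then quantize and dequantize" round trip, assuming 8-bit codes.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Quantize-and-dequantize with the range taken from the data itself.
void QuantizeAndDequantize(std::vector<float>* values) {
  float min_range = 0.0f, max_range = 0.0f;
  for (float v : *values) {
    min_range = std::min(min_range, v);  // like input.minimum()
    max_range = std::max(max_range, v);  // like input.maximum()
  }
  const float scale = (max_range - min_range) / 255.0f;
  if (scale == 0.0f) return;  // all values are zero; nothing to do
  for (float& v : *values) {
    const float q = std::round((v - min_range) / scale);  // 8-bit code
    v = min_range + q * scale;                            // back to float
  }
}

int main() {
  std::vector<float> values = {-1.0f, 0.1f, 0.5f, 2.0f};
  QuantizeAndDequantize(&values);
  for (float v : values) std::printf("%f\n", v);
  return 0;
}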
|
meta_support.cc |
  257  float input_min, float input_max, float output_min,
  272  CalculateRangeScale<int32_t>(input_min, input_max);
  348  float input_min, float input_max, float bias_min,
  366  CalculateRangeScale<uint8_t>(input_min, input_max);
|
quantized_instance_norm.cc |
  278  float input_max = context->input(2).flat<float>()(0); variable
  279  float input_scale = (input_max - input_min) / 255.0f;
  281  OP_REQUIRES(context, input_min < input_max,
  283  "input_min must be less than input_max : ", input_min,
  284  " >= ", input_max));
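
The instance-norm kernel derives a single input scale, (input_max - input_min) / 255, and rejects degenerate ranges with OP_REQUIRES before doing any work. The same guard in isolation:

#include <cstdio>

// Returns false (and leaves *scale untouched) for a degenerate range,
// mirroring the OP_REQUIRES(input_min < input_max, ...) check above.
bool ComputeInputScale(float input_min, float input_max, float* scale) {
  if (!(input_min < input_max)) {
    std::fprintf(stderr, "input_min must be less than input_max: %f >= %f\n",
                 input_min, input_max);
    return false;
  }
  *scale = (input_max - input_min) / 255.0f;
  return true;
}

int main() {
  float scale = 0.0f;
  if (ComputeInputScale(0.0f, 255.0f, &scale)) std::printf("scale = %f\n", scale);
  return 0;
}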
|
quantization_utils.h | [all...] |
/external/tensorflow/tensorflow/lite/experimental/micro/kernels/ |
fully_connected_test.cc |
  103  std::initializer_list<uint8_t> input_data, float input_min, float input_max,
  122  input_max),
  267  const float input_max = 64.0f; local
  280  F2Q(1, input_min, input_max), F2Q(2, input_min, input_max),
  281  F2Q(3, input_min, input_max), F2Q(4, input_min, input_max),
  282  F2Q(5, input_min, input_max), F2Q(6, input_min, input_max),
  283  F2Q(7, input_min, input_max), F2Q(8, input_min, input_max)
  338  const float input_max = 64.0f; local
  409  const float input_max = 128.0f; local
  506  const float input_max = 64.0f; local
  577  const float input_max = 128.0f; local
  [all...]
depthwise_conv_test.cc |
  110  std::initializer_list<uint8_t> input_data, float input_min, float input_max,
  129  input_max),
  229  const float input_max = 64.0f; local
  243  F2Q(1, input_min, input_max),
  244  F2Q(2, input_min, input_max),
  245  F2Q(7, input_min, input_max),
  246  F2Q(8, input_min, input_max),
  247  F2Q(3, input_min, input_max),
  248  F2Q(4, input_min, input_max),
  249  F2Q(9, input_min, input_max),
  337  const float input_max = 64.0f; local
  416  const float input_max = 255.0f; local
  [all...]
softmax_test.cc |
   91  float input_min, float input_max,
  105  input_max),
  192  const float input_max = 64.0f; local
  200  F2Q(1.0, input_min, input_max),
  201  F2Q(2.0, input_min, input_max),
  202  F2Q(3.0, input_min, input_max),
  203  F2Q(4.0, input_min, input_max),
  204  F2Q(5.0, input_min, input_max),
  206  input_min, input_max, // Input quantized range.
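
These micro-kernel tests quantize their float fixtures with an F2Q(value, input_min, input_max) helper before handing them to the op under test. A plausible standalone sketch of such a helper with clamping and round-to-nearest; the real helper lives in the micro test utilities and may differ in rounding or zero-point handling, and the range in main() is just an example.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Float-to-quantized: map value from [min, max] onto a uint8 code.
// Assumes max > min.
uint8_t F2Q(float value, float min, float max) {
  const float scale = (max - min) / 255.0f;
  const float code = std::round((value - min) / scale);
  return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, code)));
}

int main() {
  const float input_min = -64.0f, input_max = 64.0f;
  for (float v : {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}) {
    std::printf("F2Q(%.1f) = %u\n", v, F2Q(v, input_min, input_max));
  }
  return 0;
}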
|
/external/webrtc/webrtc/modules/audio_coding/neteq/ |
merge.h |
   76  int16_t* expanded_max, int16_t* input_max) const;
   87  size_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
|
merge.cc |
   66  int16_t expanded_max, input_max; local
   69  &expanded_max, &input_max);
   92  expanded_max, input_max, old_length,
  208  int16_t* expanded_max, int16_t* input_max) const {
  213  *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
  228  WebRtcSpl_NormW32(*input_max * *input_max);
  310  size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
  318  if (expanded_max * input_max > 26843546) {
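
Merge measures the largest absolute sample of the expanded and input signals with WebRtcSpl_MaxAbsValueW16 and later uses the product expanded_max * input_max to decide how the correlation must be scaled (line 318). A standalone sketch of the max-abs computation and that product test; the threshold below is the one visible in the snippet, but the surrounding control flow is illustrative rather than NetEq's.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Largest absolute sample value in an int16 buffer (what
// WebRtcSpl_MaxAbsValueW16 returns, clamped so -32768 reports 32767).
int16_t MaxAbsValueW16(const int16_t* samples, size_t length) {
  int32_t max_abs = 0;
  for (size_t i = 0; i < length; ++i) {
    const int32_t abs_val = std::abs(static_cast<int32_t>(samples[i]));
    if (abs_val > max_abs) max_abs = abs_val;
  }
  return static_cast<int16_t>(std::min<int32_t>(max_abs, 32767));
}

int main() {
  const std::vector<int16_t> expanded = {1200, -30000, 4500};
  const std::vector<int16_t> input = {-900, 2500, -32768};
  const int16_t expanded_max = MaxAbsValueW16(expanded.data(), expanded.size());
  const int16_t input_max = MaxAbsValueW16(input.data(), input.size());
  // If the product is large, correlation accumulators would risk overflow,
  // so a scaled-down correlation would be used instead (threshold copied
  // from the snippet above).
  const bool need_downscaling =
      static_cast<int32_t>(expanded_max) * input_max > 26843546;
  std::printf("expanded_max=%d input_max=%d downscale=%d\n", expanded_max,
              input_max, static_cast<int>(need_downscaling));
  return 0;
}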
|
/external/tensorflow/tensorflow/core/graph/ |
quantize_training.cc |
   55  float input_max; member in struct:tensorflow::__anon45051::EdgeToConvert
   64  input_max(max) {}
   80  bool* range_given, float* input_min, float* input_max) {
   96  *input_max = 6;
  101  *input_max = 1;
  106  *input_max = 1;
  114  input_max);
  124  input_max);
  505  Node** input_max) {
  507  // Make constant nodes for the input_min and input_max if the range i
  541  Node* input_max; local
  635  float input_max = 0; local
  [all...]
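
The rewriting pass records an input_min/input_max per edge and, when the producing op has a statically known output range, fills the values directly; the assignments of 6 and 1 at lines 96-106 are consistent with Relu6, Sigmoid, and Tanh output ranges, although the matched op names are not visible in this listing. A sketch of that kind of lookup under that assumption:

#include <cstdio>
#include <string>

// Return true and fill [*input_min, *input_max] when the op's output range
// is known a priori. The ranges below are the usual ones for these
// activations; which ops the real pass special-cases is an assumption.
bool FindStaticRange(const std::string& op, float* input_min, float* input_max) {
  if (op == "Relu6") { *input_min = 0; *input_max = 6; return true; }
  if (op == "Sigmoid") { *input_min = 0; *input_max = 1; return true; }
  if (op == "Tanh") { *input_min = -1; *input_max = 1; return true; }
  return false;  // range not given; it has to be observed at runtime
}

int main() {
  float lo = 0, hi = 0;
  if (FindStaticRange("Relu6", &lo, &hi)) std::printf("Relu6: [%g, %g]\n", lo, hi);
  return 0;
}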
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
fake_quantize_ops.cc |
  100  float input_min, input_max; local
  102  OP_REQUIRES_OK(ctx, ctx->GetAttr("max", &input_max));
  103  CpuNudge(input_min, input_max, quant_min_, quant_max_, &nudged_input_min_,
  148  float input_min, input_max, scale; local
  150  OP_REQUIRES_OK(ctx, ctx->GetAttr("max", &input_max));
  151  CpuNudge(input_min, input_max, quant_min, quant_max, &nudged_input_min_,
  203  xla::XlaOp input_max = ctx->Input(2); variable
  207  XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_,
  246  xla::XlaOp input_max = ctx->Input(3); variable
  250  XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_
  [all...]
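
CpuNudge and XlaNudge adjust a raw [input_min, input_max] so that 0.0 falls exactly on an integer code before fake-quantizing. A standalone sketch of the usual nudging recipe (scale from the range, clamp and round the zero point, shift the limits); this follows the well-known fake-quant formulation rather than being copied from the kernel.

#include <cmath>
#include <cstdio>

// Nudge [input_min, input_max] so that 0.0f maps exactly onto an integer
// code in [quant_min, quant_max].
void Nudge(float input_min, float input_max, float quant_min, float quant_max,
           float* nudged_min, float* nudged_max, float* scale) {
  *scale = (input_max - input_min) / (quant_max - quant_min);
  const float zero_point_from_min = quant_min - input_min / *scale;
  float nudged_zero_point;
  if (zero_point_from_min < quant_min) {
    nudged_zero_point = quant_min;
  } else if (zero_point_from_min > quant_max) {
    nudged_zero_point = quant_max;
  } else {
    nudged_zero_point = std::round(zero_point_from_min);
  }
  *nudged_min = (quant_min - nudged_zero_point) * *scale;
  *nudged_max = (quant_max - nudged_zero_point) * *scale;
}

int main() {
  float lo, hi, scale;
  Nudge(-0.1f, 6.3f, 0.0f, 255.0f, &lo, &hi, &scale);
  std::printf("nudged range: [%f, %f], scale %f\n", lo, hi, scale);
  return 0;
}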
/external/tensorflow/tensorflow/compiler/tests/ |
fake_quant_ops_test.py |
   82  def _TestOp(self, input_min, input_max, num_bits, narrow_range,
  117  max=input_max,
  180  def _TestOp(self, input_min, input_max, num_bits, narrow_range,
  211  max=input_max,
  281  def _TestOp(self, input_min, input_max, num_bits, narrow_range,
  325  max_placeholder: input_max
  386  def _TestOp(self, input_min, input_max, num_bits, narrow_range,
  429  max_placeholder: input_max
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
arithmetic.cc |
  142  XlaOp input_max = Reduce(input, init_value, reducer,
  149  ConvertElementType(Eq(input, input_max, broadcast_dims), output_type);
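
arithmetic.cc reduces the input to its maximum with Reduce and then compares every element against that maximum with Eq, producing the mask that argmax is built from. The same two-step idea in plain C++:

#include <algorithm>
#include <cstdio>
#include <vector>

// Step 1: reduce to the maximum. Step 2: mark positions equal to it.
// Assumes a non-empty input.
std::vector<int> MaxMask(const std::vector<float>& input) {
  const float input_max = *std::max_element(input.begin(), input.end());
  std::vector<int> mask(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    mask[i] = (input[i] == input_max) ? 1 : 0;  // like Eq(input, input_max)
  }
  return mask;
}

int main() {
  for (int m : MaxMask({0.5f, 2.0f, -1.0f, 2.0f})) std::printf("%d ", m);
  std::printf("\n");
  return 0;
}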
|