/external/tensorflow/tensorflow/core/kernels/
meta_support.h
  71: const quint8* a_data, const quint8* b_data, qint32* c_data,
  81: float output_max, quint8* output);
  85: void Dequantize(OpKernelContext* context, const quint8* input, int count,
  91: float range_max, quint8* output);
  98: void QuantizedBiasAdd(OpKernelContext* context, const quint8* input,
  99: int input_count, const quint8* bias, int bias_count,
  106: void Clamp(OpKernelContext* context, const quint8* input, int input_count,
  107: quint8 clamp_min, quint8 clamp_max, quint8* output)
  [all...]
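
These are the ARM-optimized gemmlowp "meta" entry points. A minimal scalar sketch of the Clamp and Quantize semantics, using plain uint8_t in place of quint8 and dropping the OpKernelContext plumbing (the names, rounding, and saturation details here are illustrative assumptions, not the meta:: implementation):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar sketch of meta::Clamp: pin every 8-bit code into [clamp_min, clamp_max].
void ClampSketch(const uint8_t* input, int input_count, uint8_t clamp_min,
                 uint8_t clamp_max, uint8_t* output) {
  for (int i = 0; i < input_count; ++i) {
    output[i] = std::min(clamp_max, std::max(clamp_min, input[i]));
  }
}

// Scalar sketch of the quantize step: map floats in [range_min, range_max]
// linearly onto the 256 codes of an 8-bit type.
void QuantizeSketch(const float* input, int count, float range_min,
                    float range_max, uint8_t* output) {
  const float scale = 255.0f / (range_max - range_min);
  for (int i = 0; i < count; ++i) {
    const float q = std::round((input[i] - range_min) * scale);
    output[i] = static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, q)));
  }
}
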
quantized_reshape_op_test.cc
  55: input.flat<quint8>()(i) = quint8(i);
  56: expected.flat<quint8>()(i) = quint8(i);
  58: AddInputFromArray<quint8>(input.shape(), input.flat<quint8>());
  66: test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
quantized_batch_norm_op_test.cc
  72: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  78: FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);
  83: Tensor variance_quantized = FloatTensorToQuantized<quint8>(
  90: FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);
  96: FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);
  98: AddInputFromArray<quint8>(input_quantized.shape(),
  99: input_quantized.flat<quint8>());
  102: AddInputFromArray<quint8>(mean_quantized.shape(),
  103: mean_quantized.flat<quint8>());
  106: AddInputFromArray<quint8>(variance_quantized.shape()
  [all...]
quantized_pooling_ops_test.cc
  45: .Attr("T", DataTypeToEnum<quint8>::v())
  62: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  70: AddInputFromArray<quint8>(input_quantized.shape(),
  71: input_quantized.flat<quint8>());
  79: QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  90: .Attr("T", DataTypeToEnum<quint8>::v())
  107: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  115: AddInputFromArray<quint8>(input_quantized.shape(),
  116: input_quantized.flat<quint8>());
  124: QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max)
  [all...]
quantized_activation_ops.cc
  42: if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) {
  43: auto input_ui8_array = input.flat<quint8>();
  45: min_as_quantized, 255, output->flat<quint8>().data());
  76: if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) {
  77: auto input_ui8_array = input.flat<quint8>();
  80: output->flat<quint8>().data());
  105: .TypeConstraint<quint8>("Tinput")
  106: .TypeConstraint<quint8>("out_type"),
  107: QuantizedReluOp<quint8>);
  116: .TypeConstraint<quint8>("Tinput"
  [all...]
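
The pattern at lines 42 and 76 gates the hand-tuned path at runtime. A sketch of the same dispatch for quantized ReLU, where FastPathAvailable() is a hypothetical stand-in for meta::IsSupportedAndEnabled(); on quint8 data, ReLU reduces to a clamp against the code for 0.0f ("min_as_quantized"):

#include <algorithm>
#include <cstdint>

// Hypothetical stand-in: true only when the ARM path is compiled in and
// enabled at runtime.
bool FastPathAvailable() { return false; }

// Quantized ReLU as a clamp: codes below the code for 0.0f are raised to
// it, and 255 already bounds the top of the quint8 range.
void QuantizedReluSketch(const uint8_t* input, int count,
                         uint8_t min_as_quantized, uint8_t* output) {
  if (FastPathAvailable()) {
    // The real kernel calls meta::Clamp(context, input, count,
    // min_as_quantized, 255, output) here.
  } else {
    for (int i = 0; i < count; ++i) {
      output[i] = std::max(min_as_quantized, input[i]);
    }
  }
}
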
quantized_bias_add_op_test.cc
  59: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  67: FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
  73: AddInputFromArray<quint8>(input_quantized.shape(),
  74: input_quantized.flat<quint8>());
  75: AddInputFromArray<quint8>(bias_quantized.shape(),
  76: bias_quantized.flat<quint8>());
  119: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  139: FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);
  155: AddInputFromArray<quint8>(input_quantized.shape(),
  156: input_quantized.flat<quint8>());
  [all...]
quantized_activation_ops_test.cc
  52: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  56: AddInputFromArray<quint8>(input_quantized.shape(),
  57: input_quantized.flat<quint8>());
  65: QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  83: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  87: AddInputFromArray<quint8>(input_quantized.shape(),
  88: input_quantized.flat<quint8>());
  96: QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
quantized_bias_add_op.cc
  67: if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&
  68: std::is_same<T2, quint8>() && std::is_same<T3, qint32>()) {
  69: auto input_ui8_array = input.flat<quint8>();
  70: auto bias_ui8_array = bias.flat<quint8>();
  96: .TypeConstraint<quint8>("T1")
  97: .TypeConstraint<quint8>("T2")
  99: QuantizedBiasAddOp<quint8, quint8, qint32>);
quantization_utils_test.cc
  39: std::vector<quint8> expected_values;
  42: expected_values.push_back(FloatToQuantized<quint8>(
  50: auto output_values = o_tensor.flat<quint8>();
  58: RequantizeManyInNewRangeUsingEigen<qint32, quint8>(
  78: const std::vector<quint8>& values_quantized,
  90: tensorflow::test::AsTensor(gtl::ArraySlice<quint8>(values_quantized));
  94: const auto input_array = i_tensor.flat<quint8>();
  190: std::vector<quint8> values_quantized;
  193: values_quantized.push_back(FloatToQuantized<quint8>(v, r[0], r[1]));
  199: int low = Eigen::NumTraits<quint8>::lowest()
  [all...]
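
These tests exercise FloatToQuantized<quint8> from quantization_utils.h. A sketch of the affine mapping being tested, assuming the linear [range_min, range_max] -> [0, 255] scheme these kernels use (the exact rounding lives in the real header):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of FloatToQuantized<quint8>: scale a float within
// [range_min, range_max] to the [0, 255] code space of an 8-bit type.
uint8_t FloatToQuantizedSketch(float input, float range_min, float range_max) {
  const float lowest = std::numeric_limits<uint8_t>::lowest();   // 0
  const float highest = std::numeric_limits<uint8_t>::max();     // 255
  const float range_scale = (highest - lowest) / (range_max - range_min);
  const float quantized = std::round((input - range_min) * range_scale) + lowest;
  return static_cast<uint8_t>(std::max(lowest, std::min(highest, quantized)));
}

With range [0.0f, 1.0f], for instance, 0.5f lands on code 128 (127.5 rounded away from zero).
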
quantized_matmul_op_test.cc
  58: AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
  63: AddInputFromArray<quint8>(TensorShape({3, 4}),
  116: AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11});
  120: AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});
  160: AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11});
  164: AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});
  209: AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {
  228: AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {
  305: Tensor a_quantized = FloatTensorToQuantized<quint8>(a_float, a_min, a_max);
  325: Tensor b_quantized = FloatTensorToQuantized<quint8>(b_float, b_min, b_max)
  [all...]
quantized_conv_ops_test.cc
  74: FloatTensorToQuantized<quint8>(image_float, image_min, image_max);
  88: FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);
  90: AddInputFromArray<quint8>(image_quantized.shape(),
  91: image_quantized.flat<quint8>());
  92: AddInputFromArray<quint8>(filter_quantized.shape(),
  93: filter_quantized.flat<quint8>());
  153: AddInputFromArray<quint8>(
  158: AddInputFromArray<quint8>(
  196: AddInputFromArray<quint8>(
  201: AddInputFromArray<quint8>(
  [all...]
quantize_op_test.cc
  36: .Attr("T", DataTypeToEnum<quint8>::v())
  50: test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255});
  51: test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
  59: .Attr("T", DataTypeToEnum<quint8>::v())
  70: // we are performing quantization by scaling to quint8.
  74: test::FillValues<quint8>(&expected, {0, 0, 1, 1, 2, 127, 255, 255});
  75: test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
  91: .Attr("T", DataTypeToEnum<quint8>::v())
  101: // we are performing quantization by scaling to quint8.
  103: // Input element 2.0 should map to max quint8 value 255
  [all...]
requantize.cc
  78: std::is_same<T2, quint8>()) {
  83: output->flat<quint8>().data());
  100: .TypeConstraint<quint8>("out_type"),
  101: RequantizeOp<qint32, quint8>);
quantize_down_and_shrink_range_op_test.cc
  44: .Attr("out_type", DataTypeToEnum<quint8>::v())
  60: test::FillValues<quint8>(&expected, {0, 127, 255});
  61: test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
quantized_resize_bilinear_op.cc
  131: inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2,
  132: const quint8* v3, const quint8* v4, const quint8* v5,
  133: const quint8* v6, const quint8* v7) {
  194: const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0
  [all...]
quantized_instance_norm_test.cc
  28: void ReferenceImpl(const quint8* inp, float inp_min, float inp_max,
  107: ReferenceImpl(input.flat<quint8>().data(), x_min, x_max, input.shape(),
  110: auto out = outputs[0].flat<quint8>();
  126: auto input = input_tensor.flat<quint8>();
  128: input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());
  135: auto input = input_tensor.flat<quint8>();
  145: auto input = input_tensor.flat<quint8>();
  156: auto input = input_tensor.flat<quint8>();
  157: input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());
  164: auto input = input_tensor.flat<quint8>();
  [all...]
quantized_matmul_op.cc
  35: void GemmlowpMultiply(OpKernelContext* op_context, const quint8* a_data,
  36: const quint8* b_data, qint32* c_data, int m, int n, int k,
  135: if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&
  136: std::is_same<T2, quint8>() && std::is_same<Toutput, qint32>() &&
  143: } else if (std::is_same<T1, quint8>() && std::is_same<T2, quint8>() &&
  196: .TypeConstraint<quint8>("T1")
  197: .TypeConstraint<quint8>("T2")
  199: QuantizedMatMulOp<quint8, quint8, qint32>)
  [all...]
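
GemmlowpMultiply hands both quint8 buffers to gemmlowp along with per-input zero-point offsets and accumulates into qint32. A naive reference for the same arithmetic, assuming row-major m-by-k and k-by-n layouts (gemmlowp's real kernels are packed and vectorized):

#include <cstdint>

// Reference quantized matmul: subtract each input's zero-point offset,
// multiply, and accumulate into 32 bits. a is m x k, b is k x n, c is m x n.
void QuantizedMatMulReference(const uint8_t* a, const uint8_t* b, int32_t* c,
                              int m, int n, int k, int32_t a_offset,
                              int32_t b_offset) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      int32_t sum = 0;  // can overflow for very deep k; fine for a sketch
      for (int l = 0; l < k; ++l) {
        sum += (static_cast<int32_t>(a[i * k + l]) - a_offset) *
               (static_cast<int32_t>(b[l * n + j]) - b_offset);
      }
      c[i * n + j] = sum;
    }
  }
}
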
quantized_add_op.cc
  63: void ScalarAddition(OpKernelContext* context, const quint8* full_input,
  65: int64 num_elements, quint8 scalar_input,
  68: const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(
  72: QuantizedToFloat<quint8>(0, full_input_min, full_input_max);
  74: QuantizedToFloat<quint8>(1, full_input_min, full_input_max);
  120: void ScalarAddition(OpKernelContext* context, const quint8* full_input,
  122: int64 num_elements, quint8 scalar_input,
  125: const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(
  129: QuantizedToFloat<quint8>(0, full_input_min, full_input_max);
  131: QuantizedToFloat<quint8>(1, full_input_min, full_input_max)
  [all...]
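
ScalarAddition first re-expresses the quint8 scalar in the qint32 output range via RequantizeInNewRange. A slow-but-clear sketch of that step done through float, assuming qint32 spreads its output range across the full int32 code space (the real helper stays in fixed point):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Decode the 8-bit code in its input range, then re-encode it across the
// full int32 code space of the output range.
int32_t RequantizeThroughFloatSketch(uint8_t input, float input_min,
                                     float input_max, float output_min,
                                     float output_max) {
  // QuantizedToFloat<quint8>-style decode: code/255 of the way up the range.
  const double real_value =
      input_min + (input / 255.0) * (input_max - input_min);
  // FloatToQuantized<qint32>-style encode.
  const double lowest = std::numeric_limits<int32_t>::lowest();
  const double highest = std::numeric_limits<int32_t>::max();
  const double scale = (highest - lowest) / (output_max - output_min);
  const double q = std::round((real_value - output_min) * scale) + lowest;
  return static_cast<int32_t>(std::max(lowest, std::min(highest, q)));
}
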
requantize_op_test.cc
  41: .Attr("out_type", DataTypeToEnum<quint8>::v())
  62: test::FillValues<quint8>(&expected, {0, 128, 255});
  63: test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
quantize_down_and_shrink_range.cc
  84: std::is_same<T2, quint8>()) {
  88: actual_max_float, output->flat<quint8>().data());
  104: .TypeConstraint<quint8>("out_type"),
  105: QuantizeDownAndShrinkRangeOp<qint32, quint8>);
quantized_concat_op_test.cc
  73: .Attr("T", DataTypeToEnum<quint8>::v())
  83: FloatTensorToQuantized<quint8>(first_float, first_min, first_max);
  92: FloatTensorToQuantized<quint8>(second_float, second_min, second_max);
  101: AddInputFromArray<quint8>(first_quantized.shape(),
  102: first_quantized.flat<quint8>());
  103: AddInputFromArray<quint8>(second_quantized.shape(),
  104: second_quantized.flat<quint8>());
  114: QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  201: .Attr("T", DataTypeToEnum<quint8>::v())
  211: FloatTensorToQuantized<quint8>(first_float, first_min, first_max)
  [all...]
meta_support.cc
  118: void QuantizedGemmImpl(OpKernelContext* tf_context, const quint8* a_data,
  119: const quint8* b_data, qint32* c_data, int m, int n,
  221: bool transpose_b, const quint8* a_data, const quint8* b_data,
  258: float output_max, quint8* output) {
  292: void Dequantize(OpKernelContext* tf_context, const quint8* input, int count,
  317: float range_min, float range_max, quint8* output) {
  346: void QuantizedBiasAdd(OpKernelContext* tf_context, const quint8* input,
  347: int input_count, const quint8* bias, int bias_count,
  385: void Clamp(OpKernelContext* tf_context, const quint8* input, int count
  [all...]
quantized_mul_op.cc
  51: void ScalarMultiply<quint8, qint32>(OpKernelContext* context,
  52: const quint8* full_input,
  54: quint8 scalar_input,
  125: void VectorMultiply<quint8, qint32>(OpKernelContext* context,
  126: const quint8* x_data, int32 offset_x,
  127: const quint8* y_data, int32 offset_y,
  203: void VectorTensorMultiply<quint8, qint32>(
  204: const quint8* vector_data, int32 vector_offset, int64 vector_num_elements,
  205: const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements,
  388: .TypeConstraint<quint8>("T1"
  [all...]
/external/tensorflow/tensorflow/core/framework/
type_traits.h
  43: struct is_quantized<quint8> : true_type {};
  83: class numeric_limits<tensorflow::quint8>
  99: struct is_signed<tensorflow::quint8> : public is_signed<tensorflow::uint8> {};
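
These specializations are what let templated kernels branch on quantized types at compile time. A compile-time usage sketch, assuming the TensorFlow source tree is on the include path:

#include <type_traits>

#include "tensorflow/core/framework/type_traits.h"

// is_quantized<T> tags the quantized wrapper types; everything else stays
// false_type. The is_signed forwarding above makes quint8 read as unsigned,
// matching its uint8 storage.
static_assert(tensorflow::is_quantized<tensorflow::quint8>::value,
              "quint8 is a quantized type");
static_assert(!tensorflow::is_quantized<float>::value,
              "plain float is not");
static_assert(!std::is_signed<tensorflow::quint8>::value,
              "quint8 inherits unsignedness from uint8");
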
/external/tensorflow/tensorflow/python/ops/
dequantize_op_test.py
  42: dtypes.quint8: np.uint8,
  63: self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, dtypes.quint8)
  64: self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 123.456, dtypes.quint8)
  66: np.array([0, 4, 42, 108, 243]), 5.0, 200.2, dtypes.quint8)
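
The Python test drives the Dequantize op end to end on quint8 codes such as {0, 128, 255}. The mapping it checks is the inverse of the quantize sketch above; in C++ form, assuming the linear MIN_COMBINED-style mode the test uses:

#include <cstdint>

// Sketch of the dequantize mapping: an 8-bit code walks linearly from
// range_min (code 0) up to range_max (code 255).
float DequantizeSketch(uint8_t input, float range_min, float range_max) {
  return range_min + (input / 255.0f) * (range_max - range_min);
}
// e.g. with range [0.0f, 6.0f]: 0 -> 0.0f, 128 -> ~3.01f, 255 -> 6.0f
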