Searched full:qint8 (Results 1 - 25 of 71)


  /external/tensorflow/tensorflow/core/kernels/
maxpooling_op.h 43 struct SpatialMaxPooling<Device, qint8> {
44 void operator()(const Device& d, typename TTypes<qint8, 4>::Tensor output,
45 typename TTypes<qint8, 4>::ConstTensor input, int window_rows,
depthtospace_op.cc 69 // Assuming qint8 <--> NCHW_VECT_C, OIHW_VECT_I (int8x4) here.
70 constexpr bool is_int8x4 = std::is_same<T, qint8>::value;
73 "qint8 should be used with data_format NCHW_VECT_C."));
117 // NCHW_VECT_C with 4 x qint8 can be treated as NCHW int32.
191 Name("DepthToSpace").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
192 DepthToSpaceOp<GPUDevice, qint8>);
dequantize_op_test.cc 118 RunDequantizeMinCombinedTest<qint8>(0, 255.0f);
141 RunDequantizeScaledTest<qint8>(-255.0f, 127.0f, 0, 0.0);
144 RunDequantizeScaledTest<qint8>(-10.0f, 127.0f, -127, -127.0);
147 RunDequantizeScaledTest<qint8>(-2.0f, 1.0f, -127, -2.0);
150 RunDequantizeScaledTest<qint8>(-1.0f, 300.0f, 42, 99.212601);
185 BM_DequantizeMinCombinedCpu<qint8>(iters);
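
The RunDequantizeScaledTest expectations above are consistent with SCALED-mode dequantization, where the scale for qint8 is max(|min_range|, |max_range|) / 127. A minimal numpy sketch (the helper name and the scale rule are inferred from the test values, not shown in the snippet):

    import numpy as np

    def dequantize_scaled_qint8(values, min_range, max_range):
        # Assumed SCALED rule for qint8: out[i] = in[i] * max(|min|, |max|) / 127.
        scale = max(abs(min_range), abs(max_range)) / 127.0
        return np.asarray(values, dtype=np.int8).astype(np.float32) * scale

    # Reproduces RunDequantizeScaledTest<qint8>(-1.0f, 300.0f, 42, 99.212601):
    print(dequantize_scaled_qint8([42], -1.0, 300.0))  # ~[99.2126]
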
spacetodepth_op.cc 69 // Assuming qint8 <--> NCHW_VECT_C, OIHW_VECT_I (int8x4) here.
70 constexpr bool is_int8x4 = std::is_same<T, qint8>::value;
73 "qint8 should be used with data_format NCHW_VECT_C."));
119 // NCHW_VECT_C with 4 x qint8 can be treated as NCHW int32.
191 Name("SpaceToDepth").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
192 SpaceToDepthOp<GPUDevice, qint8>);
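
The `is_int8x4` comments in depthtospace_op.cc and spacetodepth_op.cc above rest on a layout fact: in NCHW_VECT_C, four qint8 channel lanes occupy one 32-bit word, so a [N, C/4, H, W, 4] int8 tensor is bit-compatible with an [N, C/4, H, W] int32 tensor. A small numpy illustration of that reinterpretation (shapes chosen arbitrarily):

    import numpy as np

    n, c, h, w = 1, 8, 2, 2
    # NCHW_VECT_C: channels are split into groups of 4 qint8 lanes.
    vect_c = np.arange(n * c * h * w, dtype=np.int8).reshape(n, c // 4, h, w, 4)

    # Viewing the trailing 4-lane axis as int32 yields an NCHW int32 tensor,
    # which is what "4 x qint8 can be treated as NCHW int32" means.
    as_int32 = vect_c.view(np.int32).reshape(n, c // 4, h, w)
    print(as_int32.shape)  # (1, 2, 2, 2)
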
quantize_op_test.cc 121 .Attr("T", DataTypeToEnum<qint8>::v())
134 test::FillValues<qint8>(&expected, {-127, 0, 1, 1, 2, 64, 127});
135 test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
151 .Attr("T", DataTypeToEnum<qint8>::v())
161 // Input element 2.0 should map to 127, max value of qint8.
162 test::FillValues<qint8>(&expected, {-64, 0, 127});
163 test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
179 .Attr("T", DataTypeToEnum<qint8>::v())
192 test::FillValues<qint8>(&expected, {-126, 0, 1, 2, 4, 64, 127});
193 test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
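
The expected values in quantize_op_test.cc above match SCALED-mode quantization, where the float range is mapped so that max_range lands on 127. A hedged numpy sketch (the helper and the assumed inputs [-1.0, 0.0, 2.0] with max_range 2.0 are reconstructions, not shown in the snippet):

    import numpy as np

    def quantize_scaled_qint8(values, max_range):
        # Assumed SCALED rule: out[i] = round(in[i] * 127 / max_range),
        # clipped to qint8's [-128, 127].
        scaled = np.asarray(values, dtype=np.float32) * (127.0 / max_range)
        return np.clip(np.rint(scaled), -128, 127).astype(np.int8)

    # "Input element 2.0 should map to 127, max value of qint8":
    print(quantize_scaled_qint8([-1.0, 0.0, 2.0], 2.0))  # -> [-64 0 127]
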
save_v2_op_test.cc 88 // Add a 2-d qint8 tensor
89 AddInput<qint8>(TensorShape({3, 2}),
90 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });
94 return *reinterpret_cast<qint32*>(&x) * qint8(2);
195 // The 2-d qint8 tensor
206 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), val.template flat<qint8>()(i));
222 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),
quantized_bias_add_op.cc 102 .TypeConstraint<qint8>("T1")
103 .TypeConstraint<qint8>("T2")
105 QuantizedBiasAddOp<qint8, qint8, qint32>);
save_op_test.cc 89 // Add a 2-d qint8 tensor
90 AddInput<qint8>(TensorShape({3, 2}),
91 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });
95 return *reinterpret_cast<qint32*>(&x) * qint8(2);
213 // The 2-d qint8 tensor
223 qint8 data[6];
226 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), data[i]);
244 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), data[i]);
451 // Add a 2-d qint8 tensor
restore_op_test.cc 124 // Input #6 is a 2-d qint8 tensor
125 Tensor input_6 = MakeInput<qint8>(TensorShape({3, 2}), [](int x) -> qint8 {
126 return *reinterpret_cast<qint8*>(&x);
132 return *reinterpret_cast<qint32*>(&x) * qint8(2);
239 // The 2-d qint8 tensor
248 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i));
260 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),
restore_v2_op_test.cc 142 // Input #6 is a 2-d qint8 tensor
143 Tensor input_6 = MakeInput<qint8>(
145 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });
150 return *reinterpret_cast<qint32*>(&x) * qint8(2);
249 // The 2-d qint8 tensor
258 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i));
270 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),
dequantize_op.cc 134 Name("Dequantize").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
135 DequantizeOp<CPUDevice, qint8>);
  /external/tensorflow/tensorflow/python/ops/
dequantize_op_test.py 43 dtypes.qint8: np.int8,
69 self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)
70 self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)
71 self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)
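
These Python cases exercise MIN_COMBINED dequantization for qint8: the quantized value is first shifted up by 128 (see the api_def excerpts below) and then mapped linearly onto [min_range, max_range]. A sketch reproducing the first case (helper name hypothetical):

    import numpy as np

    def dequantize_min_combined_qint8(values, min_range, max_range):
        # Per api_def_Dequantize.pbtxt: for qint8, in[i] += (range(T) + 1) / 2.0,
        # i.e. shift by 128, then scale onto [min_range, max_range].
        shifted = np.asarray(values, dtype=np.float32) + 128.0
        return min_range + shifted * (max_range - min_range) / 255.0

    # Mirrors _testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8):
    print(dequantize_min_combined_qint8([-128, 0, 127], -1.0, 2.0))
    # -> [-1.0, ~0.5059, 2.0]
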
  /external/tensorflow/tensorflow/contrib/fused_conv/ops/
fused_conv2d_bias_activation_op.cc 41 .Attr("T: {float, half, qint8}")
105 qint8 data formats. In the case of qint8, the output is clipped to [0..127].
112 `qint8 [ out_channels, in_channels, filter_height, filter_width ]`
115 Note: this tensor is still float, even if other inputs are qint8.
138 `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
144 `qint8 [ output_channels, input_channels / 4,
  /external/tensorflow/tensorflow/core/framework/
type_traits.h 41 struct is_quantized<qint8> : true_type {};
80 class numeric_limits<tensorflow::qint8>
97 struct is_signed<tensorflow::qint8> : public is_signed<tensorflow::int8> {};
numeric_types.h 37 typedef Eigen::QInt8 qint8;  // typedef in namespace tensorflow
types.cc 84 return "qint8";
170 } else if (sp == "qint8") {
  /external/tensorflow/tensorflow/core/api_def/base_api/
api_def_Cumprod.pbtxt 8 `complex128`, `qint8`, `quint8`, `qint32`, `half`.
api_def_Dequantize.pbtxt 24 if T == qint8, in[i] += (range(T) + 1)/ 2.0
36 Note that if quantizedtype is qint8, the operation will additionally add
api_def_Cumsum.pbtxt 8 `complex128`, `qint8`, `quint8`, `qint32`, `half`.
api_def_QuantizeV2.pbtxt 45 if T == qint8, out[i] -= (range(T) + 1) / 2.0
56 If the output type was qint8 ([-128, 127]), the operation will additionally
58 with the range of qint8.
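
In the quantize direction, api_def_QuantizeV2.pbtxt describes the inverse adjustment: after mapping onto the unsigned range, qint8 outputs are shifted down by (range(T) + 1) / 2 = 128 so they fall within [-128, 127]. A minimal sketch of that MIN_COMBINED path (rounding and clamping details are assumptions):

    import numpy as np

    def quantize_min_combined_qint8(values, min_range, max_range):
        # Map [min_range, max_range] onto [0, 255], then apply the documented
        # qint8 shift: out[i] -= (range(T) + 1) / 2.0 == 128.
        x = np.asarray(values, dtype=np.float32)
        unsigned = np.rint((x - min_range) / (max_range - min_range) * 255.0)
        return (unsigned - 128.0).astype(np.int8)

    print(quantize_min_combined_qint8([-1.0, 0.5, 2.0], -1.0, 2.0))
    # -> [-128 0 127], the inverse of the dequantize example above
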
  /external/tensorflow/tensorflow/java/src/gen/resources/
tftypes.csv 12 QInt8,n,,n,,quantized int8
  /external/tensorflow/tensorflow/contrib/fused_conv/kernels/
fused_conv2d_bias_activation_op.cc 54 struct RawType<qint8> {
117 // Assuming qint8 <--> NCHW_VECT_C, OIHW_VECT_I (int8x4) here.
118 constexpr bool is_int8x4 = std::is_same<T, qint8>::value;
125 "qint8 should be used with data_format NCHW_VECT_C."));
129 "qint8 should be used with filter_format OIHW_VECT_I."));
156 " must be of size 4 for qint8.");
319 // Assuming qint8 <--> NCHW_VECT_C, OIHW_VECT_I (int8x4) here.
320 constexpr bool is_int8x4 = std::is_same<T, qint8>::value;
484 // For qint8, we have already checked filter is OIHW_VECT_I in the
486 // generate code for qint8
  /external/tensorflow/tensorflow/contrib/fused_conv/python/ops/
fused_conv2d_bias_activation_op.py 70 Note that in qint8 mode, it also clips to 127, so acts like ReluX.
75 "NCHW_VECT_C" qint8 [batch, channels / 4, height, width, channels % 4]
84 "OIHW_VECT_I" qint8 [ output_channels, input_channels / 4,
  /external/tensorflow/tensorflow/python/framework/
dtypes.py 51 * `tf.qint8`: Quantized 8-bit signed integer.
165 return self.base_dtype in [qint8, quint8, qint16, quint16, qint32]
361 qint8 = DType(types_pb2.DT_QINT8)
362 tf_export("qint8").export_constant(__name__, "qint8")
417 types_pb2.DT_QINT8: qint8,
467 types_pb2.DT_QINT8: "qint8",
517 _np_qint8 = np.dtype([("qint8", np.int8, 1)])
545 (_np_qint8, qint8),
644 qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref
dtypes_test.py 113 self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
160 self.assertEqual(dtypes.as_dtype("qint8").is_integer, False)
180 self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
200 self.assertEqual(dtypes.as_dtype("qint8").is_complex, False)
220 self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
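
Taken together, the dtypes.py registration and the assertions above pin down qint8's classification: it is quantized but deliberately not an ordinary integer, float, complex, or unsigned type. A short sketch of the same properties through the public DType API:

    from tensorflow.python.framework import dtypes

    d = dtypes.as_dtype("qint8")
    print(d is dtypes.qint8)  # True
    print(d.is_quantized)     # True
    print(d.is_integer)       # False; quantized types are kept distinct
    print(d.is_unsigned)      # False
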
