Home
last modified time | relevance | path

Searched refs: qint8 (Results 1 – 25 of 53) sorted by relevance

123

/external/tensorflow/tensorflow/core/kernels/
Dquantize_op_test.cc121 .Attr("T", DataTypeToEnum<qint8>::v()) in TEST_F()
134 test::FillValues<qint8>(&expected, {-127, 0, 1, 1, 2, 64, 127}); in TEST_F()
135 test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); in TEST_F()
151 .Attr("T", DataTypeToEnum<qint8>::v()) in TEST_F()
162 test::FillValues<qint8>(&expected, {-64, 0, 127}); in TEST_F()
163 test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); in TEST_F()
179 .Attr("T", DataTypeToEnum<qint8>::v()) in TEST_F()
192 test::FillValues<qint8>(&expected, {-126, 0, 1, 2, 4, 64, 127}); in TEST_F()
193 test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); in TEST_F()
209 .Attr("T", DataTypeToEnum<qint8>::v()) in TEST_F()
[all …]
Ddequantize_op_test.cc118 RunDequantizeMinCombinedTest<qint8>(0, 255.0f); in TEST_F()
141 RunDequantizeScaledTest<qint8>(-255.0f, 127.0f, 0, 0.0); in TEST_F()
144 RunDequantizeScaledTest<qint8>(-10.0f, 127.0f, -127, -127.0); in TEST_F()
147 RunDequantizeScaledTest<qint8>(-2.0f, 1.0f, -127, -2.0); in TEST_F()
150 RunDequantizeScaledTest<qint8>(-1.0f, 300.0f, 42, 99.212601); in TEST_F()
185 BM_DequantizeMinCombinedCpu<qint8>(iters); in BM_DequantizeMinCombinedCpuQint8()
Dsave_op_test.cc90 AddInput<qint8>(TensorShape({3, 2}), in TEST_F()
91 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); in TEST_F()
95 return *reinterpret_cast<qint32*>(&x) * qint8(2); in TEST_F()
223 qint8 data[6]; in TEST_F()
226 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), data[i]); in TEST_F()
244 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), data[i]); in TEST_F()
452 AddInput<qint8>(TensorShape({3, 2}), in TEST_F()
453 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); in TEST_F()
457 return *reinterpret_cast<qint32*>(&x) * qint8(2); in TEST_F()
530 qint8 data[6]; in TEST_F()
Dsave_v2_op_test.cc89 AddInput<qint8>(TensorShape({3, 2}), in TEST_F()
90 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); in TEST_F()
94 return *reinterpret_cast<qint32*>(&x) * qint8(2); in TEST_F()
206 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), val.template flat<qint8>()(i)); in TEST_F()
222 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), in TEST_F()
Dquantized_bias_add_op.cc102 .TypeConstraint<qint8>("T1")
103 .TypeConstraint<qint8>("T2")
105 QuantizedBiasAddOp<qint8, qint8, qint32>);
Dmaxpooling_op.h43 struct SpatialMaxPooling<Device, qint8> {
44 void operator()(const Device& d, typename TTypes<qint8, 4>::Tensor output,
45 typename TTypes<qint8, 4>::ConstTensor input, int window_rows,
Drestore_v2_op_test.cc143 Tensor input_6 = MakeInput<qint8>( in RunTest()
145 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); in RunTest()
150 return *reinterpret_cast<qint32*>(&x) * qint8(2); in RunTest()
258 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i)); in RunTest()
270 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), in RunTest()
Drestore_op_test.cc125 Tensor input_6 = MakeInput<qint8>(TensorShape({3, 2}), [](int x) -> qint8 { in TEST_F()
126 return *reinterpret_cast<qint8*>(&x); in TEST_F()
132 return *reinterpret_cast<qint32*>(&x) * qint8(2); in TEST_F()
248 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i)); in TEST_F()
260 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), in TEST_F()
Ddepthtospace_op.cc70 constexpr bool is_int8x4 = std::is_same<T, qint8>::value; in Compute()
191 Name("DepthToSpace").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
192 DepthToSpaceOp<GPUDevice, qint8>);
Dspacetodepth_op.cc70 constexpr bool is_int8x4 = std::is_same<T, qint8>::value; in Compute()
191 Name("SpaceToDepth").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
192 SpaceToDepthOp<GPUDevice, qint8>);
Ddequantize_op.cc134 Name("Dequantize").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
135 DequantizeOp<CPUDevice, qint8>);
Dquantize_op.cc242 Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
243 QuantizeV2Op<CPUDevice, qint8>);
Dconcat_lib_cpu.cc72 REGISTER(qint8)
Dmaxpooling_op.cc1073 constexpr bool is_int8x4 = std::is_same<T, qint8>::value; in Compute()
1397 Name("MaxPool").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
1398 MaxPoolingNoMaskOp<GPUDevice, qint8>);
1404 .TypeConstraint<qint8>("T"),
1405 MaxPoolingV2Op<GPUDevice, qint8>);
1411 .TypeConstraint<qint8>("T")
1413 MaxPoolingV2Op<GPUDevice, qint8>);
/external/tensorflow/tensorflow/python/ops/
Ddequantize_op_test.py43 dtypes.qint8: np.int8,
69 self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)
70 self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)
71 self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)
/external/tensorflow/tensorflow/core/framework/
Dtype_traits.h41 struct is_quantized<qint8> : true_type {};
80 class numeric_limits<tensorflow::qint8>
97 struct is_signed<tensorflow::qint8> : public is_signed<tensorflow::int8> {};
Dtensor_test.cc312 t.matrix<qint8>()(a, b) = qint8(a * b); in TEST()
315 TestCopies<qint8>(t); in TEST()
723 auto nchw_vect_c = t_nchw_vect_c.tensor<qint8, 5>(); in TEST()
911 Tensor t1 = test::AsTensor<qint8>({0, 1, 2, 3, 4, 5}, {2, 3}); in TEST()
913 t2.flat<qint8>() = t1.flat<qint8>() + qint8(-2); in TEST()
914 Tensor t3 = test::AsTensor<qint8>({-2, -1, 0, 1, 2, 3}, {2, 3}); in TEST()
915 test::ExpectTensorEqual<qint8>(t2, t3); in TEST()
Dregister_types.h78 #define TF_CALL_qint8(m) m(::tensorflow::qint8)
109 #define TF_CALL_qint8(m) m(::tensorflow::qint8)
Dnumeric_types.h37 typedef Eigen::QInt8 qint8; typedef
/external/tensorflow/tensorflow/python/framework/
Ddtypes.py165 return self.base_dtype in [qint8, quint8, qint16, quint16, qint32]
361 qint8 = DType(types_pb2.DT_QINT8) variable
417 types_pb2.DT_QINT8: qint8,
545 (_np_qint8, qint8),
644 qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
Dtensor_util.py101 dtypes.qint8.as_numpy_dtype:
167 dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
230 dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
327 dtypes.qint8: [_FilterInt, _FilterTuple],
400 dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
589 dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16,
/external/tensorflow/tensorflow/contrib/fused_conv/kernels/
Dfused_conv2d_bias_activation_op.cc54 struct RawType<qint8> {
118 constexpr bool is_int8x4 = std::is_same<T, qint8>::value; in FusedConv2DBiasActivationOp()
320 constexpr bool is_int8x4 = std::is_same<T, qint8>::value; in launch()
650 .TypeConstraint<qint8>("T")
654 FusedConv2DBiasActivationOp<GPUDevice, qint8, float, float>);
/external/tensorflow/tensorflow/core/api_def/base_api/
Dapi_def_QuantizeV2.pbtxt45 if T == qint8, out[i] -= (range(T) + 1) / 2.0
56 If the output type was qint8 ([-128, 127]), the operation will additionally
58 with the range of qint8.
Dapi_def_Dequantize.pbtxt24 if T == qint8, in[i] += (range(T) + 1)/ 2.0
36 Note that if quantizedtype is qint8, the operation will additionally add
/external/tensorflow/tensorflow/contrib/fused_conv/python/ops/
Dfused_conv2d_bias_activation_op_test.py655 NchwToNchwVectC(nn_ops.relu(logit)), -128, 127, dtypes.qint8)
818 dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)
828 dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)
841 dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)

123