
Searched refs: input_quantized (Results 1 – 16 of 16, sorted by relevance)

/external/tensorflow/tensorflow/core/kernels/
quantized_activation_ops_test.cc
51 Tensor input_quantized = in TEST_F() local
56 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
57 input_quantized.flat<quint8>()); in TEST_F()
82 Tensor input_quantized = in TEST_F() local
87 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
88 input_quantized.flat<quint8>()); in TEST_F()

quantized_pooling_ops_test.cc
61 Tensor input_quantized = in TEST_F() local
70 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
71 input_quantized.flat<quint8>()); in TEST_F()
106 Tensor input_quantized = in TEST_F() local
115 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
116 input_quantized.flat<quint8>()); in TEST_F()

quantized_bias_add_op_test.cc
58 Tensor input_quantized = in TEST_F() local
73 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
74 input_quantized.flat<quint8>()); in TEST_F()
118 Tensor input_quantized = in TEST_F() local
155 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
156 input_quantized.flat<quint8>()); in TEST_F()

mkl_quantized_pooling_ops_test.cc
91 Tensor input_quantized = in TEST_F() local
108 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
109 input_quantized.flat<quint8>()); in TEST_F()
160 Tensor input_quantized = in TEST_F() local
176 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
177 input_quantized.flat<quint8>()); in TEST_F()

quantized_batch_norm_op_test.cc
71 Tensor input_quantized = in TEST_F() local
98 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
99 input_quantized.flat<quint8>()); in TEST_F()
168 Tensor input_quantized = in TEST_F() local
195 AddInputFromArray<quint8>(input_quantized.shape(), in TEST_F()
196 input_quantized.flat<quint8>()); in TEST_F()
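
All of the core-kernel test hits above follow the same pattern: a float input tensor is quantized into a quint8 tensor named input_quantized, which is then registered as an input of the quantized op under test. A minimal sketch of that pattern, assuming the FloatTensorToQuantized helper from tensorflow/core/kernels/quantization_utils.h and an OpsTestBase-derived fixture (the shape and values here are illustrative, not taken from any one test):

  // Body of a TEST_F: quantize a float tensor over [input_min, input_max]
  // and feed the quint8 result to the op under test.
  const float input_min = 0.0f;
  const float input_max = 255.0f;
  Tensor input_float(DT_FLOAT, TensorShape({1, 2, 2, 1}));
  test::FillValues<float>(&input_float, {1.0f, 2.0f, 3.0f, 4.0f});
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  AddInputFromArray<quint8>(input_quantized.shape(),
                            input_quantized.flat<quint8>());
  // The tests then pass input_min and input_max as separate float inputs
  // and compare the dequantized output against a float reference.
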
/external/tensorflow/tensorflow/lite/kernels/
svdf.cc
206 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1); in Prepare() local
207 input_quantized->type = weights_feature->type; in Prepare()
208 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
209 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
211 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
290 TfLiteTensor* input_quantized, TfLiteTensor* state, TfLiteTensor* output) { in EvalHybrid() argument
307 reinterpret_cast<int8_t*>(input_quantized->data.uint8); in EvalHybrid()
311 quantized_input_ptr_batch = input_quantized->data.int8; in EvalHybrid()
388 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1); in Eval() local
416 scaling_factors, input_quantized, activation_state, in Eval()
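
The TFLite kernels in this directory share the same hybrid-quantization setup in Prepare(): input_quantized is a temporary arena tensor, typed to match the quantized weights, and resized to the input's shape when the two differ. A minimal sketch reconstructed from the svdf.cc hits above (the temporary index and the weights tensor referenced vary per kernel, and the TfLiteIntArrayCopy line is the usual resize idiom rather than a line captured by this search):

  // Inside Prepare(): configure the input_quantized scratch tensor used by
  // the hybrid (float activations, quantized weights) evaluation path.
  TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1);
  input_quantized->type = weights_feature->type;      // match the weight type
  input_quantized->allocation_type = kTfLiteArenaRw;  // arena-managed scratch
  if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
    TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
    TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
                                                     input_quantized_size));
  }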

basic_rnn.cc
109 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/0); in Prepare() local
110 input_quantized->type = input_weights->type; in Prepare()
111 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
112 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
114 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
238 TfLiteTensor* input_quantized = GetTemporary(context, node, 0); in Eval() local
242 input_quantized, hidden_state_quantized, in Eval()

fully_connected.cc
141 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/0); in Prepare() local
142 input_quantized->type = filter->type; in Prepare()
143 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
146 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
148 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
209 const TfLiteTensor* bias, TfLiteTensor* input_quantized, in EvalHybrid() argument
251 quant_data = reinterpret_cast<int8_t*>(input_quantized->data.uint8); in EvalHybrid()
254 quant_data = input_quantized->data.int8; in EvalHybrid()
342 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/0); in EvalQuantized() local
345 input_quantized, scaling_factors, output); in EvalQuantized()

unidirectional_sequence_rnn.cc
110 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/0); in Prepare() local
111 input_quantized->type = input_weights->type; in Prepare()
112 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
113 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
115 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
308 TfLiteTensor* input_quantized = GetTemporary(context, node, 0); in Eval() local
312 input_quantized, hidden_state_quantized, in Eval()

bidirectional_sequence_rnn.cc
188 TfLiteTensor* input_quantized = in Prepare() local
190 input_quantized->type = fw_input_weights->type; in Prepare()
191 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
192 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
194 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
423 TfLiteTensor* scaling_factors, TfLiteTensor* input_quantized, in EvalHybrid() argument
474 GetInt8DataPtr(input_quantized, is_uint8_hybrid); in EvalHybrid()
656 TfLiteTensor* input_quantized = in Eval() local
672 input_quantized, aux_input_quantized, in Eval()

unidirectional_sequence_lstm.cc
346 TfLiteTensor* input_quantized = in Prepare() local
348 input_quantized->type = input_to_output_weights->type; in Prepare()
349 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
350 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
352 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
522 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1); in Eval() local
550 prod_scaling_factors, recovered_cell_weights, input_quantized, in Eval()

conv.cc
387 TfLiteTensor* input_quantized = in Prepare() local
389 input_quantized->type = kTfLiteInt8; in Prepare()
390 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
391 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
393 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
600 const TfLiteTensor* input_quantized = in EvalHybrid() local
602 int8_t* quantized_input_ptr_batch = input_quantized->data.int8; in EvalHybrid()

bidirectional_sequence_lstm.cc
609 TfLiteTensor* input_quantized = in Prepare() local
611 input_quantized->type = fw_input_to_output_weights->type; in Prepare()
612 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
613 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
615 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
955 TfLiteTensor* input_quantized = in Eval() local
993 recovered_cell_weights, input_quantized, aux_input_quantized, in Eval()
1016 recovered_cell_weights, input_quantized, aux_input_quantized, in Eval()

lstm.cc
421 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1); in Prepare() local
422 input_quantized->type = input_to_output_weights->type; in Prepare()
423 input_quantized->allocation_type = kTfLiteArenaRw; in Prepare()
424 if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { in Prepare()
426 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, in Prepare()
601 TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1); in Eval() local
628 input_quantized, in Eval()

lstm_eval.h
82 TfLiteTensor* recovered_cell_weights, TfLiteTensor* input_quantized,

lstm_eval.cc
1095 TfLiteTensor* recovered_cell_weights, TfLiteTensor* input_quantized, in EvalHybrid()
1224 GetInt8DataPtr(input_quantized, is_uint8_hybrid); in EvalHybrid()
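
At evaluation time these hybrid kernels (svdf.cc, fully_connected.cc, conv.cc, the RNN/LSTM kernels, and lstm_eval.cc) quantize the float input batch into input_quantized and hand an int8 view of it to the quantized matrix-multiply routines; the data.uint8 / data.int8 hits above come from kernels that either reinterpret a uint8-backed buffer or read the int8 buffer directly. A minimal sketch of that step, assuming tensor_utils::SymmetricQuantizeFloats from lite/kernels/internal/tensor_utils.h and the kernel's usual locals (batch_size, input_size, scaling_factors_ptr):

  // Inside EvalHybrid(): per-batch symmetric quantization of the float input
  // into the input_quantized temporary configured in Prepare().
  int8_t* quantized_input_ptr_batch =
      reinterpret_cast<int8_t*>(input_quantized->data.uint8);
  // Kernels whose temporary is typed kTfLiteInt8 use input_quantized->data.int8
  // directly instead of the reinterpret_cast.
  float unused_min, unused_max;
  for (int b = 0; b < batch_size; ++b) {
    const int offset = b * input_size;
    tensor_utils::SymmetricQuantizeFloats(
        input->data.f + offset, input_size,
        quantized_input_ptr_batch + offset, &unused_min, &unused_max,
        &scaling_factors_ptr[b]);
  }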