
Searched refs:scaling_factors (Results 1 – 19 of 19) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/internal/
kernel_utils.cc
126 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors, in RnnBatchStep() argument
136 quantized_hidden_state_ptr_batch, scaling_factors, in RnnBatchStep()
149 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors, in RnnBatchStep() argument
170 &scaling_factors[b]); in RnnBatchStep()
171 scaling_factors[b] *= input_weights_scale; in RnnBatchStep()
177 scaling_factors, batch_size, output_ptr_batch, /*result_stride=*/1); in RnnBatchStep()
189 &scaling_factors[b]); in RnnBatchStep()
190 scaling_factors[b] *= aux_input_weights_scale; in RnnBatchStep()
196 aux_quantized_input_ptr_batch, scaling_factors, batch_size, in RnnBatchStep()
210 &scaling_factors[b]); in RnnBatchStep()
[all …]
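
Note: the matches above trace the hybrid (float activations, int8 weights) RNN step. Each batch row is symmetrically quantized, its per-row scale is written to scaling_factors[b] and premultiplied by the weight scale, and the folded scales are handed to the batched matmul. A minimal sketch of that pattern, reconstructed from the snippet (surrounding variable names are assumptions, not verbatim code):

    // Quantize each batch row and fold the weight scale into its factor, so
    // the kernel can dequantize with a single multiply per output:
    //   sum(x_f * w_f) ~= (input_scale * weight_scale) * sum(x_q * w_q)
    for (int b = 0; b < batch_size; ++b) {
      const int offset = b * input_size;
      tensor_utils::SymmetricQuantizeFloats(
          input_ptr_batch + offset, input_size,
          quantized_input_ptr_batch + offset, &unused_min, &unused_max,
          &scaling_factors[b]);
      scaling_factors[b] *= input_weights_scale;
    }
    tensor_utils::MatrixBatchVectorMultiplyAccumulate(
        input_weights_ptr, num_units, input_size, quantized_input_ptr_batch,
        scaling_factors, batch_size, output_ptr_batch, /*result_stride=*/1);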
kernel_utils.h
72 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors,
84 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors,
tensor_utils.h
82 const int8_t* __restrict__ vectors, const float* scaling_factors,
99 const float* scaling_factors, int n_batch, float* __restrict__ result,
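
For reference, the two declarations matched here are, reconstructed from this TensorFlow Lite revision (treat the exact parameter lists as an assumption):

    // Dense: result[b][r] += scaling_factors[b] * dot(row r of matrix, vectors[b]).
    void MatrixBatchVectorMultiplyAccumulate(
        const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
        const int8_t* __restrict__ vectors, const float* scaling_factors,
        int n_batch, float* __restrict__ result, int result_stride);

    // Sparse variant: `ledger` records which blocks of each row are non-zero.
    void SparseMatrixBatchVectorMultiplyAccumulate(
        const int8_t* __restrict__ matrix, const uint8_t* ledger,
        const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
        const float* scaling_factors, int n_batch, float* __restrict__ result,
        int result_stride);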
/external/tensorflow/tensorflow/lite/kernels/
basic_rnn.cc
131 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2); in Prepare() local
132 scaling_factors->type = kTfLiteFloat32; in Prepare()
133 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
135 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
138 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
180 TfLiteTensor* scaling_factors, in EvalHybrid() argument
207 float* scaling_factors_ptr = scaling_factors->data.f; in EvalHybrid()
240 TfLiteTensor* scaling_factors = GetTemporary(context, node, 2); in Eval() local
243 scaling_factors, hidden_state, output); in Eval()
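
basic_rnn.cc shows the Prepare-side pattern that recurs nearly verbatim in unidirectional_sequence_rnn.cc, svdf.cc, fully_connected.cc, conv.cc, and the LSTM kernels below: a float32, arena-allocated temporary holding one scaling factor per batch. A minimal reconstruction (the resize boilerplate between the matched lines is an assumption):

    // One float scaling factor per batch row, resized only when batch_size changes.
    TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2);
    scaling_factors->type = kTfLiteFloat32;
    scaling_factors->allocation_type = kTfLiteArenaRw;
    int scaling_dims[1] = {batch_size};
    if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
      TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
      scaling_factors_size->data[0] = batch_size;
      TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
                                                       scaling_factors_size));
    }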
unidirectional_sequence_rnn.cc
132 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2); in Prepare() local
133 scaling_factors->type = kTfLiteFloat32; in Prepare()
134 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
136 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
139 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
208 TfLiteTensor* hidden_state_scratch, TfLiteTensor* scaling_factors, in EvalHybrid() argument
245 float* scaling_factors_ptr = scaling_factors->data.f; in EvalHybrid()
310 TfLiteTensor* scaling_factors = GetTemporary(context, node, 2); in Eval() local
313 scaling_factors, hidden_state, output); in Eval()
svdf.cc
217 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2); in Prepare() local
218 scaling_factors->type = kTfLiteFloat32; in Prepare()
219 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
221 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
224 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
289 TfLiteTensor* scratch, TfLiteTensor* scaling_factors, in EvalHybrid() argument
316 float* scaling_factors_ptr = scaling_factors->data.f; in EvalHybrid()
389 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2); in Eval() local
416 scaling_factors, input_quantized, activation_state, in Eval()
lstm_eval.cc
470 float* scaling_factors, float* product_scaling_factors, in LstmStepWithAuxInput()
510 &unused_min, &unused_max, &scaling_factors[b]); in LstmStepWithAuxInput()
516 scaling_factors[b] * input_to_input_weights_scale; in LstmStepWithAuxInput()
526 scaling_factors[b] * input_to_forget_weights_scale; in LstmStepWithAuxInput()
535 scaling_factors[b] * input_to_cell_weights_scale; in LstmStepWithAuxInput()
543 scaling_factors[b] * input_to_output_weights_scale; in LstmStepWithAuxInput()
560 &scaling_factors[b]); in LstmStepWithAuxInput()
566 scaling_factors[b] * aux_input_to_input_weights_scale; in LstmStepWithAuxInput()
576 scaling_factors[b] * aux_input_to_forget_weights_scale; in LstmStepWithAuxInput()
585 scaling_factors[b] * aux_input_to_cell_weights_scale; in LstmStepWithAuxInput()
[all …]
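
In LstmStepWithAuxInput the per-batch input scale is reused across gates: the row is quantized once, then each gate folds in its own weight scale to form product_scaling_factors for its accumulate call. Sketch for one gate, reconstructed from the snippet (the other gates differ only in which weight scale is applied):

    // Quantize each batch row once, recording its scale.
    for (int b = 0; b < n_batch; ++b) {
      tensor_utils::SymmetricQuantizeFloats(
          input_ptr + b * n_input, n_input, quantized_input_ptr + b * n_input,
          &unused_min, &unused_max, &scaling_factors[b]);
    }
    // Fold in a per-gate weight scale, e.g. for the forget gate, before the
    // corresponding MatrixBatchVectorMultiplyAccumulate call.
    for (int b = 0; b < n_batch; ++b) {
      product_scaling_factors[b] =
          scaling_factors[b] * input_to_forget_weights_scale;
    }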
fully_connected.cc
152 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/1); in Prepare() local
153 scaling_factors->type = kTfLiteFloat32; in Prepare()
154 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
156 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
159 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
210 TfLiteTensor* scaling_factors, TfLiteTensor* output) { in EvalHybrid() argument
247 float* scaling_factors_ptr = scaling_factors->data.f; in EvalHybrid()
343 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/1); in EvalQuantized() local
345 input_quantized, scaling_factors, output); in EvalQuantized()
bidirectional_sequence_rnn.cc
231 TfLiteTensor* scaling_factors = in Prepare() local
233 scaling_factors->type = kTfLiteFloat32; in Prepare()
234 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
236 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
239 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
423 TfLiteTensor* scaling_factors, TfLiteTensor* input_quantized, in EvalHybrid() argument
479 float* scaling_factors_ptr = scaling_factors->data.f; in EvalHybrid()
662 TfLiteTensor* scaling_factors = in Eval() local
671 bw_aux_input_weights, params, scaling_factors, in Eval()
unidirectional_sequence_lstm.cc
390 TfLiteTensor* scaling_factors = in Prepare() local
392 scaling_factors->type = kTfLiteFloat32; in Prepare()
393 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
395 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
398 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
527 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/4); in Eval() local
549 /*output_offset=*/0, scratch_buffer, scaling_factors, in Eval()
bidirectional_sequence_lstm.cc
683 TfLiteTensor* scaling_factors = in Prepare() local
685 scaling_factors->type = kTfLiteFloat32; in Prepare()
686 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
688 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
691 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
965 TfLiteTensor* scaling_factors = in Eval() local
992 fw_scratch_buffer, scaling_factors, prod_scaling_factors, in Eval()
1015 bw_scratch_buffer, scaling_factors, prod_scaling_factors, in Eval()
lstm.cc
461 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/4); in Prepare() local
462 scaling_factors->type = kTfLiteFloat32; in Prepare()
463 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
465 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
468 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
606 TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/4); in Eval() local
627 scaling_factors, prod_scaling_factors, recovered_cell_weights, in Eval()
conv.cc
399 TfLiteTensor* scaling_factors = in Prepare() local
401 scaling_factors->type = kTfLiteFloat32; in Prepare()
402 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
408 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
411 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
lstm_eval.h
81 TfLiteTensor* scaling_factors, TfLiteTensor* prod_scaling_factors,
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h
48 const int8_t* __restrict__ vectors, const float* scaling_factors,
58 const float* scaling_factors, int n_batch, float* __restrict__ result,
171 const int8_t* __restrict__ vector, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
174 scaling_factors, n_batch, result, in MatrixBatchVectorMultiplyAccumulate()
188 const float* scaling_factors, int n_batch, float* __restrict__ result, in SparseMatrixBatchVectorMultiplyAccumulate() argument
191 matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in SparseMatrixBatchVectorMultiplyAccumulate()
portable_tensor_utils.cc
90 const int8_t* __restrict__ vectors, const float* scaling_factors, in PortableMatrixBatchVectorMultiplyAccumulate() argument
94 const float batch_scaling_factor = scaling_factors[batch]; in PortableMatrixBatchVectorMultiplyAccumulate()
146 const float* scaling_factors, int n_batch, float* __restrict__ result, in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
153 const float batch_scaling_factor = scaling_factors[batch]; in PortableSparseMatrixBatchVectorMultiplyAccumulate()
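
The portable reference makes the arithmetic explicit: one int32 dot product per (batch, row) pair, scaled once by that batch's factor. A simplified sketch of the dense loop (pointer stepping flattened relative to the real code):

    void PortableMatrixBatchVectorMultiplyAccumulate(
        const int8_t* __restrict__ matrix, int m_rows, int m_cols,
        const int8_t* __restrict__ vectors, const float* scaling_factors,
        int n_batch, float* __restrict__ result, int result_stride) {
      for (int batch = 0; batch < n_batch; ++batch) {
        const float batch_scaling_factor = scaling_factors[batch];
        const int8_t* vector = vectors + batch * m_cols;
        for (int row = 0; row < m_rows; ++row) {
          int32_t dotprod = 0;
          for (int col = 0; col < m_cols; ++col) {
            dotprod += matrix[row * m_cols + col] * vector[col];
          }
          // Integer math inside; one float multiply-add per output element.
          result[(batch * m_rows + row) * result_stride] +=
              dotprod * batch_scaling_factor;
        }
      }
    }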
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
tensor_utils_impl.h
50 const int8_t* __restrict__ vectors, const float* scaling_factors,
54 const int8_t* __restrict__ vectors, const float* scaling_factors,
68 const float* scaling_factors, int n_batch, float* __restrict__ result,
73 const float* scaling_factors, int n_batch, float* __restrict__ result,
neon_tensor_utils.h
37 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
40 vectors, scaling_factors, n_batch, result, result_stride); in MatrixBatchVectorMultiplyAccumulate()
54 const float* scaling_factors, int n_batch, float* __restrict__ result, in SparseMatrixBatchVectorMultiplyAccumulate() argument
57 vectors, scaling_factors, in SparseMatrixBatchVectorMultiplyAccumulate()
neon_tensor_utils.cc
198 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
212 const float* scaling_factors_ptr = scaling_factors + batch; in DotprodMatrixBatchFourVectorMultiplyAccumulate()
316 const float* scaling_factors, int n_batch, float* __restrict__ result, in DotprodSparseMatrixBatchVectorMultiplyAccumulate() argument
372 static_cast<int32>(row_sum) * scaling_factors[batch]; in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
381 const int8_t* __restrict__ vectors, const float* scaling_factors, in NeonMatrixBatchVectorMultiplyAccumulate() argument
390 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result); in NeonMatrixBatchVectorMultiplyAccumulate()
428 const float batch_scaling_factor = scaling_factors[batch]; in NeonMatrixBatchVectorMultiplyAccumulate()
556 const float* scaling_factors, int n_batch, float* __restrict__ result, in NeonSparseMatrixBatchVectorMultiplyAccumulate() argument
561 matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch, in NeonSparseMatrixBatchVectorMultiplyAccumulate()
579 const float batch_scaling_factor = scaling_factors[batch]; in NeonSparseMatrixBatchVectorMultiplyAccumulate()
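
Whether via the Arm dotprod path, the generic NEON path, or the scalar fallback, the optimized kernels reduce to the same identity as the portable code: an integer dot product scaled once by scaling_factors[batch], into which the caller already folded the weight scale. A self-contained numeric check of that identity (all values are made-up illustrations):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const float input_scale = 0.05f;   // per-batch scale from quantization
      const float weight_scale = 0.01f;  // per-tensor weight scale
      const int8_t x_q[4] = {20, -40, 60, 10};  // quantized input row
      const int8_t w_q[4] = {100, 50, -25, 5};  // quantized weight row
      int32_t dotprod = 0;
      for (int i = 0; i < 4; ++i) dotprod += x_q[i] * w_q[i];
      // The kernels receive scaling_factors[b] = input_scale * weight_scale.
      const float scaling_factor = input_scale * weight_scale;
      // Approximates the float dot product sum(x_f[i] * w_f[i]).
      std::printf("hybrid result: %f\n", dotprod * scaling_factor);
      return 0;
    }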