Searched refs: row_sums (Results 1 – 25 of 27) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/
basic_rnn.cc
183 TfLiteTensor* row_sums; in Prepare() local
185 GetTemporarySafe(context, node, /*index=*/5, &row_sums)); in Prepare()
186 row_sums->type = kTfLiteInt32; in Prepare()
187 row_sums->allocation_type = kTfLiteArenaRwPersistent; in Prepare()
189 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { in Prepare()
194 context, context->ResizeTensor(context, row_sums, row_sums_size)); in Prepare()
237 TfLiteTensor* row_sums, bool* compute_row_sums) { in EvalHybrid() argument
267 row_sums_ptr = GetTensorData<int32_t>(row_sums); in EvalHybrid()
324 TfLiteTensor* row_sums; in Eval() local
325 TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums)); in Eval()
[all …]
unidirectional_sequence_rnn.cc
190 TfLiteTensor* row_sums; in Prepare() local
192 GetTemporarySafe(context, node, /*index=*/5, &row_sums)); in Prepare()
193 row_sums->type = kTfLiteInt32; in Prepare()
194 row_sums->allocation_type = kTfLiteArenaRwPersistent; in Prepare()
196 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { in Prepare()
201 context, context->ResizeTensor(context, row_sums, row_sums_size)); in Prepare()
274 TfLiteTensor* accum_scratch, TfLiteTensor* row_sums, in EvalHybrid() argument
306 row_sums_ptr = GetTensorData<int32_t>(row_sums); in EvalHybrid()
400 TfLiteTensor* row_sums; in Eval() local
401 TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums)); in Eval()
[all …]
svdf.cc
230 TfLiteTensor* row_sums; in Prepare() local
232 GetTemporarySafe(context, node, /*index=*/5, &row_sums)); in Prepare()
233 row_sums->type = kTfLiteFloat32; in Prepare()
234 row_sums->allocation_type = kTfLiteArenaRwPersistent; in Prepare()
236 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { in Prepare()
240 context, context->ResizeTensor(context, row_sums, row_sums_size)); in Prepare()
333 TfLiteTensor* row_sums; in Eval() local
335 context, GetTemporarySafe(context, node, /*index=*/5, &row_sums)); in Eval()
356 if (params->asymmetric_quantize_inputs && row_sums != nullptr) { in Eval()
358 row_sums_ptr = GetTensorData<int32_t>(row_sums); in Eval()
fully_connected.cc
326 TfLiteTensor* row_sums; in PrepareImpl() local
328 GetTemporarySafe(context, node, /*index=*/4, &row_sums)); in PrepareImpl()
329 row_sums->type = kTfLiteInt32; in PrepareImpl()
330 row_sums->allocation_type = kTfLiteArenaRwPersistent; in PrepareImpl()
332 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { in PrepareImpl()
336 context, context->ResizeTensor(context, row_sums, row_sums_size)); in PrepareImpl()
440 TfLiteTensor* row_sums, TfLiteTensor* input_offsets, in EvalHybridImpl() argument
488 row_sums_ptr = GetTensorData<int32_t>(row_sums); in EvalHybridImpl()
533 TfLiteTensor* accum_scratch, TfLiteTensor* row_sums, in HybridFullyConnectedTask()
547 row_sums(row_sums), in HybridFullyConnectedTask()
[all …]
batch_matmul.cc
274 TfLiteTensor* row_sums; in InitializeTemporaries() local
276 GetTemporarySafe(context, node, /*index=*/6, &row_sums)); in InitializeTemporaries()
277 row_sums->type = kTfLiteInt32; in InitializeTemporaries()
278 row_sums->allocation_type = kTfLiteArenaRwPersistent; in InitializeTemporaries()
280 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { in InitializeTemporaries()
284 context, context->ResizeTensor(context, row_sums, row_sums_size)); in InitializeTemporaries()
448 TfLiteTensor* accum_scratch, TfLiteTensor* row_sums, in EvalHybrid() argument
468 row_sums_ptr = GetTensorData<int32_t>(row_sums); in EvalHybrid()
588 TfLiteTensor* row_sums; in EvalQuantized() local
590 GetTemporarySafe(context, node, /*index=*/6, &row_sums)); in EvalQuantized()
[all …]
conv.cc
579 TfLiteTensor* row_sums; in Prepare() local
582 GetTemporarySafe(context, node, data->row_sums_index, &row_sums)); in Prepare()
583 row_sums->type = kTfLiteInt32; in Prepare()
584 row_sums->allocation_type = kTfLiteArenaRwPersistent; in Prepare()
587 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { in Prepare()
591 context, context->ResizeTensor(context, row_sums, row_sums_size)); in Prepare()
891 TfLiteTensor* row_sums; in EvalHybridPerChannel() local
894 GetTemporarySafe(context, node, data->row_sums_index, &row_sums)); in EvalHybridPerChannel()
906 GetTensorData<int32>(scratch), GetTensorData<int32_t>(row_sums), in EvalHybridPerChannel()
unidirectional_sequence_lstm.cc
1138 TfLiteTensor* row_sums; in Prepare() local
1140 GetTemporarySafe(context, node, kRowSums, &row_sums)); in Prepare()
1141 row_sums->type = kTfLiteInt32; in Prepare()
1142 row_sums->allocation_type = kTfLiteArenaRwPersistent; in Prepare()
1150 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { in Prepare()
1155 context, context->ResizeTensor(context, row_sums, row_sums_size)); in Prepare()
1350 TfLiteTensor* row_sums; in Eval() local
1352 GetTemporarySafe(context, node, kRowSums, &row_sums)); in Eval()
1353 const int row_sums_size = row_sums->dims->data[0]; in Eval()
1394 GetTemporary(context, node, kOutputStateZeroPoints), row_sums, in Eval()
lstm.cc
1608 TfLiteTensor* row_sums; in Prepare() local
1610 GetTemporarySafe(context, node, kRowSums, &row_sums)); in Prepare()
1611 row_sums->type = kTfLiteInt32; in Prepare()
1612 row_sums->allocation_type = kTfLiteArenaRwPersistent; in Prepare()
1614 if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { in Prepare()
1619 context, context->ResizeTensor(context, row_sums, row_sums_size)); in Prepare()
1936 TfLiteTensor* row_sums; in Eval() local
1938 GetTemporarySafe(context, node, kRowSums, &row_sums)); in Eval()
1939 const int row_sums_size = row_sums->dims->data[0]; in Eval()
2028 GetTemporary(context, node, kOutputStateZeroPoints), row_sums, in Eval()
[all …]
lstm_eval.h
169 TfLiteTensor* output_state_zp, TfLiteTensor* row_sums, int row_sums_size,
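The hits in this directory all follow the same Prepare-time recipe: fetch a temporary tensor slot with GetTemporarySafe, mark it kTfLiteInt32 (svdf.cc uses kTfLiteFloat32 on its line 233) with kTfLiteArenaRwPersistent allocation so the cached weight-row sums survive across invocations, and resize it only when its dims have changed. Below is a condensed sketch of that recipe, assuming the standard kernel_util helpers; the temporary index, rank, and sizes differ per kernel, and kRowSumsTemporaryIndex / num_rows are illustrative names, not taken from the sources above.

// Condensed sketch of the shared Prepare() pattern; illustrative only.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace {

// Hypothetical index of the row_sums temporary in node->temporaries.
constexpr int kRowSumsTemporaryIndex = 5;

TfLiteStatus PrepareRowSumsTemporary(TfLiteContext* context, TfLiteNode* node,
                                     int num_rows) {
  TfLiteTensor* row_sums;
  TF_LITE_ENSURE_OK(
      context, tflite::GetTemporarySafe(context, node, kRowSumsTemporaryIndex,
                                        &row_sums));
  // int32 sums, kept alive between invocations so they are computed once for
  // the constant weights and then reused.
  row_sums->type = kTfLiteInt32;
  row_sums->allocation_type = kTfLiteArenaRwPersistent;
  const int row_sums_dims[1] = {num_rows};
  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
    TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
    row_sums_size->data[0] = row_sums_dims[0];
    TF_LITE_ENSURE_OK(
        context, context->ResizeTensor(context, row_sums, row_sums_size));
  }
  return kTfLiteOk;
}

}  // namespace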
/external/tensorflow/tensorflow/lite/kernels/internal/
kernel_utils.h
75 int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums);
89 int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums);
kernel_utils.cc
128 int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums) { in RnnBatchStep() argument
139 asymmetric_quantize_inputs, zero_points, accum_scratch, row_sums, in RnnBatchStep()
155 int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums) { in RnnBatchStep() argument
163 input_row_sums = row_sums; in RnnBatchStep()
164 aux_input_row_sums = row_sums; in RnnBatchStep()
tensor_utils.h
54 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
65 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, in MatrixBatchVectorMultiplyAccumulate() argument
75 row_sums, compute_row_sums, context); in MatrixBatchVectorMultiplyAccumulate()
tensor_utils_test.cc
457 int32_t* row_sums = scratch.data() + 8 * 4; in TEST() local
462 input_offsets.data(), scratch.data(), row_sums, &compute_row_sums, in TEST()
478 input_offsets.data(), scratch.data(), row_sums, &compute_row_sums, in TEST()
505 input_offsets_big_batch.data(), scratch_big_batch.data(), row_sums, in TEST()
1166 std::vector<int32_t> row_sums(rows); in TestPerChannelDotprodMatrixBatchVectorMultiply() local
1173 row_sums.data(), &compute_row_sums, &context); in TestPerChannelDotprodMatrixBatchVectorMultiply()
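Throughout kernel_utils and tensor_utils the row_sums buffer travels together with a bool* compute_row_sums flag: because the weight matrix is constant, its row sums only need to be computed on the first invocation and can be reused from the persistent temporary on every later call. A minimal standalone sketch of that caching idiom follows; the function names are illustrative, not the actual tensor_utils signatures.

// Standalone sketch of the lazy row-sum caching idiom; illustrative names.
#include <cstdint>

// Sums each row of an m_rows x m_cols int8 matrix into row_sums.
void SumMatrixRows(const int8_t* matrix, int32_t* row_sums, int m_rows,
                   int m_cols) {
  for (int r = 0; r < m_rows; ++r) {
    int32_t sum = 0;
    for (int c = 0; c < m_cols; ++c) sum += matrix[r * m_cols + c];
    row_sums[r] = sum;
  }
}

// Computes the cached sums only on the first call; later calls reuse row_sums.
void MaybeComputeRowSums(const int8_t* matrix, int m_rows, int m_cols,
                         int32_t* row_sums, bool* compute_row_sums) {
  if (compute_row_sums == nullptr || *compute_row_sums) {
    SumMatrixRows(matrix, row_sums, m_rows, m_cols);
    if (compute_row_sums != nullptr) *compute_row_sums = false;
  }
}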
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
sse_tensor_utils.cc
101 const int32_t* input_offset, const int32_t* row_sums) { in SseMatrixBatchVectorMultiplyAccumulateImpl() argument
113 row_sums && batch_offset ? batch_offset * row_sums[row] : 0; in SseMatrixBatchVectorMultiplyAccumulateImpl()
274 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, in SseMatrixBatchVectorMultiplyAccumulate() argument
277 SseReductionSumVector(matrix, row_sums, m_rows, m_cols); in SseMatrixBatchVectorMultiplyAccumulate()
284 per_channel_scale, input_offset, row_sums); in SseMatrixBatchVectorMultiplyAccumulate()
sse_tensor_utils_impl.h
52 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
neon_tensor_utils.cc
436 const int32_t* input_offset, int32_t* row_sums) { in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
443 int32_t* row_sums_ptr = row_sums ? row_sums + row : nullptr; in DotprodMatrixBatchFourVectorMultiplyAccumulate()
598 const int32_t* input_offset, int32_t* row_sums) { in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate() argument
650 row_sums); in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
1247 const int32_t* input_offset, int32_t* row_sums) { in NeonMatrixBatchVectorMultiplyAccumulateImpl() argument
1254 per_channel_scale, input_offset, row_sums); in NeonMatrixBatchVectorMultiplyAccumulateImpl()
1259 per_channel_scale, input_offset, row_sums); in NeonMatrixBatchVectorMultiplyAccumulateImpl()
1284 int32_t* row_sums_ptr = row_sums; in NeonMatrixBatchVectorMultiplyAccumulateImpl()
1285 if (row_sums == nullptr) { in NeonMatrixBatchVectorMultiplyAccumulateImpl()
1362 if (row_sums == nullptr) { in NeonMatrixBatchVectorMultiplyAccumulateImpl()
[all …]
batch_matmul.h
118 const int32_t* input_offset, int32_t* row_sums, in BatchMatMul() argument
184 lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth); in BatchMatMul()
210 const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0); in BatchMatMul()
neon_tensor_utils.h
59 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, in MatrixBatchVectorMultiplyAccumulate() argument
63 input_offset, scratch, row_sums, compute_row_sums, context); in MatrixBatchVectorMultiplyAccumulate()
sse_tensor_utils.h
57 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, in MatrixBatchVectorMultiplyAccumulate() argument
61 input_offset, scratch, row_sums, compute_row_sums, context); in MatrixBatchVectorMultiplyAccumulate()
neon_tensor_utils_impl.h
59 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
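The NEON and SSE paths also handle being called without any cached buffer at all (the row_sums == nullptr branches above): under that reading of the elided lines, the sums are produced into call-local scratch for the duration of the call, and per-row pointers such as row_sums + row then index into whichever buffer ended up being used. A rough standalone sketch of that fallback follows; names and the surrounding loop are illustrative, not the NEON/SSE implementations.

// Sketch of the fallback when no persistent row_sums cache is supplied;
// illustrative only, based on the nullptr branches visible above.
#include <cstdint>
#include <vector>

void ApplyOffsetCorrection(const int8_t* matrix, int m_rows, int m_cols,
                           const int32_t* input_offset, int n_batch,
                           const int32_t* row_sums,
                           int32_t* accum /* n_batch x m_rows */) {
  std::vector<int32_t> local_scratch;
  if (row_sums == nullptr) {
    // No cache handed in: compute the sums into call-local scratch.
    local_scratch.resize(m_rows);
    for (int r = 0; r < m_rows; ++r) {
      int32_t sum = 0;
      for (int c = 0; c < m_cols; ++c) sum += matrix[r * m_cols + c];
      local_scratch[r] = sum;
    }
    row_sums = local_scratch.data();
  }
  // The cached (or just-computed) sums feed the per-batch offset correction.
  for (int b = 0; b < n_batch; ++b) {
    for (int r = 0; r < m_rows; ++r) {
      accum[b * m_rows + r] -= input_offset[b] * row_sums[r];
    }
  }
}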
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
batch_matmul.h
111 const int32_t* input_offset, int32_t* row_sums, in BatchMatMul() argument
151 lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth); in BatchMatMul()
162 const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0); in BatchMatMul()
svdf.h
199 int32_t* row_sums, bool* compute_row_sums) { in EvalHybridSVDF() argument
229 reinterpret_cast<int32_t*>(scratch), row_sums, compute_row_sums, in EvalHybridSVDF()
portable_tensor_utils.h
79 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, in MatrixBatchVectorMultiplyAccumulate() argument
83 per_channel_scale, input_offset, scratch, row_sums, compute_row_sums, in MatrixBatchVectorMultiplyAccumulate()
portable_tensor_utils.cc
167 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, in PortableMatrixBatchVectorMultiplyAccumulate() argument
175 PortableReductionSumVector(matrix, row_sums, m_rows, m_cols); in PortableMatrixBatchVectorMultiplyAccumulate()
199 dotprod -= row_sums[row] * batch_offset; in PortableMatrixBatchVectorMultiplyAccumulate()
portable_tensor_utils_impl.h
71 const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
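The correction the reference kernel applies on portable_tensor_utils.cc line 199, dotprod -= row_sums[row] * batch_offset, is the whole point of caching the sums: for an asymmetrically quantized input with zero point o, sum_j w[j] * (x[j] - o) equals sum_j w[j] * x[j] - o * sum_j w[j], so the offset can be folded out of the inner loop using the precomputed row sum. A small self-contained check of that identity with illustrative values:

// Numeric check of the identity behind `dotprod -= row_sums[row] * batch_offset`:
//   sum_j w[j] * (x[j] - offset) == sum_j w[j] * x[j] - offset * sum_j w[j]
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const std::vector<int8_t> weights = {3, -1, 7, 2};  // one weight-matrix row
  const std::vector<int8_t> input = {12, -5, 9, 4};   // quantized input vector
  const int32_t batch_offset = 6;                     // input zero point

  // Exact form: subtract the offset inside the inner loop.
  int32_t exact = 0;
  for (size_t j = 0; j < weights.size(); ++j) {
    exact += weights[j] * (input[j] - batch_offset);
  }

  // Cached form: plain dot product, then one correction with the row sum.
  int32_t dotprod = 0;
  int32_t row_sum = 0;
  for (size_t j = 0; j < weights.size(); ++j) {
    dotprod += weights[j] * input[j];
    row_sum += weights[j];
  }
  dotprod -= row_sum * batch_offset;

  assert(dotprod == exact);  // 46 == 46 with the values above
  return 0;
}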
