
Searched refs: n_batch (Results 1 – 16 of 16), sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/
lstm_eval.cc
116 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepWithAuxInput() argument
132 tensor_utils::ZeroVector(input_gate_scratch, n_cell * n_batch); in LstmStepWithAuxInput()
134 tensor_utils::ZeroVector(forget_gate_scratch, n_cell * n_batch); in LstmStepWithAuxInput()
135 tensor_utils::ZeroVector(cell_scratch, n_cell * n_batch); in LstmStepWithAuxInput()
136 tensor_utils::ZeroVector(output_gate_scratch, n_cell * n_batch); in LstmStepWithAuxInput()
140 n_batch, input_gate_scratch); in LstmStepWithAuxInput()
142 tensor_utils::VectorBatchVectorAssign(forget_gate_bias_ptr, n_cell, n_batch, in LstmStepWithAuxInput()
144 tensor_utils::VectorBatchVectorAssign(cell_bias_ptr, n_cell, n_batch, in LstmStepWithAuxInput()
146 tensor_utils::VectorBatchVectorAssign(output_gate_bias_ptr, n_cell, n_batch, in LstmStepWithAuxInput()
153 input_to_input_weights_ptr, n_cell, n_input, input_ptr_batch, n_batch, in LstmStepWithAuxInput()
[all …]
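
The matches above show the per-gate setup in LstmStepWithAuxInput(): each gate scratch area holds n_batch rows of n_cell values, is seeded with the gate bias, and then accumulates the input-to-gate matrix product. A minimal sketch of that pattern, with the library helpers unrolled into plain loops (layout assumptions: row-major weights, batch-major scratch):

    // Mirrors ZeroVector / VectorBatchVectorAssign /
    // MatrixBatchVectorMultiplyAccumulate as used above; a sketch, not the
    // TFLite implementation.
    void PrepareGateScratch(const float* bias,          // [n_cell]
                            const float* weights,       // [n_cell x n_input]
                            const float* input,         // [n_batch x n_input]
                            int n_batch, int n_cell, int n_input,
                            float* scratch) {           // [n_batch x n_cell]
      // VectorBatchVectorAssign: broadcast the bias into every batch row.
      for (int b = 0; b < n_batch; ++b)
        for (int c = 0; c < n_cell; ++c)
          scratch[b * n_cell + c] = bias[c];
      // MatrixBatchVectorMultiplyAccumulate: scratch += weights * input[b].
      for (int b = 0; b < n_batch; ++b)
        for (int c = 0; c < n_cell; ++c)
          for (int i = 0; i < n_input; ++i)
            scratch[b * n_cell + c] +=
                weights[c * n_input + i] * input[b * n_input + i];
    }

The full source zeroes the scratch on one path (lines 132–136) and bias-seeds it on another (lines 140–146); the sketch shows the bias-seeded path.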
bidirectional_sequence_lstm_test.cc
37 BidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in BidirectionalLSTMOpModel() argument
44 : n_batch_(n_batch), in BidirectionalLSTMOpModel()
406 const int n_batch = 1; in TEST_P() local
415 n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false, in TEST_P()
421 {sequence_length, n_batch, n_input}, // input tensor in TEST_P()
469 {n_batch, n_output}, // activation_state tensor in TEST_P()
470 {n_batch, n_cell}, // cell_state tensor in TEST_P()
472 {n_batch, n_output}, // activation_state tensor in TEST_P()
473 {n_batch, n_cell}, // cell_state tensor in TEST_P()
477 {sequence_length, n_batch, 0}, // aux_input tensor in TEST_P()
[all …]
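
The test declares its input as {sequence_length, n_batch, n_input}, i.e. time-major storage. A one-line sketch of the flat offset that layout implies (hypothetical helper, not part of the test):

    // Element (t, b, i) in a {sequence_length, n_batch, n_input} tensor.
    inline int TimeMajorIndex(int t, int b, int i, int n_batch, int n_input) {
      return (t * n_batch + b) * n_input + i;
    }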
unidirectional_sequence_lstm_test.cc
34 UnidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in UnidirectionalLSTMOpModel() argument
41 : n_batch_(n_batch), in UnidirectionalLSTMOpModel()
243 int n_batch, int n_input, int n_cell, int n_output, int sequence_length, in HybridUnidirectionalLSTMOpModel() argument
249 n_batch, n_input, n_cell, n_output, sequence_length, time_major, in HybridUnidirectionalLSTMOpModel()
451 const int n_batch = 1; in TEST_F() local
459 n_batch, n_input, n_cell, n_output, sequence_length, in TEST_F()
465 {sequence_length, n_batch, n_input}, // input tensor in TEST_F()
489 {n_batch, n_output}, // activation_state tensor in TEST_F()
490 {n_batch, n_cell}, // cell_state tensor in TEST_F()
513 const int n_batch = 1; in TEST_F() local
[all …]
lstm_test.cc
37 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
43 : n_batch_(n_batch), in LSTMOpModel()
271 HybridLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in HybridLSTMOpModel() argument
277 : LSTMOpModel(n_batch, n_input, n_cell, n_output, use_cifg, use_peephole, in HybridLSTMOpModel()
450 const int n_batch = 1; in TEST_F() local
456 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_F()
462 {n_batch, n_input}, // input tensor in TEST_F()
507 const int n_batch = 1; in TEST_F() local
514 n_batch, n_input, n_cell, n_output, in TEST_F()
519 {n_batch, n_input}, // input tensor in TEST_F()
[all …]
optional_tensor_test.cc
33 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
37 : n_batch_(n_batch), in LSTMOpModel()
231 const int n_batch = 1; in TEST() local
237 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST()
243 {n_batch, n_input}, // input tensor in TEST()
bidirectional_sequence_lstm.cc
403 const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0]; in Prepare() local
499 n_batch * n_fw_output); in Prepare()
500 TF_LITE_ENSURE_EQ(context, NumElements(fw_cell_state), n_batch * n_fw_cell); in Prepare()
504 fw_output_size->data[0] = time_major ? max_time : n_batch; in Prepare()
505 fw_output_size->data[1] = time_major ? n_batch : max_time; in Prepare()
537 fw_scratch_buffer_size->data[0] = n_batch; in Prepare()
564 bw_output_size->data[0] = time_major ? max_time : n_batch; in Prepare()
565 bw_output_size->data[1] = time_major ? n_batch : max_time; in Prepare()
575 n_batch * n_bw_output); in Prepare()
576 TF_LITE_ENSURE_EQ(context, NumElements(bw_cell_state), n_batch * n_bw_cell); in Prepare()
[all …]
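
Prepare() reads n_batch from dims[1] or dims[0] depending on time_major, and swaps the first two output dimensions the same way. A sketch of just that shape logic (standalone function names are illustrative):

    // Input is {max_time, n_batch, n_input} when time-major,
    // {n_batch, max_time, n_input} otherwise; the output mirrors the ordering.
    int BatchDim(bool time_major, const int* input_dims) {
      return time_major ? input_dims[1] : input_dims[0];
    }
    void OutputDims(bool time_major, int max_time, int n_batch, int n_output,
                    int dims[3]) {
      dims[0] = time_major ? max_time : n_batch;
      dims[1] = time_major ? n_batch : max_time;
      dims[2] = n_output;
    }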
unidirectional_sequence_lstm.cc
267 const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0]; in Prepare() local
298 TF_LITE_ENSURE_EQ(context, NumElements(activation_state), n_batch * n_output); in Prepare()
299 TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell); in Prepare()
330 scratch_buffer_size->data[0] = n_batch; in Prepare()
394 int scaling_dims[1] = {n_batch}; in Prepare()
397 scaling_factors_size->data[0] = n_batch; in Prepare()
410 prod_scaling_factors_size->data[0] = n_batch; in Prepare()
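
The hybrid (quantized-weight) path sizes scaling_factors and prod_scaling_factors as {n_batch} because each batch row of the float input is quantized with its own scale at run time. A sketch of that per-batch symmetric quantization (an assumption about the behaviour in spirit, not the library's exact API):

    #include <cmath>
    #include <cstdint>

    // One scale per batch row: quantize row b of [n_batch x n_input] to int8.
    void QuantizePerBatch(const float* input, int n_batch, int n_input,
                          int8_t* quantized, float* scaling_factors) {
      for (int b = 0; b < n_batch; ++b) {
        const float* row = input + b * n_input;
        float max_abs = 0.f;
        for (int i = 0; i < n_input; ++i)
          max_abs = std::fmax(max_abs, std::fabs(row[i]));
        const float scale = max_abs / 127.f;
        scaling_factors[b] = scale;  // why the tensor is sized {n_batch}
        for (int i = 0; i < n_input; ++i)
          quantized[b * n_input + i] = static_cast<int8_t>(
              scale > 0.f ? std::lround(row[i] / scale) : 0);
      }
    }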
lstm.cc
341 const int n_batch = input->dims->data[0]; in Prepare() local
373 TF_LITE_ENSURE_EQ(context, NumElements(activation_state), n_batch * n_output); in Prepare()
374 TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell); in Prepare()
378 output_size->data[0] = n_batch; in Prepare()
406 scratch_buffer_size->data[0] = n_batch; in Prepare()
464 int scaling_dims[1] = {n_batch}; in Prepare()
467 scaling_factors_size->data[0] = n_batch; in Prepare()
479 prod_scaling_factors_size->data[0] = n_batch; in Prepare()
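
The single-step lstm.cc kernel is batch-major only, so n_batch comes straight from dims[0], and Prepare() checks that both state tensors hold exactly one row per batch. A sketch of those checks, with plain asserts standing in for TF_LITE_ENSURE_EQ:

    #include <cassert>

    void CheckLstmStateSizes(int activation_state_elems, int cell_state_elems,
                             int n_batch, int n_output, int n_cell) {
      assert(activation_state_elems == n_batch * n_output);
      assert(cell_state_elems == n_batch * n_cell);
    }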
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h
43 int n_batch, float* result,
49 int n_batch, float* __restrict__ result, int result_stride);
53 const float* vector, int n_batch, float* result, int result_stride);
58 const float* scaling_factors, int n_batch, float* __restrict__ result,
79 int n_batch, float* result,
85 int n_batch, float* result);
93 int n_batch,
98 int n_batch, float* batch_vector);
101 void PortableVectorBatchVectorAdd(const float* vector, int v_size, int n_batch,
146 int n_batch, float normalization_epsilon);
[all …]
portable_tensor_utils.cc
71 int n_batch, float* result, in PortableMatrixBatchVectorMultiplyAccumulate() argument
74 for (int b = 0; b < n_batch; b++) { in PortableMatrixBatchVectorMultiplyAccumulate()
91 int n_batch, float* __restrict__ result, int result_stride) { in PortableMatrixBatchVectorMultiplyAccumulate() argument
93 for (batch = 0; batch < n_batch; ++batch, vectors += m_cols) { in PortableMatrixBatchVectorMultiplyAccumulate()
115 const float* vector, int n_batch, float* result, int result_stride) { in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
120 for (int b = 0; b < n_batch; b++) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
146 const float* scaling_factors, int n_batch, float* __restrict__ result, in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
152 for (batch = 0; batch < n_batch; ++batch, vectors += m_cols) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
199 int n_batch, float* result, in PortableBatchVectorBatchVectorDotProduct() argument
204 for (int b = 0; b < n_batch; b++) { in PortableBatchVectorBatchVectorDotProduct()
[all …]
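
The portable reference implementation makes the n_batch contract explicit: one matrix-vector product per batch, with results written at a configurable stride. A condensed sketch of the loop structure in PortableMatrixBatchVectorMultiplyAccumulate:

    // result[(b * m_rows + r) * result_stride] += matrix row r dot vector b.
    void MatVecAccum(const float* matrix, int m_rows, int m_cols,
                     const float* vectors, int n_batch,
                     float* result, int result_stride) {
      for (int b = 0; b < n_batch; ++b) {
        const float* vec = vectors + b * m_cols;
        for (int r = 0; r < m_rows; ++r) {
          float acc = 0.f;
          for (int c = 0; c < m_cols; ++c)
            acc += matrix[r * m_cols + c] * vec[c];
          result[(b * m_rows + r) * result_stride] += acc;
        }
      }
    }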
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
neon_tensor_utils.h
29 int n_batch, float* result, in MatrixBatchVectorMultiplyAccumulate() argument
32 vector, n_batch, result, result_stride); in MatrixBatchVectorMultiplyAccumulate()
38 int n_batch, float* __restrict__ result, int result_stride) { in MatrixBatchVectorMultiplyAccumulate() argument
40 vectors, scaling_factors, n_batch, result, result_stride); in MatrixBatchVectorMultiplyAccumulate()
45 const int m_cols, const float* vector, int n_batch, float* result, in SparseMatrixBatchVectorMultiplyAccumulate() argument
48 matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride); in SparseMatrixBatchVectorMultiplyAccumulate()
54 const float* scaling_factors, int n_batch, float* __restrict__ result, in SparseMatrixBatchVectorMultiplyAccumulate() argument
58 n_batch, result, result_stride); in SparseMatrixBatchVectorMultiplyAccumulate()
74 const float* batch_vector, int n_batch, in VectorBatchVectorCwiseProduct() argument
77 n_batch, result); in VectorBatchVectorCwiseProduct()
[all …]
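
neon_tensor_utils.h consists of thin wrappers: each public tensor_utils entry point forwards its arguments, n_batch included, to a NEON or portable backend. A sketch of that dispatch shape (names are illustrative stand-ins, not the real TFLite symbols):

    void NeonBackend(const float* v, int n_batch, float* r) { /* SIMD kernels */ }
    void PortableBackend(const float* v, int n_batch, float* r) { /* plain loops */ }

    inline void FrontEnd(const float* v, int n_batch, float* r) {
    #ifdef USE_NEON
      NeonBackend(v, n_batch, r);
    #else
      PortableBackend(v, n_batch, r);
    #endif
    }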
tensor_utils_impl.h
40 int n_batch, float* result,
44 int n_batch, float* result,
51 int n_batch, float* __restrict__ result, int result_stride);
55 int n_batch, float* __restrict__ result, int result_stride);
59 const float* vector, int n_batch, float* result, int result_stride);
62 const float* vector, int n_batch, float* result, int result_stride);
68 const float* scaling_factors, int n_batch, float* __restrict__ result,
73 const float* scaling_factors, int n_batch, float* __restrict__ result,
101 int n_batch, float* result,
105 int n_batch, float* result,
[all …]
neon_tensor_utils.cc
99 int n_batch, float* result, in NeonMatrixBatchVectorMultiplyAccumulate() argument
107 for (int b = 0; b < n_batch; b++) { in NeonMatrixBatchVectorMultiplyAccumulate()
150 const int8_t* ShuffleVectors(const int8_t* vectors, const int n_batch, in ShuffleVectors() argument
155 kWeightsPerUint32, n_batch * m_cols, shuffled_vectors_free)); in ShuffleVectors()
157 for (int i = 0; i < n_batch; i += 4) { in ShuffleVectors()
198 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
203 ShuffleVectors(vectors, n_batch, m_cols, &shuffled_vectors_free); in DotprodMatrixBatchFourVectorMultiplyAccumulate()
206 for (int batch = 0; batch < n_batch; batch += 4) { in DotprodMatrixBatchFourVectorMultiplyAccumulate()
316 const float* scaling_factors, int n_batch, float* __restrict__ result, in DotprodSparseMatrixBatchVectorMultiplyAccumulate() argument
328 for (int batch = 0; batch < n_batch; batch++) { in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
[all …]
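
DotprodMatrixBatchFourVectorMultiplyAccumulate consumes batches four at a time, so ShuffleVectors first regroups the quantized input so one linear stream feeds the four-batch kernel. An illustrative sketch of that interleaving (the layout details are an assumption; the real routine works on raw aligned buffers and, as the `batch += 4` loop suggests, expects n_batch to be a multiple of four):

    #include <cstdint>
    #include <vector>

    // Interleave 4-byte chunks from four batch vectors into one stream
    // (assumes n_batch and m_cols are both multiples of four).
    std::vector<int8_t> ShuffleFourBatches(const int8_t* vectors,
                                           int n_batch, int m_cols) {
      std::vector<int8_t> out(static_cast<size_t>(n_batch) * m_cols);
      int8_t* dst = out.data();
      for (int g = 0; g < n_batch; g += 4)        // one group of four batches
        for (int c = 0; c < m_cols; c += 4)       // 4 bytes per batch per step
          for (int b = 0; b < 4; ++b)
            for (int k = 0; k < 4; ++k)
              *dst++ = vectors[(g + b) * m_cols + c + k];
      return out;
    }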
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils.h
55 int n_batch, float* result,
71 const float* vector, int n_batch, float* result, int result_stride);
83 int n_batch, float* __restrict__ result, int result_stride);
99 const float* scaling_factors, int n_batch, float* __restrict__ result,
133 int n_batch, float* result,
138 const float* batch_vector, int n_batch,
146 int n_batch, float* result);
149 void VectorBatchVectorAdd(const float* vector, int v_size, int n_batch,
153 void VectorBatchVectorAssign(const float* vector, int v_size, int n_batch,
195 int v_size, int n_batch,
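
tensor_utils.h is the backend-neutral API; in every VectorBatchVector* helper, n_batch counts how many rows the per-cell vector is applied to. A sketch of one of them, VectorBatchVectorCwiseProduct, with semantics inferred from the reference implementation:

    // result[b][i] = vector[i] * batch_vector[b][i], for b in [0, n_batch).
    void VectorBatchVectorCwiseProductSketch(const float* vector, int v_size,
                                             const float* batch_vector,
                                             int n_batch, float* result) {
      for (int b = 0; b < n_batch; ++b)
        for (int i = 0; i < v_size; ++i)
          result[b * v_size + i] = vector[i] * batch_vector[b * v_size + i];
    }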
/external/deqp/external/openglcts/modules/gl/
gl4cVertexAttrib64BitTest.cpp
3554 for (unsigned int n_batch = 0; n_batch < m_n_batches; ++n_batch) in initBuffers() local
3556 unsigned char* data1_ptr = m_bo_1_data + n_batch * m_bo_1_batch_size + m_bo_1_offset_double; in initBuffers()
3559 u.d = (double)(2 * n_batch); in initBuffers()
3561 u.d = (double)(2 * n_batch + 1); in initBuffers()
3883 for (unsigned int n_batch = 0; n_batch < m_n_batches; ++n_batch) in verifyXFBData() local
3885 unsigned int in_index = n_batch; in verifyXFBData()
3886 const unsigned int xfb_index = is_indiced ? m_bo_index_data[n_batch] : n_batch; in verifyXFBData()
3904 << n_batch << "]" in verifyXFBData()
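
In the deqp test, n_batch is a loop counter over buffer-object batches: initBuffers() writes the pair of doubles (2*n_batch, 2*n_batch + 1) into each batch's slot, and verifyXFBData() reads them back, optionally through an index buffer. A sketch of the fill loop, with memcpy standing in for the union-based type punning (the byte placement of the second double is not visible in the match and is assumed consecutive):

    #include <cstring>

    void FillBatches(unsigned char* bo_data, unsigned int n_batches,
                     unsigned int batch_size, unsigned int offset_double) {
      for (unsigned int n_batch = 0; n_batch < n_batches; ++n_batch) {
        unsigned char* ptr = bo_data + n_batch * batch_size + offset_double;
        const double d0 = 2.0 * n_batch;  // value checked later by verifyXFBData()
        const double d1 = 2.0 * n_batch + 1;
        std::memcpy(ptr, &d0, sizeof d0);
        std::memcpy(ptr + sizeof d0, &d1, sizeof d1);
      }
    }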
/external/tensorflow/tensorflow/lite/delegates/nnapi/
nnapi_delegate_test.cc
2259 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
2264 : n_batch_(n_batch), in LSTMOpModel()
2568 const int n_batch = 1; in TEST_F() local
2574 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_F()
2580 {n_batch, n_input}, // input tensor in TEST_F()
2672 const int n_batch = 1; in TEST_F() local
2678 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_F()
2684 {n_batch, n_input}, // input tensor in TEST_F()
3328 const int n_batch = 2; in TEST_F() local
3333 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_F()
[all …]