Searched refs:batch_size (Results 1 – 12 of 12) sorted by relevance

/packages/modules/NeuralNetworks/common/cpu_operations/
SVDF.cpp
83 const uint32_t batch_size = SizeOfDimension(input, 0); in Prepare() local
99 stateShape->dimensions = {batch_size, memory_size * num_filters}; in Prepare()
105 outputShape->dimensions = {batch_size, num_units}; in Prepare()
170 const int batch_size = SizeOfDimension(input_, 0); in EvalFloat32() local
176 memcpy(outputStateData, inputStateData, sizeof(float) * batch_size * memory_size * num_filters); in EvalFloat32()
178 for (int b = 0; b < batch_size; b++) { in EvalFloat32()
187 float scratch[batch_size * num_filters]; in EvalFloat32()
188 std::fill_n(scratch, batch_size * num_filters, 0.0f); in EvalFloat32()
190 weightsFeatureData, num_filters, input_size, inputData, batch_size, scratch); in EvalFloat32()
194 for (int i = 0; i < batch_size * num_filters; ++i) { in EvalFloat32()
[all …]
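
The SVDF hits above all derive batch_size from dimension 0 of the input and use it to size the state shape, the output shape, and the per-batch scratch buffer. A minimal C++ sketch of that pattern follows; the Shape struct and SizeOfDimension helper are stand-ins for the NNAPI helpers, not the exact signatures.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for the NNAPI shape helpers referenced in the hits above.
    struct Shape {
        std::vector<uint32_t> dimensions;
    };

    uint32_t SizeOfDimension(const Shape& s, int dim) { return s.dimensions[dim]; }

    // Prepare()-style inference: batch_size is dimension 0 of the input, and the
    // state and output shapes are sized from it.
    void PrepareSvdfShapes(const Shape& input, uint32_t memory_size, uint32_t num_filters,
                           uint32_t num_units, Shape* stateShape, Shape* outputShape) {
        const uint32_t batch_size = SizeOfDimension(input, 0);
        stateShape->dimensions = {batch_size, memory_size * num_filters};
        outputShape->dimensions = {batch_size, num_units};
    }

    // EvalFloat32()-style scratch buffer: batch_size * num_filters floats, zeroed
    // before the batched matrix-vector products accumulate into it.
    std::vector<float> MakeSvdfScratch(uint32_t batch_size, uint32_t num_filters) {
        return std::vector<float>(batch_size * num_filters, 0.0f);
    }

Using std::vector for the scratch buffer sidesteps the variable-length array (float scratch[batch_size * num_filters]) visible in the EvalFloat32() hit, which is a compiler extension rather than standard C++.
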
Multinomial.cpp
78 const uint32_t batch_size = SizeOfDimension(input, 0); in Prepare() local
83 outputShape->dimensions = {batch_size, sample_count}; in Prepare()
112 const uint32_t batch_size = SizeOfDimension(input_, 0); in EvalFloat32() local
124 random_generator.ReserveRandomOutputs(batch_size * sample_count_aligned, 256); in EvalFloat32()
127 for (uint64_t b = 0; b < batch_size; ++b) { in EvalFloat32()
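
The Multinomial hits shape the output as {batch_size, sample_count} and reserve batch_size * sample_count_aligned random outputs before the per-batch loop. A rough sketch of that sizing, with the guarded random generator elided:

    #include <cstdint>
    #include <vector>

    // Hypothetical sketch: one row of sample_count class indices per batch, with the
    // random pool reserved for the whole batch before the loop (generator elided).
    std::vector<int32_t> SampleMultinomialSketch(uint32_t batch_size, uint32_t sample_count) {
        std::vector<int32_t> output(batch_size * sample_count);
        for (uint32_t b = 0; b < batch_size; ++b) {
            // ... draw sample_count samples for batch b into output[b * sample_count ...] ...
        }
        return output;
    }
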
RNN.cpp
61 const uint32_t batch_size = SizeOfDimension(input, 0); in Prepare() local
72 hiddenStateShape->dimensions = {batch_size, num_units}; in Prepare()
76 outputShape->dimensions = {batch_size, num_units}; in Prepare()
145 const uint32_t batch_size = inputShape.dimensions[0]; in RNNStep() local
160 for (uint32_t b = 0; b < batch_size; b++) { in RNNStep()
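
The RNN hits follow the same pattern: batch_size is dimension 0 of the input, the hidden state and output are {batch_size, num_units}, and RNNStep walks one batch row at a time. A hedged sketch of that per-batch loop, with bias, recurrent weights, and activation omitted:

    #include <cstdint>

    // Hypothetical sketch of the RNNStep() batching above: one matrix-vector pass per
    // batch row; the real step also adds bias, the recurrent term, and an activation.
    void RnnStepSketch(const float* input, const float* weights, float* output,
                       uint32_t batch_size, uint32_t input_size, uint32_t num_units) {
        for (uint32_t b = 0; b < batch_size; b++) {
            const float* in = input + b * input_size;
            float* out = output + b * num_units;
            for (uint32_t u = 0; u < num_units; u++) {
                float acc = 0.0f;
                for (uint32_t i = 0; i < input_size; i++) {
                    acc += weights[u * input_size + i] * in[i];
                }
                out[u] = acc;
            }
        }
    }
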
FullyConnected.cpp
62 uint32_t batch_size = getSizeOfDimension(outputShape, 0); in fullyConnectedFloat32() local
64 if (batch_size * batch_size == input_n_elements) { in fullyConnectedFloat32()
MultinomialTest.cpp
44 MultinomialOpModel(uint32_t batch_size, uint32_t class_size, uint32_t sample_size) in MultinomialOpModel() argument
45 : batch_size_(batch_size), class_size_(class_size), sample_size_(sample_size) { in MultinomialOpModel()
/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
qlstm_projection.mod.py
22 batch_size = 2 variable
27 InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
58 OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
59 CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
134 output_state_in: [ 0 for _ in range(batch_size * output_size) ],
135 cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
191 output_state_in: [ 0 for _ in range(batch_size * output_size) ],
192 cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
qlstm_noprojection.mod.py
22 batch_size = 2 variable
27 InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
58 OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
59 CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
128 output_state_in: [ 0 for _ in range(batch_size * output_size) ],
129 cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
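
Both QLSTM specs parameterize their operand shapes and zero-filled initial states by the same batch_size variable. A rough C++ equivalent of that setup, showing only the dimensions and zero-initialized state buffers; the input_size, num_units, and output_size values here are assumed, not taken from the specs:

    #include <cstdint>
    #include <vector>

    int main() {
        const uint32_t batch_size = 2;   // matches the specs' batch_size = 2
        const uint32_t input_size = 5;   // assumed value; the real spec defines its own
        const uint32_t num_units = 4;    // assumed value
        const uint32_t output_size = 3;  // assumed value

        // Operand shapes scale with batch_size, as in InputType / OutputStateType / CellStateType.
        std::vector<uint32_t> inputDims = {batch_size, input_size};
        std::vector<uint32_t> outputStateDims = {batch_size, output_size};
        std::vector<uint32_t> cellStateDims = {batch_size, num_units};

        // Zero-filled initial states, mirroring the specs' range(batch_size * ...) lists.
        std::vector<int8_t> output_state_in(batch_size * output_size, 0);
        std::vector<int16_t> cell_state_in(batch_size * num_units, 0);
        return 0;
    }
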
/packages/modules/NeuralNetworks/common/types/operations/src/
FullyConnected.cpp
46 uint32_t batch_size = 0; in validateShapes() local
49 batch_size = input_n_elements / input_size; in validateShapes()
59 output->dimensions = {batch_size, num_units}; in validateShapes()
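
The validateShapes() hits recover the batch dimension by dividing the input's total element count by input_size and then shape the output as {batch_size, num_units}. A minimal sketch of that derivation, with a divisibility check added for clarity (helper name hypothetical):

    #include <cstdint>
    #include <vector>

    // Hypothetical sketch of the validateShapes() derivation above.
    bool DeriveFullyConnectedOutput(uint32_t input_n_elements, uint32_t input_size,
                                    uint32_t num_units, std::vector<uint32_t>* outputDims) {
        if (input_size == 0 || input_n_elements % input_size != 0) {
            return false;  // element count must be an exact multiple of input_size
        }
        const uint32_t batch_size = input_n_elements / input_size;
        *outputDims = {batch_size, num_units};
        return true;
    }
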
/packages/modules/RemoteKeyProvisioning/app/src/com/android/rkpdapp/provisioner/
Provisioner.java
151 int batch_size = keysGenerated.size(); in batchProvision() local
152 if (batch_size < 1) { in batchProvision()
154 "Request at least 1 key to be signed. Num requested: " + batch_size); in batchProvision()
/packages/modules/Virtualization/virtualizationservice/src/
maintenance.rs
51 batch_size: usize, field
134 Ok(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE }) in new()
193 let len = std::cmp::min(vm_ids.len(), self.batch_size); in delete_ids()
341 fn new_test_state(history: Arc<Mutex<Vec<SkOp>>>, batch_size: usize) -> State { in new_test_state()
345 let inner = InnerState { sk, vm_id_db, batch_size }; in new_test_state()
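
The maintenance.rs hits cap the stored batch_size at DELETE_MAX_BATCH_SIZE and then delete at most that many IDs per call via min(vm_ids.len(), self.batch_size). A hedged C++ sketch of the same chunking; the constant value and helper name are assumptions, not taken from the source:

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Assumed stand-in for DELETE_MAX_BATCH_SIZE in maintenance.rs.
    constexpr size_t kDeleteMaxBatchSize = 100;

    // Hypothetical sketch of delete_ids(): never remove more than batch_size IDs per
    // call, no matter how many are pending.
    size_t DeleteBatch(std::vector<std::string>& vm_ids, size_t batch_size = kDeleteMaxBatchSize) {
        const size_t len = std::min(vm_ids.size(), batch_size);
        // ... issue the backing-store deletion for the first `len` IDs here ...
        vm_ids.erase(vm_ids.begin(), vm_ids.begin() + static_cast<std::ptrdiff_t>(len));
        return len;
    }
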
/packages/modules/HealthFitness/testapps/toolbox/src/com/android/healthconnect/testapps/toolbox/ui/
PerformanceTestingLinearLayout.kt
57 return returnIntIfNotEmpty(findViewById<EditText>(R.id.batch_size).text.toString()) in getNumberOfRecordsPerBatch()
/packages/modules/NeuralNetworks/tools/api/
types.spec
1162 * [batch_size, input_size], where "input_size" corresponds to the
1167 * Since %{NNAPILevel3}, zero batch_size is supported for this tensor.
1188 * * 0: The output tensor, of shape [batch_size, num_units]. %{BeforeNNAPILevel3For}
1701 * A 2-D tensor of shape [batch_size, input_size], where “batch_size
1742 * A 2-D tensor of shape [batch_size, output_size].
1744 * A 2-D tensor of shape [batch_size, num_units].
1794 * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1795 * [batch_size, num_units * 4] without CIFG.
1797 * A 2-D tensor of shape [batch_size, output_size].
1799 * A 2-D tensor of shape [batch_size, num_units].
[all …]
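
The types.spec hits document LSTM operands whose leading dimension is batch_size, including a scratch buffer of shape [batch_size, num_units * 3] with CIFG or [batch_size, num_units * 4] without. A small sketch of that shape rule, as described in the documentation above:

    #include <cstdint>
    #include <vector>

    // Scratch buffer shape per the LSTM documentation: three gate columns when CIFG
    // couples the input gate away, four otherwise.
    std::vector<uint32_t> LstmScratchShape(uint32_t batch_size, uint32_t num_units,
                                           bool use_cifg) {
        const uint32_t gates = use_cifg ? 3u : 4u;
        return {batch_size, num_units * gates};
    }
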