/packages/modules/NeuralNetworks/common/cpu_operations/
SVDF.cpp
    83    const uint32_t batch_size = SizeOfDimension(input, 0);   in Prepare() [local]
    99    stateShape->dimensions = {batch_size, memory_size * num_filters};   in Prepare()
    105   outputShape->dimensions = {batch_size, num_units};   in Prepare()
    170   const int batch_size = SizeOfDimension(input_, 0);   in EvalFloat32() [local]
    176   memcpy(outputStateData, inputStateData, sizeof(float) * batch_size * memory_size * num_filters);   in EvalFloat32()
    178   for (int b = 0; b < batch_size; b++) {   in EvalFloat32()
    187   float scratch[batch_size * num_filters];   in EvalFloat32()
    188   std::fill_n(scratch, batch_size * num_filters, 0.0f);   in EvalFloat32()
    190   weightsFeatureData, num_filters, input_size, inputData, batch_size, scratch);   in EvalFloat32()
    194   for (int i = 0; i < batch_size * num_filters; ++i) {   in EvalFloat32()
    [all …]

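The SVDF hits zero-fill a [batch_size, num_filters] scratch buffer and then accumulate into it the product of the feature weights with each batch row of the input (the call at line 190). A minimal sketch of that accumulation step, with illustrative names and a plain row-major layout assumed (not the NNAPI/tensor_utils implementation):

    // Sketch: accumulate weights_feature (num_filters x input_size) times each
    // batch row of input (batch_size x input_size) into scratch
    // (batch_size x num_filters). scratch is assumed pre-filled with 0.0f,
    // matching the std::fill_n call above. Illustrative names only.
    void MatrixBatchVectorMultiplyAccumulate(const float* weights, int num_filters,
                                             int input_size, const float* input,
                                             int batch_size, float* scratch) {
        for (int b = 0; b < batch_size; ++b) {
            const float* in = input + b * input_size;
            float* out = scratch + b * num_filters;
            for (int f = 0; f < num_filters; ++f) {
                const float* row = weights + f * input_size;
                float sum = 0.0f;
                for (int i = 0; i < input_size; ++i) {
                    sum += row[i] * in[i];
                }
                out[f] += sum;
            }
        }
    }
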
Multinomial.cpp
    78    const uint32_t batch_size = SizeOfDimension(input, 0);   in Prepare() [local]
    83    outputShape->dimensions = {batch_size, sample_count};   in Prepare()
    112   const uint32_t batch_size = SizeOfDimension(input_, 0);   in EvalFloat32() [local]
    124   random_generator.ReserveRandomOutputs(batch_size * sample_count_aligned, 256);   in EvalFloat32()
    127   for (uint64_t b = 0; b < batch_size; ++b) {   in EvalFloat32()

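Multinomial.cpp shapes its output as [batch_size, sample_count] and reserves batch_size * sample_count_aligned random outputs up front. As a standalone illustration of the sampling itself, drawing sample_count class indices per batch row from float logits, here is a sketch using the C++ standard library rather than the reserved-output generator the hits reference; all names are hypothetical:

    // Sketch only: sample `sample_count` class indices per batch row from
    // logits of shape [batch_size, class_size].
    #include <cmath>
    #include <cstdint>
    #include <random>
    #include <vector>

    std::vector<int32_t> SampleMultinomial(const float* logits, uint32_t batch_size,
                                           uint32_t class_size, uint32_t sample_count,
                                           std::mt19937& rng) {
        std::vector<int32_t> out(batch_size * sample_count);
        for (uint32_t b = 0; b < batch_size; ++b) {
            const float* row = logits + b * class_size;
            // discrete_distribution expects non-negative weights, so exponentiate the logits.
            std::vector<double> weights(class_size);
            for (uint32_t c = 0; c < class_size; ++c) weights[c] = std::exp(row[c]);
            std::discrete_distribution<int32_t> dist(weights.begin(), weights.end());
            for (uint32_t s = 0; s < sample_count; ++s) {
                out[b * sample_count + s] = dist(rng);
            }
        }
        return out;
    }
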
RNN.cpp
    61    const uint32_t batch_size = SizeOfDimension(input, 0);   in Prepare() [local]
    72    hiddenStateShape->dimensions = {batch_size, num_units};   in Prepare()
    76    outputShape->dimensions = {batch_size, num_units};   in Prepare()
    145   const uint32_t batch_size = inputShape.dimensions[0];   in RNNStep() [local]
    160   for (uint32_t b = 0; b < batch_size; b++) {   in RNNStep()

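RNN.cpp sizes both the hidden state and the output as [batch_size, num_units] in Prepare() and then iterates over batches in RNNStep(). A rough sketch of one basic RNN step per batch row, with illustrative names and row-major weight layouts assumed (this is not the actual RNNStep signature):

    // Sketch of a basic RNN step:
    //   output[b, u] = act( bias[u]
    //                     + sum_i input[b, i]     * input_weights[u, i]
    //                     + sum_j hidden_in[b, j] * recurrent_weights[u, j] )
    // Illustrative layout assumptions; not the NNAPI implementation.
    void RnnStepFloat32(const float* input, const float* hidden_in,
                        const float* input_weights, const float* recurrent_weights,
                        const float* bias, float (*act)(float),
                        int batch_size, int input_size, int num_units,
                        float* output /* [batch_size, num_units] */) {
        for (int b = 0; b < batch_size; ++b) {
            for (int u = 0; u < num_units; ++u) {
                float sum = bias[u];
                for (int i = 0; i < input_size; ++i)
                    sum += input[b * input_size + i] * input_weights[u * input_size + i];
                for (int j = 0; j < num_units; ++j)
                    sum += hidden_in[b * num_units + j] * recurrent_weights[u * num_units + j];
                output[b * num_units + u] = act(sum);
            }
        }
    }
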
FullyConnected.cpp
    62    uint32_t batch_size = getSizeOfDimension(outputShape, 0);   in fullyConnectedFloat32() [local]
    64    if (batch_size * batch_size == input_n_elements) {   in fullyConnectedFloat32()

MultinomialTest.cpp
    44    MultinomialOpModel(uint32_t batch_size, uint32_t class_size, uint32_t sample_size)   in MultinomialOpModel() [argument]
    45    : batch_size_(batch_size), class_size_(class_size), sample_size_(sample_size) {   in MultinomialOpModel()

/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
qlstm_projection.mod.py
    22    batch_size = 2   [variable]
    27    InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
    58    OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
    59    CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
    134   output_state_in: [ 0 for _ in range(batch_size * output_size) ],
    135   cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
    191   output_state_in: [ 0 for _ in range(batch_size * output_size) ],
    192   cell_state_in: [ 0 for _ in range(batch_size * num_units) ],

qlstm_noprojection.mod.py
    22    batch_size = 2   [variable]
    27    InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
    58    OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
    59    CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
    128   output_state_in: [ 0 for _ in range(batch_size * output_size) ],
    129   cell_state_in: [ 0 for _ in range(batch_size * num_units) ],

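Both QLSTM specs drive the model with all-zero initial states whose sizes follow from batch_size: output_state_in holds batch_size * output_size signed 8-bit values (TENSOR_QUANT8_ASYMM_SIGNED) and cell_state_in holds batch_size * num_units 16-bit values (TENSOR_QUANT16_SYMM). Roughly the same buffers on the C++ side, as a hypothetical sketch (not taken from the runtime):

    // Zero-initialized LSTM state buffers with the shapes used by the specs above.
    // TENSOR_QUANT8_ASYMM_SIGNED maps to int8_t, TENSOR_QUANT16_SYMM to int16_t.
    #include <cstdint>
    #include <vector>

    struct QlstmStates {
        std::vector<int8_t> output_state_in;   // [batch_size, output_size]
        std::vector<int16_t> cell_state_in;    // [batch_size, num_units]
    };

    QlstmStates MakeZeroStates(uint32_t batch_size, uint32_t output_size, uint32_t num_units) {
        return {std::vector<int8_t>(batch_size * output_size, 0),
                std::vector<int16_t>(batch_size * num_units, 0)};
    }
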
/packages/modules/NeuralNetworks/common/types/operations/src/
FullyConnected.cpp
    46    uint32_t batch_size = 0;   in validateShapes() [local]
    49    batch_size = input_n_elements / input_size;   in validateShapes()
    59    output->dimensions = {batch_size, num_units};   in validateShapes()

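Here validateShapes() recovers the batch dimension by flattening: the input's total element count divided by input_size gives batch_size, and the output shape is then {batch_size, num_units}. A condensed sketch of that rule as a hypothetical helper (not the validateShapes() function itself):

    // Derive fully connected output dims from a flattened input:
    //   batch_size   = total input elements / input_size,
    //   output shape = {batch_size, num_units}.
    // Returns false if the input cannot be split evenly into rows of input_size.
    #include <cstdint>
    #include <vector>

    bool InferFullyConnectedOutput(const std::vector<uint32_t>& input_dims,
                                   uint32_t input_size, uint32_t num_units,
                                   std::vector<uint32_t>* output_dims) {
        uint64_t input_n_elements = 1;
        for (uint32_t d : input_dims) input_n_elements *= d;
        if (input_size == 0 || input_n_elements % input_size != 0) return false;
        const uint32_t batch_size = static_cast<uint32_t>(input_n_elements / input_size);
        *output_dims = {batch_size, num_units};
        return true;
    }
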
/packages/modules/RemoteKeyProvisioning/app/src/com/android/rkpdapp/provisioner/
Provisioner.java
    151   int batch_size = keysGenerated.size();   in batchProvision() [local]
    152   if (batch_size < 1) {   in batchProvision()
    154   "Request at least 1 key to be signed. Num requested: " + batch_size);   in batchProvision()

/packages/modules/Virtualization/virtualizationservice/src/
maintenance.rs
    51    batch_size: usize,   [field]
    134   Ok(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })   in new()
    193   let len = std::cmp::min(vm_ids.len(), self.batch_size);   in delete_ids()
    341   fn new_test_state(history: Arc<Mutex<Vec<SkOp>>>, batch_size: usize) -> State {   in new_test_state()
    345   let inner = InnerState { sk, vm_id_db, batch_size };   in new_test_state()

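delete_ids() caps each pass at self.batch_size ids (DELETE_MAX_BATCH_SIZE by default) via the min() at line 193. The same capping pattern, sketched generically in C++ purely for illustration (the real code is Rust inside the virtualizationservice maintenance module, and the callback name here is invented):

    // Sketch of the batching cap shown above: hand at most `batch_size` ids
    // from the front of the list to the backend in one call.
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    template <typename Id, typename DeleteBatchFn>
    void DeleteIdsBatched(const std::vector<Id>& ids, std::size_t batch_size,
                          DeleteBatchFn delete_batch) {
        const std::size_t len = std::min(ids.size(), batch_size);
        delete_batch(std::vector<Id>(ids.begin(), ids.begin() + len));
        // Ids beyond `len` are left for a later pass.
    }
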
/packages/modules/HealthFitness/testapps/toolbox/src/com/android/healthconnect/testapps/toolbox/ui/
PerformanceTestingLinearLayout.kt
    57    return returnIntIfNotEmpty(findViewById<EditText>(R.id.batch_size).text.toString())   in getNumberOfRecordsPerBatch()

/packages/modules/NeuralNetworks/tools/api/
types.spec
    1162   * [batch_size, input_size], where "input_size" corresponds to the
    1167   * Since %{NNAPILevel3}, zero batch_size is supported for this tensor.
    1188   * * 0: The output tensor, of shape [batch_size, num_units]. %{BeforeNNAPILevel3For}
    1701   * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
    1742   * A 2-D tensor of shape [batch_size, output_size].
    1744   * A 2-D tensor of shape [batch_size, num_units].
    1794   * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
    1795   * [batch_size, num_units * 4] without CIFG.
    1797   * A 2-D tensor of shape [batch_size, output_size].
    1799   * A 2-D tensor of shape [batch_size, num_units].
    [all …]

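These types.spec excerpts express every operand shape in terms of batch_size: inputs are [batch_size, input_size], RNN/fully connected outputs are [batch_size, num_units], LSTM output state is [batch_size, output_size], cell state is [batch_size, num_units], and the LSTM scratch buffer is [batch_size, num_units * 3] with CIFG or [batch_size, num_units * 4] without. Collected into a single hypothetical helper for illustration (not part of any NNAPI header):

    // Shapes from the spec lines above, written as functions of batch_size.
    #include <cstdint>
    #include <vector>

    struct LstmOperandShapes {
        std::vector<uint32_t> input;         // [batch_size, input_size]
        std::vector<uint32_t> output_state;  // [batch_size, output_size]
        std::vector<uint32_t> cell_state;    // [batch_size, num_units]
        std::vector<uint32_t> scratch;       // [batch_size, num_units * 3 or * 4]
    };

    LstmOperandShapes MakeLstmOperandShapes(uint32_t batch_size, uint32_t input_size,
                                            uint32_t output_size, uint32_t num_units,
                                            bool use_cifg) {
        return {{batch_size, input_size},
                {batch_size, output_size},
                {batch_size, num_units},
                {batch_size, num_units * (use_cifg ? 3u : 4u)}};
    }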