/packages/modules/NeuralNetworks/runtime/test/specs/V1_0/ |
D | svdf.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     60   state_in: [0 for _ in range(batches * memory_size * features)],
    127   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    132   batch_start = i * input_size * batches
    133   batch_end = batch_start + input_size * batches
    135   golden_start = i * units * batches
    [all …]
|
D | svdf_bias_present.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     60   state_in: [0 for _ in range(batches * memory_size * features)],
    127   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    132   batch_start = i * input_size * batches
    133   batch_end = batch_start + input_size * batches
    135   golden_start = i * units * batches
    [all …]
|
D | svdf2.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     75   state_in: [0 for _ in range(batches * memory_size * features)],
    142   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    147   batch_start = i * input_size * batches
    148   batch_end = batch_start + input_size * batches
    150   golden_start = i * units * batches
    [all …]
|
D | rnn.mod.py |
     17   batches = 2   variable
     23   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     27   hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     31   hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units…
     32   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
    184   input_sequence_size = int(len(test_inputs) / input_size / batches)
    193   input0[hidden_state_in] = [0 for x in range(batches * units)]
    195   hidden_state_out: [0 for x in range(batches * units)],
|
D | rnn_state.mod.py |
     17   batches = 2   variable
     23   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     27   hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     31   hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units…
     32   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
|
D | svdf_state.mod.py |
     17   batches = 2   variable
     24   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     28   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*units))
     31   state_out = Output("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*units))
     32   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
|
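The V1_0 specs listed above share one pattern: batches = 2, batched input/state tensors, a zero-initialized recurrent state, and a loop that slices flat test_inputs/golden_outputs arrays into one example per time step. Below is a minimal, self-contained Python sketch of that slicing arithmetic with toy sizes and data; the real specs' Input/Output/Example DSL calls, weights, and golden values are omitted.

    # Sketch of the per-time-step batch slicing used by the svdf*/rnn* specs above.
    # All sizes and data here are toy values, not the ones from the real spec files.
    batches = 2
    input_size = 3
    units = 4

    # Flat row-major data: each time step contributes batches * input_size values.
    test_inputs = [float(v) for v in range(2 * batches * input_size)]   # 2 time steps
    golden_outputs = [0.0] * (2 * batches * units)                      # matching goldens

    # Same derivation as rnn.mod.py line 184.
    input_sequence_size = int(len(test_inputs) / input_size / batches)

    # Zero-initialized recurrent state, as in "state_in: [0 for _ in range(...)]".
    state_in = [0 for _ in range(batches * units)]

    for i in range(input_sequence_size):
        # Slice out one time step across all batches (lines 132-135 in svdf.mod.py).
        batch_start = i * input_size * batches
        batch_end = batch_start + input_size * batches
        step_inputs = test_inputs[batch_start:batch_end]

        golden_start = i * units * batches
        golden_end = golden_start + units * batches
        step_golden = golden_outputs[golden_start:golden_end]

        # In the real specs these slices feed one Example((input0, output0)) each.
        print(i, step_inputs, step_golden)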
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/ |
D | svdf_bias_present_float16.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
     60   state_in: [0 for _ in range(batches * memory_size * features)],
    127   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    132   batch_start = i * input_size * batches
    133   batch_end = batch_start + input_size * batches
    135   golden_start = i * units * batches
    [all …]
|
D | svdf_float16.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
     60   state_in: [0 for _ in range(batches * memory_size * features)],
    127   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    132   batch_start = i * input_size * batches
    133   batch_end = batch_start + input_size * batches
    135   golden_start = i * units * batches
    [all …]
|
D | rnn_float16.mod.py |
     17   batches = 2   variable
     23   input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
     27   hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
     31   hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units…
     32   output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
    184   input_sequence_size = int(len(test_inputs) / input_size / batches)
    193   input0[hidden_state_in] = [0 for x in range(batches * units)]
    195   hidden_state_out: [0 for x in range(batches * units)],
|
D | svdf_state_float16.mod.py |
     17   batches = 2   variable
     24   input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
     28   state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*units))
     31   state_out = Output("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*units))
     32   output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
|
/packages/modules/NeuralNetworks/runtime/test/specs/V1_1/ |
D | svdf2_relaxed.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     76   state_in: [0 for _ in range(batches * memory_size * features)],
    143   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    148   batch_start = i * input_size * batches
    149   batch_end = batch_start + input_size * batches
    151   golden_start = i * units * batches
    [all …]
|
D | svdf_relaxed.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     61   state_in: [0 for _ in range(batches * memory_size * features)],
    128   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    133   batch_start = i * input_size * batches
    134   batch_end = batch_start + input_size * batches
    136   golden_start = i * units * batches
    [all …]
|
D | svdf_bias_present_relaxed.mod.py |
     17   batches = 2   variable
     26   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     30   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*features))
     33   state_out = IgnoredOutput("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*feature…
     34   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     61   state_in: [0 for _ in range(batches * memory_size * features)],
    128   output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
    133   batch_start = i * input_size * batches
    134   batch_end = batch_start + input_size * batches
    136   golden_start = i * units * batches
    [all …]
|
D | rnn_relaxed.mod.py |
     17   batches = 2   variable
     23   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     27   hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     31   hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units…
     32   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
    185   input_sequence_size = int(len(test_inputs) / input_size / batches)
    194   input0[hidden_state_in] = [0 for x in range(batches * units)]
    196   hidden_state_out: [0 for x in range(batches * units)],
|
D | rnn_state_relaxed.mod.py |
     17   batches = 2   variable
     23   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     27   hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
     31   hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units…
     32   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
|
D | svdf_state_relaxed.mod.py |
     17   batches = 2   variable
     24   input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
     28   state_in = Input("state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*units))
     31   state_out = Output("state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, memory_size*units))
     32   output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
|
/packages/modules/NeuralNetworks/common/ |
D | OperationsExecutionUtils.cpp |
    333   uint32_t batches = getSizeOfDimension(input, 0);   in depthToSpacePrepare() local
    340   output->dimensions = {batches, height * blockSize, width * blockSize,   in depthToSpacePrepare()
    352   uint32_t batches = getSizeOfDimension(input, 0);   in spaceToDepthPrepare() local
    361   output->dimensions = {batches, height / blockSize, width / blockSize,   in spaceToDepthPrepare()
    447   uint32_t batches = getSizeOfDimension(input, 0);   in batchToSpacePrepare() local
    452   NN_OPS_CHECK(batches % (blockSizeData[0] * blockSizeData[1]) == 0);   in batchToSpacePrepare()
    454   output->dimensions = {batches / (blockSizeData[0] * blockSizeData[1]),   in batchToSpacePrepare()
    480   uint32_t batches = getSizeOfDimension(input, 0);   in spaceToBatchPrepare() local
    492   output->dimensions = {batches * (blockSizeData[0] * blockSizeData[1]),   in spaceToBatchPrepare()
    646   uint32_t batches = getSizeOfDimension(input, 0);   in groupedConvPrepare() local
    [all …]
|
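The prepare() helpers above read the batch dimension of an NHWC input and derive the output shape from it. The following Python restates the dimension math visible in those lines; it is not the NNAPI implementation itself, SPACE_TO_BATCH's padding handling is left out, and the channel math for DEPTH_TO_SPACE / SPACE_TO_DEPTH follows the standard definition of those ops rather than lines shown here.

    # Rough restatement of the shape arithmetic shown above; NHWC layout assumed.
    def depth_to_space_shape(batches, height, width, channels, block_size):
        assert channels % (block_size * block_size) == 0
        return (batches, height * block_size, width * block_size,
                channels // (block_size * block_size))

    def space_to_depth_shape(batches, height, width, channels, block_size):
        assert height % block_size == 0 and width % block_size == 0
        return (batches, height // block_size, width // block_size,
                channels * block_size * block_size)

    def batch_to_space_shape(batches, height, width, channels, block):
        # Mirrors NN_OPS_CHECK(batches % (blockSizeData[0] * blockSizeData[1]) == 0).
        assert batches % (block[0] * block[1]) == 0
        return (batches // (block[0] * block[1]),
                height * block[0], width * block[1], channels)

    def space_to_batch_shape(batches, height, width, channels, block):
        # Padding of height/width before the division is omitted for brevity.
        return (batches * (block[0] * block[1]),
                height // block[0], width // block[1], channels)

    print(batch_to_space_shape(8, 2, 2, 3, (2, 2)))   # -> (2, 4, 4, 3)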
/packages/modules/Wifi/service/java/com/android/server/wifi/ |
D | ApplicationQosPolicyRequestHandler.java |
    344   List<List<Integer>> batches = divideRequestIntoBatches(ownedPolicies);   in queueRemoveAllRequest()
    345   for (List<Integer> batch : batches) {   in queueRemoveAllRequest()
    375   List<List<QosPolicyParams>> batches = new ArrayList<>();   in queueAllPoliciesOnIface()
    377   batches.addAll(divideRequestIntoBatches(policiesWithoutQosChars));   in queueAllPoliciesOnIface()
    380   batches.addAll(divideRequestIntoBatches(policiesWithQosChars));   in queueAllPoliciesOnIface()
    384   for (List<QosPolicyParams> batch : batches) {   in queueAllPoliciesOnIface()
    489   List<List<T>> batches = new ArrayList<>();   in divideRequestIntoBatches() local
    493   batches.add(request.subList(startIndex, endIndex));   in divideRequestIntoBatches()
    497   return batches;   in divideRequestIntoBatches()
|
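divideRequestIntoBatches() above splits a policy list into fixed-size sublists via subList(startIndex, endIndex), and the callers then loop over the resulting batches, issuing one request per batch. A Python sketch of the same idea follows; MAX_POLICIES_PER_REQUEST is a placeholder name and value, since the actual limit is not part of the lines shown.

    # Sketch of fixed-size request batching, analogous to divideRequestIntoBatches().
    # MAX_POLICIES_PER_REQUEST is a placeholder constant, not the real Wifi value.
    MAX_POLICIES_PER_REQUEST = 16

    def divide_request_into_batches(request):
        batches = []
        for start in range(0, len(request), MAX_POLICIES_PER_REQUEST):
            # Java uses request.subList(startIndex, endIndex); slicing is the equivalent.
            batches.append(request[start:start + MAX_POLICIES_PER_REQUEST])
        return batches

    policies = list(range(40))
    for batch in divide_request_into_batches(policies):
        print(len(batch), batch[:3])   # each batch would become one request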
/packages/modules/AdServices/adservices/libraries/cobalt/java/com/android/cobalt/observations/ |
D | NonPrivateObservationGenerator.java |
     87   ImmutableList.Builder<UnencryptedObservationBatch> batches = ImmutableList.builder();   in generateObservations() local
    117   batches.add(batch.build());   in generateObservations()
    119   return batches.build();   in generateObservations()
|
D | PrivateObservationGenerator.java |
    111   ImmutableList.Builder<UnencryptedObservationBatch> batches = ImmutableList.builder();   in generateObservations() local
    113   batches.add(   in generateObservations()
    117   return batches.build();   in generateObservations()
|
/packages/modules/AdServices/adservices/libraries/cobalt/java/com/android/cobalt/data/ |
D | DataService.java |
    475   ImmutableList<UnencryptedObservationBatch> batches =   in generateObservationsSync() local
    478   for (UnencryptedObservationBatch batch : batches) {   in generateObservationsSync()
    484   numObservations, batches.size(), dayIndex, reportKey);   in generateObservationsSync()
    485   generatedObservations.addAll(batches);   in generateObservationsSync()
|
/packages/apps/Dialer/java/com/android/dialer/calllog/database/ |
D | MutationApplier.java |
    104   Iterable<List<Long>> batches = Iterables.partition(mutations.getDeletes(), 999);   in applyToDatabaseInternal() local
    105   for (List<Long> idsInBatch : batches) {   in applyToDatabaseInternal()
|
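MutationApplier partitions the IDs to delete into groups of 999 (Guava's Iterables.partition) so that each generated IN (...) clause stays within SQLite's historical default host-parameter limit of 999. A sketch of that pattern with Python's sqlite3 module follows; the table and column names are illustrative, not the Dialer call-log schema.

    # Delete rows in batches of at most 999 bound parameters, mirroring
    # Iterables.partition(mutations.getDeletes(), 999) in MutationApplier.
    # "calls" / "_id" are illustrative names, not the actual Dialer schema.
    import sqlite3

    SQLITE_MAX_VARIABLES = 999

    def delete_in_batches(conn, ids):
        for start in range(0, len(ids), SQLITE_MAX_VARIABLES):
            batch = ids[start:start + SQLITE_MAX_VARIABLES]
            placeholders = ", ".join("?" for _ in batch)
            conn.execute(f"DELETE FROM calls WHERE _id IN ({placeholders})", batch)
        conn.commit()

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE calls (_id INTEGER PRIMARY KEY)")
    conn.executemany("INSERT INTO calls (_id) VALUES (?)", [(i,) for i in range(2500)])
    delete_in_batches(conn, list(range(2500)))
    print(conn.execute("SELECT COUNT(*) FROM calls").fetchone())   # (0,)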
/packages/modules/NeuralNetworks/common/cpu_operations/ |
D | ResizeImageOps.cpp |
    180   uint32_t batches = getSizeOfDimension(input, 0);   in prepare() local
    213   output.dimensions = {batches, channels, (uint32_t)height, (uint32_t)width};   in prepare()
    215   output.dimensions = {batches, (uint32_t)height, (uint32_t)width, channels};   in prepare()
|
D | RNNTest.cpp |
    134   BasicRNNOpModel(uint32_t batches, uint32_t units, uint32_t size)   in BasicRNNOpModel() argument
    135   : batches_(batches), units_(units), input_size_(size), activation_(kActivationRelu) {   in BasicRNNOpModel()
|
D | Pooling.cpp |
    305   uint32_t batches = getSizeOfDimension(input, 0);   in prepare() local
    320   output.dimensions = {batches, channels, outHeight, outWidth};   in prepare()
    322   output.dimensions = {batches, outHeight, outWidth, channels};   in prepare()
|