/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/ |
D | fully_connected_quant8_signed.mod.py |
      19   weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 10}, 0.5f, -1",   variable
      26   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
      42   weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128", [-118, -108, -108, -1…   variable
      46   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      61   weights = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128") # num_units = 1, input_si…   variable
      65   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      70   weights:
      84   weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128", [-126])   variable
      88   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
     103   weights = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128")   variable
     [all …]
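For orientation, a minimal quant8-signed FULLY_CONNECTED spec in the same test-generator DSL could look as sketched below. This is not the contents of fully_connected_quant8_signed.mod.py: the Input/Output/Parameter/Int32Scalar/Model/Example helpers are assumed to be the ones the generator injects, and all shapes, scales and values are illustrative.

    # Hedged sketch; helpers and values are assumptions, not the elided lines above.
    in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128")        # 1 batch, input_size = 1
    weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128", [-126])  # real value 1.0
    bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.25f, 0", [4])                  # bias scale = in.scale * w.scale
    act = Int32Scalar("act", 0)                                                   # no fused activation
    out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 1.f, -128")

    model = Model().Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)

    # Quantized math: (1.0 * 1.0 + 1.0) = 2.0 real  ->  2.0 / 1.0 + (-128) = -126 quantized.
    Example({in0: [-126], out0: [-126]})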
|
D | unidirectional_sequence_rnn.mod.py |
      19   def test(name, input, weights, recurrent_weights, bias, hidden_state,   argument
      25   model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
      31   weights: weights_data,
     181   weights=Input("weights", "TENSOR_FLOAT32",
     207   weights=Input("weights", "TENSOR_FLOAT32",
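The file wraps each case in a small test() helper; a hedged reconstruction of that pattern is sketched below. Only the parameters and lines visible in the snippets are taken from the file; the trailing parameters, the time_major handling and the Example keyword arguments are assumptions.

    def test(name, input, weights, recurrent_weights, bias, hidden_state,
             activation, time_major, output,                        # assumed remaining parameters
             input_data, weights_data, recurrent_weights_data,
             bias_data, hidden_state_data, output_data):
        model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
                                  recurrent_weights, bias, hidden_state,
                                  activation, time_major).To(output)
        Example({
            input: input_data,
            weights: weights_data,                                   # matches "weights: weights_data," at line 31
            recurrent_weights: recurrent_weights_data,
            bias: bias_data,
            hidden_state: hidden_state_data,
            output: output_data,
        }, model=model, name=name)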
|
/packages/modules/NeuralNetworks/common/cpu_operations/ |
D | QuantizedLSTM.cpp |
     212   uint8_t* weights) {   in assignWeightsSubmatrix() argument
     218   weights[(row + offset_row) * weightsDims[1] + column + offset_column] = submatrixValues[i];   in assignWeightsSubmatrix()
     270   auto checkWeightsShape = [&](const RunTimeOperandInfo* weights, uint32_t columns) -> bool {   in prepare() argument
     271   NN_RET_CHECK_EQ(NumDimensions(weights), 2u);   in prepare()
     272   NN_RET_CHECK_EQ(SizeOfDimension(weights, 0), outputSize);   in prepare()
     273   NN_RET_CHECK_EQ(SizeOfDimension(weights, 1), columns);   in prepare()
     274   NN_RET_CHECK_EQ(weights->scale, weightsScale);   in prepare()
     275   NN_RET_CHECK_EQ(weights->zeroPoint, weightsZeroPoint);   in prepare()
     354   uint8_t* weights) {   in concatenateWeights() argument
     357   assignWeightsSubmatrix(inputToInputWeights_, 0 * outputSize, outputSize, weightsDims, weights);   in concatenateWeights()
     [all …]
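The helper at lines 212/218 copies one gate's weight submatrix into a single concatenated, row-major weights buffer. The index arithmetic is easy to mis-read in a flattened listing, so here is a plain Python illustration of the same copy (not the actual C++ helper; the nested rows/columns layout of the submatrix is an assumption, the C++ version iterates a flat buffer):

    # Copy a [rows x columns] submatrix into the row-major destination buffer at
    # (offset_row, offset_column); weights_dims[1] is the destination row stride.
    def assign_weights_submatrix(submatrix_values, offset_row, offset_column,
                                 weights_dims, weights):
        rows = len(submatrix_values)
        columns = len(submatrix_values[0])
        for row in range(rows):
            for column in range(columns):
                weights[(row + offset_row) * weights_dims[1] + column + offset_column] = \
                    submatrix_values[row][column]

concatenateWeights() calls this with the input-to-input block at row offset 0 * outputSize (line 357); the elided calls presumably place the remaining gate blocks at successive multiples of outputSize.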
|
D | UnidirectionalSequenceRNN.cpp |
      56   const T* weights = context->getInputBuffer<T>(kWeightsTensor);   in executeTyped() local
      96   RNN::RNNStep<T>(input, fixedTimeInputShape, hiddenState, bias, weights, weightsShape,   in executeTyped()
     120   Shape weights = context->getInputShape(kWeightsTensor);   in prepare() local
     131   const uint32_t numUnits = getSizeOfDimension(weights, 0);   in prepare()
     135   NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2u);   in prepare()
     140   NN_RET_CHECK_EQ(inputSize, getSizeOfDimension(weights, 1));   in prepare()
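prepare() derives the RNN dimensions from the weights tensor. A small Python sketch of those relationships follows (illustration only; the real checks are the NN_RET_CHECK_EQ calls above, and the 3-D input layout is stated here as an assumption):

    # numUnits comes from weights dim 0; the input width must match weights dim 1.
    def check_rnn_weight_shapes(input_shape, weights_shape):
        assert len(weights_shape) == 2             # line 135
        input_size = input_shape[2]                # input is [time, batch, in] or [batch, time, in]
        num_units = weights_shape[0]               # line 131
        assert input_size == weights_shape[1]      # line 140
        return num_units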
|
/packages/modules/NeuralNetworks/common/types/operations/src/ |
D | FullyConnected.cpp |
      26   bool validateShapes(const Shape& input, const Shape& weights, const Shape& bias, Shape* output) {   in validateShapes() argument
      29   NN_RET_CHECK(weights.type == input.type);   in validateShapes()
      40   NN_RET_CHECK_EQ(getNumberOfDimensions(weights), 2u);   in validateShapes()
      43   uint32_t num_units = getSizeOfDimension(weights, 0u);   in validateShapes()
      44   uint32_t input_size = getSizeOfDimension(weights, 1u);   in validateShapes()
     124   Shape weights = context->getInputShape(kWeightsTensor);   in validate() local
     126   if (hasKnownRank(input) && hasKnownRank(weights) && hasKnownRank(bias)) {   in validate()
     127   NN_RET_CHECK(validateShapes(input, weights, bias));   in validate()
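validateShapes() encodes the FULLY_CONNECTED shape contract: weights are [num_units, input_size], the input is treated as a flattened [batch_size, input_size] matrix, and the output becomes [batch_size, num_units]. A hedged Python sketch of that rule (bias handling simplified):

    def fc_output_shape(input_shape, weights_shape, bias_shape):
        assert len(weights_shape) == 2                 # line 40
        num_units, input_size = weights_shape          # lines 43-44
        assert bias_shape == (num_units,)              # assumed; the real check may also allow an empty bias
        num_elements = 1
        for d in input_shape:
            num_elements *= d
        batch_size = num_elements // input_size        # input flattened to [batch_size, input_size]
        return (batch_size, num_units)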
|
/packages/modules/NeuralNetworks/runtime/test/ |
D | TestMemory.cpp |
      61   WrapperMemory weights(offsetForMatrix3 + sizeof(matrix3), PROT_READ, fd, 0);   in TEST_F() local
      62   ASSERT_TRUE(weights.isValid());   in TEST_F()
      75   model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));   in TEST_F()
      76   model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));   in TEST_F()
     124   WrapperMemory weights(buffer);   in TEST_F() local
     125   ASSERT_TRUE(weights.isValid());   in TEST_F()
     138   model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));   in TEST_F()
     139   model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));   in TEST_F()
|
/packages/modules/Wifi/framework/java/android/net/wifi/ |
D | WifiNetworkSelectionConfig.java |
     191   private static boolean isValidFrequencyWeightArray(SparseArray<Integer> weights) {   in isValidFrequencyWeightArray() argument
     192   if (weights == null) return false;   in isValidFrequencyWeightArray()
     194   for (int i = 0; i < weights.size(); i++) {   in isValidFrequencyWeightArray()
     195   int value = weights.valueAt(i);   in isValidFrequencyWeightArray()
     479   public @NonNull Builder setFrequencyWeights(@NonNull SparseArray<Integer> weights)   in setFrequencyWeights() argument
     481   if (!isValidFrequencyWeightArray(weights)) {   in setFrequencyWeights()
     482   if (weights == null) {   in setFrequencyWeights()
     486   + weights.toString());   in setFrequencyWeights()
     488   mWifiNetworkSelectionConfig.mFrequencyWeights = weights;   in setFrequencyWeights()
|
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/ |
D | unidirectional_sequence_rnn.mod.py |
      19   def test(name, input, weights, recurrent_weights, bias, hidden_state,   argument
      24   model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
      29   weights: weights_data,
     143   weights=Input("weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(
     165   weights=Input("weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(
|
D | rnn_float16.mod.py |
      24   weights = Input("weights", "TENSOR_FLOAT16", "{%d, %d}" % (units, input_size))   variable
      34   model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
      38   weights: [
|
D | fully_connected_v1_2.mod.py |
      20   weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])   variable
      24   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      28   weights: ("TENSOR_QUANT8_ASYMM", 0.5, 120),
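The line-28 snippet is a per-operand entry of a quantization variation: the float32 model is written once and a converter re-types each operand to TENSOR_QUANT8_ASYMM with a (scale, zeroPoint) pair. A hedged sketch of that pattern, assuming the DataTypeConverter/AddVariations helpers of the test generator and with illustrative operand names and values:

    in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")
    weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])
    bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [4])
    act = Int32Scalar("act", 0)
    out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")
    model = Model().Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)

    quant8 = DataTypeConverter().Identify({
        in0: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
        weights: ("TENSOR_QUANT8_ASYMM", 0.5, 120),    # matches the line-28 entry above
        bias: ("TENSOR_INT32", 0.25, 0),               # bias scale = input scale * weight scale
        out0: ("TENSOR_QUANT8_ASYMM", 1.0, 127),
    })

    # y = 2*x + 4 for the float model; the variation reruns it as quant8.
    Example({in0: [2.0, 32.0, 16.0], out0: [8.0, 68.0, 36.0]}).AddVariations(quant8)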
|
/packages/modules/NeuralNetworks/runtime/test/specs/V1_0/ |
D | fully_connected_float_weights_as_inputs.mod.py |
      19   weights = Input("op2", "TENSOR_FLOAT32", "{1, 1}")   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      28   weights: [2],
|
D | fully_connected_quant8_weights_as_inputs.mod.py |
      19   weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 1}, 0.5f, 0")   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      28   weights: [2],
|
D | fully_connected_float_large_weights_as_inputs.mod.py |
      19   weights = Input("op2", "TENSOR_FLOAT32", "{1, 5}") # num_units = 1, input_size = 5   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      28   weights:
|
D | fully_connected_quant8_large_weights_as_inputs.mod.py |
      19   weights = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0") # num_units = 1, input_size = 5   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      28   weights:
|
D | rnn_state.mod.py |
      24   weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))   variable
      34   model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
      38   weights: [
|
D | rnn.mod.py |
      24   weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))   variable
      34   model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
      38   weights: [
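rnn.mod.py declares the weights as model Inputs, so their data travels through the Example dictionary instead of being baked in as Parameters. Below is a hedged, down-sized sketch of that layout (the real file uses larger constants; the operand names, the activation value, the IgnoredOutput helper and the .To form are assumptions):

    model = Model()
    batches, units, input_size = 1, 1, 2

    input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
    weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))
    recurrent_weights = Input("recurrent_weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, units))
    bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % units)
    hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
    activation = Int32Scalar("activation", 1)   # 1 = RELU
    hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
    output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))

    model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
                            activation).To([hidden_state_out, output])

    # relu(1.0*0.5 + 2.0*0.25 + 0.0*0.1 + 0.0) = 1.0
    Example({
        input: [1.0, 2.0],
        weights: [0.5, 0.25],             # weights data supplied at execution time
        recurrent_weights: [0.1],
        bias: [0.0],
        hidden_state_in: [0.0],
        hidden_state_out: [0.0],
        output: [1.0],
    })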
|
D | fully_connected_float_large.mod.py |
      19   weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 5}", [2, 3, 4, 5, 6]) # num_units = 1, input_size…   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
|
D | fully_connected_quant8.mod.py |
      19   weights = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 1}, 0.5f, 0", [2])   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
|
D | fully_connected_quant8_large.mod.py |
      19   weights = Parameter("op2", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0", [10, 20, 20, 20, 10]) # num_uni…   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
|
D | fully_connected_float_2.mod.py |
      19   weights = Parameter("op2", "TENSOR_FLOAT32", "{16, 8}",   variable
      48   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
|
/packages/modules/NeuralNetworks/runtime/test/specs/V1_1/ |
D | fully_connected_float_large_weights_as_inputs_relaxed.mod.py |
      19   weights = Input("op2", "TENSOR_FLOAT32", "{1, 5}") # num_units = 1, input_size = 5   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      29   weights:
|
D | fully_connected_float_weights_as_inputs_relaxed.mod.py |
      19   weights = Input("op2", "TENSOR_FLOAT32", "{1, 1}")   variable
      23   model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
      29   weights: [2],
|
D | rnn_state_relaxed.mod.py |
      24   weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))   variable
      34   model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
      39   weights: [
|
D | rnn_relaxed.mod.py |
      24   weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))   variable
      34   model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
      39   weights: [
|
/packages/modules/Wifi/framework/tests/src/android/net/wifi/ |
D | WifiNetworkSelectionConfigTest.java |
      55   SparseArray<Integer> weights = new SparseArray<>();   in testWifiNetworkSelectionConfigParcel() local
      56   weights.put(2450, WifiNetworkSelectionConfig.FREQUENCY_WEIGHT_HIGH);   in testWifiNetworkSelectionConfigParcel()
      57   weights.put(5450, WifiNetworkSelectionConfig.FREQUENCY_WEIGHT_LOW);   in testWifiNetworkSelectionConfigParcel()
      68   .setFrequencyWeights(weights)   in testWifiNetworkSelectionConfigParcel()
      91   assertTrue(weights.contentEquals(parcelConfig.getFrequencyWeights()));   in testWifiNetworkSelectionConfigParcel()
|