Searched refs:TENSOR_QUANT8_ASYMM (Results 1 – 25 of 171) sorted by relevance

/frameworks/ml/nn/common/
Utils.cpp
403 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { in validateOperation()
404 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, in validateOperation()
405 OperandType::TENSOR_QUANT8_ASYMM, in validateOperation()
407 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; in validateOperation()
432 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { in validateOperation()
433 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, in validateOperation()
434 OperandType::TENSOR_QUANT8_ASYMM, in validateOperation()
436 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; in validateOperation()
478 if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { in validateOperation()
479 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; in validateOperation()
[all …]
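The Utils.cpp matches above all follow the same validation pattern: when an operation's first data input is TENSOR_QUANT8_ASYMM, the remaining tensor inputs and the output are expected to be quantized as well. A minimal, self-contained sketch of that dispatch (the function and parameter names are illustrative, not the actual validateOperation signature):

#include <vector>

enum class OperandType { TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, INT32 };

// Sketch: for a quantized element-wise op, both tensor inputs and the output
// are expected to be TENSOR_QUANT8_ASYMM; the trailing INT32 stands in for a
// scalar parameter such as the fused activation code.
static bool expectQuant8Signature(OperandType inputType,
                                  std::vector<OperandType>* inExpectedTypes,
                                  std::vector<OperandType>* outExpectedTypes) {
    if (inputType != OperandType::TENSOR_QUANT8_ASYMM) {
        return false;
    }
    *inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::INT32};
    *outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
    return true;
}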
/frameworks/ml/nn/runtime/test/generated/models/
concat_quant8_3.model.cpp
4 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {400, 30}, 0.5f, 0); in CreateModel()
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {400, 60}, 0.5f, 0); in CreateModel()
6 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {400, 90}, 0.5f, 0); in CreateModel()
mul_broadcast_quant8.model.cpp
4 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 1.0, 0); in CreateModel()
5 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0, 0); in CreateModel()
6 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 2.0, 0); in CreateModel()
concat_quant8_2.model.cpp
4 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {40, 300}, 0.5f, 0); in CreateModel()
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {52, 300}, 0.5f, 0); in CreateModel()
6 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {92, 300}, 0.5f, 0); in CreateModel()
hashtable_lookup_quant8.model.cpp
5 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {3, 2}, 0.5f, 0); in CreateModel()
6 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.5f, 0); in CreateModel()
7 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {4}, 1.f, 0); in CreateModel()
fully_connected_quant8_weights_as_inputs.model.cpp
5 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 1}, 0.5f, 0); in CreateModel()
6 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 0.5f, 0); in CreateModel()
7 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 1.f, 0); in CreateModel()
fully_connected_quant8_2.model.cpp
5 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.f, 127); in CreateModel()
6 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 10}, 0.5f, 127); in CreateModel()
7 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 1, 5, 1}, 0.5f, 127); in CreateModel()
fully_connected_quant8.model.cpp
5 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 1}, 0.5f, 0); in CreateModel()
6 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 0.5f, 0); in CreateModel()
7 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3, 1}, 1.f, 0); in CreateModel()
conv_quant8_overflow_weights_as_inputs.model.cpp
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0); in CreateModel()
7 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0); in CreateModel()
conv_quant8_channels_weights_as_inputs.model.cpp
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 0.5f, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 1.0, 0); in CreateModel()
7 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5f, 0); in CreateModel()
conv_quant8_weights_as_inputs.model.cpp
5 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.5f, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.f, 0); in CreateModel()
7 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 1}, 0.5f, 0); in CreateModel()
conv_quant8_large_weights_as_inputs.model.cpp
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0); in CreateModel()
7 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0); in CreateModel()
conv_quant8_channels.model.cpp
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 0.5f, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 3}, 1.0, 0); in CreateModel()
7 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5f, 0); in CreateModel()
conv_quant8.model.cpp
5 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 0.5f, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.f, 0); in CreateModel()
7 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 3, 1}, 0.5f, 0); in CreateModel()
conv_quant8_large.model.cpp
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0); in CreateModel()
7 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0); in CreateModel()
conv_quant8_overflow.model.cpp
5 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0); in CreateModel()
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0); in CreateModel()
7 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0); in CreateModel()
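In these generated models the two trailing constructor arguments are the operand's quantization parameters, scale and zeroPoint. For TENSOR_QUANT8_ASYMM each stored uint8_t value q represents the real value scale * (q - zeroPoint). A small sketch of that mapping (the helper name below is illustrative, not part of the test harness):

#include <cstdint>
#include <cstdio>

// Dequantize one TENSOR_QUANT8_ASYMM element: real = scale * (q - zeroPoint).
static float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
    return scale * (static_cast<int32_t>(q) - zeroPoint);
}

int main() {
    // With the parameters used by fully_connected_quant8_2 above
    // (scale = 0.5f, zeroPoint = 127), the stored byte 131 maps to 2.0f.
    std::printf("%.1f\n", dequantize(131, 0.5f, 127));
    return 0;
}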
/frameworks/ml/nn/runtime/test/generated/vts_models/
concat_quant8_1.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
mul_broadcast_quant8.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
concat_quant8_2.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
add_quant8.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
add_broadcast_quant8.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
concat_quant8_3.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
mul_quant8.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
fully_connected_quant8_2.model.cpp
6 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
15 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
33 .type = OperandType::TENSOR_QUANT8_ASYMM, in createTestModel()
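The vts_models variants describe the same operands as HIDL Operand structs built with designated initializers; only the .type field appears in the matches above. A trimmed sketch of that shape (the struct below is a stand-in, not the full android.hardware.neuralnetworks Operand, which also carries members such as lifetime and location):

#include <cstdint>
#include <vector>

enum class OperandType { TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM };

// Stand-in for the generated VTS operand description.
struct Operand {
    OperandType type;
    std::vector<uint32_t> dimensions;
    float scale;
    int32_t zeroPoint;
};

// A quantized 2x2 input operand, written in the same initializer style as
// the generated createTestModel() code.
const Operand kQuantInput = {
    .type = OperandType::TENSOR_QUANT8_ASYMM,
    .dimensions = {2, 2},
    .scale = 1.0f,
    .zeroPoint = 0,
};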
/frameworks/ml/nn/common/operations/
Reshape.cpp
64 } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) { in depthToSpaceGeneric()
88 } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) { in spaceToDepthGeneric()
122 } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) { in padGeneric()
148 } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) { in batchToSpaceGeneric()
176 } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) { in spaceToBatchGeneric()
221 } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) { in transposeGeneric()
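The Reshape.cpp generic helpers (depthToSpaceGeneric, padGeneric, transposeGeneric, and so on) branch on the input Shape's operand type and route TENSOR_QUANT8_ASYMM tensors to uint8_t kernels. A minimal sketch of that dispatch, with illustrative names:

enum class OperandType { TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM };

struct Shape { OperandType type; };

// Sketch of the per-type routing: float tensors go to the float kernel,
// TENSOR_QUANT8_ASYMM tensors to the uint8_t kernel, anything else fails.
template <typename Float32Fn, typename Quant8Fn>
bool dispatchGeneric(const Shape& inputShape, Float32Fn runFloat32, Quant8Fn runQuant8) {
    if (inputShape.type == OperandType::TENSOR_FLOAT32) {
        return runFloat32();
    } else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM) {
        return runQuant8();
    }
    return false;  // unsupported operand type
}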
