/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CommonUtils.h"

#include <android-base/logging.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>

#include <algorithm>
#include <any>
#include <functional>
#include <optional>
#include <variant>
#include <vector>

33 namespace android::hardware::neuralnetworks::utils {
34
makeQuantized8PerformanceConsistentWithP(const nn::Capabilities::PerformanceInfo & float32Performance,const nn::Capabilities::PerformanceInfo & quantized8Performance)35 nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP(
36 const nn::Capabilities::PerformanceInfo& float32Performance,
37 const nn::Capabilities::PerformanceInfo& quantized8Performance) {
38 // In Android P, most data types are treated as having the same performance as
39 // TENSOR_QUANT8_ASYMM. This collection must be in sorted order.
40 std::vector<nn::Capabilities::OperandPerformance> operandPerformances = {
41 {.type = nn::OperandType::FLOAT32, .info = float32Performance},
42 {.type = nn::OperandType::INT32, .info = quantized8Performance},
43 {.type = nn::OperandType::UINT32, .info = quantized8Performance},
44 {.type = nn::OperandType::TENSOR_FLOAT32, .info = float32Performance},
45 {.type = nn::OperandType::TENSOR_INT32, .info = quantized8Performance},
46 {.type = nn::OperandType::TENSOR_QUANT8_ASYMM, .info = quantized8Performance},
47 {.type = nn::OperandType::OEM, .info = quantized8Performance},
48 {.type = nn::OperandType::TENSOR_OEM_BYTE, .info = quantized8Performance},
49 };
50 return nn::Capabilities::OperandPerformanceTable::create(std::move(operandPerformances))
51 .value();
52 }
53
54 } // namespace android::hardware::neuralnetworks::utils