1 /*
2  * Copyright 2021 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *
16  */
17 
18 #define LOG_TAG "ModelAssetTest"
19 
20 #include <NeuralNetworks.h>
21 #include <android-base/logging.h>
22 #include <android-base/scopeguard.h>
23 #include <android-base/unique_fd.h>
24 #include <android/asset_manager_jni.h>
25 #include <android/sharedmem.h>
26 #include <fcntl.h>
27 #include <jni.h>
28 #include <sys/mman.h>
29 #include <unistd.h>
30 
31 #include <map>
32 #include <memory>
33 #include <string>
34 #include <vector>
35 
36 #include "Utils.h"
37 
38 namespace {
39 
40 using namespace android::nn::cts;
41 
// A map from operand data type (ANEURALNETWORKS_TENSOR_*) to element size in bytes.
// The keys only include the primary data types that will be used to construct a model
// in createAndCompileAddModel. The data types are selected in a way that a driver is likely to
// support at least one of the data types.
const std::map<int, uint32_t> kDataTypeToElementSizeMap = {
        {ANEURALNETWORKS_TENSOR_FLOAT16, 2},
        {ANEURALNETWORKS_TENSOR_FLOAT32, 4},
        {ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 1},
        {ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, 1},
};
52 
isQuantizedType(int dataType)53 bool isQuantizedType(int dataType) {
54     return dataType == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
55             dataType == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
56 }
57 
getNnapiDeviceNames(JNIEnv * env)58 std::vector<const char*> getNnapiDeviceNames(JNIEnv* env) {
59     // Get the number of available NNAPI devices
60     uint32_t numDevices = 0;
61     ASSERT_OR_RETURN_DEFAULT(ANeuralNetworks_getDeviceCount(&numDevices) ==
62                              ANEURALNETWORKS_NO_ERROR);
63 
64     std::vector<const char*> deviceNames(numDevices, nullptr);
65     for (uint32_t i = 0; i < numDevices; i++) {
66         // Get device
67         ANeuralNetworksDevice* currentDevice;
68         ASSERT_OR_RETURN_DEFAULT(ANeuralNetworks_getDevice(/*devIndex=*/i, &currentDevice) ==
69                                  ANEURALNETWORKS_NO_ERROR);
70 
71         // Get device name
72         const char* deviceName = nullptr;
73         ASSERT_OR_RETURN_DEFAULT(ANeuralNetworksDevice_getName(currentDevice, &deviceName) ==
74                                  ANEURALNETWORKS_NO_ERROR);
75         deviceNames[i] = deviceName;
76     }
77     return deviceNames;
78 }
79 
getDeviceFeatureLevel(JNIEnv * env,const ANeuralNetworksDevice * device)80 int64_t getDeviceFeatureLevel(JNIEnv* env, const ANeuralNetworksDevice* device) {
81     int64_t featureLevel;
82     ASSERT_OR_RETURN_ZERO(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel) ==
83                           ANEURALNETWORKS_NO_ERROR);
84     return featureLevel;
85 }
86 
findDevice(JNIEnv * env,const std::string & name)87 const ANeuralNetworksDevice* findDevice(JNIEnv* env, const std::string& name) {
88     // Get the number of available NNAPI devices
89     uint32_t numDevices = 0;
90     ASSERT_OR_RETURN_NULL(ANeuralNetworks_getDeviceCount(&numDevices) == ANEURALNETWORKS_NO_ERROR);
91 
92     for (uint32_t i = 0; i < numDevices; i++) {
93         // Get device
94         ANeuralNetworksDevice* currentDevice;
95         ASSERT_OR_RETURN_NULL(ANeuralNetworks_getDevice(/*devIndex=*/i, &currentDevice) ==
96                               ANEURALNETWORKS_NO_ERROR);
97 
98         // Get device name
99         const char* deviceName = nullptr;
100         ASSERT_OR_RETURN_NULL(ANeuralNetworksDevice_getName(currentDevice, &deviceName) ==
101                               ANEURALNETWORKS_NO_ERROR);
102 
103         // Return if name matches
104         if (name == deviceName) {
105             return currentDevice;
106         }
107     }
108     fail(env, "No device found with name %s", name.c_str());
109     return nullptr;
110 }
111 
112 // Create a NNAPI memory directly from the asset file
createMemoryFromAsset(JNIEnv * env,AAsset * asset)113 ANeuralNetworksMemory* createMemoryFromAsset(JNIEnv* env, AAsset* asset) {
114     // Open the asset file as a file descriptor
115     off_t offset, length;
116     android::base::unique_fd assetFd(AAsset_openFileDescriptor(asset, &offset, &length));
117     ASSERT_OR_RETURN_NULL(assetFd.ok());
118 
119     // Create NNAPI memory from the asset file
120     ANeuralNetworksMemory* memory;
121     ASSERT_OR_RETURN_NULL(ANeuralNetworksMemory_createFromFd(length, PROT_READ, assetFd.get(),
122                                                              offset,
123                                                              &memory) == ANEURALNETWORKS_NO_ERROR);
124     return memory;
125 }
126 
127 // Copy the content of the asset file to an ashmem, and create a NNAPI memory from the ashmem
createMemoryFromAshmem(JNIEnv * env,AAsset * asset)128 ANeuralNetworksMemory* createMemoryFromAshmem(JNIEnv* env, AAsset* asset) {
129     // Create an ashmem
130     off_t length = AAsset_getLength(asset);
131     android::base::unique_fd ashmemFd(ASharedMemory_create("model_data", length));
132     ASSERT_OR_RETURN_NULL(ashmemFd.ok());
133 
134     // Copy the content of the asset file to the ashmem
135     void* ashmemData =
136             mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, ashmemFd.get(), /*offset=*/0);
137     ASSERT_OR_RETURN_NULL(ashmemData != nullptr);
138     int bytesRead = AAsset_read(asset, ashmemData, length);
139     munmap(ashmemData, length);
140     ASSERT_OR_RETURN_NULL(bytesRead == length);
141 
142     // Create NNAPI memory from the ashmem
143     ANeuralNetworksMemory* memory;
144     ASSERT_OR_RETURN_NULL(ANeuralNetworksMemory_createFromFd(length, PROT_READ, ashmemFd.get(),
145                                                              /*offset=*/0,
146                                                              &memory) == ANEURALNETWORKS_NO_ERROR);
147     return memory;
148 }
149 
150 // Create and compile a model with a single ADD operation. We choose the ADD operation because
151 // it is commonly supported on most of the devices.
152 // The input dataType must be one of the keys in kDataTypeToElementSizeMap.
153 // The value of the constant tensor will be loaded from the given memory with offset = 0.
154 // This method returns whether the compilation is successful or not.
createAndCompileAddModel(JNIEnv * env,int dataType,const ANeuralNetworksMemory * memory,const ANeuralNetworksDevice * device)155 bool createAndCompileAddModel(JNIEnv* env, int dataType, const ANeuralNetworksMemory* memory,
156                               const ANeuralNetworksDevice* device) {
157     ASSERT_OR_RETURN_FALSE(kDataTypeToElementSizeMap.count(dataType) > 0);
158     const uint32_t tensorDims[] = {1, 256, 256, 4};
159     const uint32_t tensorSize = 1 * 256 * 256 * 4 * kDataTypeToElementSizeMap.at(dataType);
160     const uint32_t activation = 0;
161 
162     // Create model
163     ANeuralNetworksModel* model = nullptr;
164     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_create(&model) == ANEURALNETWORKS_NO_ERROR);
165     auto modelGuard =
166             android::base::make_scope_guard([model]() { ANeuralNetworksModel_free(model); });
167 
168     // Operand type for tensors
169     ANeuralNetworksOperandType tensorType;
170     tensorType.type = dataType;
171     tensorType.scale = isQuantizedType(dataType) ? 1.0f : 0.0f;
172     tensorType.zeroPoint = 0;
173     tensorType.dimensionCount = 4;
174     tensorType.dimensions = tensorDims;
175 
176     // Operand type for activation
177     ANeuralNetworksOperandType activationType;
178     activationType.type = ANEURALNETWORKS_INT32;
179     activationType.scale = 0.0f;
180     activationType.zeroPoint = 0;
181     activationType.dimensionCount = 0;
182     activationType.dimensions = nullptr;
183 
184     // Operands
185     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_addOperand(model, &tensorType) ==
186                            ANEURALNETWORKS_NO_ERROR);
187     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_addOperand(model, &tensorType) ==
188                            ANEURALNETWORKS_NO_ERROR);
189     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_addOperand(model, &activationType) ==
190                            ANEURALNETWORKS_NO_ERROR);
191     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_addOperand(model, &tensorType) ==
192                            ANEURALNETWORKS_NO_ERROR);
193 
194     // Constant values
195     ASSERT_OR_RETURN_FALSE(
196             ANeuralNetworksModel_setOperandValueFromMemory(model, /*index=*/1, memory, /*offset=*/0,
197                                                            tensorSize) == ANEURALNETWORKS_NO_ERROR);
198     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_setOperandValue(model, /*index=*/2, &activation,
199                                                                 sizeof(int32_t)) ==
200                            ANEURALNETWORKS_NO_ERROR);
201 
202     // ADD operation
203     uint32_t operation0InputIndexes[] = {0, 1, 2};
204     uint32_t operation0OutputIndexes[] = {3};
205     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3,
206                                                              operation0InputIndexes, 1,
207                                                              operation0OutputIndexes) ==
208                            ANEURALNETWORKS_NO_ERROR);
209 
210     // Model inputs and outputs
211     uint32_t modelInputIndexes[] = {0};
212     uint32_t modelOutputIndexes[] = {3};
213     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_identifyInputsAndOutputs(model, 1,
214                                                                          modelInputIndexes, 1,
215                                                                          modelOutputIndexes) ==
216                            ANEURALNETWORKS_NO_ERROR);
217 
218     // Finish the model
219     ASSERT_OR_RETURN_FALSE(
220             ANeuralNetworksModel_relaxComputationFloat32toFloat16(model, /*allow=*/true) ==
221             ANEURALNETWORKS_NO_ERROR);
222     ASSERT_OR_RETURN_FALSE(ANeuralNetworksModel_finish(model) == ANEURALNETWORKS_NO_ERROR);
223 
224     // Create compilation
225     ANeuralNetworksCompilation* compilation;
226     ASSERT_OR_RETURN_FALSE(
227             ANeuralNetworksCompilation_createForDevices(model, &device, /*numDevices=*/1,
228                                                         &compilation) == ANEURALNETWORKS_NO_ERROR);
229     auto compilationGuard = android::base::make_scope_guard(
230             [compilation]() { ANeuralNetworksCompilation_free(compilation); });
231 
232     // Compile
233     return ANeuralNetworksCompilation_finish(compilation) == ANEURALNETWORKS_NO_ERROR;
234 }
235 
236 } // namespace
237 
238 extern "C" JNIEXPORT jobjectArray JNICALL
Java_android_neuralnetworks_cts_ModelAssetTest_getNnapiDevices(JNIEnv * env,jobject)239 Java_android_neuralnetworks_cts_ModelAssetTest_getNnapiDevices(JNIEnv* env, jobject /* this */) {
240     // Get device names
241     auto deviceNames = getNnapiDeviceNames(env);
242     RETURN_IF_FAILED nullptr;
243 
244     // Convert to Java string array
245     jclass stringClass = env->FindClass("java/lang/String");
246     jobjectArray jDeviceNames = env->NewObjectArray(deviceNames.size(), stringClass, nullptr);
247     for (uint32_t i = 0; i < deviceNames.size(); i++) {
248         jstring jDeviceName = env->NewStringUTF(deviceNames[i]);
249         env->SetObjectArrayElement(jDeviceNames, i, jDeviceName);
250     }
251     return jDeviceNames;
252 }
253 
// JNI entry point: verifies that compiling a model whose constant data comes directly
// from an asset-file-backed NNAPI memory behaves the same as compiling with the same
// data copied into ashmem, for every data type in kDataTypeToElementSizeMap.
// Failures are reported through env by the ASSERT_OR_RETURN / RETURN_IF_FAILED macros.
extern "C" JNIEXPORT void JNICALL
Java_android_neuralnetworks_cts_ModelAssetTest_nativeTestCompileFromAssetFile(JNIEnv* env,
                                                                              jobject /* this */,
                                                                              jobject jAssetManager,
                                                                              jstring jDeviceName) {
    AAssetManager* assetManager = AAssetManager_fromJava(env, jAssetManager);
    // Copy the Java string into a std::string and release the UTF chars immediately.
    // NOTE(review): GetStringUTFChars may return nullptr on OOM — confirm whether a
    // null check is needed here or the test harness guarantees a valid name.
    const char* deviceNameCStr = env->GetStringUTFChars(jDeviceName, nullptr);
    std::string deviceName = deviceNameCStr;
    env->ReleaseStringUTFChars(jDeviceName, deviceNameCStr);

    // Find the NNAPI device
    const auto* device = findDevice(env, deviceName);
    RETURN_IF_FAILED;

    // Check device feature level. This test is added in NNAPI feature level 5, so skip if the
    // device is of a lower feature level.
    const int64_t featureLevel = getDeviceFeatureLevel(env, device);
    RETURN_IF_FAILED;
    if (featureLevel < ANEURALNETWORKS_FEATURE_LEVEL_5) return;

    // Open the asset file; the scope guard closes it on every exit path.
    AAsset* asset = AAssetManager_open(assetManager, "model_data.bin", AASSET_MODE_BUFFER);
    ASSERT_OR_RETURN(asset != nullptr);
    auto assetGuard = android::base::make_scope_guard([asset]() { AAsset_close(asset); });

    // Create NNAPI memory directly from asset file. The guard is installed before the
    // failure check so the memory is freed on all paths (presumably freeing nullptr is
    // a no-op for ANeuralNetworksMemory_free — verify against the NNAPI contract).
    auto* memoryFromAsset = createMemoryFromAsset(env, asset);
    auto memoryFromAssetGuard = android::base::make_scope_guard(
            [memoryFromAsset]() { ANeuralNetworksMemory_free(memoryFromAsset); });
    RETURN_IF_FAILED;

    // Create NNAPI memory from ashmem (a copy of the same asset content).
    auto* memoryFromAshmem = createMemoryFromAshmem(env, asset);
    auto memoryFromAshmemGuard = android::base::make_scope_guard(
            [memoryFromAshmem]() { ANeuralNetworksMemory_free(memoryFromAshmem); });
    RETURN_IF_FAILED;

    // Compile the model with both memories, we expect the compilation results are the same
    // (either both succeed or both fail) for every candidate data type.
    for (const auto& [dataType, _] : kDataTypeToElementSizeMap) {
        bool successWithAshmem = createAndCompileAddModel(env, dataType, memoryFromAshmem, device);
        RETURN_IF_FAILED;
        bool successWithAsset = createAndCompileAddModel(env, dataType, memoryFromAsset, device);
        RETURN_IF_FAILED;
        ASSERT_OR_RETURN(successWithAshmem == successWithAsset);
    }
}
300