/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/properties.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "GeneratedTestUtils.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "TmpDirectoryUtils.h"
#include "fuzzing/OperationManager.h"
#include "fuzzing/RandomGraphGenerator.h"
#include "fuzzing/RandomGraphGeneratorUtils.h"

#ifndef NNTEST_CTS
#include <HalInterfaces.h>
#include <SampleDriverFull.h>

#include <vector>

#include "HalUtils.h"
#include "Manager.h"

#ifdef __ANDROID__
#include <memunreachable/memunreachable.h>
#endif  // __ANDROID__

using android::nn::sample_driver::SampleDriverFull;

#endif

namespace android {
namespace nn {
namespace fuzzing_test {

using namespace test_helper;
using test_wrapper::Result;
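// Name of the reference CPU device. computeGoldenResults() below runs each test model on this
// device to produce the golden outputs that the other devices' results are compared against.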
constexpr char kRefDeviceName[] = "nnapi-reference";

#ifndef NNTEST_CTS
class TestDriverV1_2 : public SampleDriverFull {
   public:
    TestDriverV1_2() : SampleDriverFull(name, {.execTime = 0.9f, .powerUsage = 0.9f}) {}
    static constexpr char name[] = "TestDriverV1_2";
};

// Like SampleDriverFull, but implementing 1.1
class TestDriverV1_1 : public V1_1::IDevice {
   public:
    TestDriverV1_1()
        : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {}
    static constexpr char name[] = "TestDriverV1_1";
    hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
        return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
    }
    hardware::Return<void> getSupportedOperations_1_1(
            const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
        return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
    }
    hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
            const V1_1::Model& model, V1_1::ExecutionPreference preference,
            const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
        return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
    }
    hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
    hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
        return mDriverV1_2->getCapabilities(_hidl_cb);
    }
    hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
                                                  getSupportedOperations_cb _hidl_cb) override {
        return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
    }
    hardware::Return<V1_0::ErrorStatus> prepareModel(
            const V1_0::Model& model,
            const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
        return mDriverV1_2->prepareModel(model, actualCallback);
    }

   private:
    const sp<V1_2::IDevice> mDriverV1_2;
};

// Like SampleDriverFull, but implementing 1.0
class TestDriverV1_0 : public V1_0::IDevice {
   public:
    TestDriverV1_0()
        : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {}
    static constexpr char name[] = "TestDriverV1_0";
    hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
        return mDriverV1_2->getCapabilities(_hidl_cb);
    }
    hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
                                                  getSupportedOperations_cb _hidl_cb) override {
        return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
    }
    hardware::Return<V1_0::ErrorStatus> prepareModel(
            const V1_0::Model& model,
            const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
        return mDriverV1_2->prepareModel(model, actualCallback);
    }
    hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }

   private:
    const sp<V1_2::IDevice> mDriverV1_2;
};

#endif

// NN API fuzzer logging settings come from the system properties debug.nn.fuzzer.log and
// debug.nn.fuzzer.dumpspec.
// * setprop debug.nn.fuzzer.log 1 : enable logging.
// * setprop debug.nn.fuzzer.log 0 : silence logging.
// * setprop debug.nn.fuzzer.dumpspec 1 : dump the randomly generated graph to a spec file.
// * setprop debug.nn.fuzzer.dumpspec 0 : do not dump the graph.
// * setprop debug.nn.fuzzer.detectleak 1 : check for memory leaks after each test (non-CTS,
//   Android builds only).
//
// Logs and spec files are dumped to {NN_TMP_DIR}/${testname}.{log,mod.py},
// e.g. for test case TestRandomGraph/RandomGraphTest/Large/0,
//      log : {NN_TMP_DIR}/TestRandomGraph_RandomGraphTest_Large_0.log
//      spec: {NN_TMP_DIR}/TestRandomGraph_RandomGraphTest_Large_0.mod.py
//
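// For example, to debug a single failing case, one might enable both properties from an adb shell
// and run only that test (the test binary invocation below is illustrative; the actual binary
// name and path depend on the build):
//     setprop debug.nn.fuzzer.log 1
//     setprop debug.nn.fuzzer.dumpspec 1
//     <test_binary> --gtest_filter='TestRandomGraph/SingleOperationTest.ADD_V1_0/0'
//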
class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
   public:
    static void SetUpTestCase() {
#ifndef NNTEST_CTS
        mEnableLog = ::android::base::GetProperty("debug.nn.fuzzer.log", "") == "1";
        mDumpSpec = ::android::base::GetProperty("debug.nn.fuzzer.dumpspec", "") == "1";
        mDetectMemoryLeak = ::android::base::GetProperty("debug.nn.fuzzer.detectleak", "") == "1";

        mStandardDevices = DeviceManager::get()->forTest_getDevices();
        mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice(
                makeSharedDevice(TestDriverV1_2::name, new TestDriverV1_2)));
        mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice(
                makeSharedDevice(TestDriverV1_1::name, new TestDriverV1_1)));
        mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice(
                makeSharedDevice(TestDriverV1_0::name, new TestDriverV1_0)));
#endif
        mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);

        // Get all the devices and device names.
        mStandardDevicesFeatureLevel = __ANDROID_API_FUTURE__;
        uint32_t numDevices = 0;
        ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
        for (uint32_t i = 0; i < numDevices; i++) {
            ANeuralNetworksDevice* device = nullptr;
            const char* name = nullptr;
            int64_t featureLevel;
            ASSERT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
                      ANEURALNETWORKS_NO_ERROR);
            mDevices.emplace(name, device);
            mStandardDevicesFeatureLevel = std::min(mStandardDevicesFeatureLevel, featureLevel);
        }
    }

   protected:
    virtual void SetUp() override {
        // Initialize logging.
        const ::testing::TestInfo* const testInfo =
                ::testing::UnitTest::GetInstance()->current_test_info();
        mTestName = mTestName + testInfo->test_case_name() + "_" + testInfo->name();
        std::replace(mTestName.begin(), mTestName.end(), '/', '_');
        if (mEnableLog) NN_FUZZER_LOG_INIT(NN_TMP_DIR "/" + mTestName + ".log");
    }

    virtual void TearDown() override {
        NN_FUZZER_LOG_CLOSE;
        // Dump test results on failure for debugging.
        if (::testing::Test::HasFailure() || mDumpSpec) {
            dumpTestResults();
        }
#if defined(__ANDROID__) && !defined(NNTEST_CTS)
        if (mDetectMemoryLeak) {
            ASSERT_TRUE(NoLeaks());
        }
#endif
    }

    bool shouldSkipTest(int64_t featureLevel) {
        static const std::set<std::string> kDisabledTests = {
                // In these tests, the RGG produces a nonsensical graph with an extremely large
                // output gain and a highly clamped output range.
                // TODO: Currently quantized buffer values are uniformly distributed within
                //       [0, 255]. We should investigate a better buffer value generation
                //       algorithm that represents real-world cases.
                "TestRandomGraph_SingleOperationTest_CONV_2D_V1_2_40",
                "TestRandomGraph_SingleOperationTest_DEPTHWISE_CONV_2D_V1_0_32",
        };
        if (kDisabledTests.find(mTestName) != kDisabledTests.end()) return true;
        for (const auto& op : mTestModel.main.operations) {
            // Skip if testing BATCH_TO_SPACE_ND with batch dimension == 1.
            if (op.type == TestOperationType::BATCH_TO_SPACE_ND &&
                mTestModel.main.operands[op.inputs[0]].dimensions[0] == 1 &&
                featureLevel <= __ANDROID_API_Q__) {
                return true;
            }
            // L2_NORMALIZATION on an axis of all zeros is undefined before R.
            if (op.type == TestOperationType::L2_NORMALIZATION &&
                featureLevel <= __ANDROID_API_Q__) {
                return true;
            }
            // Skip the following operations for 1.2 and earlier devices.
            if ((op.type == TestOperationType::ADD || op.type == TestOperationType::SUB ||
                 op.type == TestOperationType::MAXIMUM || op.type == TestOperationType::MINIMUM ||
                 op.type == TestOperationType::ROI_ALIGN) &&
                mTestModel.main.operands[op.inputs[0]].type ==
                        TestOperandType::TENSOR_QUANT8_ASYMM &&
                featureLevel <= __ANDROID_API_Q__) {
                return true;
            }
            // Skip the following operations when the VNDK version is earlier than R.
            if (mVndkVersion < __ANDROID_API_R__ &&
                op.type == TestOperationType::HEATMAP_MAX_KEYPOINT) {
                return true;
            }
        }
        return false;
    }

    // Compute the golden output results of the test model on nnapi-reference. If possible, the
    // golden results will be computed from an equivalent float32 model to avoid bias from the
    // quantized CPU implementation.
    void computeGoldenResults() {
        SCOPED_TRACE("computeGoldenResults");

        // Convert the test model to an equivalent float32 model if possible.
        auto fpModel = convertToFloat32Model(mTestModel);
        const TestModel& goldenModel = fpModel.has_value() ? fpModel.value() : mTestModel;

        // Create model.
        generated_tests::GeneratedModel model;
        generated_tests::createModel(goldenModel, &model);
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);

        // Create compilation for nnapi-reference.
        ASSERT_TRUE(mDevices.find(kRefDeviceName) != mDevices.end());
        const auto refDevice = mDevices[kRefDeviceName];
        auto [result, compilation] = test_wrapper::Compilation::createForDevice(&model, refDevice);
        ASSERT_EQ(result, Result::NO_ERROR);
        ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<TestBuffer> outputs;
        generated_tests::createRequest(goldenModel, &execution, &outputs);

        // Compute result.
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);

        if (fpModel.has_value()) {
            // Quantize the execution results as golden values.
            setExpectedOutputsFromFloat32Results(outputs, &mTestModel);
        } else {
            for (uint32_t i = 0; i < outputs.size(); i++) {
                auto outputIndex = mTestModel.main.outputIndexes[i];
                mTestModel.main.operands[outputIndex].data = outputs[i];
            }
        }
    }

    // Compile and execute the generated graph on a device selected by name.
    void computeAndVerifyResultsForDevice(const test_wrapper::Model* model, uint32_t numOps,
                                          const std::string& name) {
        SCOPED_TRACE("Device: " + name);
        std::cout << "[          ] - RUN:  " << name << "\n";
        ASSERT_TRUE(mDevices.find(name) != mDevices.end());
        const auto device = mDevices[name];

        // Check if the device fully supports the graph.
        constexpr int kMaxNumberOperations = 1000;
        ASSERT_TRUE(numOps <= kMaxNumberOperations);
        bool supported[kMaxNumberOperations] = {false};
        ASSERT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(model->getHandle(), &device,
                                                                        1, supported),
                  ANEURALNETWORKS_NO_ERROR);
        if (!std::all_of(supported, supported + numOps, [](bool v) { return v; })) {
            std::cout << "[          ]   SKIP: " << name << " does not support the graph.\n";
            return;
        }

        // Since this test was introduced in Android Q, we only check the accuracy of the output
        // results if the device has feature level >= Q (API level 29). Pre-Q devices are allowed
        // to produce less accurate results, but they must not hang or crash.
        int64_t featureLevel;
        ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
                  ANEURALNETWORKS_NO_ERROR);
        if (shouldSkipTest(featureLevel)) return;

        // Create compilation for device.
        auto [result, compilation] = test_wrapper::Compilation::createForDevice(model, device);
        ASSERT_EQ(result, Result::NO_ERROR);
        Result compileReturn = compilation.finish();
        // Even if the model is fully supported, the compilation may still fail, e.g. each
        // operation is supported, but the model is too big (too many operations and/or too-large
        // constants) for the device.
        if (compileReturn == Result::OP_FAILED) {
            std::cout << "[          ]   SKIP: " << name << " failed at compilation step.\n";
            return;
        }
        ASSERT_EQ(compileReturn, Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<TestBuffer> outputs;
        generated_tests::createRequest(mTestModel, &execution, &outputs);

        // Compute result.
        Result executeReturn = execution.compute();
        // Even if the model is fully supported and the compilation succeeds, the execution may
        // still fail, e.g. there may be operand shapes that are unknown until execution time and
        // that turn out to be too big at execution time.
        if (executeReturn == Result::OP_FAILED) {
            std::cout << "[          ]   SKIP: " << name << " failed at execution step.\n";
            return;
        }
        ASSERT_EQ(executeReturn, Result::NO_ERROR);

        if (featureLevel >= __ANDROID_API_Q__) {
            checkResults(mTestModel, outputs, mCriteria);
            mResults.emplace_back(name, std::move(outputs));
        }
    }

    // Compile and execute the generated graph normally (i.e., allow the runtime to
    // distribute it across devices).
    void computeAndVerifyResults(const std::string& name, const test_wrapper::Model* model,
                                 bool shouldCheckResults) {
        // Because we're not using the introspection/control API, the CpuDevice
        // is available as a fallback, and hence we assume that compilation and
        // execution will succeed.
        SCOPED_TRACE(name);
        std::cout << "[          ] - RUN:  " << name << "\n";

        // Create compilation.
        test_wrapper::Compilation compilation(model);
        ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<TestBuffer> outputs;
        generated_tests::createRequest(mTestModel, &execution, &outputs);

        // Compute and verify result.
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
        if (shouldCheckResults) {
            checkResults(mTestModel, outputs, mCriteria);
            mResults.emplace_back(name, std::move(outputs));
        }
    }

    // Main test entry point: generate a random graph from kSeed, compute golden results on
    // nnapi-reference, then compile and execute the graph on each available device and compare
    // against the golden results.
    void testRandomGraph(uint32_t numOperations, uint32_t dimensionRange) {
        // Generate a random graph.
        RandomGraph graph;
        ASSERT_TRUE(graph.generate(kSeed, numOperations, dimensionRange));

        // Create a model from the random graph.
        mTestModel = graph.createTestModel();

        generated_tests::GeneratedModel model;
        generated_tests::createModel(mTestModel, &model);
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);

        // Compute reference results.
        computeGoldenResults();

        // Compute on each available device.
        for (auto& pair : mDevices) {
            computeAndVerifyResultsForDevice(&model, numOperations, pair.first);
        }

        if (numOperations > 1) {
            if (!shouldSkipTest(mStandardDevicesFeatureLevel)) {
                // Compute normally (i.e., allow the runtime to distribute across devices).
                computeAndVerifyResults("Compute normally", &model,
                                        mStandardDevicesFeatureLevel >= __ANDROID_API_Q__);
            }

#ifndef NNTEST_CTS
            {
                // Stress the partitioner by allowing the runtime to distribute across
                // three synthetic devices.  The synthetic devices use the
                // CpuExecutor for execution, so we always check results, even
                // though some are of feature level < __ANDROID_API_Q__: In this
                // case, we don't take feature level as an indication of
                // reliability, as we do with real devices.
                DeviceManager::get()->forTest_setDevices(mSyntheticDevices);
                computeAndVerifyResults("Compute across synthetic devices", &model, true);
                DeviceManager::get()->forTest_setDevices(mStandardDevices);
            }
#endif
        }
    }

    void dumpTestResults() {
        std::ofstream os(NN_TMP_DIR "/" + mTestName + ".mod.py");
        ASSERT_TRUE(os.is_open());
        os << "# Generated from " << mTestName << ". Do not edit.\n\n";
        SpecDumper dumper(mTestModel, os);
        dumper.dumpTestModel();
        for (const auto& [name, results] : mResults) {
            dumper.dumpResults(name, results);
        }
    }

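    // Number of operations in a generated graph, and the range of randomly chosen operand
    // dimensions, used by the test bodies below.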
    enum GraphSize : uint32_t { SINGLE = 1, SMALL = 5, LARGE = 40 };
    enum DimensionRange : uint32_t { NARROW = 10, WIDE = 1000 };

    static bool mEnableLog;
    static bool mDumpSpec;
    static bool mDetectMemoryLeak;
    static std::map<std::string, ANeuralNetworksDevice*> mDevices;

    const uint32_t kSeed = GetParam();
    std::string mTestName;
    TestModel mTestModel;
    AccuracyCriteria mCriteria;

    // A vector of {name, output_results}.
    std::vector<std::pair<std::string, std::vector<TestBuffer>>> mResults;

    static int mVndkVersion;
    static int64_t mStandardDevicesFeatureLevel;  // minimum across all devices
#ifndef NNTEST_CTS
    static std::vector<std::shared_ptr<Device>> mStandardDevices;
    static std::vector<std::shared_ptr<Device>> mSyntheticDevices;
#endif
};

bool RandomGraphTest::mEnableLog = false;
bool RandomGraphTest::mDumpSpec = false;
bool RandomGraphTest::mDetectMemoryLeak = false;
std::map<std::string, ANeuralNetworksDevice*> RandomGraphTest::mDevices;

int RandomGraphTest::mVndkVersion = __ANDROID_API_FUTURE__;
int64_t RandomGraphTest::mStandardDevicesFeatureLevel;
#ifndef NNTEST_CTS
std::vector<std::shared_ptr<Device>> RandomGraphTest::mStandardDevices;
std::vector<std::shared_ptr<Device>> RandomGraphTest::mSyntheticDevices;
#endif

// Single-op graph with dimensions in range [1, 1000].
class SingleOperationTest : public RandomGraphTest {};
#define TEST_SINGLE_OPERATION(operation, halVersion, criteria)               \
    TEST_P(SingleOperationTest, operation##_##halVersion) {                  \
        OperationFilter filter = {.opcodes = {TestOperationType::operation}, \
                                  .versions = {TestHalVersion::halVersion}}; \
        OperationManager::get()->applyFilter(filter);                        \
        mCriteria = (criteria);                                              \
        testRandomGraph(GraphSize::SINGLE, DimensionRange::WIDE);            \
    }
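// For illustration (not part of the test definitions), TEST_SINGLE_OPERATION(ADD, V1_0,
// kMediumCriteria) expands to a parameterized test roughly equivalent to:
//
//     TEST_P(SingleOperationTest, ADD_V1_0) {
//         OperationFilter filter = {.opcodes = {TestOperationType::ADD},
//                                   .versions = {TestHalVersion::V1_0}};
//         OperationManager::get()->applyFilter(filter);
//         mCriteria = kMediumCriteria;
//         testRandomGraph(GraphSize::SINGLE, DimensionRange::WIDE);
//     }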

// TODO: Adjust the accuracy criteria based on testing.
// We define three sets of accuracy criteria for single-operation tests.

// This is for operations that only copy buffers around without any computation on buffer values.
// Most of these operations fall into the categories of reshape or selection, e.g. RESHAPE, GATHER.
// Additionally, operations with only logical or comparison arithmetic also use these criteria,
// e.g. EQUAL, ARGMAX, TOPK_V2.
const AccuracyCriteria kStrictCriteria = {
        .float32 = {.bias = 1e-7f, .mse = 1e-10f, .atol = 1e-6f, .rtol = 1e-6f},
        .float16 = {.bias = 1e-4f, .mse = 1e-8f, .atol = 1e-3f, .rtol = 1e-3f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant8AsymmSigned = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant8Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant16Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant16Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
};

// This is for operations that perform only a single, simple computation on buffer values, such as
// addition, multiplication, or requantization. Most of these operations fall into the categories
// of broadcast or elementwise, e.g. ADD, FLOOR.
const AccuracyCriteria kMediumCriteria = {
        .float32 = {.bias = 1e-6f, .mse = 1e-8f, .atol = 1e-5f, .rtol = 1e-5f},
        .float16 = {.bias = 1e-3f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant8AsymmSigned = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant8Symm = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant16Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant16Symm = {.bias = 1.2, .mse = 1.2, .atol = 2},
};

// This is for operations that involve sophisticated computations on buffer values, either a single
// but complex transformation, e.g. LOGISTIC, or multiple transformations with accumulated errors,
// e.g. L2_NORMALIZATION, REDUCE_*.
const AccuracyCriteria kRelaxedCriteria = {
        .float32 = {.bias = 3e-5f, .mse = 1e-6f, .atol = 1e-3f, .rtol = 1e-3f},
        .float16 = {.bias = 5e-3f, .mse = 1e-3f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8AsymmSigned = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
};

// This is for convolution operations with potentially large kernel size.
const AccuracyCriteria kConvCriteria = {
        .float32 = {.bias = 4e-4f, .mse = 1e-5f, .atol = 2e-2f, .rtol = 2e-2f},
        .float16 = {.bias = 5e-2f, .mse = 1e-2f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8AsymmSigned = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
};

/*-- NNAPI 1.0 Operations ---------------------------------------------------*/

// TODO: The following 1.0 operation signatures are currently not defined:
// - ANEURALNETWORKS_LSH_PROJECTION
// - ANEURALNETWORKS_LSTM
// - ANEURALNETWORKS_RNN
// - ANEURALNETWORKS_SVDF

TEST_SINGLE_OPERATION(ADD, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(MUL, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(FLOOR, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RELU, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(TANH, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_0, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_0, kConvCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(HASHTABLE_LOOKUP, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_0, kMediumCriteria);

/*-- NNAPI 1.1 Operations ---------------------------------------------------*/

TEST_SINGLE_OPERATION(SUB, V1_1, kMediumCriteria);
TEST_SINGLE_OPERATION(DIV, V1_1, kRelaxedCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_1, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_1, kStrictCriteria);

/*-- NNAPI 1.0 and 1.1 Operations with Extended Behavior in 1.2 -------------*/

TEST_SINGLE_OPERATION(ADD, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(MUL, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SUB, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(DIV, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(FLOOR, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RELU, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(TANH, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_2, kStrictCriteria);

/*-- NNAPI 1.2 Operations ---------------------------------------------------*/

// TODO: The following 1.2 operation signatures are currently not defined:
// - ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM
// - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM
// - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN
// - ANEURALNETWORKS_BOX_WITH_NMS_LIMIT
// - ANEURALNETWORKS_DETECTION_POSTPROCESSING
// - ANEURALNETWORKS_GENERATE_PROPOSALS
// - ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
// - ANEURALNETWORKS_RANDOM_MULTINOMIAL
// - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
// - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN

TEST_SINGLE_OPERATION(ABS, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(EXP, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOG, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(NEG, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RSQRT, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SIN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SQRT, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ARGMAX, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(ARGMIN, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_AND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_NOT, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_OR, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(NOT_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(MAXIMUM, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(MINIMUM, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(POW, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PRELU, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(REDUCE_ALL, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_ANY, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MIN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_PROD, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_SUM, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(INSTANCE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOG_SOFTMAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD_V2, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(QUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(CAST, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TILE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GATHER, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SELECT, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TOPK_V2, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SLICE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPLIT, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(ROI_ALIGN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_POOLING, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_2, kRelaxedCriteria);

/*-- NNAPI 1.0, 1.1, and 1.2 Operations with Extended Behavior in 1.3 -------------*/

TEST_SINGLE_OPERATION(ADD, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MUL, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(TANH, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(DIV, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SUB, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(ABS, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(ARGMAX, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(ARGMIN, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(CAST, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GATHER, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LESS, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS_EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(MAXIMUM, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(MINIMUM, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(NOT_EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(PAD_V2, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(PRELU, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(QUANTIZE, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(REDUCE_MAX, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MIN, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_ALIGN, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_POOLING, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SELECT, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SLICE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SPLIT, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(TILE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(TOPK_V2, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_3, kRelaxedCriteria);

/*-- NNAPI 1.3 Operations ---------------------------------------------------*/

// TODO: The following 1.3 operation signatures are currently not defined:
// - ANEURALNETWORKS_QUANTIZED_LSTM
// - ANEURALNETWORKS_IF
// - ANEURALNETWORKS_WHILE

TEST_SINGLE_OPERATION(ELU, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(HARD_SWISH, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(FILL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(RANK, V1_3, kStrictCriteria);

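// Accuracy criteria for the multi-operation random graph tests defined below. Errors accumulate
// across operations, so these criteria are looser than the single-operation criteria above.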
const AccuracyCriteria kSmallGraphCriteria = {
        .float32 = {.bias = 4e-4f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
        .float16 = {.bias = 5e-2f, .mse = 1e-2f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant8AsymmSigned = {.bias = 2, .mse = 2, .atol = 12},
        .quant8Symm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Symm = {.bias = 2, .mse = 2, .atol = 12},
};

const AccuracyCriteria kLargeGraphCriteria = {
        .float32 = {.bias = 1e-2f, .mse = 1e-4f, .atol = 1e-1f, .rtol = 1e-1f},
        .float16 = {.bias = 1e-1f, .mse = 5e-2f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant8AsymmSigned = {.bias = 2, .mse = 2, .atol = 12},
        .quant8Symm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Symm = {.bias = 2, .mse = 2, .atol = 12},
};

// Due to limitations of the random graph generator, graphs generated with mixed-type or
// mixed-rank operations are likely to result in a disconnected network. Thus, we first filter the
// operation signatures by primary data type and rank, then generate random graph tests for each
// combination.
//
// Two parameterized tests are created for each filter:
// * 5-op graph with dimensions in range [1, 1000].
// * 40-op graph with dimensions in range [1, 10].
//
#define TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(dataType, rank)                             \
    TEST_P(RandomGraphTest, SmallGraph_##dataType##_Rank##rank) {                             \
        OperationFilter filter = {.dataTypes = {TestOperandType::dataType}, .ranks = {rank}}; \
        OperationManager::get()->applyFilter(filter);                                         \
        mCriteria = kSmallGraphCriteria;                                                      \
        testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE);                              \
    }                                                                                         \
    TEST_P(RandomGraphTest, LargeGraph_##dataType##_Rank##rank) {                             \
        OperationFilter filter = {.dataTypes = {TestOperandType::dataType}, .ranks = {rank}}; \
        OperationManager::get()->applyFilter(filter);                                         \
        mCriteria = kLargeGraphCriteria;                                                      \
        testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW);                            \
    }
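// For illustration (not part of the test definitions),
// TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 4) defines two parameterized tests;
// the first is roughly equivalent to:
//
//     TEST_P(RandomGraphTest, SmallGraph_TENSOR_FLOAT32_Rank4) {
//         OperationFilter filter = {.dataTypes = {TestOperandType::TENSOR_FLOAT32}, .ranks = {4}};
//         OperationManager::get()->applyFilter(filter);
//         mCriteria = kSmallGraphCriteria;
//         testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE);
//     }
//
// and the second, LargeGraph_TENSOR_FLOAT32_Rank4, uses kLargeGraphCriteria with
// testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW).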

// Random graph tests with TENSOR_QUANT8_ASYMM as the primary data type are currently not defined.
// A graph generated with TENSOR_QUANT8_ASYMM as the primary data type would likely be
// disconnected due to mismatches between quantization parameters.

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 1);

INSTANTIATE_TEST_SUITE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 50u));
INSTANTIATE_TEST_SUITE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 50u));
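// Each parameterized test above is instantiated with seeds 0 through 49 (::testing::Range is
// half-open), and the seed appears as the trailing index in the generated test name, e.g.
// TestRandomGraph/SingleOperationTest.ADD_V1_0/7 runs with kSeed == 7.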

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android