/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/chrono_utils.h>
#include <android/binder_enums.h>
#include <android/binder_interface_utils.h>
#include <android/binder_status.h>
#include <nnapi/hal/aidl/Conversions.h>

#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "Utils.h"

namespace aidl::android::hardware::neuralnetworks::vts::functional {

using implementation::PreparedModelCallback;
using test_helper::TestBuffer;
using test_helper::TestModel;

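// Deadline bounds exercised by these tests: NOW is the current time (the deadline has effectively
// already arrived), UNLIMITED is the maximum representable time point (the deadline can never be
// missed), and SHORT is a small offset from the current time (the deadline may plausibly be
// missed). See makeDeadline below.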
enum class DeadlineBoundType { NOW, UNLIMITED, SHORT };
constexpr std::array<DeadlineBoundType, 3> deadlineBounds = {
        DeadlineBoundType::NOW, DeadlineBoundType::UNLIMITED, DeadlineBoundType::SHORT};
std::string toString(DeadlineBoundType type) {
    switch (type) {
        case DeadlineBoundType::NOW:
            return "NOW";
        case DeadlineBoundType::UNLIMITED:
            return "UNLIMITED";
        case DeadlineBoundType::SHORT:
            return "SHORT";
    }
    LOG(FATAL) << "Unrecognized DeadlineBoundType: " << static_cast<int>(type);
    return {};
}

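// A small offset used for the SHORT deadline bound; a driver that honors deadlines may abort such
// a call with MISSED_DEADLINE_* (see the status checks below).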
constexpr auto kShortDuration = std::chrono::milliseconds{5};

using Results = std::tuple<ErrorStatus, std::vector<OutputShape>, Timing>;
using MaybeResults = std::optional<Results>;

using ExecutionFunction =
        std::function<MaybeResults(const std::shared_ptr<IPreparedModel>& preparedModel,
                                   const Request& request, int64_t deadlineNs)>;

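// Converts a DeadlineBoundType into an int64_t deadline expressed as nanoseconds on the boot_clock
// timeline, as passed to prepareModel and the execution methods below.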
static int64_t makeDeadline(DeadlineBoundType deadlineBoundType) {
    const auto getNanosecondsSinceEpoch = [](const auto& time) -> int64_t {
        const auto timeSinceEpoch = time.time_since_epoch();
        return std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
    };

    ::android::base::boot_clock::time_point timePoint;
    switch (deadlineBoundType) {
        case DeadlineBoundType::NOW:
            timePoint = ::android::base::boot_clock::now();
            break;
        case DeadlineBoundType::UNLIMITED:
            timePoint = ::android::base::boot_clock::time_point::max();
            break;
        case DeadlineBoundType::SHORT:
            timePoint = ::android::base::boot_clock::now() + kShortDuration;
            break;
    }

    return getNanosecondsSinceEpoch(timePoint);
}

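// Prepares 'model' with the given priority and optional deadline bound, then checks that the
// returned status and prepared model are consistent with the requested deadline.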
void runPrepareModelTest(const std::shared_ptr<IDevice>& device, const Model& model,
                         Priority priority, std::optional<DeadlineBoundType> deadlineBound) {
    int64_t deadlineNs = kNoDeadline;
    if (deadlineBound.has_value()) {
        deadlineNs = makeDeadline(deadlineBound.value());
    }

    // see if service can handle model
    std::vector<bool> supportedOps;
    const auto supportedCallStatus = device->getSupportedOperations(model, &supportedOps);
    ASSERT_TRUE(supportedCallStatus.isOk());
    ASSERT_NE(0ul, supportedOps.size());
    const bool fullySupportsModel =
            std::all_of(supportedOps.begin(), supportedOps.end(), [](bool valid) { return valid; });

    // launch prepare model
    const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
            ndk::SharedRefBase::make<PreparedModelCallback>();
    const auto prepareLaunchStatus =
            device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, priority,
                                 deadlineNs, {}, {}, kEmptyCacheToken, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk())
            << "prepareLaunchStatus: " << prepareLaunchStatus.getDescription();

    // retrieve prepared model
    preparedModelCallback->wait();
    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    const std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are guaranteed not to fail
    // if prepareModel is called, and 'fullySupportsModel' is true iff the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation, it must return false.
    // So if a driver is unsure whether it can support an operation but nevertheless reports that it
    // successfully prepared the model, the test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        return;
    }

    // verify return status
    if (!deadlineBound.has_value()) {
        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    } else {
        switch (deadlineBound.value()) {
            case DeadlineBoundType::NOW:
            case DeadlineBoundType::SHORT:
                // Either the driver successfully completed the task or it
                // aborted and returned MISSED_DEADLINE_*.
                EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
                            prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                            prepareReturnStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
                break;
            case DeadlineBoundType::UNLIMITED:
                // If an unlimited deadline is supplied, we expect preparation to
                // proceed normally, so the status should be NONE.
                EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
                break;
        }
    }
    ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);
}

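// Exercises prepareModel across every non-default priority (with no deadline) and across every
// deadline bound (at the default priority).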
void runPrepareModelTests(const std::shared_ptr<IDevice>& device, const Model& model) {
    // test priority
    for (auto priority : ndk::enum_range<Priority>{}) {
        SCOPED_TRACE("priority: " + toString(priority));
        if (priority == kDefaultPriority) continue;
        runPrepareModelTest(device, model, priority, {});
    }

    // test deadline
    for (auto deadlineBound : deadlineBounds) {
        SCOPED_TRACE("deadlineBound: " + toString(deadlineBound));
        runPrepareModelTest(device, model, kDefaultPriority, deadlineBound);
    }
}

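// Runs a single synchronous execution with the given deadline and repackages the result as
// (status, output shapes, timing), or returns std::nullopt on an unexpected binder error.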
static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>& preparedModel,
                                         const Request& request, int64_t deadlineNs) {
    SCOPED_TRACE("synchronous");
    const bool measure = false;

    // run execution
    ExecutionResult executionResult;
    const auto ret = preparedModel->executeSynchronously(request, measure, deadlineNs,
                                                         kOmittedTimeoutDuration, &executionResult);
    EXPECT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
            << ret.getDescription();
    if (!ret.isOk()) {
        if (ret.getExceptionCode() != EX_SERVICE_SPECIFIC) {
            return std::nullopt;
        }
        return MaybeResults(
                {static_cast<ErrorStatus>(ret.getServiceSpecificError()), {}, kNoTiming});
    }

    // return results
    return MaybeResults({executionResult.outputSufficientSize
                                 ? ErrorStatus::NONE
                                 : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
                         std::move(executionResult.outputShapes), executionResult.timing});
}

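// Same as executeSynchronously above, but routes the execution through an execution burst created
// from the prepared model.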
static MaybeResults executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
                                 const Request& request, int64_t deadlineNs) {
    SCOPED_TRACE("burst");
    const bool measure = false;

    // create burst
    std::shared_ptr<IBurst> burst;
    auto ret = preparedModel->configureExecutionBurst(&burst);
    EXPECT_TRUE(ret.isOk()) << ret.getDescription();
    EXPECT_NE(nullptr, burst.get());
    if (!ret.isOk() || burst.get() == nullptr) {
        return std::nullopt;
    }

    // use -1 for all memory identifier tokens
    const std::vector<int64_t> slots(request.pools.size(), -1);

    // run execution
    ExecutionResult executionResult;
    ret = burst->executeSynchronously(request, slots, measure, deadlineNs, kOmittedTimeoutDuration,
                                      &executionResult);
    EXPECT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
            << ret.getDescription();
    if (!ret.isOk()) {
        if (ret.getExceptionCode() != EX_SERVICE_SPECIFIC) {
            return std::nullopt;
        }
        return MaybeResults(
                {static_cast<ErrorStatus>(ret.getServiceSpecificError()), {}, kNoTiming});
    }

    // return results
    return MaybeResults({executionResult.outputSufficientSize
                                 ? ErrorStatus::NONE
                                 : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
                         std::move(executionResult.outputShapes), executionResult.timing});
}

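// Executes 'request' (synchronously or via a burst) with a deadline derived from 'deadlineBound',
// then validates the returned status, output shapes, and output data.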
void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
                      const TestModel& testModel, const Request& request,
                      const ExecutionContext& context, bool synchronous,
                      DeadlineBoundType deadlineBound) {
    const ExecutionFunction execute = synchronous ? executeSynchronously : executeBurst;
    const auto deadlineNs = makeDeadline(deadlineBound);

    // Perform execution and unpack results.
    const auto results = execute(preparedModel, request, deadlineNs);
    if (!results.has_value()) return;
    const auto& [status, outputShapes, timing] = results.value();

    // Verify no timing information was returned
    EXPECT_EQ(timing, kNoTiming);

    // Validate deadline information if applicable.
    switch (deadlineBound) {
        case DeadlineBoundType::NOW:
        case DeadlineBoundType::SHORT:
            // Either the driver successfully completed the task or it
            // aborted and returned MISSED_DEADLINE_*.
            ASSERT_TRUE(status == ErrorStatus::NONE ||
                        status == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                        status == ErrorStatus::MISSED_DEADLINE_PERSISTENT);
            break;
        case DeadlineBoundType::UNLIMITED:
            // If an unlimited deadline is supplied, we expect the execution to
            // proceed normally, so the status should be NONE.
            ASSERT_EQ(ErrorStatus::NONE, status);
            break;
    }

    // If the model output operands are fully specified, outputShapes must be either empty or have
    // the same number of elements as the number of outputs.
    ASSERT_TRUE(outputShapes.size() == 0 ||
                outputShapes.size() == testModel.main.outputIndexes.size());

    // Go through all outputs, check returned output shapes.
    for (uint32_t i = 0; i < outputShapes.size(); i++) {
        EXPECT_TRUE(outputShapes[i].isSufficient);
        const auto expect =
                utils::toSigned(testModel.main.operands[testModel.main.outputIndexes[i]].dimensions)
                        .value();
        const std::vector<int32_t>& actual = outputShapes[i].dimensions;
        EXPECT_EQ(expect, actual);
    }

    // Retrieve execution results.
    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);

    // We want "close-enough" results.
    if (status == ErrorStatus::NONE) {
        checkResults(testModel, outputs);
    }
}

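// Runs the execution test for both the burst and synchronous paths against every deadline bound.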
void runExecutionTests(const std::shared_ptr<IPreparedModel>& preparedModel,
                       const TestModel& testModel, const Request& request,
                       const ExecutionContext& context) {
    for (bool synchronous : {false, true}) {
        for (auto deadlineBound : deadlineBounds) {
            runExecutionTest(preparedModel, testModel, request, context, synchronous,
                             deadlineBound);
        }
    }
}

void runTests(const std::shared_ptr<IDevice>& device, const TestModel& testModel) {
    // setup
    const Model model = createModel(testModel);

    // run prepare model tests
    runPrepareModelTests(device, model);

    // prepare model
    std::shared_ptr<IPreparedModel> preparedModel;
    createPreparedModel(device, model, &preparedModel);
    if (preparedModel == nullptr) return;

    // run execution tests
    ExecutionContext context;
    const Request request = context.createRequest(testModel);
    runExecutionTests(preparedModel, testModel, request, context);
}

class DeadlineTest : public GeneratedTestBase {};

TEST_P(DeadlineTest, Test) {
    runTests(kDevice, kTestModel);
}

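// Only instantiate the deadline tests for models that are expected to succeed.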
INSTANTIATE_GENERATED_TEST(DeadlineTest,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

}  // namespace aidl::android::hardware::neuralnetworks::vts::functional