1 /*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "neuralnetworks_aidl_hal_test"
18
19 #include <aidl/android/hardware/graphics/common/PixelFormat.h>
20 #include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
21 #include <android-base/logging.h>
22 #include <android/binder_auto_utils.h>
23 #include <android/binder_interface_utils.h>
24 #include <android/binder_status.h>
25 #include <gtest/gtest.h>
26
27 #include <LegacyUtils.h>
28 #include <TestHarness.h>
29 #include <Utils.h>
30 #include <nnapi/SharedMemory.h>
31 #include <nnapi/hal/aidl/Conversions.h>
32 #include <nnapi/hal/aidl/HalInterfaces.h>
33 #include <nnapi/hal/aidl/Utils.h>
34
35 #include "Callbacks.h"
36 #include "GeneratedTestHarness.h"
37 #include "Utils.h"
38 #include "VtsHalNeuralnetworks.h"
39
40 namespace aidl::android::hardware::neuralnetworks::vts::functional {
41
42 using namespace test_helper;
43 using implementation::PreparedModelCallback;
44
45 namespace {
46
// An AIDL driver is likely to support at least one of the following operand types.
const std::vector<TestOperandType> kTestOperandTypeChoicesVector = {
        TestOperandType::TENSOR_FLOAT32,
        TestOperandType::TENSOR_FLOAT16,
        TestOperandType::TENSOR_QUANT8_ASYMM,
        TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
};
// gtest parameter generator that yields each entry of kTestOperandTypeChoicesVector.
const auto kTestOperandTypeChoices = testing::ValuesIn(kTestOperandTypeChoicesVector);
// TODO(b/179270601): restore kNamedDeviceChoices
56
isInChoices(TestOperandType type)57 bool isInChoices(TestOperandType type) {
58 return std::count(kTestOperandTypeChoicesVector.begin(), kTestOperandTypeChoicesVector.end(),
59 type) > 0;
60 }
61
isFloat(TestOperandType type)62 bool isFloat(TestOperandType type) {
63 CHECK(isInChoices(type));
64 return type == TestOperandType::TENSOR_FLOAT32 || type == TestOperandType::TENSOR_FLOAT16;
65 }
66
67 // Create placeholder buffers for model constants as well as inputs and outputs.
68 // We only care about the size here because we will not check accuracy in validation tests.
createDummyData(TestModel * testModel)69 void createDummyData(TestModel* testModel) {
70 for (auto& operand : testModel->main.operands) {
71 if (operand.data != nullptr) continue;
72 switch (operand.lifetime) {
73 case TestOperandLifeTime::SUBGRAPH_INPUT:
74 case TestOperandLifeTime::SUBGRAPH_OUTPUT:
75 case TestOperandLifeTime::CONSTANT_COPY:
76 case TestOperandLifeTime::CONSTANT_REFERENCE: {
77 const uint32_t size = nn::nonExtensionOperandSizeOfData(
78 static_cast<nn::OperandType>(operand.type), operand.dimensions);
79 operand.data = TestBuffer(size);
80 } break;
81 default:
82 break;
83 }
84 }
85 }
86
createInt32Scalar(int32_t value)87 TestOperand createInt32Scalar(int32_t value) {
88 return {
89 .type = TestOperandType::INT32,
90 .dimensions = {},
91 .numberOfConsumers = 1,
92 .scale = 0.0f,
93 .zeroPoint = 0,
94 .lifetime = TestOperandLifeTime::CONSTANT_COPY,
95 .data = TestBuffer::createFromVector<int32_t>({value}),
96 };
97 }
98
// Construct a test model with multiple CONV_2D operations with the given operand as inputs.
// The dimensions of the filters are chosen to ensure outputs has the same dimensions as inputs.
// We choose CONV_2D operation because it is commonly supported by most drivers.
TestModel createConvModel(const TestOperand& operand, uint32_t numOperations) {
    CHECK(isInChoices(operand.type));

    // 3x3 filter with identical input and output channel counts; together with the
    // SAME padding and stride-1 scalars below this keeps the output shape equal to
    // the input shape.
    TestOperand weight = {.type = operand.type,
                          .dimensions = {operand.dimensions[3], 3, 3, operand.dimensions[3]},
                          .numberOfConsumers = 1,
                          // Quantized weights get a non-zero scale; float weights use 0.
                          .scale = isFloat(operand.type) ? 0.0f : 1.0f,
                          .zeroPoint = 0,
                          .lifetime = TestOperandLifeTime::CONSTANT_COPY};

    // Bias is TENSOR_INT32 for quantized inputs, with scale = inputScale * weightScale.
    TestOperand bias = {
            .type = isFloat(operand.type) ? operand.type : TestOperandType::TENSOR_INT32,
            .dimensions = {operand.dimensions[3]},
            .numberOfConsumers = 1,
            .scale = operand.scale * weight.scale,
            .zeroPoint = 0,
            .lifetime = TestOperandLifeTime::CONSTANT_COPY};

    // The output operand mirrors the input but is a subgraph output with no consumers.
    TestOperand output = operand;
    output.numberOfConsumers = 0;
    output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;

    // Operand group for one CONV_2D: input first, output last.
    const std::vector<TestOperand> operands = {
            operand,
            std::move(weight),
            std::move(bias),
            createInt32Scalar(1),  // same padding
            createInt32Scalar(1),  // width stride
            createInt32Scalar(1),  // height stride
            createInt32Scalar(0),  // activation = NONE
            std::move(output),
    };

    // Each operation gets its own copy of the operand group; the operation's inputs
    // are all operands of the group except the last, and its output is the last.
    TestModel model;
    for (uint32_t i = 0; i < numOperations; i++) {
        model.main.operands.insert(model.main.operands.end(), operands.begin(), operands.end());
        const uint32_t inputIndex = operands.size() * i;
        const uint32_t outputIndex = inputIndex + operands.size() - 1;
        std::vector<uint32_t> inputs(operands.size() - 1);
        std::iota(inputs.begin(), inputs.end(), inputIndex);
        model.main.operations.push_back({.type = TestOperationType::CONV_2D,
                                         .inputs = std::move(inputs),
                                         .outputs = {outputIndex}});
        model.main.inputIndexes.push_back(inputIndex);
        model.main.outputIndexes.push_back(outputIndex);
    }
    // Fill every constant/input/output with a placeholder buffer of the right size.
    createDummyData(&model);
    return model;
}
151
// Construct a test model with a single ADD operation with the given operand as input0 and input1.
// This is to cover additional cases that the CONV_2D model does not support, e.g. arbitrary input
// operand rank, scalar input operand. We choose ADD operation because it is commonly supported by
// most drivers.
TestModel createSingleAddModel(const TestOperand& operand) {
    CHECK(isInChoices(operand.type));

    // Scalar activation operand (input 2 of ADD), supplied as a subgraph input so
    // tests can also target a scalar role.
    TestOperand act = {
            .type = TestOperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
    };

    // Output mirrors the input operand but has no consumers.
    TestOperand output = operand;
    output.numberOfConsumers = 0;
    output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;

    // Operand layout: 0 = input0, 1 = input1, 2 = activation, 3 = output.
    TestModel model = {
            .main =
                    {
                            .operands =
                                    {
                                            operand,
                                            operand,
                                            std::move(act),
                                            output,
                                    },
                            .operations = {{.type = TestOperationType::ADD,
                                            .inputs = {0, 1, 2},
                                            .outputs = {3}}},
                            .inputIndexes = {0, 1, 2},
                            .outputIndexes = {3},
                    },
    };
    // Fill inputs/outputs/constants with placeholder buffers of the right size.
    createDummyData(&model);
    return model;
}
192
193 // A placeholder invalid IPreparedModel class for MemoryDomainAllocateTest.InvalidPreparedModel
194 class InvalidPreparedModel final : public IPreparedModel {
195 public:
executeSynchronously(const Request &,bool,int64_t,int64_t,ExecutionResult *)196 ndk::ScopedAStatus executeSynchronously(const Request&, bool, int64_t, int64_t,
197 ExecutionResult*) override {
198 return ndk::ScopedAStatus::fromServiceSpecificError(
199 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
200 }
executeFenced(const Request &,const std::vector<ndk::ScopedFileDescriptor> &,bool,int64_t,int64_t,int64_t,FencedExecutionResult *)201 ndk::ScopedAStatus executeFenced(const Request&, const std::vector<ndk::ScopedFileDescriptor>&,
202 bool, int64_t, int64_t, int64_t,
203 FencedExecutionResult*) override {
204 return ndk::ScopedAStatus::fromServiceSpecificError(
205 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
206 }
executeSynchronouslyWithConfig(const Request &,const ExecutionConfig &,int64_t,ExecutionResult *)207 ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request&, const ExecutionConfig&,
208 int64_t, ExecutionResult*) override {
209 return ndk::ScopedAStatus::fromServiceSpecificError(
210 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
211 }
executeFencedWithConfig(const Request &,const std::vector<ndk::ScopedFileDescriptor> &,const ExecutionConfig &,int64_t,int64_t,FencedExecutionResult *)212 ndk::ScopedAStatus executeFencedWithConfig(const Request&,
213 const std::vector<ndk::ScopedFileDescriptor>&,
214 const ExecutionConfig&, int64_t, int64_t,
215 FencedExecutionResult*) override {
216 return ndk::ScopedAStatus::fromServiceSpecificError(
217 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
218 }
configureExecutionBurst(std::shared_ptr<IBurst> *)219 ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
220 return ndk::ScopedAStatus::fromServiceSpecificError(
221 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
222 }
createReusableExecution(const aidl_hal::Request &,const ExecutionConfig &,std::shared_ptr<aidl_hal::IExecution> *)223 ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, const ExecutionConfig&,
224 std::shared_ptr<aidl_hal::IExecution>*) override {
225 return ndk::ScopedAStatus::fromServiceSpecificError(
226 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
227 }
getInterfaceVersion(int32_t *)228 ndk::ScopedAStatus getInterfaceVersion(int32_t* /*interfaceVersion*/) {
229 return ndk::ScopedAStatus::fromServiceSpecificError(
230 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
231 }
getInterfaceHash(std::string *)232 ndk::ScopedAStatus getInterfaceHash(std::string* /*interfaceHash*/) {
233 return ndk::ScopedAStatus::fromServiceSpecificError(
234 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
235 }
asBinder()236 ndk::SpAIBinder asBinder() override { return ::ndk::SpAIBinder{}; }
isRemote()237 bool isRemote() override { return true; }
238 };
239
240 template <typename... Args>
createRequestMemoryPools(const Args &...pools)241 std::vector<RequestMemoryPool> createRequestMemoryPools(const Args&... pools) {
242 std::vector<RequestMemoryPool> memoryPools;
243 memoryPools.reserve(sizeof...(Args));
244 // This fold operator calls push_back on each of the function arguments.
245 (memoryPools.push_back(utils::clone(pools).value()), ...);
246 return memoryPools;
247 };
248
249 } // namespace
250
// Base fixture for the memory domain tests. Holds the device under test, the
// parameterized operand type, and the canonical test operand for that type.
class MemoryDomainTestBase : public testing::Test {
  protected:
    MemoryDomainTestBase(std::shared_ptr<IDevice> device, TestOperandType type)
        : kDevice(std::move(device)),
          kTestOperandType(type),
          kTestOperand(kTestOperandMap.at(type)),
          kTestOperandDataSize(nn::nonExtensionOperandSizeOfData(static_cast<nn::OperandType>(type),
                                                                 kTestOperand.dimensions)) {}

    void SetUp() override {
        testing::Test::SetUp();
        ASSERT_NE(kDevice, nullptr);
        // Ping the service binder to make sure the driver is still responsive
        // before each test.
        const bool deviceIsResponsive =
                ndk::ScopedAStatus::fromStatus(AIBinder_ping(kDevice->asBinder().get())).isOk();
        ASSERT_TRUE(deviceIsResponsive);
    }

    // Prepares a model with |numOperations| independent CONV_2D operations built on
    // |testOperand|. May return nullptr when the driver cannot prepare the model.
    std::shared_ptr<IPreparedModel> createConvPreparedModel(const TestOperand& testOperand,
                                                            uint32_t numOperations = 1) {
        const TestModel testModel = createConvModel(testOperand, numOperations);
        const Model model = createModel(testModel);
        std::shared_ptr<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
        return preparedModel;
    }

    // Prepares a single-ADD model built on |testOperand|. May return nullptr when
    // the driver cannot prepare the model.
    std::shared_ptr<IPreparedModel> createAddPreparedModel(const TestOperand& testOperand) {
        const TestModel testModel = createSingleAddModel(testOperand);
        const Model model = createModel(testModel);
        std::shared_ptr<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
        return preparedModel;
    }

    // Canonical test operand for each supported operand type (defined below).
    static const std::map<TestOperandType, TestOperand> kTestOperandMap;

    const std::shared_ptr<IDevice> kDevice;
    const TestOperandType kTestOperandType;
    // Reference into kTestOperandMap; valid for the lifetime of the process.
    const TestOperand& kTestOperand;
    // Size in bytes of kTestOperand's data.
    const uint32_t kTestOperandDataSize;
};
292
// Canonical test operand per supported type: a rank-4 SUBGRAPH_INPUT tensor of
// shape {1, 32, 32, 8}. The quantized types carry a non-zero scale of 0.5.
const std::map<TestOperandType, TestOperand> MemoryDomainTestBase::kTestOperandMap = {
        {TestOperandType::TENSOR_FLOAT32,
         {
                 .type = TestOperandType::TENSOR_FLOAT32,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_FLOAT16,
         {
                 .type = TestOperandType::TENSOR_FLOAT16,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
};
331
using MemoryDomainAllocateTestParam = std::tuple<NamedDevice, TestOperandType>;
// Fixture for IDevice::allocate validation: each test issues an intentionally
// malformed allocation request and expects the driver to reject it.
class MemoryDomainAllocateTest : public MemoryDomainTestBase,
                                 public testing::WithParamInterface<MemoryDomainAllocateTestParam> {
  protected:
    MemoryDomainAllocateTest()
        : MemoryDomainTestBase(getData(std::get<NamedDevice>(GetParam())),
                               std::get<TestOperandType>(GetParam())) {}

    // Arguments for a single IDevice::allocate call.
    struct AllocateTestArgs {
        std::vector<int32_t> dimensions;
        std::vector<std::shared_ptr<IPreparedModel>> preparedModels;
        std::vector<BufferRole> inputRoles;
        std::vector<BufferRole> outputRoles;
    };

    // Validation test for IDevice::allocate. The driver is expected to fail with INVALID_ARGUMENT,
    // or GENERAL_FAILURE if memory domain is not supported.
    void validateAllocate(AllocateTestArgs args) {
        // Wrap each prepared model in the parcelable the AIDL call expects.
        std::vector<IPreparedModelParcel> preparedModelParcels;
        preparedModelParcels.reserve(args.preparedModels.size());
        for (const auto& model : args.preparedModels) {
            preparedModelParcels.push_back({.preparedModel = model});
        }
        DeviceBuffer buffer;
        const auto ret =
                kDevice->allocate({.dimensions = std::move(args.dimensions)}, preparedModelParcels,
                                  args.inputRoles, args.outputRoles, &buffer);

        ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_TRUE(static_cast<ErrorStatus>(ret.getServiceSpecificError()) ==
                            ErrorStatus::INVALID_ARGUMENT ||
                    static_cast<ErrorStatus>(ret.getServiceSpecificError()) ==
                            ErrorStatus::GENERAL_FAILURE);
    }

    // Exercises allocations where the two models' target operands disagree, for all
    // role combinations: {input, input}, {input, output}, and {output, output}.
    void testConflictOperands(const std::shared_ptr<IPreparedModel>& model1,
                              const std::shared_ptr<IPreparedModel>& model2) {
        validateAllocate({
                .preparedModels = {model1, model2},
                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
                               {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
        });
        validateAllocate({
                .preparedModels = {model1, model2},
                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
                .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
        });
        validateAllocate({
                .preparedModels = {model1, model2},
                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
                                {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
        });
    }
};
386
TEST_P(MemoryDomainAllocateTest, EmptyRole) {
    // An allocation with no prepared models and no roles must be rejected.
    validateAllocate({});

    // Likewise when a prepared model is supplied but no role references it.
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;
    validateAllocate({.preparedModels = {preparedModel}});
}
399
TEST_P(MemoryDomainAllocateTest, NullptrPreparedModel) {
    // A null prepared model must be rejected whether it backs an input role or an
    // output role.
    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    validateAllocate({.preparedModels = {nullptr}, .inputRoles = {role}});
    validateAllocate({.preparedModels = {nullptr}, .outputRoles = {role}});
}
413
TEST_P(MemoryDomainAllocateTest, InvalidPreparedModel) {
    // A prepared model that fails every call must be rejected whether it backs an
    // input role or an output role.
    auto invalidPreparedModel = ndk::SharedRefBase::make<InvalidPreparedModel>();
    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    validateAllocate({.preparedModels = {invalidPreparedModel}, .inputRoles = {role}});
    validateAllocate({.preparedModels = {invalidPreparedModel}, .outputRoles = {role}});
}
430
TEST_P(MemoryDomainAllocateTest, InvalidModelIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // modelIndex 1 is out of bounds: only one prepared model (index 0) is supplied.
    const BufferRole badRole = {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f};
    validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {badRole}});
    validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {badRole}});
}
447
TEST_P(MemoryDomainAllocateTest, InvalidIOIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // ioIndex 1 is out of bounds: the conv model has exactly one input and one output.
    const BufferRole badRole = {.modelIndex = 0, .ioIndex = 1, .probability = 1.0f};
    validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {badRole}});
    validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {badRole}});
}
464
TEST_P(MemoryDomainAllocateTest, InvalidProbability) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Each of these values falls outside the valid (0.0, 1.0] probability range.
    for (float invalidFreq : {10.0f, 0.0f, -0.5f}) {
        const BufferRole badRole = {.modelIndex = 0, .ioIndex = 0, .probability = invalidFreq};
        validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {badRole}});
        validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {badRole}});
    }
}
482
TEST_P(MemoryDomainAllocateTest, SameRoleSpecifiedTwice) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    const BufferRole role0 = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    const BufferRole role1 = {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f};

    // The same role duplicated through the same model index.
    validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {role0, role0}});
    validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {role0, role0}});

    // Different model indexes that refer to the same prepared model, i.e. logically
    // the same role.
    validateAllocate({.preparedModels = {preparedModel, preparedModel},
                      .inputRoles = {role0, role1}});
    validateAllocate({.preparedModels = {preparedModel, preparedModel},
                      .outputRoles = {role0, role1}});
}
511
TEST_P(MemoryDomainAllocateTest, ConflictOperandType) {
    // Pair each supported operand type with a different, incompatible type.
    const std::map<TestOperandType, TestOperandType> conflictTypeMap = {
            {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
            {TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_FLOAT32},
            {TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
            {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::TENSOR_QUANT8_ASYMM},
    };
    const auto it = conflictTypeMap.find(kTestOperandType);
    ASSERT_FALSE(it == conflictTypeMap.end());

    // The conflicting operand is identical except for its type.
    TestOperand mismatchedOperand = kTestOperand;
    mismatchedOperand.type = it->second;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(mismatchedOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
530
TEST_P(MemoryDomainAllocateTest, ConflictScale) {
    // Scale conflicts only apply to quantized types.
    if (isFloat(kTestOperandType)) return;

    // Guard that 1.0 really differs from the canonical operand's scale, then use it.
    TestOperand mismatchedOperand = kTestOperand;
    ASSERT_NE(mismatchedOperand.scale, 1.0f);
    mismatchedOperand.scale = 1.0f;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(mismatchedOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
543
TEST_P(MemoryDomainAllocateTest, ConflictZeroPoint) {
    // Zero point conflicts only apply to quantized types.
    if (isFloat(kTestOperandType)) return;

    // Guard that 10 really differs from the canonical operand's zero point, then use it.
    TestOperand mismatchedOperand = kTestOperand;
    ASSERT_NE(mismatchedOperand.zeroPoint, 10);
    mismatchedOperand.zeroPoint = 10;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(mismatchedOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
556
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoles) {
    // Drop one dimension so the two ADD models disagree on operand rank.
    TestOperand lowerRankOperand = kTestOperand;
    lowerRankOperand.dimensions.pop_back();

    auto preparedModel = createAddPreparedModel(kTestOperand);
    auto conflictPreparedModel = createAddPreparedModel(lowerRankOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
566
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoles) {
    // Change the batch dimension so the two conv models disagree on shape.
    TestOperand mismatchedOperand = kTestOperand;
    mismatchedOperand.dimensions[0] = 4;

    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto conflictPreparedModel = createConvPreparedModel(mismatchedOperand);
    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
    testConflictOperands(preparedModel, conflictPreparedModel);
}
576
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoleAndDesc) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // The descriptor rank (one dimension removed) disagrees with the role's operand rank.
    auto badDimensions = utils::toSigned(kTestOperand.dimensions).value();
    badDimensions.pop_back();

    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {preparedModel},
                      .inputRoles = {role}});
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {preparedModel},
                      .outputRoles = {role}});
}
595
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoleAndDesc) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // The descriptor's batch dimension disagrees with the role's operand shape.
    auto badDimensions = utils::toSigned(kTestOperand.dimensions).value();
    badDimensions[0] = 4;

    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {preparedModel},
                      .inputRoles = {role}});
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {preparedModel},
                      .outputRoles = {role}});
}
614
TEST_P(MemoryDomainAllocateTest, ConflictRankWithScalarRole) {
    auto preparedModel = createAddPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Input 2 of the ADD model is the scalar activation operand; requesting a
    // non-empty dimension for it must fail.
    const BufferRole scalarRole = {.modelIndex = 0, .ioIndex = 2, .probability = 1.0f};
    validateAllocate({.dimensions = {1},
                      .preparedModels = {preparedModel},
                      .inputRoles = {scalarRole}});
}
627
printMemoryDomainAllocateTest(const testing::TestParamInfo<MemoryDomainAllocateTestParam> & info)628 std::string printMemoryDomainAllocateTest(
629 const testing::TestParamInfo<MemoryDomainAllocateTestParam>& info) {
630 const auto& [namedDevice, operandType] = info.param;
631 const std::string type = toString(static_cast<OperandType>(operandType));
632 return gtestCompliantName(getName(namedDevice) + "_" + type);
633 }
634
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainAllocateTest);
// Run every allocate validation test against each named device crossed with each
// supported operand type.
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainAllocateTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kTestOperandTypeChoices),
                         printMemoryDomainAllocateTest);
640
// Base fixture for the copy tests: helpers to allocate device memory, allocate
// shared memory, and exercise IBuffer::copyFrom / IBuffer::copyTo.
class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
  protected:
    MemoryDomainCopyTestBase(std::shared_ptr<IDevice> device, TestOperandType type)
        : MemoryDomainTestBase(std::move(device), type) {}

    // Allocates device memory for roles of a single prepared model.
    // Returns {IBuffer, token} if success; returns {nullptr, 0} if not supported.
    DeviceBuffer allocateBuffer(const std::shared_ptr<IPreparedModel>& preparedModel,
                                const std::vector<int32_t>& inputIndexes,
                                const std::vector<int32_t>& outputIndexes,
                                const std::vector<int32_t>& dimensions) {
        if (preparedModel == nullptr) {
            return {.buffer = nullptr, .token = 0};
        }

        // Build one role per requested input/output index, all referencing model 0
        // with probability 1.0.
        std::vector<BufferRole> inputRoles(inputIndexes.size()), outputRoles(outputIndexes.size());
        auto trans = [](int32_t ind) -> BufferRole {
            return {.modelIndex = 0, .ioIndex = ind, .probability = 1.0f};
        };
        std::transform(inputIndexes.begin(), inputIndexes.end(), inputRoles.begin(), trans);
        std::transform(outputIndexes.begin(), outputIndexes.end(), outputRoles.begin(), trans);

        IPreparedModelParcel parcel;
        parcel.preparedModel = preparedModel;

        DeviceBuffer buffer;

        const auto ret = kDevice->allocate({.dimensions = dimensions}, {parcel}, inputRoles,
                                           outputRoles, &buffer);

        // A driver without memory domain support may report GENERAL_FAILURE; treat
        // that as "not supported" rather than a test failure.
        if (!ret.isOk()) {
            EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            EXPECT_EQ(static_cast<ErrorStatus>(ret.getServiceSpecificError()),
                      ErrorStatus::GENERAL_FAILURE);
            return DeviceBuffer{
                    .buffer = nullptr,
                    .token = 0,
            };
        }

        // On success the driver must return a non-null buffer and a positive token.
        EXPECT_NE(buffer.buffer, nullptr);
        EXPECT_GT(buffer.token, 0);

        return buffer;
    }

    // Convenience overload: allocate with an unspecified (empty) dimension list.
    DeviceBuffer allocateBuffer(const std::shared_ptr<IPreparedModel>& preparedModel,
                                const std::vector<int32_t>& inputIndexes,
                                const std::vector<int32_t>& outputIndexes) {
        return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
    }

    // Returns the usable size in bytes of |memory|. For hardware buffers, only the
    // BLOB format has a byte size (its width); other formats report 0.
    size_t getSize(const Memory& memory) {
        switch (memory.getTag()) {
            case Memory::Tag::ashmem:
                return memory.get<Memory::Tag::ashmem>().size;
            case Memory::Tag::mappableFile:
                return memory.get<Memory::Tag::mappableFile>().length;
            case Memory::Tag::hardwareBuffer: {
                const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
                const bool isBlob =
                        hardwareBuffer.description.format == graphics::common::PixelFormat::BLOB;
                return isBlob ? hardwareBuffer.description.width : 0;
            }
        }
        return 0;
    }

    // Allocates |size| bytes of shared memory and verifies the reported size.
    Memory allocateSharedMemory(uint32_t size) {
        const auto sharedMemory = nn::createSharedMemory(size).value();
        auto memory = utils::convert(sharedMemory).value();
        EXPECT_EQ(getSize(memory), size);
        return memory;
    }

    // Calls IBuffer::copyFrom with |memory| and |dimensions| and verifies that it
    // returns |expectedStatus| (ErrorStatus::NONE means the call must succeed).
    void testCopyFrom(const std::shared_ptr<IBuffer>& buffer, const Memory& memory,
                      const std::vector<int32_t>& dimensions, ErrorStatus expectedStatus) {
        const auto ret = buffer->copyFrom(memory, dimensions);
        if (expectedStatus == ErrorStatus::NONE) {
            ASSERT_TRUE(ret.isOk());
        } else {
            ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(expectedStatus, static_cast<ErrorStatus>(ret.getServiceSpecificError()));
        }
    }

    // Calls IBuffer::copyTo with |memory| and verifies that it returns
    // |expectedStatus| (ErrorStatus::NONE means the call must succeed).
    void testCopyTo(const std::shared_ptr<IBuffer>& buffer, const Memory& memory,
                    ErrorStatus expectedStatus) {
        const auto ret = buffer->copyTo(memory);
        if (expectedStatus == ErrorStatus::NONE) {
            ASSERT_TRUE(ret.isOk());
        } else {
            ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(expectedStatus, static_cast<ErrorStatus>(ret.getServiceSpecificError()));
        }
    }

    // Seeds |buffer| with placeholder data via a successful copyFrom so that later
    // copyTo calls operate on an initialized device buffer.
    void initializeDeviceMemory(const std::shared_ptr<IBuffer>& buffer) {
        Memory memory = allocateSharedMemory(kTestOperandDataSize);
        ASSERT_EQ(getSize(memory), kTestOperandDataSize);
        testCopyFrom(buffer, memory, utils::toSigned(kTestOperand.dimensions).value(),
                     ErrorStatus::NONE);
    }
};
745
using MemoryDomainCopyTestParam = std::tuple<NamedDevice, TestOperandType>;
// Parameterized fixture for the IBuffer copyFrom/copyTo validation tests.
class MemoryDomainCopyTest : public MemoryDomainCopyTestBase,
                             public testing::WithParamInterface<MemoryDomainCopyTestParam> {
  protected:
    MemoryDomainCopyTest()
        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
                                   std::get<TestOperandType>(GetParam())) {}
};
754
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    // copyFrom must reject source memory that is either too small or too large for
    // the device buffer.
    for (uint32_t badMemorySize : {kTestOperandDataSize / 2, kTestOperandDataSize * 2}) {
        Memory badMemory = allocateSharedMemory(badMemorySize);
        testCopyFrom(buffer, badMemory, {}, ErrorStatus::INVALID_ARGUMENT);
    }
}
766
// With a dynamically-shaped buffer, copyFrom must validate the memory size against the
// size implied by the caller-provided dimensions.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize_DynamicShape) {
    // Allocate a buffer whose first dimension is dynamic (0).
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory tooSmallMemory = allocateSharedMemory(kTestOperandDataSize / 2);
    Memory tooLargeMemory = allocateSharedMemory(kTestOperandDataSize * 2);
    Memory fittingMemory = allocateSharedMemory(kTestOperandDataSize);

    const auto fittingDimensions = utils::toSigned(kTestOperand.dimensions).value();
    auto mismatchedDimensions = fittingDimensions;
    mismatchedDimensions[0] = 2;

    testCopyFrom(buffer, tooSmallMemory, fittingDimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, tooLargeMemory, fittingDimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, fittingMemory, fittingDimensions, ErrorStatus::NONE);
    testCopyFrom(buffer, fittingMemory, mismatchedDimensions, ErrorStatus::INVALID_ARGUMENT);
}
788
// copyFrom must reject dimension overrides that are incompatible with a fully-specified
// buffer shape, while accepting empty or exactly-matching dimensions.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory memory = allocateSharedMemory(kTestOperandDataSize);
    const auto goodDimensions = utils::toSigned(kTestOperand.dimensions).value();

    // Wrong rank: one dimension dropped.
    {
        auto dims = goodDimensions;
        dims.pop_back();
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    // Mismatched extent on dimension 0.
    {
        auto dims = goodDimensions;
        dims[0] = 2;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    // A zero (dynamic) extent is not acceptable here.
    {
        auto dims = goodDimensions;
        dims[0] = 0;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    // Both the empty override and the exact dimensions are accepted.
    testCopyFrom(buffer, memory, {}, ErrorStatus::NONE);
    testCopyFrom(buffer, memory, goodDimensions, ErrorStatus::NONE);
}
812
// For a dynamically-shaped buffer, copyFrom requires fully-specified, compatible
// dimensions; everything else must be rejected.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions_DynamicShape) {
    // Allocate a buffer whose first dimension is dynamic (0).
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory memory = allocateSharedMemory(kTestOperandDataSize);
    const auto goodDimensions = utils::toSigned(kTestOperand.dimensions).value();

    // Wrong rank: one dimension dropped.
    {
        auto dims = goodDimensions;
        dims.pop_back();
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    // Incompatible extents on both the dynamic and a fixed dimension.
    {
        auto dims = goodDimensions;
        dims[0] = 2;
        dims[3] = 4;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    // A zero extent on a fixed dimension leaves the shape underspecified.
    {
        auto dims = goodDimensions;
        dims[0] = 1;
        dims[3] = 0;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    // Empty dimensions cannot resolve the dynamic extent, so they are rejected; the
    // fully-specified dimensions are accepted.
    testCopyFrom(buffer, memory, {}, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, memory, goodDimensions, ErrorStatus::NONE);
}
840
// Reading from a device buffer that was never written must fail.
TEST_P(MemoryDomainCopyTest, CopyTo_UninitializedMemory) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory destination = allocateSharedMemory(kTestOperandDataSize);
    testCopyTo(buffer, destination, ErrorStatus::GENERAL_FAILURE);
}
849
// copyTo must reject destination memories whose size does not match the buffer.
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory tooSmallMemory = allocateSharedMemory(kTestOperandDataSize / 2);
    Memory tooLargeMemory = allocateSharedMemory(kTestOperandDataSize * 2);
    Memory fittingMemory = allocateSharedMemory(kTestOperandDataSize);

    // Only a destination of exactly the right size is accepted.
    initializeDeviceMemory(buffer);
    testCopyTo(buffer, tooSmallMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, tooLargeMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, fittingMemory, ErrorStatus::NONE);
}
865
// Same size checks as CopyTo_InvalidMemorySize, but against a buffer that was allocated
// with a dynamic first dimension and then initialized to the full kTestOperand shape.
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize_DynamicShape) {
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory tooSmallMemory = allocateSharedMemory(kTestOperandDataSize / 2);
    Memory tooLargeMemory = allocateSharedMemory(kTestOperandDataSize * 2);
    Memory fittingMemory = allocateSharedMemory(kTestOperandDataSize);

    // Only a destination of exactly the initialized size is accepted.
    initializeDeviceMemory(buffer);
    testCopyTo(buffer, tooSmallMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, tooLargeMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, fittingMemory, ErrorStatus::NONE);
}
883
printMemoryDomainCopyTest(const testing::TestParamInfo<MemoryDomainCopyTestParam> & info)884 std::string printMemoryDomainCopyTest(
885 const testing::TestParamInfo<MemoryDomainCopyTestParam>& info) {
886 const auto& [namedDevice, operandType] = info.param;
887 const std::string type = toString(static_cast<OperandType>(operandType));
888 return gtestCompliantName(getName(namedDevice) + "_" + type);
889 }
890
// Instantiate the copy tests for every registered device combined with each candidate
// operand type; the suite may legitimately end up empty if no devices are registered.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainCopyTest);
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainCopyTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kTestOperandTypeChoices),
                         printMemoryDomainCopyTest);
896
897 using MemoryDomainExecutionTestParam = std::tuple<NamedDevice, TestOperandType, Executor>;
898 class MemoryDomainExecutionTest
899 : public MemoryDomainCopyTestBase,
900 public testing::WithParamInterface<MemoryDomainExecutionTestParam> {
901 protected:
MemoryDomainExecutionTest()902 MemoryDomainExecutionTest()
903 : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
904 std::get<TestOperandType>(GetParam())) {}
905
createSharedMemoryPool(uint32_t size)906 RequestMemoryPool createSharedMemoryPool(uint32_t size) {
907 return RequestMemoryPool(allocateSharedMemory(size));
908 }
909
createDeviceMemoryPool(uint32_t token)910 RequestMemoryPool createDeviceMemoryPool(uint32_t token) {
911 return RequestMemoryPool(static_cast<int32_t>(token));
912 }
913
testExecution(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request,ErrorStatus expectedStatus)914 void testExecution(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request,
915 ErrorStatus expectedStatus) {
916 switch (kExecutor) {
917 case Executor::SYNC:
918 EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
919 break;
920 case Executor::BURST:
921 EXPECT_EQ(executeBurst(preparedModel, request), expectedStatus);
922 break;
923 case Executor::FENCED:
924 EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
925 break;
926 default:
927 ASSERT_TRUE(false);
928 }
929 }
930
executeSync(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)931 ErrorStatus executeSync(const std::shared_ptr<IPreparedModel>& preparedModel,
932 const Request& request) {
933 ExecutionResult executionResult;
934 const auto ret = preparedModel->executeSynchronously(
935 request, false, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
936
937 if (!ret.isOk()) {
938 EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
939 return static_cast<ErrorStatus>(ret.getServiceSpecificError());
940 }
941 const ErrorStatus executionStatus = executionResult.outputSufficientSize
942 ? ErrorStatus::NONE
943 : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
944 EXPECT_EQ(executionResult.timing, kNoTiming);
945 return executionStatus;
946 }
947
executeFenced(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)948 ErrorStatus executeFenced(const std::shared_ptr<IPreparedModel>& preparedModel,
949 const Request& request) {
950 FencedExecutionResult executionResult;
951 const auto ret = preparedModel->executeFenced(request, {}, false, kNoDeadline,
952 kOmittedTimeoutDuration, kNoDuration,
953 &executionResult);
954 if (!ret.isOk()) {
955 EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
956 return static_cast<ErrorStatus>(ret.getServiceSpecificError());
957 }
958 if (executionResult.syncFence.get() != -1) {
959 waitForSyncFence(executionResult.syncFence.get());
960 }
961 EXPECT_NE(executionResult.callback, nullptr);
962
963 ErrorStatus executionStatus = ErrorStatus::GENERAL_FAILURE;
964 Timing time = kNoTiming;
965 Timing timeFenced = kNoTiming;
966 const auto retExecutionInfo =
967 executionResult.callback->getExecutionInfo(&time, &timeFenced, &executionStatus);
968 EXPECT_TRUE(retExecutionInfo.isOk());
969 EXPECT_EQ(time, kNoTiming);
970 return executionStatus;
971 }
972
executeBurst(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)973 ErrorStatus executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
974 const Request& request) {
975 // create burst
976 std::shared_ptr<IBurst> burst;
977 auto ret = preparedModel->configureExecutionBurst(&burst);
978 EXPECT_TRUE(ret.isOk()) << ret.getDescription();
979 EXPECT_NE(nullptr, burst.get());
980 if (!ret.isOk() || burst.get() == nullptr) {
981 return ErrorStatus::GENERAL_FAILURE;
982 }
983
984 // use -1 for all memory identifier tokens
985 const std::vector<int64_t> slots(request.pools.size(), -1);
986
987 ExecutionResult executionResult;
988 ret = burst->executeSynchronously(request, slots, false, kNoDeadline,
989 kOmittedTimeoutDuration, &executionResult);
990
991 if (!ret.isOk()) {
992 EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
993 return static_cast<ErrorStatus>(ret.getServiceSpecificError());
994 }
995 const ErrorStatus executionStatus = executionResult.outputSufficientSize
996 ? ErrorStatus::NONE
997 : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
998 EXPECT_EQ(executionResult.timing, kNoTiming);
999 return executionStatus;
1000 }
1001
1002 const Executor kExecutor = std::get<Executor>(GetParam());
1003 };
1004
// A device-memory pool referencing an invalid or unknown token must be rejected whether
// it is used as an input or as an output.
TEST_P(MemoryDomainExecutionTest, InvalidToken) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    RequestMemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool invalidTokenPool = createDeviceMemoryPool(0);    // Invalid token.
    RequestMemoryPool unknownTokenPool = createDeviceMemoryPool(100);  // Unknown token.
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, invalidTokenPool)},
                  ErrorStatus::INVALID_ARGUMENT);
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, unknownTokenPool)},
                  ErrorStatus::INVALID_ARGUMENT);
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = createRequestMemoryPools(sharedPool, invalidTokenPool)},
                  ErrorStatus::INVALID_ARGUMENT);
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = createRequestMemoryPools(sharedPool, unknownTokenPool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1037
// A device buffer may only be used with the prepared model it was allocated for.
TEST_P(MemoryDomainExecutionTest, InvalidPreparedModel) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;
    auto badPreparedModel = createConvPreparedModel(kTestOperand);
    if (badPreparedModel == nullptr) return;

    RequestMemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // The buffer was not allocated for badPreparedModel, so using it there must fail for
    // both the input and the output role.
    initializeDeviceMemory(buffer);
    testExecution(badPreparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);
    testExecution(badPreparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1064
// A device buffer may only be used at the I/O indices it was allocated for.
TEST_P(MemoryDomainExecutionTest, InvalidIOIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    // The buffer is allocated for input 0 only.
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {});
    if (buffer == nullptr) return;

    RequestMemoryPool sharedPool1 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool sharedPool2 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool sharedPool3 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg3 = {
            .location = {.poolIndex = 2, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 3}};

    // Using the buffer as input 1 must fail: it was not allocated for that role.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, deviceArg},
                   .outputs = {sharedArg2, sharedArg3},
                   .pools = createRequestMemoryPools(sharedPool1, sharedPool2, sharedPool3,
                                                     devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Using the buffer as output 1 must fail for the same reason.
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, sharedArg2},
                   .outputs = {sharedArg3, deviceArg},
                   .pools = createRequestMemoryPools(sharedPool1, sharedPool2, sharedPool3,
                                                     devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1099
// A device buffer allocated for one role (input/output) may not be used in the other.
TEST_P(MemoryDomainExecutionTest, InvalidIOType) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [inputBuffer, inputToken] = allocateBuffer(preparedModel, {0}, {});
    auto [outputBuffer, outputToken] = allocateBuffer(preparedModel, {}, {0});
    if (inputBuffer == nullptr || outputBuffer == nullptr) return;

    RequestMemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devicePool = createDeviceMemoryPool(inputToken);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // An input-role buffer used as an output must be rejected.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // An output-role buffer used as an input must be rejected.
    devicePool.set<RequestMemoryPool::Tag::token>(outputToken);
    initializeDeviceMemory(outputBuffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1128
// Executing with an uninitialized device memory as input must fail; writing to it as an
// output initializes it, after which reading succeeds.
TEST_P(MemoryDomainExecutionTest, UninitializedMemory) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    RequestMemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // Reading the device memory before anything has written it must fail.
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::GENERAL_FAILURE);

    // Writing to the device memory as an output initializes it.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::NONE);

    // Now that it is initialized, reading it as an input succeeds.
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::NONE);
}
1161
// The same device memory may back multiple inputs of one request, but not an
// input+output pair nor multiple outputs.
TEST_P(MemoryDomainExecutionTest, SameRequestMultipleRoles) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    auto [buffer, token] = allocateBuffer(preparedModel, {0, 1}, {0, 1});
    if (buffer == nullptr) return;

    RequestMemoryPool sharedPool1 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool sharedPool2 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 2}};

    // The same device memory may not serve as both an input and an output.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg, sharedArg1},
                   .outputs = {deviceArg, sharedArg2},
                   .pools = createRequestMemoryPools(sharedPool1, sharedPool2, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Nor may it back two outputs of the same request.
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, sharedArg2},
                   .outputs = {deviceArg, deviceArg},
                   .pools = createRequestMemoryPools(sharedPool1, sharedPool2, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Backing two inputs with the same device memory is allowed.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg, deviceArg},
                   .outputs = {sharedArg1, sharedArg2},
                   .pools = createRequestMemoryPools(sharedPool1, sharedPool2, devicePool)},
                  ErrorStatus::NONE);
}
1199
// Dimension overrides incompatible with the allocated device buffer must be rejected at
// execution time.
TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
    // FENCED execution does not support dynamic shape.
    if (kExecutor == Executor::FENCED) return;

    // The model has a dynamic first dimension, but the buffer is allocated with the full
    // static shape of kTestOperand.
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(testOperand);
    auto deviceBuffer = allocateBuffer(preparedModel, {0}, {0},
                                       utils::toSigned(kTestOperand.dimensions).value());
    if (deviceBuffer.buffer == nullptr) return;

    // Use an incompatible dimension and make the memory length match that bad dimension.
    auto badDimensions = utils::toSigned(kTestOperand.dimensions).value();
    badDimensions[0] = 2;
    const uint32_t badTestOperandDataSize = kTestOperandDataSize * 2;

    RequestMemoryPool sharedPool = createSharedMemoryPool(badTestOperandDataSize);
    RequestMemoryPool devicePool = createDeviceMemoryPool(deviceBuffer.token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = badTestOperandDataSize},
            .dimensions = badDimensions};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};
    RequestArgument deviceArgBadDimensions = {.location = {.poolIndex = 1},
                                              .dimensions = badDimensions};

    initializeDeviceMemory(deviceBuffer.buffer);
    // Overriding the device buffer's dimensions with incompatible ones is rejected for
    // both the input and the output role.
    testExecution(preparedModel,
                  {.inputs = {deviceArgBadDimensions},
                   .outputs = {sharedArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);

    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArgBadDimensions},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Even without an override on the device output, the execution fails: the shared
    // input still carries the bad dimensions.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = createRequestMemoryPools(sharedPool, devicePool)},
                  ErrorStatus::GENERAL_FAILURE);
}
1244
// Executor flavors exercised by each parameterized execution test.
const auto kExecutorChoices = testing::Values(Executor::SYNC, Executor::BURST, Executor::FENCED);
1246
printMemoryDomainExecutionTest(const testing::TestParamInfo<MemoryDomainExecutionTestParam> & info)1247 std::string printMemoryDomainExecutionTest(
1248 const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
1249 const auto& [namedDevice, operandType, executor] = info.param;
1250 const std::string type = toString(static_cast<OperandType>(operandType));
1251 const std::string executorStr = toString(executor);
1252 return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + executorStr);
1253 }
1254
// Instantiate the execution tests for every (device, operand type, executor) triple; the
// suite may legitimately end up empty if no devices are registered.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainExecutionTest);
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainExecutionTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kTestOperandTypeChoices, kExecutorChoices),
                         printMemoryDomainExecutionTest);
1260
1261 } // namespace aidl::android::hardware::neuralnetworks::vts::functional
1262