1 /*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "neuralnetworks_aidl_hal_test"
18
#include <aidl/android/hardware/graphics/common/PixelFormat.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/binder_interface_utils.h>
#include <android/binder_status.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include <LegacyUtils.h>
#include <TestHarness.h>
#include <Utils.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/hal/aidl/Conversions.h>
#include <nnapi/hal/aidl/Utils.h>

#include "AidlHalInterfaces.h"
#include "Callbacks.h"
#include "GeneratedTestHarness.h"
#include "MemoryUtils.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
39
40 namespace aidl::android::hardware::neuralnetworks::vts::functional {
41
42 using namespace test_helper;
43 using implementation::PreparedModelCallback;
44
45 namespace {
46
// An AIDL driver is likely to support at least one of the following operand types.
const std::vector<TestOperandType> kTestOperandTypeChoicesVector = {
        TestOperandType::TENSOR_FLOAT32,
        TestOperandType::TENSOR_FLOAT16,
        TestOperandType::TENSOR_QUANT8_ASYMM,
        TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
};
// gtest parameter generator over the operand types listed above.
const auto kTestOperandTypeChoices = testing::ValuesIn(kTestOperandTypeChoicesVector);
55 // TODO(b/179270601): restore kNamedDeviceChoices
56
isInChoices(TestOperandType type)57 bool isInChoices(TestOperandType type) {
58 return std::count(kTestOperandTypeChoicesVector.begin(), kTestOperandTypeChoicesVector.end(),
59 type) > 0;
60 }
61
isFloat(TestOperandType type)62 bool isFloat(TestOperandType type) {
63 CHECK(isInChoices(type));
64 return type == TestOperandType::TENSOR_FLOAT32 || type == TestOperandType::TENSOR_FLOAT16;
65 }
66
67 // Create placeholder buffers for model constants as well as inputs and outputs.
68 // We only care about the size here because we will not check accuracy in validation tests.
createDummyData(TestModel * testModel)69 void createDummyData(TestModel* testModel) {
70 for (auto& operand : testModel->main.operands) {
71 if (operand.data != nullptr) continue;
72 switch (operand.lifetime) {
73 case TestOperandLifeTime::SUBGRAPH_INPUT:
74 case TestOperandLifeTime::SUBGRAPH_OUTPUT:
75 case TestOperandLifeTime::CONSTANT_COPY:
76 case TestOperandLifeTime::CONSTANT_REFERENCE: {
77 const uint32_t size = nn::nonExtensionOperandSizeOfData(
78 static_cast<nn::OperandType>(operand.type), operand.dimensions);
79 operand.data = TestBuffer(size);
80 } break;
81 default:
82 break;
83 }
84 }
85 }
86
createInt32Scalar(int32_t value)87 TestOperand createInt32Scalar(int32_t value) {
88 return {
89 .type = TestOperandType::INT32,
90 .dimensions = {},
91 .numberOfConsumers = 1,
92 .scale = 0.0f,
93 .zeroPoint = 0,
94 .lifetime = TestOperandLifeTime::CONSTANT_COPY,
95 .data = TestBuffer::createFromVector<int32_t>({value}),
96 };
97 }
98
99 // Construct a test model with multiple CONV_2D operations with the given operand as inputs.
100 // The dimensions of the filters are chosen to ensure outputs has the same dimensions as inputs.
101 // We choose CONV_2D operation because it is commonly supported by most drivers.
createConvModel(const TestOperand & operand,uint32_t numOperations)102 TestModel createConvModel(const TestOperand& operand, uint32_t numOperations) {
103 CHECK(isInChoices(operand.type));
104
105 TestOperand weight = {.type = operand.type,
106 .dimensions = {operand.dimensions[3], 3, 3, operand.dimensions[3]},
107 .numberOfConsumers = 1,
108 .scale = isFloat(operand.type) ? 0.0f : 1.0f,
109 .zeroPoint = 0,
110 .lifetime = TestOperandLifeTime::CONSTANT_COPY};
111
112 TestOperand bias = {
113 .type = isFloat(operand.type) ? operand.type : TestOperandType::TENSOR_INT32,
114 .dimensions = {operand.dimensions[3]},
115 .numberOfConsumers = 1,
116 .scale = operand.scale * weight.scale,
117 .zeroPoint = 0,
118 .lifetime = TestOperandLifeTime::CONSTANT_COPY};
119
120 TestOperand output = operand;
121 output.numberOfConsumers = 0;
122 output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;
123
124 const std::vector<TestOperand> operands = {
125 operand,
126 std::move(weight),
127 std::move(bias),
128 createInt32Scalar(1), // same padding
129 createInt32Scalar(1), // width stride
130 createInt32Scalar(1), // height stride
131 createInt32Scalar(0), // activation = NONE
132 std::move(output),
133 };
134
135 TestModel model;
136 for (uint32_t i = 0; i < numOperations; i++) {
137 model.main.operands.insert(model.main.operands.end(), operands.begin(), operands.end());
138 const uint32_t inputIndex = operands.size() * i;
139 const uint32_t outputIndex = inputIndex + operands.size() - 1;
140 std::vector<uint32_t> inputs(operands.size() - 1);
141 std::iota(inputs.begin(), inputs.end(), inputIndex);
142 model.main.operations.push_back({.type = TestOperationType::CONV_2D,
143 .inputs = std::move(inputs),
144 .outputs = {outputIndex}});
145 model.main.inputIndexes.push_back(inputIndex);
146 model.main.outputIndexes.push_back(outputIndex);
147 }
148 createDummyData(&model);
149 return model;
150 }
151
152 // Construct a test model with a single ADD operation with the given operand as input0 and input1.
153 // This is to cover additional cases that the CONV_2D model does not support, e.g. arbitrary input
154 // operand rank, scalar input operand. We choose ADD operation because it is commonly supported by
155 // most drivers.
createSingleAddModel(const TestOperand & operand)156 TestModel createSingleAddModel(const TestOperand& operand) {
157 CHECK(isInChoices(operand.type));
158
159 TestOperand act = {
160 .type = TestOperandType::INT32,
161 .dimensions = {},
162 .numberOfConsumers = 1,
163 .scale = 0.0f,
164 .zeroPoint = 0,
165 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
166 };
167
168 TestOperand output = operand;
169 output.numberOfConsumers = 0;
170 output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;
171
172 TestModel model = {
173 .main =
174 {
175 .operands =
176 {
177 operand,
178 operand,
179 std::move(act),
180 output,
181 },
182 .operations = {{.type = TestOperationType::ADD,
183 .inputs = {0, 1, 2},
184 .outputs = {3}}},
185 .inputIndexes = {0, 1, 2},
186 .outputIndexes = {3},
187 },
188 };
189 createDummyData(&model);
190 return model;
191 }
192
193 // A placeholder invalid IPreparedModel class for MemoryDomainAllocateTest.InvalidPreparedModel
194 class InvalidPreparedModel : public BnPreparedModel {
195 public:
executeSynchronously(const Request &,bool,int64_t,int64_t,ExecutionResult *)196 ndk::ScopedAStatus executeSynchronously(const Request&, bool, int64_t, int64_t,
197 ExecutionResult*) override {
198 return ndk::ScopedAStatus::fromServiceSpecificError(
199 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
200 }
executeFenced(const Request &,const std::vector<ndk::ScopedFileDescriptor> &,bool,int64_t,int64_t,int64_t,FencedExecutionResult *)201 ndk::ScopedAStatus executeFenced(const Request&, const std::vector<ndk::ScopedFileDescriptor>&,
202 bool, int64_t, int64_t, int64_t,
203 FencedExecutionResult*) override {
204 return ndk::ScopedAStatus::fromServiceSpecificError(
205 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
206 }
configureExecutionBurst(std::shared_ptr<IBurst> *)207 ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
208 return ndk::ScopedAStatus::fromServiceSpecificError(
209 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
210 }
211 };
212
213 template <typename... Args>
createRequestMemoryPools(const Args &...pools)214 std::vector<RequestMemoryPool> createRequestMemoryPools(const Args&... pools) {
215 std::vector<RequestMemoryPool> memoryPools;
216 memoryPools.reserve(sizeof...(Args));
217 // This fold operator calls push_back on each of the function arguments.
218 (memoryPools.push_back(utils::clone(pools).value()), ...);
219 return memoryPools;
220 };
221
222 } // namespace
223
// Base fixture for all memory domain tests: holds the device under test and the canonical
// test operand (and its byte size) for the operand type being exercised.
class MemoryDomainTestBase : public testing::Test {
  protected:
    MemoryDomainTestBase(std::shared_ptr<IDevice> device, TestOperandType type)
        : kDevice(std::move(device)),
          kTestOperandType(type),
          kTestOperand(kTestOperandMap.at(type)),
          kTestOperandDataSize(nn::nonExtensionOperandSizeOfData(static_cast<nn::OperandType>(type),
                                                                 kTestOperand.dimensions)) {}

    void SetUp() override {
        testing::Test::SetUp();
        ASSERT_NE(kDevice, nullptr);
        // Ping the binder to confirm the service is alive before running the test body.
        const bool deviceIsResponsive =
                ndk::ScopedAStatus::fromStatus(AIBinder_ping(kDevice->asBinder().get())).isOk();
        ASSERT_TRUE(deviceIsResponsive);
    }

    // Prepares a CONV_2D model built from `testOperand` with `numOperations` operations.
    // Returns nullptr if the device does not produce a prepared model (e.g. unsupported type).
    std::shared_ptr<IPreparedModel> createConvPreparedModel(const TestOperand& testOperand,
                                                            uint32_t numOperations = 1) {
        const TestModel testModel = createConvModel(testOperand, numOperations);
        const Model model = createModel(testModel);
        std::shared_ptr<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
        return preparedModel;
    }

    // Prepares a single-ADD model built from `testOperand`; nullptr on failure.
    std::shared_ptr<IPreparedModel> createAddPreparedModel(const TestOperand& testOperand) {
        const TestModel testModel = createSingleAddModel(testOperand);
        const Model model = createModel(testModel);
        std::shared_ptr<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
        return preparedModel;
    }

    // One canonical test operand per tested operand type (defined below).
    static const std::map<TestOperandType, TestOperand> kTestOperandMap;

    const std::shared_ptr<IDevice> kDevice;
    const TestOperandType kTestOperandType;
    // Reference into the static kTestOperandMap, so it outlives the fixture.
    const TestOperand& kTestOperand;
    const uint32_t kTestOperandDataSize;
};
265
// Canonical test operands, one per tested operand type. All are rank-4 SUBGRAPH_INPUT tensors
// of shape {1, 32, 32, 8}; the quantized entries use scale 0.5 and zero point 0.
const std::map<TestOperandType, TestOperand> MemoryDomainTestBase::kTestOperandMap = {
        {TestOperandType::TENSOR_FLOAT32,
         {
                 .type = TestOperandType::TENSOR_FLOAT32,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_FLOAT16,
         {
                 .type = TestOperandType::TENSOR_FLOAT16,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
};
304
305 using MemoryDomainAllocateTestParam = std::tuple<NamedDevice, TestOperandType>;
306 class MemoryDomainAllocateTest : public MemoryDomainTestBase,
307 public testing::WithParamInterface<MemoryDomainAllocateTestParam> {
308 protected:
MemoryDomainAllocateTest()309 MemoryDomainAllocateTest()
310 : MemoryDomainTestBase(getData(std::get<NamedDevice>(GetParam())),
311 std::get<TestOperandType>(GetParam())) {}
312
313 struct AllocateTestArgs {
314 std::vector<int32_t> dimensions;
315 std::vector<std::shared_ptr<IPreparedModel>> preparedModels;
316 std::vector<BufferRole> inputRoles;
317 std::vector<BufferRole> outputRoles;
318 };
319
320 // Validation test for IDevice::allocate. The driver is expected to fail with INVALID_ARGUMENT,
321 // or GENERAL_FAILURE if memory domain is not supported.
validateAllocate(AllocateTestArgs args)322 void validateAllocate(AllocateTestArgs args) {
323 std::vector<IPreparedModelParcel> preparedModelParcels;
324 preparedModelParcels.reserve(args.preparedModels.size());
325 for (const auto& model : args.preparedModels) {
326 preparedModelParcels.push_back({.preparedModel = model});
327 }
328 DeviceBuffer buffer;
329 const auto ret =
330 kDevice->allocate({.dimensions = std::move(args.dimensions)}, preparedModelParcels,
331 args.inputRoles, args.outputRoles, &buffer);
332
333 ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
334 ASSERT_TRUE(static_cast<ErrorStatus>(ret.getServiceSpecificError()) ==
335 ErrorStatus::INVALID_ARGUMENT ||
336 static_cast<ErrorStatus>(ret.getServiceSpecificError()) ==
337 ErrorStatus::GENERAL_FAILURE);
338 }
339
testConflictOperands(const std::shared_ptr<IPreparedModel> & model1,const std::shared_ptr<IPreparedModel> & model2)340 void testConflictOperands(const std::shared_ptr<IPreparedModel>& model1,
341 const std::shared_ptr<IPreparedModel>& model2) {
342 validateAllocate({
343 .preparedModels = {model1, model2},
344 .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
345 {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
346 });
347 validateAllocate({
348 .preparedModels = {model1, model2},
349 .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
350 .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
351 });
352 validateAllocate({
353 .preparedModels = {model1, model2},
354 .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
355 {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
356 });
357 }
358 };
359
TEST_P(MemoryDomainAllocateTest, EmptyRole) {
    // Test with empty prepared models and roles.
    validateAllocate({});

    const auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Test again with non-empty prepared models but empty roles.
    validateAllocate({.preparedModels = {preparedModel}});
}
372
TEST_P(MemoryDomainAllocateTest, NullptrPreparedModel) {
    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};

    // Test with nullptr prepared model as input role.
    validateAllocate({.preparedModels = {nullptr}, .inputRoles = {role}});

    // Test with nullptr prepared model as output role.
    validateAllocate({.preparedModels = {nullptr}, .outputRoles = {role}});
}
386
TEST_P(MemoryDomainAllocateTest, InvalidPreparedModel) {
    const auto invalidPreparedModel = ndk::SharedRefBase::make<InvalidPreparedModel>();
    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};

    // Test with invalid prepared model as input role.
    validateAllocate({.preparedModels = {invalidPreparedModel}, .inputRoles = {role}});

    // Test with invalid prepared model as output role.
    validateAllocate({.preparedModels = {invalidPreparedModel}, .outputRoles = {role}});
}
403
TEST_P(MemoryDomainAllocateTest, InvalidModelIndex) {
    const auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Only one prepared model is supplied, so model index 1 is out of bounds for both
    // input and output roles.
    const BufferRole badRole = {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f};
    validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {badRole}});
    validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {badRole}});
}
420
TEST_P(MemoryDomainAllocateTest, InvalidIOIndex) {
    const auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // The conv model has exactly one input and one output, so ioIndex 1 is out of bounds
    // for both role lists.
    const BufferRole badRole = {.modelIndex = 0, .ioIndex = 1, .probability = 1.0f};
    validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {badRole}});
    validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {badRole}});
}
437
TEST_P(MemoryDomainAllocateTest, InvalidProbability) {
    const auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    // Exercise probabilities outside the valid range for both role lists.
    for (const float badProbability : {10.0f, 0.0f, -0.5f}) {
        const BufferRole badRole = {.modelIndex = 0, .ioIndex = 0, .probability = badProbability};
        validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {badRole}});
        validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {badRole}});
    }
}
455
TEST_P(MemoryDomainAllocateTest, SameRoleSpecifiedTwice) {
    const auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    const BufferRole role0 = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    const BufferRole role1 = {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f};

    // Same role with same model index.
    validateAllocate({.preparedModels = {preparedModel}, .inputRoles = {role0, role0}});
    validateAllocate({.preparedModels = {preparedModel}, .outputRoles = {role0, role0}});

    // Different model indexes, but logically referring to the same role.
    validateAllocate({.preparedModels = {preparedModel, preparedModel},
                      .inputRoles = {role0, role1}});
    validateAllocate({.preparedModels = {preparedModel, preparedModel},
                      .outputRoles = {role0, role1}});
}
484
TEST_P(MemoryDomainAllocateTest, ConflictOperandType) {
    // Pair each tested type with a similar but incompatible type.
    const std::map<TestOperandType, TestOperandType> conflictTypeMap = {
            {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
            {TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_FLOAT32},
            {TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
            {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::TENSOR_QUANT8_ASYMM},
    };

    const auto it = conflictTypeMap.find(kTestOperandType);
    ASSERT_FALSE(it == conflictTypeMap.end());
    TestOperand mismatched = kTestOperand;
    mismatched.type = it->second;

    const auto model = createConvPreparedModel(kTestOperand);
    const auto mismatchedModel = createConvPreparedModel(mismatched);
    if (model == nullptr || mismatchedModel == nullptr) return;
    testConflictOperands(model, mismatchedModel);
}
503
TEST_P(MemoryDomainAllocateTest, ConflictScale) {
    // Scale conflicts only apply to quantized types.
    if (isFloat(kTestOperandType)) return;

    TestOperand mismatched = kTestOperand;
    ASSERT_NE(mismatched.scale, 1.0f);
    mismatched.scale = 1.0f;

    const auto model = createConvPreparedModel(kTestOperand);
    const auto mismatchedModel = createConvPreparedModel(mismatched);
    if (model == nullptr || mismatchedModel == nullptr) return;
    testConflictOperands(model, mismatchedModel);
}
516
TEST_P(MemoryDomainAllocateTest, ConflictZeroPoint) {
    // Zero point conflicts only apply to quantized types.
    if (isFloat(kTestOperandType)) return;

    TestOperand mismatched = kTestOperand;
    ASSERT_NE(mismatched.zeroPoint, 10);
    mismatched.zeroPoint = 10;

    const auto model = createConvPreparedModel(kTestOperand);
    const auto mismatchedModel = createConvPreparedModel(mismatched);
    if (model == nullptr || mismatchedModel == nullptr) return;
    testConflictOperands(model, mismatchedModel);
}
529
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoles) {
    // Use the ADD model because it accepts operands of arbitrary rank.
    TestOperand mismatched = kTestOperand;
    mismatched.dimensions.pop_back();

    const auto model = createAddPreparedModel(kTestOperand);
    const auto mismatchedModel = createAddPreparedModel(mismatched);
    if (model == nullptr || mismatchedModel == nullptr) return;
    testConflictOperands(model, mismatchedModel);
}
539
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoles) {
    TestOperand mismatched = kTestOperand;
    mismatched.dimensions[0] = 4;  // batch size differs from kTestOperand's

    const auto model = createConvPreparedModel(kTestOperand);
    const auto mismatchedModel = createConvPreparedModel(mismatched);
    if (model == nullptr || mismatchedModel == nullptr) return;
    testConflictOperands(model, mismatchedModel);
}
549
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoleAndDesc) {
    const auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Drop one dimension so the descriptor rank disagrees with the role's operand rank.
    auto badDimensions = utils::toSigned(kTestOperand.dimensions).value();
    badDimensions.pop_back();

    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    validateAllocate(
            {.dimensions = badDimensions, .preparedModels = {model}, .inputRoles = {role}});
    validateAllocate(
            {.dimensions = badDimensions, .preparedModels = {model}, .outputRoles = {role}});
}
568
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoleAndDesc) {
    const auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Change the batch size so the descriptor conflicts with the role's operand dimensions.
    auto badDimensions = utils::toSigned(kTestOperand.dimensions).value();
    badDimensions[0] = 4;

    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};
    validateAllocate(
            {.dimensions = badDimensions, .preparedModels = {model}, .inputRoles = {role}});
    validateAllocate(
            {.dimensions = badDimensions, .preparedModels = {model}, .outputRoles = {role}});
}
587
TEST_P(MemoryDomainAllocateTest, ConflictRankWithScalarRole) {
    const auto model = createAddPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // This should fail, because the target operand (input index 2, the activation scalar) is a
    // scalar but a non-empty dimension is specified.
    validateAllocate({.dimensions = {1},
                      .preparedModels = {model},
                      .inputRoles = {{.modelIndex = 0, .ioIndex = 2, .probability = 1.0f}}});
}
600
printMemoryDomainAllocateTest(const testing::TestParamInfo<MemoryDomainAllocateTestParam> & info)601 std::string printMemoryDomainAllocateTest(
602 const testing::TestParamInfo<MemoryDomainAllocateTestParam>& info) {
603 const auto& [namedDevice, operandType] = info.param;
604 const std::string type = toString(static_cast<OperandType>(operandType));
605 return gtestCompliantName(getName(namedDevice) + "_" + type);
606 }
607
// Instantiate the allocate validation suite for every (device, operand type) combination.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainAllocateTest);
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainAllocateTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kTestOperandTypeChoices),
                         printMemoryDomainAllocateTest);
613
// Helper fixture for the copy tests: adds buffer allocation and copyFrom/copyTo validation
// utilities on top of MemoryDomainTestBase.
class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
  protected:
    MemoryDomainCopyTestBase(std::shared_ptr<IDevice> device, TestOperandType type)
        : MemoryDomainTestBase(std::move(device), type) {}

    // Allocates device memory for roles of a single prepared model.
    // Returns {IBuffer, token} if success; returns {nullptr, 0} if not supported.
    DeviceBuffer allocateBuffer(const std::shared_ptr<IPreparedModel>& preparedModel,
                                const std::vector<int32_t>& inputIndexes,
                                const std::vector<int32_t>& outputIndexes,
                                const std::vector<int32_t>& dimensions) {
        if (preparedModel == nullptr) {
            return {.buffer = nullptr, .token = 0};
        }

        // Turn each io index into a BufferRole on model 0 with probability 1.
        std::vector<BufferRole> inputRoles(inputIndexes.size()), outputRoles(outputIndexes.size());
        auto trans = [](int32_t ind) -> BufferRole {
            return {.modelIndex = 0, .ioIndex = ind, .probability = 1.0f};
        };
        std::transform(inputIndexes.begin(), inputIndexes.end(), inputRoles.begin(), trans);
        std::transform(outputIndexes.begin(), outputIndexes.end(), outputRoles.begin(), trans);

        IPreparedModelParcel parcel;
        parcel.preparedModel = preparedModel;

        DeviceBuffer buffer;

        const auto ret = kDevice->allocate({.dimensions = dimensions}, {parcel}, inputRoles,
                                           outputRoles, &buffer);

        if (!ret.isOk()) {
            // Memory domain support is optional; the only acceptable failure here is
            // GENERAL_FAILURE (i.e. "not supported").
            EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            EXPECT_EQ(static_cast<ErrorStatus>(ret.getServiceSpecificError()),
                      ErrorStatus::GENERAL_FAILURE);
            return DeviceBuffer{
                    .buffer = nullptr,
                    .token = 0,
            };
        }

        // On success the driver must return a non-null buffer and a positive token.
        EXPECT_NE(buffer.buffer, nullptr);
        EXPECT_GT(buffer.token, 0);

        return buffer;
    }

    // Convenience overload with unspecified (empty) descriptor dimensions.
    DeviceBuffer allocateBuffer(const std::shared_ptr<IPreparedModel>& preparedModel,
                                const std::vector<int32_t>& inputIndexes,
                                const std::vector<int32_t>& outputIndexes) {
        return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
    }

    // Returns the usable size in bytes of `memory`. For a hardware buffer the size is only
    // known for BLOB format (the width field); other formats report 0.
    size_t getSize(const Memory& memory) {
        switch (memory.getTag()) {
            case Memory::Tag::ashmem:
                return memory.get<Memory::Tag::ashmem>().size;
            case Memory::Tag::mappableFile:
                return memory.get<Memory::Tag::mappableFile>().length;
            case Memory::Tag::hardwareBuffer: {
                const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
                const bool isBlob =
                        hardwareBuffer.description.format == graphics::common::PixelFormat::BLOB;
                return isBlob ? hardwareBuffer.description.width : 0;
            }
        }
        return 0;
    }

    // Creates a shared memory region of `size` bytes and converts it to the AIDL Memory type.
    Memory allocateSharedMemory(uint32_t size) {
        const auto sharedMemory = nn::createSharedMemory(size).value();
        auto memory = utils::convert(sharedMemory).value();
        EXPECT_EQ(getSize(memory), size);
        return memory;
    }

    // Calls IBuffer::copyFrom and checks that the returned status matches `expectedStatus`.
    void testCopyFrom(const std::shared_ptr<IBuffer>& buffer, const Memory& memory,
                      const std::vector<int32_t>& dimensions, ErrorStatus expectedStatus) {
        const auto ret = buffer->copyFrom(memory, dimensions);
        if (expectedStatus == ErrorStatus::NONE) {
            ASSERT_TRUE(ret.isOk());
        } else {
            ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(expectedStatus, static_cast<ErrorStatus>(ret.getServiceSpecificError()));
        }
    }

    // Calls IBuffer::copyTo and checks that the returned status matches `expectedStatus`.
    void testCopyTo(const std::shared_ptr<IBuffer>& buffer, const Memory& memory,
                    ErrorStatus expectedStatus) {
        const auto ret = buffer->copyTo(memory);
        if (expectedStatus == ErrorStatus::NONE) {
            ASSERT_TRUE(ret.isOk());
        } else {
            ASSERT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
            ASSERT_EQ(expectedStatus, static_cast<ErrorStatus>(ret.getServiceSpecificError()));
        }
    }

    // Initializes a device buffer by copying in a full-size shared memory region, so that
    // subsequent copyTo calls operate on initialized memory.
    void initializeDeviceMemory(const std::shared_ptr<IBuffer>& buffer) {
        Memory memory = allocateSharedMemory(kTestOperandDataSize);
        ASSERT_EQ(getSize(memory), kTestOperandDataSize);
        testCopyFrom(buffer, memory, utils::toSigned(kTestOperand.dimensions).value(),
                     ErrorStatus::NONE);
    }
};
718
using MemoryDomainCopyTestParam = std::tuple<NamedDevice, TestOperandType>;
// Validation tests for IBuffer::copyFrom/copyTo, parameterized over (device, operand type).
class MemoryDomainCopyTest : public MemoryDomainCopyTestBase,
                             public testing::WithParamInterface<MemoryDomainCopyTestParam> {
  protected:
    MemoryDomainCopyTest()
        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
                                   std::get<TestOperandType>(GetParam())) {}
};
727
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    // Source memories that are too small or too large must both be rejected.
    for (const uint32_t badSize : {kTestOperandDataSize / 2, kTestOperandDataSize * 2}) {
        Memory badMemory = allocateSharedMemory(badSize);
        testCopyFrom(buffer, badMemory, {}, ErrorStatus::INVALID_ARGUMENT);
    }
}
739
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize_DynamicShape) {
    // Make the batch dimension dynamic so copyFrom must be given explicit dimensions.
    TestOperand dynamicOperand = kTestOperand;
    dynamicOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(dynamicOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    Memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    Memory justRight = allocateSharedMemory(kTestOperandDataSize);

    const auto goodDimensions = utils::toSigned(kTestOperand.dimensions).value();
    auto badDimensions = goodDimensions;
    badDimensions[0] = 2;

    testCopyFrom(buffer, tooSmall, goodDimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, tooLarge, goodDimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, justRight, goodDimensions, ErrorStatus::NONE);
    testCopyFrom(buffer, justRight, badDimensions, ErrorStatus::INVALID_ARGUMENT);
}
761
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory memory = allocateSharedMemory(kTestOperandDataSize);
    const auto goodDimensions = utils::toSigned(kTestOperand.dimensions).value();

    // Wrong rank.
    {
        auto badDimensions = goodDimensions;
        badDimensions.pop_back();
        testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
    }
    // Wrong batch size.
    {
        auto badDimensions = goodDimensions;
        badDimensions[0] = 2;
        testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
    }
    // Zero batch size on a fully-specified operand.
    {
        auto badDimensions = goodDimensions;
        badDimensions[0] = 0;
        testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
    }

    // Empty dimensions and the exact operand dimensions are both accepted.
    testCopyFrom(buffer, memory, {}, ErrorStatus::NONE);
    testCopyFrom(buffer, memory, goodDimensions, ErrorStatus::NONE);
}
785
// Verify that copyFrom on a dynamic-shaped buffer rejects incompatible
// dimensions and requires the dimensions to be supplied explicitly.
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions_DynamicShape) {
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;  // make the first dimension dynamic
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory memory = allocateSharedMemory(kTestOperandDataSize);
    const auto goodDimensions = utils::toSigned(kTestOperand.dimensions).value();

    // Wrong rank: drop the last dimension.
    {
        auto badDimensions = goodDimensions;
        badDimensions.pop_back();
        testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
    }
    // Extents that contradict the buffer's known dimensions.
    {
        auto badDimensions = goodDimensions;
        badDimensions[0] = 2;
        badDimensions[3] = 4;
        testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
    }
    // A zero extent does not fully specify the shape.
    {
        auto badDimensions = goodDimensions;
        badDimensions[0] = 1;
        badDimensions[3] = 0;
        testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
    }

    // Omitting dimensions is invalid for a dynamic-shaped buffer, but fully
    // specified matching dimensions succeed.
    testCopyFrom(buffer, memory, {}, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, memory, goodDimensions, ErrorStatus::NONE);
}
813
// Verify that reading back (copyTo) from a device buffer that has never been
// written fails with GENERAL_FAILURE.
TEST_P(MemoryDomainCopyTest, CopyTo_UninitializedMemory) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory destination = allocateSharedMemory(kTestOperandDataSize);
    testCopyTo(buffer, destination, ErrorStatus::GENERAL_FAILURE);
}
822
// Verify that copyTo rejects destination memory whose size does not match the
// device buffer, and succeeds for an exact-size destination.
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory undersizedMemory = allocateSharedMemory(kTestOperandDataSize / 2);
    Memory oversizedMemory = allocateSharedMemory(kTestOperandDataSize * 2);
    Memory fittingMemory = allocateSharedMemory(kTestOperandDataSize);

    initializeDeviceMemory(buffer);
    testCopyTo(buffer, undersizedMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, oversizedMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, fittingMemory, ErrorStatus::NONE);
}
838
// Verify that copyTo size validation also applies when the buffer was
// allocated from a model with a dynamic shape.
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize_DynamicShape) {
    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;  // make the first dimension dynamic
    auto preparedModel = createConvPreparedModel(testOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Memory undersizedMemory = allocateSharedMemory(kTestOperandDataSize / 2);
    Memory oversizedMemory = allocateSharedMemory(kTestOperandDataSize * 2);
    Memory fittingMemory = allocateSharedMemory(kTestOperandDataSize);

    initializeDeviceMemory(buffer);
    testCopyTo(buffer, undersizedMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, oversizedMemory, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, fittingMemory, ErrorStatus::NONE);
}
856
printMemoryDomainCopyTest(const testing::TestParamInfo<MemoryDomainCopyTestParam> & info)857 std::string printMemoryDomainCopyTest(
858 const testing::TestParamInfo<MemoryDomainCopyTestParam>& info) {
859 const auto& [namedDevice, operandType] = info.param;
860 const std::string type = toString(static_cast<OperandType>(operandType));
861 return gtestCompliantName(getName(namedDevice) + "_" + type);
862 }
863
// Allow the suite to exist uninstantiated (e.g. when no NN devices are
// present on the target) without gtest flagging it as an error.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainCopyTest);
// Instantiate the copy tests over every (device, operand type) combination.
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainCopyTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kTestOperandTypeChoices),
                         printMemoryDomainCopyTest);
869
870 using MemoryDomainExecutionTestParam = std::tuple<NamedDevice, TestOperandType, Executor>;
871 class MemoryDomainExecutionTest
872 : public MemoryDomainCopyTestBase,
873 public testing::WithParamInterface<MemoryDomainExecutionTestParam> {
874 protected:
MemoryDomainExecutionTest()875 MemoryDomainExecutionTest()
876 : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
877 std::get<TestOperandType>(GetParam())) {}
878
createSharedMemoryPool(uint32_t size)879 RequestMemoryPool createSharedMemoryPool(uint32_t size) {
880 return RequestMemoryPool(allocateSharedMemory(size));
881 }
882
createDeviceMemoryPool(uint32_t token)883 RequestMemoryPool createDeviceMemoryPool(uint32_t token) {
884 return RequestMemoryPool(static_cast<int32_t>(token));
885 }
886
testExecution(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request,ErrorStatus expectedStatus)887 void testExecution(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request,
888 ErrorStatus expectedStatus) {
889 switch (kExecutor) {
890 case Executor::SYNC:
891 EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
892 break;
893 case Executor::BURST:
894 EXPECT_EQ(executeBurst(preparedModel, request), expectedStatus);
895 break;
896 case Executor::FENCED:
897 EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
898 break;
899 default:
900 ASSERT_TRUE(false);
901 }
902 }
903
executeSync(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)904 ErrorStatus executeSync(const std::shared_ptr<IPreparedModel>& preparedModel,
905 const Request& request) {
906 ExecutionResult executionResult;
907 const auto ret = preparedModel->executeSynchronously(
908 request, false, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
909
910 if (!ret.isOk()) {
911 EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
912 return static_cast<ErrorStatus>(ret.getServiceSpecificError());
913 }
914 const ErrorStatus executionStatus = executionResult.outputSufficientSize
915 ? ErrorStatus::NONE
916 : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
917 EXPECT_EQ(executionResult.timing, kNoTiming);
918 return executionStatus;
919 }
920
executeFenced(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)921 ErrorStatus executeFenced(const std::shared_ptr<IPreparedModel>& preparedModel,
922 const Request& request) {
923 FencedExecutionResult executionResult;
924 const auto ret = preparedModel->executeFenced(request, {}, false, kNoDeadline,
925 kOmittedTimeoutDuration, kNoDuration,
926 &executionResult);
927 if (!ret.isOk()) {
928 EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
929 return static_cast<ErrorStatus>(ret.getServiceSpecificError());
930 }
931 if (executionResult.syncFence.get() != -1) {
932 waitForSyncFence(executionResult.syncFence.get());
933 }
934 EXPECT_NE(executionResult.callback, nullptr);
935
936 ErrorStatus executionStatus = ErrorStatus::GENERAL_FAILURE;
937 Timing time = kNoTiming;
938 Timing timeFenced = kNoTiming;
939 const auto retExecutionInfo =
940 executionResult.callback->getExecutionInfo(&time, &timeFenced, &executionStatus);
941 EXPECT_TRUE(retExecutionInfo.isOk());
942 EXPECT_EQ(time, kNoTiming);
943 return executionStatus;
944 }
945
executeBurst(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)946 ErrorStatus executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
947 const Request& request) {
948 // create burst
949 std::shared_ptr<IBurst> burst;
950 auto ret = preparedModel->configureExecutionBurst(&burst);
951 EXPECT_TRUE(ret.isOk()) << ret.getDescription();
952 EXPECT_NE(nullptr, burst.get());
953 if (!ret.isOk() || burst.get() == nullptr) {
954 return ErrorStatus::GENERAL_FAILURE;
955 }
956
957 // use -1 for all memory identifier tokens
958 const std::vector<int64_t> slots(request.pools.size(), -1);
959
960 ExecutionResult executionResult;
961 ret = burst->executeSynchronously(request, slots, false, kNoDeadline,
962 kOmittedTimeoutDuration, &executionResult);
963
964 if (!ret.isOk()) {
965 EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
966 return static_cast<ErrorStatus>(ret.getServiceSpecificError());
967 }
968 const ErrorStatus executionStatus = executionResult.outputSufficientSize
969 ? ErrorStatus::NONE
970 : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
971 EXPECT_EQ(executionResult.timing, kNoTiming);
972 return executionStatus;
973 }
974
975 const Executor kExecutor = std::get<Executor>(GetParam());
976 };
977
// Verify that executions referencing a device memory pool via an invalid
// (zero) or unknown token fail, in both the input and the output role.
TEST_P(MemoryDomainExecutionTest, InvalidToken) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    RequestMemoryPool shmPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool invalidTokenPool = createDeviceMemoryPool(0);    // Invalid token.
    RequestMemoryPool unknownTokenPool = createDeviceMemoryPool(100);  // Unknown token.
    RequestArgument shmArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument devArg = {.location = {.poolIndex = 1}};

    for (RequestMemoryPool* badPool : {&invalidTokenPool, &unknownTokenPool}) {
        // Bad device pool in the input role.
        testExecution(preparedModel,
                      {.inputs = {devArg},
                       .outputs = {shmArg},
                       .pools = createRequestMemoryPools(shmPool, *badPool)},
                      ErrorStatus::INVALID_ARGUMENT);
        // Bad device pool in the output role.
        testExecution(preparedModel,
                      {.inputs = {shmArg},
                       .outputs = {devArg},
                       .pools = createRequestMemoryPools(shmPool, *badPool)},
                      ErrorStatus::INVALID_ARGUMENT);
    }
}
1010
// Verify that a device buffer allocated for one prepared model cannot be used
// in an execution of a different (even if structurally identical) model.
TEST_P(MemoryDomainExecutionTest, InvalidPreparedModel) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;
    auto badPreparedModel = createConvPreparedModel(kTestOperand);
    if (badPreparedModel == nullptr) return;

    RequestMemoryPool shmPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devPool = createDeviceMemoryPool(token);
    RequestArgument shmArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument devArg = {.location = {.poolIndex = 1}};

    initializeDeviceMemory(buffer);
    // Device memory used as an input of the wrong model.
    testExecution(badPreparedModel,
                  {.inputs = {devArg},
                   .outputs = {shmArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);
    // Device memory used as an output of the wrong model.
    testExecution(badPreparedModel,
                  {.inputs = {shmArg},
                   .outputs = {devArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1037
// Verify that a device buffer allocated only for input 0 cannot be bound to a
// different input or output index of the same model.
TEST_P(MemoryDomainExecutionTest, InvalidIOIndex) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {});
    if (buffer == nullptr) return;

    RequestMemoryPool shmPool1 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool shmPool2 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool shmPool3 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devPool = createDeviceMemoryPool(token);
    RequestArgument shmArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument shmArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument shmArg3 = {
            .location = {.poolIndex = 2, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument devArg = {.location = {.poolIndex = 3}};

    initializeDeviceMemory(buffer);
    // The buffer was not allocated for input 1; binding it there must fail.
    testExecution(preparedModel,
                  {.inputs = {shmArg1, devArg},
                   .outputs = {shmArg2, shmArg3},
                   .pools = createRequestMemoryPools(shmPool1, shmPool2, shmPool3, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Likewise, it was not allocated for output 1.
    testExecution(preparedModel,
                  {.inputs = {shmArg1, shmArg2},
                   .outputs = {shmArg3, devArg},
                   .pools = createRequestMemoryPools(shmPool1, shmPool2, shmPool3, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1072
// Verify that a device memory allocated for the input role cannot be used as
// an output, and vice versa.
TEST_P(MemoryDomainExecutionTest, InvalidIOType) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [inputBuffer, inputToken] = allocateBuffer(preparedModel, {0}, {});
    auto [outputBuffer, outputToken] = allocateBuffer(preparedModel, {}, {0});
    if (inputBuffer == nullptr || outputBuffer == nullptr) return;

    RequestMemoryPool shmPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devPool = createDeviceMemoryPool(inputToken);
    RequestArgument shmArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument devArg = {.location = {.poolIndex = 1}};

    // Input-role device memory used as an output: rejected.
    testExecution(preparedModel,
                  {.inputs = {shmArg},
                   .outputs = {devArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Output-role device memory used as an input: rejected.
    devPool.set<RequestMemoryPool::Tag::token>(outputToken);
    initializeDeviceMemory(outputBuffer);
    testExecution(preparedModel,
                  {.inputs = {devArg},
                   .outputs = {shmArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);
}
1101
// Verify that a device memory cannot be read before it is written: executing
// with it as an input fails until an execution has produced it as an output.
TEST_P(MemoryDomainExecutionTest, UninitializedMemory) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    RequestMemoryPool shmPool = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devPool = createDeviceMemoryPool(token);
    RequestArgument shmArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument devArg = {.location = {.poolIndex = 1}};

    // Reading from never-written device memory must fail.
    testExecution(preparedModel,
                  {.inputs = {devArg},
                   .outputs = {shmArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::GENERAL_FAILURE);

    // Producing the device memory as an output initializes it.
    testExecution(preparedModel,
                  {.inputs = {shmArg},
                   .outputs = {devArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::NONE);

    // The same read now succeeds.
    testExecution(preparedModel,
                  {.inputs = {devArg},
                   .outputs = {shmArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::NONE);
}
1134
// Verify role restrictions when a single device memory appears multiple times
// in one request: input+output and output+output are rejected, while using it
// for multiple inputs is allowed.
TEST_P(MemoryDomainExecutionTest, SameRequestMultipleRoles) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    auto [buffer, token] = allocateBuffer(preparedModel, {0, 1}, {0, 1});
    if (buffer == nullptr) return;

    RequestMemoryPool shmPool1 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool shmPool2 = createSharedMemoryPool(kTestOperandDataSize);
    RequestMemoryPool devPool = createDeviceMemoryPool(token);
    RequestArgument shmArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument shmArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument devArg = {.location = {.poolIndex = 2}};

    // Same device memory as both an input and an output: rejected.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {devArg, shmArg1},
                   .outputs = {devArg, shmArg2},
                   .pools = createRequestMemoryPools(shmPool1, shmPool2, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Same device memory as two outputs: rejected.
    testExecution(preparedModel,
                  {.inputs = {shmArg1, shmArg2},
                   .outputs = {devArg, devArg},
                   .pools = createRequestMemoryPools(shmPool1, shmPool2, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Same device memory as two inputs: allowed.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {devArg, devArg},
                   .outputs = {shmArg1, shmArg2},
                   .pools = createRequestMemoryPools(shmPool1, shmPool2, devPool)},
                  ErrorStatus::NONE);
}
1172
// Verify dimension validation during execution with a dynamic-shaped device
// buffer: explicit dimensions that contradict the buffer's allocation are
// rejected, and an unresolvable output shape fails at execution time.
TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
    // FENCED execution does not support dynamic shape.
    if (kExecutor == Executor::FENCED) return;

    TestOperand testOperand = kTestOperand;
    testOperand.dimensions[0] = 0;  // make the first dimension dynamic
    auto preparedModel = createConvPreparedModel(testOperand);
    auto deviceBuffer = allocateBuffer(preparedModel, {0}, {0},
                                       utils::toSigned(kTestOperand.dimensions).value());
    if (deviceBuffer.buffer == nullptr) return;

    // Use an incompatible dimension, and size the shared memory to match it so
    // that the failure is attributable to the dimensions, not the length.
    auto badDimensions = utils::toSigned(kTestOperand.dimensions).value();
    badDimensions[0] = 2;
    const uint32_t badTestOperandDataSize = kTestOperandDataSize * 2;

    RequestMemoryPool shmPool = createSharedMemoryPool(badTestOperandDataSize);
    RequestMemoryPool devPool = createDeviceMemoryPool(deviceBuffer.token);
    RequestArgument shmArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = badTestOperandDataSize},
            .dimensions = badDimensions};
    RequestArgument devArg = {.location = {.poolIndex = 1}};
    RequestArgument devArgBadDimensions = {.location = {.poolIndex = 1},
                                           .dimensions = badDimensions};

    initializeDeviceMemory(deviceBuffer.buffer);
    // Device input with dimensions contradicting its allocation: rejected.
    testExecution(preparedModel,
                  {.inputs = {devArgBadDimensions},
                   .outputs = {shmArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Device output with contradicting dimensions: rejected.
    testExecution(preparedModel,
                  {.inputs = {shmArg},
                   .outputs = {devArgBadDimensions},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::INVALID_ARGUMENT);

    // Device output without explicit dimensions: fails at execution time
    // (GENERAL_FAILURE) rather than argument validation.
    testExecution(preparedModel,
                  {.inputs = {shmArg},
                   .outputs = {devArg},
                   .pools = createRequestMemoryPools(shmPool, devPool)},
                  ErrorStatus::GENERAL_FAILURE);
}
1217
// Execution paths exercised by MemoryDomainExecutionTest.
const auto kExecutorChoices = testing::Values(Executor::SYNC, Executor::BURST, Executor::FENCED);
1219
printMemoryDomainExecutionTest(const testing::TestParamInfo<MemoryDomainExecutionTestParam> & info)1220 std::string printMemoryDomainExecutionTest(
1221 const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
1222 const auto& [namedDevice, operandType, executor] = info.param;
1223 const std::string type = toString(static_cast<OperandType>(operandType));
1224 const std::string executorStr = toString(executor);
1225 return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + executorStr);
1226 }
1227
// Allow the suite to exist uninstantiated (e.g. when no NN devices are
// present on the target) without gtest flagging it as an error.
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemoryDomainExecutionTest);
// Instantiate the execution tests over every (device, operand type, executor)
// combination.
INSTANTIATE_TEST_SUITE_P(TestMemoryDomain, MemoryDomainExecutionTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),
                                          kTestOperandTypeChoices, kExecutorChoices),
                         printMemoryDomainExecutionTest);
1233
1234 } // namespace aidl::android::hardware::neuralnetworks::vts::functional
1235