/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestUtils.h"

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/mapped_file.h>
#include <android-base/unique_fd.h>
#include <android/sharedmem.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "TestHarness.h"

#ifdef NNTEST_SLTS
#include "SupportLibraryWrapper.h"
#else
#include "TestNeuralNetworksWrapper.h"
#endif

namespace android::nn::generated_tests {
using namespace test_wrapper;
using namespace test_helper;

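// Converts a TestOperand description into the test wrapper's OperandType. When dynamic output
// shapes are being tested, subgraph output dimensions are zeroed out so the runtime has to
// deduce them at execution time.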
static OperandType getOperandType(const TestOperand& op, bool testDynamicOutputShape) {
    auto dims = op.dimensions;
    if (testDynamicOutputShape && op.lifetime == TestOperandLifeTime::SUBGRAPH_OUTPUT) {
        dims.assign(dims.size(), 0);
    }
    if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        return OperandType(
                static_cast<Type>(op.type), dims,
                SymmPerChannelQuantParams(op.channelQuant.scales, op.channelQuant.channelDim));
    } else {
        return OperandType(static_cast<Type>(op.type), dims, op.scale, op.zeroPoint);
    }
}

// A Memory object that keeps a CPU-accessible mapping of its backing storage.
class MemoryWithPointer : public Memory {
   public:
#ifdef NNTEST_SLTS
    static std::unique_ptr<MemoryWithPointer> create(const NnApiSupportLibrary* nnapi,
                                                     uint32_t size) {
#else   // NNTEST_SLTS
    static std::unique_ptr<MemoryWithPointer> create(uint32_t size) {
#endif  // NNTEST_SLTS

        CHECK_GT(size, 0u);
#ifdef __ANDROID__
        auto fd = base::unique_fd(ASharedMemory_create(nullptr, size));
#else   // __ANDROID__
        TemporaryFile tmpFile;
        base::unique_fd fd(tmpFile.release());
        CHECK_EQ(ftruncate(fd.get(), size), 0);
#endif  // __ANDROID__
        EXPECT_TRUE(fd.ok());
        const int protect = PROT_READ | PROT_WRITE;
        const size_t offset = 0;
        auto mapping = base::MappedFile::FromFd(fd.get(), offset, size, protect);
        EXPECT_NE(mapping, nullptr);

#ifdef NNTEST_SLTS
        return std::unique_ptr<MemoryWithPointer>(
                new MemoryWithPointer(nnapi, size, protect, fd.get(), offset, std::move(mapping)));
#else   // NNTEST_SLTS
        return std::unique_ptr<MemoryWithPointer>(
                new MemoryWithPointer(size, protect, fd.get(), offset, std::move(mapping)));
#endif  // NNTEST_SLTS
    }

    uint8_t* getPointer() const { return reinterpret_cast<uint8_t*>(mMapping->data()); }

   private:
#ifdef NNTEST_SLTS
    MemoryWithPointer(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd,
                      size_t offset, std::unique_ptr<base::MappedFile> mapping)
        : Memory(nnapi, size, protect, fd, offset), mMapping(std::move(mapping)) {}
#else
    MemoryWithPointer(size_t size, int protect, int fd, size_t offset,
                      std::unique_ptr<base::MappedFile> mapping)
        : Memory(size, protect, fd, offset), mMapping(std::move(mapping)) {}
#endif

    std::unique_ptr<base::MappedFile> mMapping;
};

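// Allocates one shared memory pool large enough to hold the data of every CONSTANT_REFERENCE
// operand in the model (the main subgraph and all referenced subgraphs), or returns nullptr if
// the model has no such operands.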
#ifdef NNTEST_SLTS
static std::unique_ptr<MemoryWithPointer> createConstantReferenceMemory(
        const NnApiSupportLibrary* nnapi, const TestModel& testModel) {
#else
static std::unique_ptr<MemoryWithPointer> createConstantReferenceMemory(
        const TestModel& testModel) {
#endif
    uint32_t size = 0;

    auto processSubgraph = [&size](const TestSubgraph& subgraph) {
        for (const TestOperand& operand : subgraph.operands) {
            if (operand.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                size += operand.data.alignedSize();
            }
        }
    };

    processSubgraph(testModel.main);
    for (const TestSubgraph& subgraph : testModel.referenced) {
        processSubgraph(subgraph);
    }
#ifdef NNTEST_SLTS
    return size == 0 ? nullptr : MemoryWithPointer::create(nnapi, size);
#else
    return size == 0 ? nullptr : MemoryWithPointer::create(size);
#endif
}

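// Builds a test wrapper Model from a TestSubgraph. Constant-reference operand data is copied
// into the shared memory pool at *memoryOffset, and SUBGRAPH operands recursively build (and
// finish) the referenced models in refModels before being attached with
// setOperandValueFromModel.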
static void createModelFromSubgraph(const TestSubgraph& subgraph, bool testDynamicOutputShape,
                                    const std::vector<TestSubgraph>& refSubgraphs,
                                    const std::unique_ptr<MemoryWithPointer>& memory,
                                    uint32_t* memoryOffset, Model* model, Model* refModels) {
    // Operands.
    for (const auto& operand : subgraph.operands) {
        auto type = getOperandType(operand, testDynamicOutputShape);
        auto index = model->addOperand(&type);

        switch (operand.lifetime) {
            case TestOperandLifeTime::CONSTANT_COPY: {
                model->setOperandValue(index, operand.data.get<void>(), operand.data.size());
            } break;
            case TestOperandLifeTime::CONSTANT_REFERENCE: {
                const uint32_t length = operand.data.size();
                std::memcpy(memory->getPointer() + *memoryOffset, operand.data.get<void>(), length);
                model->setOperandValueFromMemory(index, memory.get(), *memoryOffset, length);
                *memoryOffset += operand.data.alignedSize();
            } break;
            case TestOperandLifeTime::NO_VALUE: {
                model->setOperandValue(index, nullptr, 0);
            } break;
            case TestOperandLifeTime::SUBGRAPH: {
                uint32_t refIndex = *operand.data.get<uint32_t>();
                CHECK_LT(refIndex, refSubgraphs.size());
                const TestSubgraph& refSubgraph = refSubgraphs[refIndex];
                Model* refModel = &refModels[refIndex];

                if (!refModel->isFinished()) {
                    createModelFromSubgraph(refSubgraph, testDynamicOutputShape, refSubgraphs,
                                            memory, memoryOffset, refModel, refModels);
                    ASSERT_EQ(refModel->finish(), Result::NO_ERROR);
                    ASSERT_TRUE(refModel->isValid());
                }
                model->setOperandValueFromModel(index, refModel);
            } break;
            case TestOperandLifeTime::SUBGRAPH_INPUT:
            case TestOperandLifeTime::SUBGRAPH_OUTPUT:
            case TestOperandLifeTime::TEMPORARY_VARIABLE: {
                // Nothing to do here.
            } break;
        }
    }

    // Operations.
    for (const auto& operation : subgraph.operations) {
        model->addOperation(static_cast<int>(operation.type), operation.inputs, operation.outputs);
    }

    // Inputs and outputs.
    model->identifyInputsAndOutputs(subgraph.inputIndexes, subgraph.outputIndexes);
}

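// Populates a GeneratedModel (and its referenced models) from a TestModel description. The main
// model is left unfinished so the caller can still adjust it before calling finish().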
#ifdef NNTEST_SLTS
void createModel(const NnApiSupportLibrary* nnapi, const TestModel& testModel,
                 bool testDynamicOutputShape, GeneratedModel* model) {
#else
void createModel(const TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model) {
#endif
    ASSERT_NE(nullptr, model);

#ifdef NNTEST_SLTS
    std::unique_ptr<MemoryWithPointer> memory = createConstantReferenceMemory(nnapi, testModel);
#else
    std::unique_ptr<MemoryWithPointer> memory = createConstantReferenceMemory(testModel);
#endif
    uint32_t memoryOffset = 0;
#ifdef NNTEST_SLTS
    std::vector<Model> refModels;
    refModels.reserve(testModel.referenced.size());
    for (size_t i = 0; i < testModel.referenced.size(); ++i) {
        refModels.push_back(Model(nnapi));
    }
#else
    std::vector<Model> refModels(testModel.referenced.size());
#endif
    createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, memory,
                            &memoryOffset, model, refModels.data());
    model->setRefModels(std::move(refModels));
    model->setConstantReferenceMemory(std::move(memory));

    // Relaxed computation.
    model->relaxComputationFloat32toFloat16(testModel.isRelaxed);

    if (!testModel.expectFailure) {
        ASSERT_TRUE(model->isValid());
    }
}

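// Binds the test model's input data and freshly allocated output buffers to an Execution. The
// output TestBuffers are returned through `outputs` so the caller can compare them against the
// expected results after the execution completes.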
void createRequest(const TestModel& testModel, Execution* execution,
                   std::vector<TestBuffer>* outputs) {
    ASSERT_NE(nullptr, execution);
    ASSERT_NE(nullptr, outputs);

    // Model inputs.
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
        ASSERT_EQ(Result::NO_ERROR,
                  execution->setInput(i, operand.data.get<void>(), operand.data.size()));
    }

    // Model outputs.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];

        // In the case of zero-sized output, we should at least provide a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the runtime, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as model output. Otherwise, we will have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);

        outputs->emplace_back(bufferSize);
        ASSERT_EQ(Result::NO_ERROR,
                  execution->setOutput(i, outputs->back().getMutable<void>(), bufferSize));
    }
}
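
// A rough usage sketch (non-SLTS build), assuming the test_wrapper Compilation/Execution types
// used elsewhere in these tests; the exact driver loop lives in the generated test harness:
//
//     GeneratedModel model;
//     createModel(testModel, /*testDynamicOutputShape=*/false, &model);
//     ASSERT_EQ(model.finish(), Result::NO_ERROR);
//     Compilation compilation(&model);
//     ASSERT_EQ(compilation.finish(), Result::NO_ERROR);
//     Execution execution(&compilation);
//     std::vector<TestBuffer> outputs;
//     createRequest(testModel, &execution, &outputs);
//     ASSERT_EQ(execution.compute(), Result::NO_ERROR);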

}  // namespace android::nn::generated_tests