/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This test only tests internal APIs, and has dependencies on internal header
// files, including NN API HIDL definitions.
// It is not part of CTS.

#include "TestMemory.h"

#include "NeuralNetworksWrapper.h"
#include "Manager.h"
#include "Memory.h"

#include <android/sharedmem.h>
#include <gtest/gtest.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <cstring>
#include <fstream>
#include <string>

using WrapperCompilation = ::android::nn::wrapper::Compilation;
using WrapperExecution = ::android::nn::wrapper::Execution;
using WrapperMemory = ::android::nn::wrapper::Memory;
using WrapperModel = ::android::nn::wrapper::Model;
using WrapperOperandType = ::android::nn::wrapper::OperandType;
using WrapperResult = ::android::nn::wrapper::Result;
using WrapperType = ::android::nn::wrapper::Type;

namespace {

// Tests to ensure that various kinds of memory leaks do not occur.
//
// The fixture checks that no anonymous shared memory regions are leaked by
// comparing the count of /dev/ashmem mappings in SetUp and TearDown. This could
// break if the test or framework starts lazily instantiating something that
// creates a mapping - at that point the way the test works needs to be
// reinvestigated. The filename /dev/ashmem is a documented part of the Android
// kernel interface (see
// https://source.android.com/devices/architecture/kernel/reqs-interfaces).
//
// (We can also get very unlucky and mask a memory leak by unrelated unmapping
// somewhere else. This seems unlikely enough to not deal with.)
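//
// As an illustration (the values below are made up and the exact fields vary
// by kernel version), an ashmem-backed mapping shows up in /proc/self/maps
// roughly as:
//
//   7f9a2c000000-7f9a2c001000 rw-s 00000000 00:04 12345  /dev/ashmem/weights (deleted)
//
// The fixture only relies on the "/dev/ashmem" substring being present in the
// line, not on any other field.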
class MemoryLeakTest : public ::testing::Test {
protected:
    void SetUp() override;
    void TearDown() override;

private:
    size_t GetAshmemMappingsCount();

    size_t mStartingMapCount = 0;
};

void MemoryLeakTest::SetUp() {
    mStartingMapCount = GetAshmemMappingsCount();
}

void MemoryLeakTest::TearDown() {
    const size_t endingMapCount = GetAshmemMappingsCount();
    ASSERT_EQ(mStartingMapCount, endingMapCount);
}

size_t MemoryLeakTest::GetAshmemMappingsCount() {
    std::ifstream mappingsStream("/proc/self/maps");
    if (!mappingsStream.good()) {
        // errno is set by std::ifstream on Linux
        ADD_FAILURE() << "Failed to open /proc/self/maps: " << std::strerror(errno);
        return 0;
    }
    std::string line;
    size_t mapCount = 0;
    while (std::getline(mappingsStream, line)) {
        if (line.find("/dev/ashmem") != std::string::npos) {
            ++mapCount;
        }
    }
    return mapCount;
}

// As well as serving as a functional test for ASharedMemory, this also serves
// as a regression test for http://b/69685100 "RunTimePoolInfo leaks shared
// memory regions".
//
// TODO: test non-zero offset.
TEST_F(MemoryLeakTest, TestASharedMemory) {
    // Layout of where to place matrix2 and matrix3 in the memory we'll
    // allocate. We leave gaps to test that we don't assume contiguity.
    constexpr uint32_t offsetForMatrix2 = 20;
    constexpr uint32_t offsetForMatrix3 = offsetForMatrix2 + sizeof(matrix2) + 30;
    constexpr uint32_t weightsSize = offsetForMatrix3 + sizeof(matrix3) + 60;
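    //
    // A sketch of the layout implied by the constants above:
    //   20-byte gap, matrix2, 30-byte gap, matrix3, 60-byte gap.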

    int weightsFd = ASharedMemory_create("weights", weightsSize);
    ASSERT_GT(weightsFd, -1);
    uint8_t* weightsData = (uint8_t*)mmap(nullptr, weightsSize, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, weightsFd, 0);
    ASSERT_NE(weightsData, MAP_FAILED);
    memcpy(weightsData + offsetForMatrix2, matrix2, sizeof(matrix2));
    memcpy(weightsData + offsetForMatrix3, matrix3, sizeof(matrix3));
    WrapperMemory weights(weightsSize, PROT_READ | PROT_WRITE, weightsFd, 0);
    ASSERT_TRUE(weights.isValid());

    WrapperModel model;
    WrapperOperandType matrixType(WrapperType::TENSOR_FLOAT32, {3, 4});
    WrapperOperandType scalarType(WrapperType::INT32, {});
    int32_t activation(0);
    auto a = model.addOperand(&matrixType);
    auto b = model.addOperand(&matrixType);
    auto c = model.addOperand(&matrixType);
    auto d = model.addOperand(&matrixType);
    auto e = model.addOperand(&matrixType);
    auto f = model.addOperand(&scalarType);

    model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));
    model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));
    model.setOperandValue(f, &activation, sizeof(activation));
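    // With the fused activation f set to "none", the two ADD operations below
    // compute b = a + c and d = b + e; since a holds matrix3, e holds matrix2,
    // and c is the model input, the output d is input + matrix2 + matrix3,
    // which is compared against expected3 at the end of the test.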
    model.addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
    model.addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
    model.identifyInputsAndOutputs({c}, {d});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Test the two-node model.
    constexpr uint32_t offsetForMatrix1 = 20;
    constexpr size_t inputSize = offsetForMatrix1 + sizeof(Matrix3x4);
    int inputFd = ASharedMemory_create("input", inputSize);
    ASSERT_GT(inputFd, -1);
    uint8_t* inputData = (uint8_t*)mmap(nullptr, inputSize,
                                        PROT_READ | PROT_WRITE, MAP_SHARED, inputFd, 0);
    ASSERT_NE(inputData, MAP_FAILED);
    memcpy(inputData + offsetForMatrix1, matrix1, sizeof(Matrix3x4));
    WrapperMemory input(inputSize, PROT_READ, inputFd, 0);
    ASSERT_TRUE(input.isValid());

    constexpr uint32_t offsetForActual = 32;
    constexpr size_t outputSize = offsetForActual + sizeof(Matrix3x4);
    int outputFd = ASharedMemory_create("output", outputSize);
    ASSERT_GT(outputFd, -1);
    uint8_t* outputData = (uint8_t*)mmap(nullptr, outputSize,
                                         PROT_READ | PROT_WRITE, MAP_SHARED, outputFd, 0);
    ASSERT_NE(outputData, MAP_FAILED);
    memset(outputData, 0, outputSize);
    WrapperMemory actual(outputSize, PROT_READ | PROT_WRITE, outputFd, 0);
    ASSERT_TRUE(actual.isValid());

    WrapperCompilation compilation2(&model);
    ASSERT_EQ(compilation2.finish(), WrapperResult::NO_ERROR);

    WrapperExecution execution2(&compilation2);
    ASSERT_EQ(execution2.setInputFromMemory(0, &input, offsetForMatrix1, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.setOutputFromMemory(0, &actual, offsetForActual, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected3,
                              *reinterpret_cast<Matrix3x4*>(outputData + offsetForActual)), 0);

    munmap(weightsData, weightsSize);
    munmap(inputData, inputSize);
    munmap(outputData, outputSize);
    close(weightsFd);
    close(inputFd);
    close(outputFd);
}

// Regression test for http://b/69621433 "MemoryFd leaks shared memory regions".
TEST_F(MemoryLeakTest, GetPointer) {
    static const size_t size = 1;

    int fd = ASharedMemory_create(nullptr, size);
    ASSERT_GE(fd, 0);

    uint8_t* buf = (uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    ASSERT_NE(buf, MAP_FAILED);
    *buf = 0;

    {
        // Scope "mem" in such a way that any shared memory regions it
        // owns will be released before we check the value of *buf: we
        // want to verify that the explicit mmap() above is not
        // perturbed by any mmap()/munmap() that results from methods
        // invoked on "mem".

        WrapperMemory mem(size, PROT_READ | PROT_WRITE, fd, 0);
        ASSERT_TRUE(mem.isValid());

        auto internalMem = reinterpret_cast<::android::nn::Memory*>(mem.get());
        uint8_t* dummy;
        ASSERT_EQ(internalMem->getPointer(&dummy), ANEURALNETWORKS_NO_ERROR);
        (*dummy)++;
    }

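    // The increment performed through the Memory object's internal mapping must
    // be visible through the explicit mapping above, since both refer to the
    // same shared memory region.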
    ASSERT_EQ(*buf, (uint8_t)1);
    ASSERT_EQ(munmap(buf, size), 0);

    close(fd);
}

// Regression test for http://b/69621433 "MemoryFd leaks shared memory regions".
TEST_F(MemoryLeakTest, Instantiate) {
    static const size_t size = 1;
    int fd = ASharedMemory_create(nullptr, size);
    ASSERT_GE(fd, 0);
    WrapperMemory mem(size, PROT_READ | PROT_WRITE, fd, 0);
    ASSERT_TRUE(mem.isValid());

    auto internalMem = reinterpret_cast<::android::nn::Memory*>(mem.get());
    uint8_t* dummy;
    ASSERT_EQ(internalMem->getPointer(&dummy), ANEURALNETWORKS_NO_ERROR);

    close(fd);
}

#ifndef NNTEST_ONLY_PUBLIC_API
// Regression test for http://b/73663843, conv_2d trying to allocate too much memory.
TEST_F(MemoryLeakTest, convTooLarge) {
    android::nn::DeviceManager::get()->setUseCpuOnly(true);
    WrapperModel model;

    // This kernel/input size will make convQuant8 attempt to allocate
    // 12 * 13 * 13 * 128 * 92 * 92 bytes, which is just outside the signed int
    // range (0x82F56000); this will fail due to CPU implementation limitations.
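    //
    // For reference, with one byte per quantized element:
    //   12 * 13 * 13 * 128 * 92 * 92 = 2,197,118,976 = 0x82F56000,
    // which exceeds INT_MAX (2,147,483,647 = 0x7FFFFFFF).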
    WrapperOperandType type3(WrapperType::INT32, {});
    WrapperOperandType type2(WrapperType::TENSOR_INT32, {128}, 0.25, 0);
    WrapperOperandType type0(WrapperType::TENSOR_QUANT8_ASYMM, {12, 104, 104, 128}, 0.5, 0);
    WrapperOperandType type4(WrapperType::TENSOR_QUANT8_ASYMM, {12, 92, 92, 128}, 1.0, 0);
    WrapperOperandType type1(WrapperType::TENSOR_QUANT8_ASYMM, {128, 13, 13, 128}, 0.5, 0);
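    // For reference: with the explicit padding of 0 and stride of 1 set below,
    // the convolution's output spatial size is (104 - 13) / 1 + 1 = 92 in each
    // dimension, matching the {12, 92, 92, 128} output shape declared above.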

    // Operands
    auto op1 = model.addOperand(&type0);
    auto op2 = model.addOperand(&type1);
    auto op3 = model.addOperand(&type2);
    auto pad0 = model.addOperand(&type3);
    auto act = model.addOperand(&type3);
    auto stride = model.addOperand(&type3);
    auto op4 = model.addOperand(&type4);

    // Operations
    // Kept static so the ~2.7 MB zeroed weight buffer does not live on the stack.
    static uint8_t op2_init[128 * 13 * 13 * 128] = {};
    model.setOperandValue(op2, op2_init, sizeof(op2_init));
    int32_t op3_init[128] = {};
    model.setOperandValue(op3, op3_init, sizeof(op3_init));
    int32_t pad0_init[] = {0};
    model.setOperandValue(pad0, pad0_init, sizeof(pad0_init));
    int32_t act_init[] = {0};
    model.setOperandValue(act, act_init, sizeof(act_init));
    int32_t stride_init[] = {1};
    model.setOperandValue(stride, stride_init, sizeof(stride_init));
    model.addOperation(ANEURALNETWORKS_CONV_2D,
                       {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});

    // Inputs and outputs
    model.identifyInputsAndOutputs({op1}, {op4});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Compilation
    WrapperCompilation compilation(&model);
    ASSERT_EQ(WrapperResult::NO_ERROR, compilation.finish());
    WrapperExecution execution(&compilation);

    // Set input and output
    static uint8_t input[12 * 104 * 104 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setInput(0, input, sizeof(input)));
    static uint8_t output[12 * 92 * 92 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setOutput(0, output, sizeof(output)));

    // This shouldn't segfault
    WrapperResult r = execution.compute();

    ASSERT_EQ(WrapperResult::OP_FAILED, r);
}
#endif  // NNTEST_ONLY_PUBLIC_API

}  // namespace