/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <HalInterfaces.h>
#include <SampleDriver.h>
#include <SampleDriverFull.h>
#include <gtest/gtest.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/hal/1.2/Device.h>

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "HalUtils.h"
#include "Manager.h"
#include "Memory.h"
#include "TestNeuralNetworksWrapper.h"
#include "TestUtils.h"

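// Blob AHardwareBuffers only exist on Android; host builds are expected to fall back
// to ashmem instead. The isAhwbBlob() expectations below depend on this distinction.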
#ifdef __ANDROID__
constexpr bool kRunningOnAndroid = true;
#else   // __ANDROID__
constexpr bool kRunningOnAndroid = false;
#endif  // __ANDROID__

using namespace android::nn;
namespace hardware = android::hardware;
using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
using android::sp;
using android::nn::isAhwbBlob;

namespace {

// A test buffer that does nothing: every copy operation fails with DEVICE_UNAVAILABLE.
class TestBuffer : public V1_3::IBuffer {
  public:
    hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory&) override {
        return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
    }
    hardware::Return<V1_3::ErrorStatus> copyFrom(const hardware::hidl_memory&,
                                                 const hardware::hidl_vec<uint32_t>&) override {
        return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
    }
};

enum class AllocateReturn { OK, BAD_TOKEN, BAD_IBUFFER, BAD_STATUS, NOT_SUPPORTED };

// Print AllocateReturn enum for better GTEST failure messages.
std::ostream& operator<<(std::ostream& os, AllocateReturn allocateReturn) {
    switch (allocateReturn) {
        case AllocateReturn::OK:
            return os << "OK";
        case AllocateReturn::BAD_IBUFFER:
            return os << "BAD_IBUFFER";
        case AllocateReturn::BAD_TOKEN:
            return os << "BAD_TOKEN";
        case AllocateReturn::BAD_STATUS:
            return os << "BAD_STATUS";
        case AllocateReturn::NOT_SUPPORTED:
            return os << "NOT_SUPPORTED";
    }
    LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(allocateReturn);
    return os;
}

class TestDriverLatest : public sample_driver::SampleDriver {
  public:
    TestDriverLatest(const char* name, std::set<V1_3::OperationType> supportedOperations,
                     AllocateReturn allocateReturn)
        : SampleDriver(name),
          kSupportedOperations(std::move(supportedOperations)),
          kAllocateReturn(allocateReturn) {}

    hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
        android::nn::initVLogMask();
        // Report better performance than the CPU so that this driver is preferred.
        const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
        const V1_3::Capabilities capabilities = {
                .relaxedFloat32toFloat16PerformanceScalar = kPerf,
                .relaxedFloat32toFloat16PerformanceTensor = kPerf,
                .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
                .ifPerformance = kPerf,
                .whilePerformance = kPerf};
        cb(V1_3::ErrorStatus::NONE, capabilities);
        return hardware::Void();
    }

    hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
                                                      getSupportedOperations_1_3_cb cb) override {
        // The tests will never use a referenced model.
        CHECK(model.referenced.size() == 0);
        std::vector<bool> supported(model.main.operations.size(), false);
        std::transform(model.main.operations.begin(), model.main.operations.end(),
                       supported.begin(), [this](const V1_3::Operation& op) {
                           return kSupportedOperations.count(op.type) > 0;
                       });
        cb(V1_3::ErrorStatus::NONE, supported);
        return hardware::Void();
    }

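    // Emulates the possible outcomes of IDevice::allocate(). A conforming driver returns
    // ErrorStatus::NONE together with a non-null IBuffer and a non-zero token on success;
    // each BAD_* mode below violates one of these requirements so that the runtime's
    // handling of malformed driver responses can be exercised.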
    hardware::Return<void> allocate(const V1_3::BufferDesc&,
                                    const hardware::hidl_vec<sp<V1_3::IPreparedModel>>&,
                                    const hardware::hidl_vec<V1_3::BufferRole>&,
                                    const hardware::hidl_vec<V1_3::BufferRole>&,
                                    allocate_cb cb) override {
        switch (kAllocateReturn) {
            case AllocateReturn::OK:
                cb(V1_3::ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
                return hardware::Void();
            case AllocateReturn::BAD_IBUFFER:
                cb(V1_3::ErrorStatus::NONE, nullptr, mValidBufferToken++);
                return hardware::Void();
            case AllocateReturn::BAD_TOKEN:
                cb(V1_3::ErrorStatus::NONE, new TestBuffer(), 0);
                return hardware::Void();
            case AllocateReturn::BAD_STATUS:
                cb(V1_3::ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
                return hardware::Void();
            case AllocateReturn::NOT_SUPPORTED:
                cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
                return hardware::Void();
        }
        LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn);
        return hardware::Void();
    }

  private:
    const std::set<V1_3::OperationType> kSupportedOperations;
    const AllocateReturn kAllocateReturn;
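    // Valid tokens start at 1; the HAL reserves a token of 0 for failed allocations.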
    uint32_t mValidBufferToken = 1;
};

// Create the following model for test.
//
//     input0 ---+
//               +--- ADD ---> output0 ---+
//     input1 ---+                        +--- MUL ---> output1 (dynamic shape)
//               +--- SUB ---> temp ------+
//     input2 ---+
//
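// Note that input1 feeds both ADD and SUB, and output0/temp both feed MUL. When the
// three operations are partitioned onto different drivers, those operands cross
// partition boundaries, so the runtime cannot allocate them on a single device
// (exercised by the MultiplePartitions test below).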
void createTestModel(test_wrapper::Model* model) {
    test_wrapper::OperandType tensorTypeFullySpecified(Type::TENSOR_FLOAT32, {1});
    test_wrapper::OperandType tensorTypeDynamicShape(Type::TENSOR_FLOAT32, {0});
    test_wrapper::OperandType actType(Type::INT32, {});
    uint32_t input0 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t input1 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t input2 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t temp = model->addOperand(&tensorTypeFullySpecified);
    uint32_t output0 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t output1 = model->addOperand(&tensorTypeDynamicShape);
    uint32_t act = model->addOperand(&actType);
    int32_t activation = 0;
    model->setOperandValue(act, &activation, sizeof(int32_t));
    model->addOperation(ANEURALNETWORKS_ADD, {input0, input1, act}, {output0});
    model->addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp});
    model->addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1});
    model->identifyInputsAndOutputs({input0, input1, input2}, {output0, output1});
    EXPECT_EQ(model->finish(), WrapperResult::NO_ERROR);
}

class MemoryDomainTestBase : public ::testing::Test {
  protected:
    void SetUp() override {
        ::testing::Test::SetUp();
        if (DeviceManager::get()->getUseCpuOnly()) {
            GTEST_SKIP();
        }
        createTestModel(&mModel);
        // Clear the device list.
        DeviceManager::get()->forTest_setDevices({});
    }

    void TearDown() override {
        DeviceManager::get()->forTest_reInitializeDeviceList();
        ::testing::Test::TearDown();
    }

    // If "deviceNames" is not empty, the compilation is created with an explicit device list;
    // otherwise, it is created normally.
    test_wrapper::Compilation createCompilation(const std::vector<std::string>& deviceNames) {
        test_wrapper::Compilation compilation;
        if (!deviceNames.empty()) {
            // Map device names to ANeuralNetworksDevice.
            std::map<std::string, ANeuralNetworksDevice*> deviceMap;
            uint32_t numDevices = 0;
            EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
            for (uint32_t i = 0; i < numDevices; i++) {
                ANeuralNetworksDevice* device = nullptr;
                const char* name = nullptr;
                EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
                EXPECT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
                deviceMap.emplace(name, device);
            }
            std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size());
            std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(),
                           [&deviceMap](const std::string& name) { return deviceMap.at(name); });
            WrapperResult result;
            std::tie(result, compilation) =
                    test_wrapper::Compilation::createForDevices(&mModel, devices);
            EXPECT_EQ(result, WrapperResult::NO_ERROR);
        } else {
            compilation = test_wrapper::Compilation(&mModel);
        }
        EXPECT_EQ(compilation.finish(), WrapperResult::NO_ERROR);
        return compilation;
    }

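    // Builds an ANeuralNetworksMemoryDesc whose input/output roles reference the given
    // indexes of the compilation (each with frequency 1.0f), then tries to allocate a
    // device memory from the finished descriptor. Returns the status code of
    // ANeuralNetworksMemory_createFromDesc along with the wrapped memory, e.g.
    //   auto [n, memory] = allocateDeviceMemory(compilation, /*inputIndexes=*/{0}, {});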
    std::pair<int, test_wrapper::Memory> allocateDeviceMemory(
            const test_wrapper::Compilation& compilation,
            const std::vector<uint32_t>& inputIndexes,
            const std::vector<uint32_t>& outputIndexes) {
        const auto* annCompilation = compilation.getHandle();
        ANeuralNetworksMemoryDesc* desc = nullptr;
        EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
        for (uint32_t index : inputIndexes) {
            EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, annCompilation, index, 1.0f),
                      ANEURALNETWORKS_NO_ERROR);
        }
        for (uint32_t index : outputIndexes) {
            EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, annCompilation, index, 1.0f),
                      ANEURALNETWORKS_NO_ERROR);
        }
        EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);

        ANeuralNetworksMemory* memory;
        int n = ANeuralNetworksMemory_createFromDesc(desc, &memory);
        ANeuralNetworksMemoryDesc_free(desc);
        return {n, test_wrapper::Memory(memory)};
    }

    test_wrapper::Model mModel;
};

// Test memory domain with the following parameters:
// - If true, use a V1_2 driver; otherwise, use the latest version;
// - If true, compile with an explicit device list; otherwise, compile in the default way;
// - The return of the allocate function.
using MemoryDomainTestParam = std::tuple<bool, bool, AllocateReturn>;

class MemoryDomainTest : public MemoryDomainTestBase,
                         public ::testing::WithParamInterface<MemoryDomainTestParam> {
  protected:
    // If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED.
    void createAndRegisterDriver(const char* name,
                                 std::set<V1_3::OperationType> supportedOperations,
                                 AllocateReturn allocateReturn) {
        if (kUseV1_2Driver) {
            CHECK(allocateReturn == AllocateReturn::NOT_SUPPORTED);
            const sp<TestDriverLatest> testDriver =
                    new TestDriverLatest(name, supportedOperations, AllocateReturn::NOT_SUPPORTED);
            DeviceManager::get()->forTest_registerDevice(
                    V1_2::utils::Device::create(name, testDriver).value());
        } else {
            DeviceManager::get()->forTest_registerDevice(makeSharedDevice(
                    name,
                    new TestDriverLatest(name, std::move(supportedOperations), allocateReturn)));
        }
    }

    // If not kCompileWithExplicitDeviceList, the input argument "deviceNames" is ignored.
    test_wrapper::Compilation createCompilation(const std::vector<std::string>& deviceNames) {
        if (kCompileWithExplicitDeviceList) {
            return MemoryDomainTestBase::createCompilation(deviceNames);
        } else {
            return MemoryDomainTestBase::createCompilation({});
        }
    }

    const bool kUseV1_2Driver = std::get<0>(GetParam());
    const bool kCompileWithExplicitDeviceList = std::get<1>(GetParam());
    const AllocateReturn kAllocateReturn = std::get<2>(GetParam());
};

// Test device memory allocation on a compilation with only a single partition.
TEST_P(MemoryDomainTest, SinglePartition) {
    createAndRegisterDriver(
            "test_driver",
            {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
            kAllocateReturn);
    auto compilation = createCompilation({"test_driver"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    auto [n, memory] = allocateDeviceMemory(compilation, {0}, {0});
    if (kAllocateReturn == AllocateReturn::OK) {
        // The memory should be backed by the IBuffer returned from the driver.
        ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
        const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
        ASSERT_NE(m, nullptr);
        EXPECT_NE(m->getIBuffer(), nullptr);
    } else {
        if (kCompileWithExplicitDeviceList) {
            // Should not fall back when compiled with an explicit device list.
            ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
        } else {
            // The memory should fall back to ashmem or a blob AHWB based on the driver version.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_EQ(m->getIBuffer(), nullptr);
            const auto& memory = m->getMemory();
            EXPECT_TRUE(validate(memory).ok());
            if (kUseV1_2Driver) {
                EXPECT_FALSE(isAhwbBlob(memory));
            } else {
                EXPECT_EQ(isAhwbBlob(memory), kRunningOnAndroid);
            }
        }
    }
}

// Test device memory allocation on a compilation with multiple partitions.
TEST_P(MemoryDomainTest, MultiplePartitions) {
    createAndRegisterDriver("test_driver_add", {V1_3::OperationType::ADD}, kAllocateReturn);
    createAndRegisterDriver("test_driver_sub", {V1_3::OperationType::SUB}, kAllocateReturn);
    createAndRegisterDriver("test_driver_mul", {V1_3::OperationType::MUL}, kAllocateReturn);
    auto compilation =
            createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    {
        // input0 is used in only a single partition.
        auto [n, memory] = allocateDeviceMemory(compilation, {0}, {});
        if (kAllocateReturn == AllocateReturn::OK) {
            // The memory should be backed by the IBuffer returned from the driver.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_NE(m->getIBuffer(), nullptr);
        } else {
            if (kCompileWithExplicitDeviceList) {
                // Should not fall back when compiled with an explicit device list.
                ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
            } else {
                // The memory should fall back to ashmem or a blob AHWB based on the driver
                // version.
                ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
                const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
                ASSERT_NE(m, nullptr);
                EXPECT_EQ(m->getIBuffer(), nullptr);
                const auto& memory = m->getMemory();
                EXPECT_TRUE(validate(memory).ok());
                if (kUseV1_2Driver) {
                    EXPECT_FALSE(isAhwbBlob(memory));
                } else {
                    EXPECT_EQ(isAhwbBlob(memory), kRunningOnAndroid);
                }
            }
        }
    }

    {
        // input1 is shared by two partitions with different drivers, so the runtime will not
        // attempt to allocate on device.
        auto [n, memory] = allocateDeviceMemory(compilation, {1}, {});
        if (kCompileWithExplicitDeviceList) {
            // Should not fall back when compiled with an explicit device list.
            ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
        } else {
            // The memory should fall back to ashmem or a blob AHWB based on the driver version.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_EQ(m->getIBuffer(), nullptr);
            const auto& memory = m->getMemory();
            EXPECT_TRUE(validate(memory).ok());
            if (kUseV1_2Driver) {
                EXPECT_FALSE(isAhwbBlob(memory));
            } else {
                EXPECT_EQ(isAhwbBlob(memory), kRunningOnAndroid);
            }
        }
    }

    {
        // output0 is shared by two partitions with different drivers, so the runtime will not
        // attempt to allocate on device.
        auto [n, memory] = allocateDeviceMemory(compilation, {}, {0});
        if (kCompileWithExplicitDeviceList) {
            // Should not fall back when compiled with an explicit device list.
            ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
        } else {
            // The memory should fall back to ashmem or a blob AHWB based on the driver version.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_EQ(m->getIBuffer(), nullptr);
            const auto& memory = m->getMemory();
            EXPECT_TRUE(validate(memory).ok());
            if (kUseV1_2Driver) {
                EXPECT_FALSE(isAhwbBlob(memory));
            } else {
                EXPECT_EQ(isAhwbBlob(memory), kRunningOnAndroid);
            }
        }
    }
}

// Test device memory allocation with dynamic shape.
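// Unlike the fully specified cases, there is no ashmem/blob-AHWB fallback here: the
// fallback allocation requires a concrete byte size, which a dynamic-shaped operand
// cannot provide at allocation time.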
TEST_P(MemoryDomainTest, DynamicShape) {
    createAndRegisterDriver(
            "test_driver",
            {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
            kAllocateReturn);
    auto compilation = createCompilation({"test_driver"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    auto [n, memory] = allocateDeviceMemory(compilation, {}, {1});
    if (kAllocateReturn == AllocateReturn::OK) {
        // The memory should be backed by the IBuffer returned from the driver.
        ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
        const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
        ASSERT_NE(m, nullptr);
        EXPECT_NE(m->getIBuffer(), nullptr);
    } else {
        // We do not fall back in the case of dynamic shape.
        ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
    }
}

static const auto kAllocateReturnChoices =
        testing::Values(AllocateReturn::OK, AllocateReturn::BAD_TOKEN, AllocateReturn::BAD_IBUFFER,
                        AllocateReturn::BAD_STATUS, AllocateReturn::NOT_SUPPORTED);

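// Memory domains were introduced in HAL version 1.3, so a V1_2 driver can only report
// allocate() as not supported; the full set of allocate() outcomes is only meaningful
// for the latest driver version.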
INSTANTIATE_TEST_SUITE_P(DeviceVersionV1_2, MemoryDomainTest,
                         testing::Combine(testing::Values(true), testing::Bool(),
                                          testing::Values(AllocateReturn::NOT_SUPPORTED)));

INSTANTIATE_TEST_SUITE_P(DeviceVersionLatest, MemoryDomainTest,
                         testing::Combine(testing::Values(false), testing::Bool(),
                                          kAllocateReturnChoices));

class MemoryDomainCopyTest : public MemoryDomainTestBase {};

TEST_F(MemoryDomainCopyTest, MemoryCopyTest) {
    DeviceManager::get()->forTest_registerDevice(makeSharedDevice(
            "test_driver", new sample_driver::SampleDriverFull(
                                   "test_driver", {.execTime = 0.1f, .powerUsage = 0.1f})));
    auto compilation = createCompilation({"test_driver"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    // Allocate ashmem.
    const float initValue1 = 3.14f, initValue2 = 2.72f;
    auto ashmem1 = TestAshmem::createFrom(&initValue1, sizeof(float));
    auto ashmem2 = TestAshmem::createFrom(&initValue2, sizeof(float));
    ASSERT_NE(ashmem1, nullptr);
    ASSERT_NE(ashmem2, nullptr);

    // Allocate device memories.
    auto [n1, memory1] = allocateDeviceMemory(compilation, {0}, {});
    auto [n2, memory2] = allocateDeviceMemory(compilation, {0}, {});
    ASSERT_EQ(n1, ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(n2, ANEURALNETWORKS_NO_ERROR);

    // Test memory copying: ashmem1 -> memory1 -> memory2 -> ashmem2
    ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem1->get()->get(), memory1.get()),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksMemory_copy(memory1.get(), memory2.get()), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksMemory_copy(memory2.get(), ashmem2->get()->get()),
              ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(ashmem2->dataAs<float>()[0], initValue1);
}

}  // namespace