/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Device.h"

#include "Adapter.h"
#include "Buffer.h"
#include "PreparedModel.h"

#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
#include <aidl/android/hardware/neuralnetworks/Extension.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
#include <aidl/android/hardware/neuralnetworks/Model.h>
#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/binder_interface_utils.h>
#include <nnapi/IDevice.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/hal/aidl/Conversions.h>

#include <chrono>
#include <memory>
#include <string>
#include <vector>

namespace aidl::android::hardware::neuralnetworks::adapter {
namespace {

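// Converts an AIDL object to its canonical NNAPI counterpart, overriding any conversion failure's
// error code to INVALID_ARGUMENT so that malformed caller input is reported consistently.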
template <typename Type>
auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
    auto result = nn::convert(object);
    if (!result.has_value()) {
        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}

nn::Duration makeDuration(int64_t durationNs) {
    return nn::Duration(std::chrono::nanoseconds(durationNs));
}

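// Interprets the AIDL deadline field: -1 means "no deadline", any smaller value is rejected, and
// a non-negative value becomes a TimePoint at an offset of durationNs nanoseconds.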
nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
    if (durationNs < -1) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
    }
    return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
}

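// Copies a caller-provided cache token into the fixed-size nn::CacheToken, rejecting tokens whose
// length does not match.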
nn::GeneralResult<nn::CacheToken> convertCacheToken(const std::vector<uint8_t>& token) {
    nn::CacheToken nnToken;
    if (token.size() != nnToken.size()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid token";
    }
    std::copy(token.begin(), token.end(), nnToken.begin());
    return nnToken;
}

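// Recovers the canonical nn::IPreparedModel wrapped by an adapter PreparedModel. Only local
// (non-remote) binder objects created by this adapter can be safely downcast.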
nn::GeneralResult<nn::SharedPreparedModel> downcast(const IPreparedModelParcel& preparedModel) {
    if (preparedModel.preparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
    }
    if (preparedModel.preparedModel->isRemote()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
    }

    // This static_cast is safe because adapter::PreparedModel is the only class that implements
    // the IPreparedModel interface in the adapter service code.
    const auto* casted = static_cast<const PreparedModel*>(preparedModel.preparedModel.get());
    return casted->getUnderlyingPreparedModel();
}

nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
        const std::vector<IPreparedModelParcel>& preparedModels) {
    std::vector<nn::SharedPreparedModel> canonical;
    canonical.reserve(preparedModels.size());
    for (const auto& preparedModel : preparedModels) {
        canonical.push_back(NN_TRY(downcast(preparedModel)));
    }
    return canonical;
}

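// Converts the AIDL allocation request to canonical types, forwards it to the wrapped
// nn::IDevice, and repackages the resulting buffer as an AIDL DeviceBuffer together with its
// memory domain token.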
nn::GeneralResult<DeviceBuffer> allocate(const nn::IDevice& device, const BufferDesc& desc,
                                         const std::vector<IPreparedModelParcel>& preparedModels,
                                         const std::vector<BufferRole>& inputRoles,
                                         const std::vector<BufferRole>& outputRoles) {
    auto nnDesc = NN_TRY(convertInput(desc));
    auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
    auto nnInputRoles = NN_TRY(convertInput(inputRoles));
    auto nnOutputRoles = NN_TRY(convertInput(outputRoles));

    auto buffer = NN_TRY(device.allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));
    CHECK(buffer != nullptr);

    const nn::Request::MemoryDomainToken token = buffer->getToken();
    auto aidlBuffer = ndk::SharedRefBase::make<Buffer>(std::move(buffer));
    return DeviceBuffer{.buffer = std::move(aidlBuffer), .token = static_cast<int32_t>(token)};
}

nn::GeneralResult<std::vector<bool>> getSupportedOperations(const nn::IDevice& device,
                                                            const Model& model) {
    const auto nnModel = NN_TRY(convertInput(model));
    return device.getSupportedOperations(nnModel);
}

using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;

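// Wraps a canonical prepared model in the AIDL adapter class so it can be returned over binder.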
std::shared_ptr<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
    if (preparedModel == nullptr) {
        return nullptr;
    }
    return ndk::SharedRefBase::make<PreparedModel>(std::move(preparedModel));
}

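// Delivers the compilation outcome to the client via IPreparedModelCallback::notify, logging (but
// otherwise ignoring) any binder transport error.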
void notify(IPreparedModelCallback* callback, ErrorStatus status,
            const std::shared_ptr<IPreparedModel>& preparedModel) {
    if (callback != nullptr) {
        const auto ret = callback->notify(status, preparedModel);
        if (!ret.isOk()) {
            LOG(ERROR) << "IPreparedModelCallback::notify failed with " << ret.getDescription();
        }
    }
}

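// Unpacks a PrepareModelResult: on success wraps the prepared model for AIDL and reports NONE;
// on failure logs the message and reports the converted error status with a null model.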
void notify(IPreparedModelCallback* callback, PrepareModelResult result) {
    if (!result.has_value()) {
        const auto& [message, status] = result.error();
        LOG(ERROR) << message;
        const auto aidlCode = utils::convert(status).value_or(ErrorStatus::GENERAL_FAILURE);
        notify(callback, aidlCode, nullptr);
    } else {
        auto preparedModel = std::move(result).value();
        auto aidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
        notify(callback, ErrorStatus::NONE, std::move(aidlPreparedModel));
    }
}

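// Validates and converts the compilation request, then schedules the underlying device's
// prepareModel call on the provided Executor. The result is delivered asynchronously through the
// callback; only argument-conversion failures are returned synchronously.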
nn::GeneralResult<void> prepareModel(
        const nn::SharedDevice& device, const Executor& executor, const Model& model,
        ExecutionPreference preference, Priority priority, int64_t deadlineNs,
        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
        const std::vector<TokenValuePair>& hints,
        const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    auto nnModel = NN_TRY(convertInput(model));
    const auto nnPreference = NN_TRY(convertInput(preference));
    const auto nnPriority = NN_TRY(convertInput(priority));
    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = NN_TRY(convertCacheToken(token));
    auto nnHints = NN_TRY(convertInput(hints));
    auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));

    Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, nnHints = std::move(nnHints),
                 nnExtensionNameToPrefix = std::move(nnExtensionNameToPrefix), callback] {
        auto result =
                device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline, nnModelCache,
                                     nnDataCache, nnToken, nnHints, nnExtensionNameToPrefix);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), nnDeadline);

    return {};
}

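// Same flow as prepareModel above, but drives the cache-only compilation path
// (prepareModelFromCache), which takes no model, hints, or extension prefixes.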
nn::GeneralResult<void> prepareModelFromCache(
        const nn::SharedDevice& device, const Executor& executor, int64_t deadlineNs,
        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = NN_TRY(convertCacheToken(token));

    auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), nnDeadline);

    return {};
}

}  // namespace

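// Device adapts a canonical nn::IDevice so it can be served over the AIDL NNAPI HAL interface;
// compilation work is deferred to the injected Executor.
//
// A minimal usage sketch, assuming a hypothetical canonical device implementation (MyNnDevice),
// an inline executor, and a service instance suffix -- none of which are defined in this file --
// and that <android/binder_manager.h> is included for AServiceManager_addService:
//
//     auto nnDevice = std::make_shared<const MyNnDevice>();
//     Executor executor = [](Task task, nn::OptionalTimePoint /*deadline*/) { task(); };
//     auto aidlDevice = ndk::SharedRefBase::make<Device>(std::move(nnDevice), std::move(executor));
//     const std::string name = std::string(Device::descriptor) + "/example";
//     AServiceManager_addService(aidlDevice->asBinder().get(), name.c_str());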
Device::Device(::android::nn::SharedDevice device, Executor executor)
    : kDevice(std::move(device)), kExecutor(std::move(executor)) {
    CHECK(kDevice != nullptr);
    CHECK(kExecutor != nullptr);
}

ndk::ScopedAStatus Device::allocate(const BufferDesc& desc,
                                    const std::vector<IPreparedModelParcel>& preparedModels,
                                    const std::vector<BufferRole>& inputRoles,
                                    const std::vector<BufferRole>& outputRoles,
                                    DeviceBuffer* buffer) {
    auto result = adapter::allocate(*kDevice, desc, preparedModels, inputRoles, outputRoles);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *buffer = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getCapabilities(Capabilities* capabilities) {
    *capabilities = utils::convert(kDevice->getCapabilities()).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) {
    const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded();
    *numberOfCacheFiles = NumberOfCacheFiles{.numModelCache = static_cast<int32_t>(numModelCache),
                                             .numDataCache = static_cast<int32_t>(numDataCache)};
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getSupportedExtensions(std::vector<Extension>* extensions) {
    *extensions = utils::convert(kDevice->getSupportedExtensions()).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getSupportedOperations(const Model& model,
                                                  std::vector<bool>* supported) {
    auto result = adapter::getSupportedOperations(*kDevice, model);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    *supported = std::move(result).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getType(DeviceType* deviceType) {
    *deviceType = utils::convert(kDevice->getType()).value();
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::getVersionString(std::string* version) {
    *version = kDevice->getVersionString();
    return ndk::ScopedAStatus::ok();
}

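// Compilation entry point without a PrepareModelConfig: forwards to the prepareModel helper with
// empty compilation hints and extension prefixes, reporting synchronous failures both through the
// callback and the returned binder status.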
ndk::ScopedAStatus Device::prepareModel(const Model& model, ExecutionPreference preference,
                                        Priority priority, int64_t deadlineNs,
                                        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
                                        const std::vector<ndk::ScopedFileDescriptor>& dataCache,
                                        const std::vector<uint8_t>& token,
                                        const std::shared_ptr<IPreparedModelCallback>& callback) {
    const auto result =
            adapter::prepareModel(kDevice, kExecutor, model, preference, priority, deadlineNs,
                                  modelCache, dataCache, token, {}, {}, callback);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        notify(callback.get(), aidlCode, nullptr);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

ndk::ScopedAStatus Device::prepareModelFromCache(
        int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    const auto result = adapter::prepareModelFromCache(kDevice, kExecutor, deadlineNs, modelCache,
                                                       dataCache, token, callback);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        notify(callback.get(), aidlCode, nullptr);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

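// Compilation entry point that bundles all parameters (including compilation hints and extension
// prefixes) into a PrepareModelConfig; delegates to the same prepareModel helper.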
ndk::ScopedAStatus Device::prepareModelWithConfig(
        const Model& model, const PrepareModelConfig& config,
        const std::shared_ptr<IPreparedModelCallback>& callback) {
    const auto result = adapter::prepareModel(
            kDevice, kExecutor, model, config.preference, config.priority, config.deadlineNs,
            config.modelCache, config.dataCache, utils::toVec(config.cacheToken),
            config.compilationHints, config.extensionNameToPrefix, callback);
    if (!result.has_value()) {
        const auto& [message, code] = result.error();
        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
        notify(callback.get(), aidlCode, nullptr);
        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
                static_cast<int32_t>(aidlCode), message.c_str());
    }
    return ndk::ScopedAStatus::ok();
}

}  // namespace aidl::android::hardware::neuralnetworks::adapter