/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Callbacks.h"

#include "Conversions.h"
#include "PreparedModel.h"
#include "Utils.h"

#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Callbacks.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.0/PreparedModel.h>
#include <nnapi/hal/1.2/Callbacks.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/PreparedModel.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>

#include <utility>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_3::utils {
namespace {
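
// Converts the result of an asynchronous prepareModel call that reported through the V1_0 or
// V1_2 notify path. The returned prepared model is upgraded where possible: the code first tries
// to cast it to V1_3::IPreparedModel, then to V1_2::IPreparedModel, and falls back to the V1_0
// wrapper only if neither cast succeeds, so the most capable interface the driver offers is used.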
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
    if (const auto dynamicPreparedModel =
                V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
        const auto currentVersionStatus = NN_TRY(convertFromNonCanonical(status));
        return V1_3::utils::prepareModelCallback(currentVersionStatus, dynamicPreparedModel);
    }
    if (const auto dynamicPreparedModel =
                V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
        return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
    }
    return V1_0::utils::prepareModelCallback(status, preparedModel);
}
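
// Converts the HIDL output shapes and timing information of a completed execution to their
// canonical (nnapi/Types.h) counterparts.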
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
                                     const V1_2::Timing& timing) {
    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
}

}  // namespace
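
// Handles the result of IDevice::getSupportedOperations_1_3: on success returns the
// per-operation support flags, otherwise propagates the HAL error status.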
nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
    return supportedOperations;
}
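
// Handles the result of model preparation on the V1_3 path: on success wraps the driver's
// prepared model in the V1_3::utils::PreparedModel adapter, otherwise propagates the HAL error.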
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
}
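
// Handles the result of an execution reported through notify_1_3. OUTPUT_INSUFFICIENT_SIZE is
// treated specially: the output shapes are still converted and attached to the error so the
// caller can learn the required buffer sizes and retry. Any other failure is propagated as-is;
// on success the output shapes and timing are returned in canonical form.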
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
        ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
        const V1_2::Timing& timing) {
    if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
        auto canonicalOutputShapes =
                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
               << "execution failed with " << toString(status);
    }
    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
    return hal::utils::makeExecutionFailure(
            convertExecutionGeneralResultsHelper(outputShapes, timing));
}
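
// PreparedModelCallback: each notify* overload converts the driver's result through the matching
// prepareModelCallback helper above and stores it in mData; get() later hands that result to the
// thread that issued the asynchronous prepareModel call. A rough usage sketch follows; it is
// illustrative only (the real call sites live elsewhere in this library, e.g. the Device
// implementation, and the `device` object and argument names below are placeholders):
//
//     sp<PreparedModelCallback> cb = new PreparedModelCallback();
//     const auto ret = device->prepareModel_1_3(model, preference, priority, deadline,
//                                               modelCache, dataCache, token, cb);
//     // ...check the transport and launch status of `ret`, and register `cb` with a
//     // hal::utils::DeathHandler so notifyAsDeadObject() fires if the driver process dies...
//     return cb->get();  // waits for notify/notify_1_2/notify_1_3 or notifyAsDeadObject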
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}

Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                               const sp<V1_2::IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}

Return<void> PreparedModelCallback::notify_1_3(ErrorStatus status,
                                               const sp<IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}
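
// Called (via hal::utils::DeathHandler, see ProtectCallback.h) if the driver process dies before
// delivering a result; unblocks a waiter in get() with a DEAD_OBJECT error.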
void PreparedModelCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
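
// Retrieves the stored result, waiting until one of the notify* methods or notifyAsDeadObject
// has delivered it.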
PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}

// ExecutionCallback methods begin here
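
// ExecutionCallback: each notify* overload converts the execution result with the
// executionCallback helper matching the HAL version that reported it (V1_0, V1_2, or the V1_3
// helper above) and stores it for the thread waiting in get(). The V1_0 notify carries no output
// shapes or timing, so only the status is recorded.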
Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
    mData.put(V1_0::utils::executionCallback(status));
    return Void();
}

Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
                                           const V1_2::Timing& timing) {
    mData.put(V1_2::utils::executionCallback(status, outputShapes, timing));
    return Void();
}

Return<void> ExecutionCallback::notify_1_3(ErrorStatus status,
                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
                                           const V1_2::Timing& timing) {
    mData.put(executionCallback(status, outputShapes, timing));
    return Void();
}
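
// Same dead-object handling as PreparedModelCallback::notifyAsDeadObject above.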
void ExecutionCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
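
// Same blocking retrieval as PreparedModelCallback::get above.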
ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}

}  // namespace android::hardware::neuralnetworks::V1_3::utils