1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "Callbacks.h"
18 
19 #include "Conversions.h"
20 #include "PreparedModel.h"
21 #include "Utils.h"
22 
23 #include <android/hardware/neuralnetworks/1.0/types.h>
24 #include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
25 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
26 #include <android/hardware/neuralnetworks/1.2/types.h>
27 #include <nnapi/IPreparedModel.h>
28 #include <nnapi/Result.h>
29 #include <nnapi/Types.h>
30 #include <nnapi/hal/1.0/Callbacks.h>
31 #include <nnapi/hal/1.0/Conversions.h>
32 #include <nnapi/hal/1.0/PreparedModel.h>
33 #include <nnapi/hal/CommonUtils.h>
34 #include <nnapi/hal/HandleError.h>
35 #include <nnapi/hal/ProtectCallback.h>
36 #include <nnapi/hal/TransferValue.h>
37 
38 #include <utility>
39 
40 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
41 // lifetimes across processes and for protecting asynchronous calls across HIDL.
42 
43 namespace android::hardware::neuralnetworks::V1_2::utils {
44 namespace {
45 
prepareModelCallback(V1_0::ErrorStatus status,const sp<V1_0::IPreparedModel> & preparedModel)46 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
47         V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
48     if (const auto dynamicPreparedModel =
49                 V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
50         return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
51     }
52     return V1_0::utils::prepareModelCallback(status, preparedModel);
53 }
54 
55 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape> & outputShapes,const Timing & timing)56 convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
57                                      const Timing& timing) {
58     return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
59 }
60 
61 }  // namespace
62 
prepareModelCallback(V1_0::ErrorStatus status,const sp<IPreparedModel> & preparedModel)63 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
64         V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
65     HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
66     return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
67 }
68 
// Converts the results of a V1_2 execution callback into canonical types.
// On success, returns the canonical output shapes and timing information; on
// failure, returns an error carrying the HAL status (and, for
// OUTPUT_INSUFFICIENT_SIZE, the reported output shapes as well).
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
    if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
        // OUTPUT_INSUFFICIENT_SIZE is special-cased: the output shapes are still
        // meaningful to the caller, so they are attached to the error. If converting
        // the shapes itself fails, fall back to an empty vector rather than masking
        // the original error status.
        auto canonicalOutputShapes =
                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
               << "execution failed with " << toString(status);
    }
    // Any other non-success status: HANDLE_HAL_STATUS returns early with the
    // corresponding canonical error.
    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
    // Success path: convert shapes and timing, mapping any conversion error into an
    // execution failure.
    return hal::utils::makeExecutionFailure(
            convertExecutionGeneralResultsHelper(outputShapes, timing));
}
81 
// V1_0 notification path: converts the HAL result to a canonical result and stores
// it in mData for the thread waiting in get().
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                           const sp<V1_0::IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}
87 
// V1_2 notification path: same as notify(), but receives the V1_2 prepared-model
// interface directly, so no upcast probing is required.
Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                               const sp<IPreparedModel>& preparedModel) {
    mData.put(prepareModelCallback(status, preparedModel));
    return Void();
}
93 
// Invoked when the remote service dies before delivering a result; unblocks any
// waiter in get() with a DEAD_OBJECT error instead of leaving it hanging.
void PreparedModelCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
97 
// Retrieves and consumes the stored result. NOTE(review): mData appears to be a
// hal::utils::TransferValue (see TransferValue.h include), whose take() presumably
// blocks until a value has been put via notify*/notifyAsDeadObject — confirm there.
PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}
101 
102 // ExecutionCallback methods begin here
103 
// V1_0 notification path: no output shapes or timing are available at this HAL
// version, so the status alone is converted and stored for the waiter in get().
Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
    mData.put(V1_0::utils::executionCallback(status));
    return Void();
}
108 
// V1_2 notification path: converts status, output shapes, and timing to canonical
// types (via the file-local executionCallback helper) and stores the result.
Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                           const hidl_vec<OutputShape>& outputShapes,
                                           const Timing& timing) {
    mData.put(executionCallback(status, outputShapes, timing));
    return Void();
}
115 
// Invoked when the remote service dies before delivering a result; unblocks any
// waiter in get() with a DEAD_OBJECT error instead of leaving it hanging.
void ExecutionCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
119 
// Retrieves and consumes the stored result. NOTE(review): mData appears to be a
// hal::utils::TransferValue (see TransferValue.h include), whose take() presumably
// blocks until a value has been put via notify*/notifyAsDeadObject — confirm there.
ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}
123 
124 }  // namespace android::hardware::neuralnetworks::V1_2::utils
125