/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H

#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <any>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_0::utils {

// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel,
                            public std::enable_shared_from_this<PreparedModel> {
    struct PrivateConstructorTag {};

  public:
    // Creates a PreparedModel that wraps the provided V1_0::IPreparedModel handle and
    // registers a death handler for it.
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            sp<V1_0::IPreparedModel> preparedModel);

    // Prefer PreparedModel::create; PrivateConstructorTag limits direct construction to this
    // class while still allowing std::make_shared.
    PreparedModel(PrivateConstructorTag tag, sp<V1_0::IPreparedModel> preparedModel,
                  hal::utils::DeathHandler deathHandler);

    // nn::IPreparedModel overrides; these adapt the canonical calls to the V1_0 HIDL interface.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;

    std::any getUnderlyingResource() const override;

    // Execution helper that takes an already-converted V1_0::Request along with its memory
    // relocation; shared by execute() and executions created via createReusableExecution().
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
            const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const;

  private:
    const sp<V1_0::IPreparedModel> kPreparedModel;
    const hal::utils::DeathHandler kDeathHandler;
};
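
// Illustrative usage sketch, assuming a caller already holds a valid
// sp<V1_0::IPreparedModel> named `halPreparedModel` and a canonical nn::Request named
// `request` (both hypothetical names, not defined in this header):
//
//     auto preparedModel = PreparedModel::create(std::move(halPreparedModel));
//     if (!preparedModel.has_value()) {
//         return;  // or propagate the nn::GeneralError in preparedModel.error()
//     }
//     const auto result = (*preparedModel)->execute(request, nn::MeasureTiming::NO,
//                                                   /*deadline=*/{}, /*loopTimeoutDuration=*/{});
//     if (result.has_value()) {
//         const auto& [outputShapes, timing] = result.value();
//     }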

}  // namespace android::hardware::neuralnetworks::V1_0::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H