1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
18 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
19 
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>

#include <any>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>

#include "nnapi/hal/1.0/ProtectCallback.h"
32 
33 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
34 // lifetimes across processes and for protecting asynchronous calls across HIDL.
35 
36 namespace android::hardware::neuralnetworks::V1_0::utils {
37 
38 // Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel,
                            public std::enable_shared_from_this<PreparedModel> {
    // Private tag type so that the public constructor (required by std::make_shared) can only be
    // invoked by code with access to this class's internals, i.e. via create().
    struct PrivateConstructorTag {};

  public:
    // Creates a PreparedModel that wraps the given HIDL prepared model, or returns a
    // GeneralResult error (e.g. if preparedModel is invalid or a death handler cannot be
    // registered on it — NOTE(review): exact failure modes live in the .cpp; confirm there).
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            sp<V1_0::IPreparedModel> preparedModel);

    // Public so std::make_shared can call it, but effectively private due to
    // PrivateConstructorTag. Use create() instead.
    PreparedModel(PrivateConstructorTag tag, sp<V1_0::IPreparedModel> preparedModel,
                  hal::utils::DeathHandler deathHandler);

    // Synchronously executes the model on the given request, returning the output shapes and
    // timing information (or an ExecutionResult error). See nn::IPreparedModel::execute for the
    // meaning of each parameter.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    // Fenced execution entry point required by nn::IPreparedModel. Note that fenced execution
    // does not exist in the 1.0 HAL, so the implementation presumably falls back to a
    // synchronous path — confirm against the .cpp.
    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    // Creates an execution object that can be invoked repeatedly with the same request,
    // avoiding per-call request validation/conversion overhead.
    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration,
            const std::vector<nn::TokenValuePair>& hints,
            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    // Creates a burst controller for this prepared model (burst executions amortize
    // request-communication overhead across calls).
    nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;

    // Returns the wrapped HIDL object so callers that know the concrete HAL version can reach
    // the underlying V1_0::IPreparedModel.
    std::any getUnderlyingResource() const override;

    // Executes an already-converted V1_0::Request directly. `relocation` describes memory that
    // must be flushed to the driver before the call and/or read back after it. Used by
    // execute() and by the reusable-execution path to share one implementation.
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
            const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const;

  private:
    // Wrapped HIDL prepared model; all calls are forwarded to this object.
    const sp<V1_0::IPreparedModel> kPreparedModel;
    // Keeps a death recipient registered for the lifetime of this object so that outstanding
    // calls can be aborted if the driver process dies (see the README referenced above).
    const hal::utils::DeathHandler kDeathHandler;
};
81 
82 }  // namespace android::hardware::neuralnetworks::V1_0::utils
83 
84 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
85