1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
18 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
19 
#include <android-base/thread_annotations.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <any>
#include <functional>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
30 
31 namespace android::hardware::neuralnetworks::utils {
32 
// Adds automatic recovery to an nn::IPreparedModel: the wrapped model is held behind a mutex
// together with a factory that can produce a replacement instance, allowing `recover` to swap in
// a fresh prepared model when the current one fails (e.g. after a driver service death).
class ResilientPreparedModel final : public nn::IPreparedModel,
                                     public std::enable_shared_from_this<ResilientPreparedModel> {
    // Tag type so the public constructor is only usable via `create` (needed because
    // std::make_shared cannot call a private constructor).
    struct PrivateConstructorTag {};

  public:
    // Callable that produces a new underlying prepared model, used for initial creation and
    // for recovery after failure.
    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>()>;

    // Invokes `makePreparedModel` to obtain the initial underlying model and wraps it.
    // Returns an error if the factory is invalid or fails.
    static nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> create(
            Factory makePreparedModel);

    // Public only for std::make_shared; use `create` instead (PrivateConstructorTag gates this).
    explicit ResilientPreparedModel(PrivateConstructorTag tag, Factory makePreparedModel,
                                    nn::SharedPreparedModel preparedModel);

    // Returns the currently held underlying prepared model.
    nn::SharedPreparedModel getPreparedModel() const;
    // Replaces the underlying model via the stored factory when `failingPreparedModel` is still
    // the one currently held, returning the (possibly new) model to retry with.
    // NOTE(review): exact compare-and-replace semantics live in the .cpp — confirm there.
    nn::GeneralResult<nn::SharedPreparedModel> recover(
            const nn::IPreparedModel* failingPreparedModel) const;

    // nn::IPreparedModel overrides; each forwards to the underlying model (with recovery on
    // failure handled by the implementation).
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;

    // Exposes the underlying model's resource (type-erased) for callers that need direct access.
    std::any getUnderlyingResource() const override;

  private:
    // Acquires mMutex internally; must not be called with mMutex held (see EXCLUDES).
    bool isValidInternal() const EXCLUDES(mMutex);
    nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const;
    nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;

    // Factory used by `recover` to build a replacement model; immutable after construction.
    const Factory kMakePreparedModel;
    // Protects mPreparedModel; mutable so const methods can lock and swap the model.
    mutable std::mutex mMutex;
    // Current underlying prepared model; replaced under mMutex during recovery.
    mutable nn::SharedPreparedModel mPreparedModel GUARDED_BY(mMutex);
};
80 
81 }  // namespace android::hardware::neuralnetworks::utils
82 
83 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
84