/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H

#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>

#include <any>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
// lifetimes across processes and for protecting asynchronous calls across HIDL.

namespace android::hardware::neuralnetworks::V1_2::utils {

// Class that adapts V1_2::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel,
                            public std::enable_shared_from_this<PreparedModel> {
    // Private tag type: because only members (and friends) can name this type, the public
    // constructor below is effectively restricted to the create() factory.
    struct PrivateConstructorTag {};

  public:
    // Factory for PreparedModel instances. Returns a shared handle to an immutable wrapper
    // around the given HIDL prepared model, or a GeneralResult error on failure.
    // `executeSynchronously` is stored and forwarded to the constructor; presumably it selects
    // between the synchronous and asynchronous HIDL execution paths declared below -- the
    // factory's implementation is not visible in this header.
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously);

    // Public so create() can use std::make_shared, but unusable outside this class because
    // PrivateConstructorTag is private. Takes ownership of the HIDL interface handle and the
    // DeathHandler that guards calls on it.
    PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
                  sp<V1_2::IPreparedModel> preparedModel, hal::utils::DeathHandler deathHandler);

    // nn::IPreparedModel implementation: runs `request` and, on success, yields the dynamic
    // output shapes together with timing information (per nn::MeasureTiming).
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    // Fenced execution: waits on `waitFor` sync fences, returning a sync fence for completion
    // and a callback for retrieving post-execution info.
    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

    // Creates an execution object that can be re-invoked with the same request.
    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    // Creates a burst controller for low-latency repeated executions.
    nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;

    // Type-erased access to the underlying resource (requires <any>); the concrete payload type
    // is determined by the .cpp implementation, not visible here.
    std::any getUnderlyingResource() const override;

    // Executes an already-converted V1_0::Request. Public (unlike the per-path helpers below),
    // so other utils code can presumably call it directly after performing its own request
    // conversion/relocation -- confirm against callers. `relocation` carries memory-relocation
    // state produced by that conversion (see nnapi/hal/CommonUtils.h).
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
            const V1_0::Request& request, MeasureTiming measure,
            const hal::utils::RequestRelocation& relocation) const;

  private:
    // The two HIDL execution paths; kExecuteSynchronously presumably chooses between them
    // (selection happens in the .cpp, not visible here).
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
            const V1_0::Request& request, MeasureTiming measure) const;
    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
            const V1_0::Request& request, MeasureTiming measure) const;

    // All state is immutable after construction (kFoo naming per project convention), which is
    // consistent with create() handing out shared_ptr<const PreparedModel>.
    const bool kExecuteSynchronously;
    // The wrapped HIDL prepared model this adapter forwards to.
    const sp<V1_2::IPreparedModel> kPreparedModel;
    // Death handler for the remote HIDL object (see the README note above about protecting
    // asynchronous calls across HIDL).
    const hal::utils::DeathHandler kDeathHandler;
};

}  // namespace android::hardware::neuralnetworks::V1_2::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H