/hardware/interfaces/neuralnetworks/1.0/utils/src/ |
D | PreparedModel.cpp |
     44  nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(  in create()
     51  return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),  in create()
     55  PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_0::IPreparedModel> preparedModel,  in PreparedModel() function in android::hardware::neuralnetworks::V1_0::utils::PreparedModel
     59  nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(  in execute()
     77  PreparedModel::executeInternal(const V1_0::Request& request,  in executeInternal()
     98  PreparedModel::executeFenced(const nn::Request& /*request*/,  in executeFenced()
    108  nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(  in createReusableExecution()
    122  nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {  in configureExecutionBurst()
    126  std::any PreparedModel::getUnderlyingResource() const {  in getUnderlyingResource()
|
D | Execution.cpp |
     43  std::shared_ptr<const PreparedModel> preparedModel, Request request,  in create()
     54  std::shared_ptr<const PreparedModel> preparedModel, Request request,  in Execution()
|
D | Callbacks.cpp |
     50  return NN_TRY(PreparedModel::create(preparedModel));  in prepareModelCallback()
|
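Aside: the create()/PrivateConstructorTag pairing visible above is a factory idiom: construction has to go through create(), which can validate its arguments and return an error, while the tag-taking constructor stays callable by std::make_shared. Below is a minimal, self-contained sketch of that idiom; the Widget name and the int handle are illustrative stand-ins, not the NN utils types.

    #include <memory>

    class Widget final {
        struct PrivateConstructorTag {};  // private: only Widget's own code can name it

      public:
        // Factory entry point: validate first, then forward to the gated constructor.
        static std::shared_ptr<const Widget> create(int handle) {
            if (handle < 0) {
                return nullptr;  // the real code returns an error result instead
            }
            return std::make_shared<const Widget>(PrivateConstructorTag{}, handle);
        }

        // Public so std::make_shared can call it, but unusable without the private tag.
        Widget(PrivateConstructorTag /*tag*/, int handle) : mHandle(handle) {}

      private:
        const int mHandle;
    };

Callers cannot bypass the validation in create(), yet the class still benefits from make_shared's single allocation.
|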
/hardware/interfaces/neuralnetworks/1.2/utils/src/ |
D | PreparedModel.cpp |
     49  nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(  in create()
     56  return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,  in create()
     60  PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,  in PreparedModel() function in android::hardware::neuralnetworks::V1_2::utils::PreparedModel
     68  PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const {  in executeSynchronously()
     78  PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming measure) const {  in executeAsynchronously()
     91  nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(  in execute()
    110  PreparedModel::executeInternal(const V1_0::Request& request, MeasureTiming measure,  in executeInternal()
    127  PreparedModel::executeFenced(const nn::Request& /*request*/,  in executeFenced()
    137  nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(  in createReusableExecution()
    153  nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {  in configureExecutionBurst()
    [all …]
|
D | Callbacks.cpp |
     66  return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));  in prepareModelCallback()
|
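Aside: in the 1.2 and 1.3 utils, create() additionally takes an executeSynchronously flag, and execute() is split into executeSynchronously()/executeAsynchronously() helpers behind a shared executeInternal(). A hedged sketch of that dispatch shape follows; the Runner, Request, and Result names are simplified stand-ins, not the real NN signatures.

    struct Request {};                             // stand-in for the HAL request type
    struct Result { bool ok; const char* path; };  // stand-in for the execution result

    class Runner final {
      public:
        explicit Runner(bool executeSynchronously)
            : kExecuteSynchronously(executeSynchronously) {}

        // Single public entry point; the flag chosen at creation time picks the HAL path.
        Result execute(const Request& request) const {
            return kExecuteSynchronously ? executeSynchronously(request)
                                         : executeAsynchronously(request);
        }

      private:
        Result executeSynchronously(const Request& /*request*/) const {
            return {true, "sync"};   // one blocking round trip to the driver
        }
        Result executeAsynchronously(const Request& /*request*/) const {
            return {true, "async"};  // launch plus a callback that reports completion
        }

        const bool kExecuteSynchronously;
    };
|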
/hardware/interfaces/neuralnetworks/1.0/utils/test/ |
D | PreparedModelTest.cpp |
     64  const auto result = PreparedModel::create(kInvalidPreparedModel);  in TEST()
     78  const auto result = PreparedModel::create(mockPreparedModel);  in TEST()
     93  const auto result = PreparedModel::create(mockPreparedModel);  in TEST()
    108  const auto result = PreparedModel::create(mockPreparedModel);  in TEST()
    118  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    134  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    151  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    168  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    184  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    200  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    [all …]
|
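Aside: the tests above follow one pattern throughout: create() is handed either an invalid (null) driver object, which must yield an error, or a mock, whose wrapper is then unwrapped with .value(). A hedged gtest-style sketch of that pattern, using an illustrative std::optional-based factory rather than the real result API:

    #include <gtest/gtest.h>
    #include <memory>
    #include <optional>

    // Illustrative factory that can fail (stand-in for a create() entry point).
    std::optional<std::shared_ptr<const int>> createWrapper(const int* driver) {
        if (driver == nullptr) return std::nullopt;    // invalid handle -> error
        return std::make_shared<const int>(*driver);   // valid handle -> wrapper
    }

    TEST(CreateTest, invalidHandleIsRejected) {
        const auto result = createWrapper(nullptr);
        EXPECT_FALSE(result.has_value());
    }

    TEST(CreateTest, validHandleIsWrapped) {
        const int driver = 7;
        const auto wrapper = createWrapper(&driver).value();  // mirrors .value() above
        EXPECT_EQ(*wrapper, 7);
    }
|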
/hardware/interfaces/neuralnetworks/1.2/utils/test/ |
D | PreparedModelTest.cpp |
     96  const auto result = PreparedModel::create(kInvalidPreparedModel, /*executeSynchronously=*/true);  in TEST()
    110  const auto result = PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true);  in TEST()
    125  const auto result = PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true);  in TEST()
    140  const auto result = PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true);  in TEST()
    151  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    168  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    186  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    203  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    220  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();  in TEST()
    238  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();  in TEST()
    [all …]
|
/hardware/interfaces/neuralnetworks/aidl/utils/src/ |
D | PreparedModel.cpp |
     60  nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(  in create()
     67  return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel));  in create()
     70  PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/,  in PreparedModel() function in aidl::android::hardware::neuralnetworks::utils::PreparedModel
     74  nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(  in execute()
     96  PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,  in executeInternal()
    123  PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,  in executeFenced()
    146  PreparedModel::executeFencedInternal(const Request& request,  in executeFencedInternal()
    195  nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(  in createReusableExecution()
    212  nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {  in configureExecutionBurst()
    219  std::any PreparedModel::getUnderlyingResource() const {  in getUnderlyingResource()
|
D | Execution.cpp |
     40  std::shared_ptr<const PreparedModel> preparedModel, Request request,  in create()
     52  std::shared_ptr<const PreparedModel> preparedModel, Request request,  in Execution()
|
D | Callbacks.cpp |
     42  return NN_TRY(PreparedModel::create(preparedModel));  in prepareModelCallback()
|
/hardware/interfaces/neuralnetworks/1.3/utils/src/ |
D | PreparedModel.cpp |
     89  nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(  in create()
     96  return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,  in create()
    100  PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,  in PreparedModel() function in android::hardware::neuralnetworks::V1_3::utils::PreparedModel
    108  PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,  in executeSynchronously()
    121  PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming measure,  in executeAsynchronously()
    137  nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(  in execute()
    160  PreparedModel::executeInternal(const Request& request, V1_2::MeasureTiming measure,  in executeInternal()
    180  PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,  in executeFenced()
    204  PreparedModel::executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,  in executeFencedInternal()
    234  nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(  in createReusableExecution()
    [all …]
|
D | Execution.cpp |
     44  std::shared_ptr<const PreparedModel> preparedModel, Request request,  in create()
     57  std::shared_ptr<const PreparedModel> preparedModel, Request request,  in Execution()
|
/hardware/interfaces/neuralnetworks/1.3/utils/test/ |
D | PreparedModelTest.cpp |
    124  const auto result = PreparedModel::create(kInvalidPreparedModel, /*executeSynchronously=*/true);  in TEST()
    138  const auto result = PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true);  in TEST()
    153  const auto result = PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true);  in TEST()
    168  const auto result = PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true);  in TEST()
    179  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    196  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    214  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    231  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();  in TEST()
    248  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();  in TEST()
    266  PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();  in TEST()
    [all …]
|
/hardware/interfaces/neuralnetworks/aidl/utils/test/ |
D | PreparedModelTest.cpp |
     73  const auto result = PreparedModel::create(kInvalidPreparedModel);  in TEST()
     83  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    105  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    121  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    137  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    153  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    182  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    212  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    228  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    244  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST()
    [all …]
|
/hardware/interfaces/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ |
D | PreparedModel.h |
     38  class PreparedModel final : public nn::IPreparedModel,
     39  public std::enable_shared_from_this<PreparedModel> {
     43  static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
     46  PreparedModel(PrivateConstructorTag tag, sp<V1_0::IPreparedModel> preparedModel,
|
D | Execution.h |
     43  std::shared_ptr<const PreparedModel> preparedModel, Request request,
     46  Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
     57  const std::shared_ptr<const PreparedModel> kPreparedModel;
|
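Aside: each Execution.h in this listing stores a const std::shared_ptr<const PreparedModel> member (kPreparedModel), so a reusable execution keeps its prepared model alive for as long as the execution exists. A minimal sketch of that ownership shape, with illustrative bodies rather than the real members:

    #include <memory>
    #include <utility>

    struct PreparedModel { /* wraps the driver-side prepared model */ };
    struct Request { /* inputs/outputs fixed for a reusable execution */ };

    class Execution final {
      public:
        Execution(std::shared_ptr<const PreparedModel> preparedModel, Request request)
            : kPreparedModel(std::move(preparedModel)), kRequest(std::move(request)) {}

        // Every compute() reuses the same model and request; the shared_ptr member
        // guarantees the PreparedModel outlives every Execution created from it.
        void compute() const { /* forward kRequest through kPreparedModel */ }

      private:
        const std::shared_ptr<const PreparedModel> kPreparedModel;
        const Request kRequest;
    };
|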
/hardware/interfaces/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ |
D | PreparedModel.h |
     38  class PreparedModel final : public nn::IPreparedModel,
     39  public std::enable_shared_from_this<PreparedModel> {
     43  static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
     46  PreparedModel(PrivateConstructorTag tag,
|
D | Execution.h |
     41  std::shared_ptr<const PreparedModel> preparedModel, Request request,
     44  Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
     56  const std::shared_ptr<const PreparedModel> kPreparedModel;
|
/hardware/interfaces/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/ |
D | Execution.h |
     41  std::shared_ptr<const PreparedModel> preparedModel, Request request,
     45  Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
     57  const std::shared_ptr<const PreparedModel> kPreparedModel;
|
D | PreparedModel.h |
     38  class PreparedModel final : public nn::IPreparedModel,
     39  public std::enable_shared_from_this<PreparedModel> {
     43  static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
     46  PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
|
/hardware/interfaces/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ |
D | Execution.h |
     43  std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
     46  Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
     58  const std::shared_ptr<const PreparedModel> kPreparedModel;
|
D | PreparedModel.h |
     39  class PreparedModel final : public nn::IPreparedModel,
     40  public std::enable_shared_from_this<PreparedModel> {
     44  static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
     47  PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
|
/hardware/interfaces/neuralnetworks/utils/adapter/src/ |
D | PreparedModel.cpp |
    292  PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId)  in PreparedModel() function in android::hardware::neuralnetworks::adapter::PreparedModel
    298  nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {  in getUnderlyingPreparedModel()
    302  Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,  in execute()
    314  Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,  in execute_1_2()
    328  Return<V1_3::ErrorStatus> PreparedModel::execute_1_3(  in execute_1_3()
    344  Return<void> PreparedModel::executeSynchronously(const V1_0::Request& request,  in executeSynchronously()
    361  Return<void> PreparedModel::executeSynchronously_1_3(  in executeSynchronously_1_3()
    380  Return<void> PreparedModel::configureExecutionBurst(  in configureExecutionBurst()
    396  Return<void> PreparedModel::executeFenced(const V1_3::Request& request,  in executeFenced()
|
D | Device.cpp |
     67  sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor,  in adaptPreparedModel()
     72  return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor), userId);  in adaptPreparedModel()
     76  const sp<PreparedModel>& hidlPreparedModel) {  in notify()
     87  const sp<PreparedModel>& hidlPreparedModel) {  in notify()
     99  const sp<PreparedModel>& hidlPreparedModel) {  in notify()
    294  const auto* casted = static_cast<const PreparedModel*>(preparedModel.get());  in downcast()
|
/hardware/interfaces/neuralnetworks/utils/adapter/include/nnapi/hal/ |
D | PreparedModel.h |
     40  class PreparedModel final : public V1_3::IPreparedModel {
     42  PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId);
|
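Aside: the adapter PreparedModel above runs in the opposite direction from the version-specific utils: it wraps a canonical nn::SharedPreparedModel and exposes it through the HIDL V1_3::IPreparedModel interface. A hedged sketch of that wrapping direction with simplified interfaces follows; the type names below are stand-ins, not the real HIDL or canonical types.

    #include <memory>
    #include <utility>

    // Simplified stand-ins: a "canonical" interface and a "versioned" HAL-facing one.
    struct CanonicalPreparedModel {
        virtual ~CanonicalPreparedModel() = default;
        virtual int execute(int request) const = 0;  // returns a status code
    };

    struct VersionedPreparedModel {
        virtual ~VersionedPreparedModel() = default;
        virtual int execute_v1(int request) const = 0;
    };

    // Adapter: owns the canonical object and republishes it behind the versioned API;
    // the real implementation also converts arguments and results at the boundary.
    class PreparedModelAdapter final : public VersionedPreparedModel {
      public:
        explicit PreparedModelAdapter(std::shared_ptr<CanonicalPreparedModel> preparedModel)
            : kPreparedModel(std::move(preparedModel)) {}

        int execute_v1(int request) const override {
            return kPreparedModel->execute(request);
        }

      private:
        const std::shared_ptr<CanonicalPreparedModel> kPreparedModel;
    };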