
Searched refs:outputShapes (Results 1 – 25 of 28) sorted by relevance


/packages/modules/NeuralNetworks/common/cpu_operations/
Split.cpp
33 const std::vector<Shape>& outputShapes) { in splitGeneric() argument
48 const int copySize = outputShapes[i].dimensions[axis] * baseInnerSize; in splitGeneric()
59 const std::vector<Shape>& outputShapes) { in splitFloat16() argument
61 return splitGeneric<_Float16>(inputData, inputShape, axis, outputDataPtrs, outputShapes); in splitFloat16()
66 const std::vector<Shape>& outputShapes) { in splitFloat32() argument
68 return splitGeneric<float>(inputData, inputShape, axis, outputDataPtrs, outputShapes); in splitFloat32()
73 const std::vector<Shape>& outputShapes) { in splitQuant8() argument
75 return splitGeneric<uint8_t>(inputData, inputShape, axis, outputDataPtrs, outputShapes); in splitQuant8()
80 const std::vector<Shape>& outputShapes) { in splitQuant8Signed() argument
82 return splitGeneric<int8_t>(inputData, inputShape, axis, outputDataPtrs, outputShapes); in splitQuant8Signed()
[all …]
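
The Split.cpp fragments above only show the copy-size computation. As a rough illustration of what a splitGeneric-style routine does with outputShapes, here is a minimal, self-contained sketch; the Shape stand-in and the loop structure are assumptions, not the actual NNAPI implementation. It walks the input once and, for each outer slice, copies outputShapes[i].dimensions[axis] * baseInnerSize elements into output i.

#include <cstdint>
#include <cstring>
#include <vector>

// Simplified stand-in for the NNAPI Shape type; only the dimensions are needed here.
struct Shape {
    std::vector<uint32_t> dimensions;
};

// Walks the input once; each outer slice is split across the outputs along `axis`.
template <typename Scalar>
bool splitGeneric(const Scalar* inputData, const Shape& inputShape, int32_t axis,
                  std::vector<Scalar*>* outputDataPtrs,
                  const std::vector<Shape>& outputShapes) {
    // Dimensions before the axis form the outer loop; dimensions after it are copied contiguously.
    int outerSize = 1;
    for (int32_t i = 0; i < axis; ++i) outerSize *= inputShape.dimensions[i];
    int baseInnerSize = 1;
    for (size_t i = axis + 1; i < inputShape.dimensions.size(); ++i) {
        baseInnerSize *= inputShape.dimensions[i];
    }

    const Scalar* inputPtr = inputData;
    for (int k = 0; k < outerSize; ++k) {
        for (size_t i = 0; i < outputShapes.size(); ++i) {
            // Elements this output receives per outer slice (matches the fragment above).
            const int copySize = outputShapes[i].dimensions[axis] * baseInnerSize;
            Scalar* outputPtr = (*outputDataPtrs)[i] + k * copySize;
            std::memcpy(outputPtr, inputPtr, copySize * sizeof(Scalar));
            inputPtr += copySize;
        }
    }
    return true;
}
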
/packages/modules/NeuralNetworks/runtime/
ExecutionCallback.cpp
29 void ExecutionCallback::notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes, in notify() argument
31 notifyInternal(status, outputShapes, timing); in notify()
115 std::vector<OutputShape> outputShapes, Timing timing) { in notifyInternal() argument
120 if (outputShapes.size() == 0) { in notifyInternal()
124 outputShapes = {}; in notifyInternal()
130 if (outputShapes.size() != 0) { in notifyInternal()
134 outputShapes = {}; in notifyInternal()
150 mOutputShapes = std::move(outputShapes); in notifyInternal()
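
The ExecutionCallback fragments above suggest that notifyInternal() sanitizes outputShapes before storing them: shapes are required for OUTPUT_INSUFFICIENT_SIZE and forbidden for other failures. A hedged, self-contained sketch of that idea follows; the stand-in types, locking details, and member names other than mOutputShapes are assumptions.

#include <cstdint>
#include <mutex>
#include <utility>
#include <vector>

// Minimal stand-ins for the NNAPI types referenced by the fragments above.
enum class ErrorStatus { NONE, GENERAL_FAILURE, OUTPUT_INSUFFICIENT_SIZE };
struct OutputShape { std::vector<uint32_t> dimensions; bool isSufficient; };
struct Timing { uint64_t timeOnDevice; uint64_t timeInDriver; };

class ExecutionCallback {
   public:
    void notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes, Timing timing) {
        notifyInternal(status, outputShapes, timing);
    }

   private:
    void notifyInternal(ErrorStatus errorStatus, std::vector<OutputShape> outputShapes,
                        Timing timing) {
        // An OUTPUT_INSUFFICIENT_SIZE result must say which outputs were too small.
        if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE && outputShapes.empty()) {
            errorStatus = ErrorStatus::GENERAL_FAILURE;
            outputShapes = {};
        }
        // Any other failure must not carry output shapes at all.
        if (errorStatus != ErrorStatus::NONE &&
            errorStatus != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE && !outputShapes.empty()) {
            errorStatus = ErrorStatus::GENERAL_FAILURE;
            outputShapes = {};
        }
        std::lock_guard<std::mutex> guard(mMutex);
        mErrorStatus = errorStatus;
        mOutputShapes = std::move(outputShapes);  // mirrors line 150 of the fragment
        mTiming = timing;
    }

    std::mutex mMutex;
    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
    std::vector<OutputShape> mOutputShapes;
    Timing mTiming = {};
};
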
ExecutionBuilder.cpp
678 auto [n2, outputShapes, timing] = executor->computeOnCpuFallback(); in cpuFallbackPartial()
679 return {n2, std::move(outputShapes), timing, executor}; in cpuFallbackPartial()
692 auto [n, outputShapes, timing] = mExecutor->compute(deadline, burstController); in computeInternal()
695 return {n, std::move(outputShapes), timing}; in computeInternal()
700 return {n, std::move(outputShapes), {}}; in computeInternal()
724 std::vector<OutputShape> outputShapes = getInitialOutputShapes(); in computeInternal() local
737 ? mPlan->fallback(controller, &executor, &burstController, &outputShapes) in computeInternal()
738 : mPlan->next(controller, &executor, &burstController, &outputShapes); in computeInternal()
752 return {ANEURALNETWORKS_NO_ERROR, outputShapes, {}}; in computeInternal()
761 if (!executor->updateOutputShapes(stepN, stepOutputShapes, &outputShapes, in computeInternal()
[all …]
ExecutionCallback.h
85 void notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes,
208 void notifyInternal(ErrorStatus errorStatus, std::vector<OutputShape> outputShapes,
ExecutionBuilder.h
125 int finishComputation(int result, const std::vector<OutputShape>& outputShapes,
127 ErrorStatus finishComputation(ErrorStatus error, const std::vector<OutputShape>& outputShapes, in finishComputation() argument
130 finishComputation(convertErrorStatusToResultCode(error), outputShapes, mode); in finishComputation()
184 bool updateOutputShapes(ErrorStatus status, const std::vector<OutputShape>& outputShapes);
Manager.cpp
613 std::vector<OutputShape> outputShapes; in execute() local
618 std::tie(outputShapes, timing) = std::move(result).value(); in execute()
626 outputShapes = std::move(returnedOutputShapes); in execute()
628 return {n, std::move(outputShapes), timing}; in execute()
632 return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing}; in execute()
785 auto [outputShapes, timing] = std::move(result).value(); in compute()
786 return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing}; in compute()
1090 const auto& outputShapes = executor.getOutputShapes(); in computeOnCpu() local
1091 return {err, outputShapes, {}}; in computeOnCpu()
1121 const auto [result, outputShapes, timing] = execute(inputs, outputs, memories, nullptr, measure, in executeFenced()
[all …]
/packages/modules/NeuralNetworks/driver/sample_hidl/
SampleDriverUtils.cpp
81 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing) { in notify() argument
82 const auto ret = callback->notify_1_2(convertToV1_0(status), outputShapes, timing); in notify()
89 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing) { in notify() argument
90 const auto ret = callback->notify_1_3(status, outputShapes, timing); in notify()
SampleDriver.cpp
411 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) { in updateDeviceMemories() argument
417 if (!bufferWrappers[poolIndex]->updateDimensions(outputShapes[i].dimensions)) { in updateDeviceMemories()
437 if (!outputShapes[i].isSufficient) { in updateDeviceMemories()
439 << ": actual shape = " << toString(outputShapes[i].dimensions); in updateDeviceMemories()
482 hardware::hidl_vec<V1_2::OutputShape> outputShapes = convertToV1_2(executor.getOutputShapes()); in asyncExecute() local
486 updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); in asyncExecute()
498 notify(callback, executionStatus, outputShapes, timing); in asyncExecute()
500 notify(callback, executionStatus, outputShapes, kNoTiming); in asyncExecute()
614 hardware::hidl_vec<V1_2::OutputShape> outputShapes = convertToV1_2(executor.getOutputShapes()); in executeSynchronouslyBase() local
618 updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); in executeSynchronouslyBase()
[all …]
SampleDriverUtils.h
51 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing);
54 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing);
/packages/modules/NeuralNetworks/driver/sample/
CanonicalPreparedModel.cpp
74 const std::vector<OutputShape>& outputShapes) { in updateDeviceMemories() argument
81 if (!bufferWrappers[poolIndex]->updateDimensions(outputShapes[i].dimensions)) { in updateDeviceMemories()
103 if (!outputShapes[i].isSufficient) { in updateDeviceMemories()
105 << ": actual shape = " << toString(outputShapes[i].dimensions); in updateDeviceMemories()
167 const auto& outputShapes = executor.getOutputShapes(); in execute() local
171 updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); in execute()
176 return NN_ERROR(executionStatus, outputShapes); in execute()
186 return std::make_pair(outputShapes, timing); in execute()
/packages/modules/NeuralNetworks/common/
ExecutionBurstServer.cpp
88 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, in execute()
91 returnedOutputShapes = outputShapes; in execute()
115 const std::vector<V1_2::OutputShape>& outputShapes, in serialize() argument
118 size_t count = 2 + outputShapes.size(); in serialize()
119 for (const auto& outputShape : outputShapes) { in serialize()
132 /*.numberOfOperands=*/static_cast<uint32_t>(outputShapes.size())}); in serialize()
137 for (const auto& operand : outputShapes) { in serialize()
477 const std::vector<V1_2::OutputShape>& outputShapes, in send() argument
479 const std::vector<FmqResultDatum> serialized = serialize(errorStatus, outputShapes, timing); in send()
649 const auto [errorStatus, outputShapes, returnedTiming] = in task()
[all …]
ExecutionBurstController.cpp
156 std::vector<V1_2::OutputShape> outputShapes; in deserialize() local
214 outputShapes.push_back({/*.dimensions=*/dimensions, /*.isSufficient=*/isSufficient}); in deserialize()
235 return std::make_tuple(errorStatus, std::move(outputShapes), timing); in deserialize()
587 V1_0::ErrorStatus status, std::vector<V1_2::OutputShape> outputShapes, V1_2::Timing timing, in getExecutionResult() argument
590 getExecutionResult(convertToV1_3(status), std::move(outputShapes), timing); in getExecutionResult()
627 auto [status, outputShapes, timing] = std::move(*result); in compute()
628 return getExecutionResult(status, std::move(outputShapes), timing, /*fallback=*/false); in compute()
CpuExecutor.cpp
1382 std::vector<Shape> outputShapes(numOutputs); in executeOperation() local
1384 outputShapes[i] = operands[outs[i]].shape(); in executeOperation()
1387 success = splitPrepare(input.shape(), axis, numOutputs, &outputShapes); in executeOperation()
1390 outputShapes[i], &result); in executeOperation()
1400 input.shape(), axis, &outputDataPtrs, outputShapes); in executeOperation()
1409 input.shape(), axis, &outputDataPtrs, outputShapes); in executeOperation()
1418 input.shape(), axis, &outputDataPtrs, outputShapes); in executeOperation()
1427 input.shape(), axis, &outputDataPtrs, outputShapes); in executeOperation()
1436 input.shape(), axis, &outputDataPtrs, outputShapes); in executeOperation()
LegacyHalUtils.cpp
226 V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, in getExecutionResult() argument
228 return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes), in getExecutionResult()
1589 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) { in uncheckedConvert() argument
1590 return convertVec<OutputShape>(outputShapes); in uncheckedConvert()
1711 hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) { in convertToV1_2() argument
1712 return convertVecToV1_2<V1_2::OutputShape>(outputShapes); in convertToV1_2()
LegacyUtils.cpp
1777 ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) { in getExecutionResult() argument
1781 !outputShapes.empty()) { in getExecutionResult()
1783 outputShapes.clear(); in getExecutionResult()
1789 return {n, std::move(outputShapes), timing}; in getExecutionResult()
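
The LegacyUtils.cpp fragments above hint that getExecutionResult() drops driver-reported shapes unless the status allows them. A minimal sketch of that rule follows; the stand-in types and the placeholder convertErrorStatusToResultCode() mapping are assumptions, not the real API.

#include <cstdint>
#include <tuple>
#include <utility>
#include <vector>

// Stand-in types and a placeholder result-code mapping (assumptions, not the real definitions).
enum class ErrorStatus { NONE, GENERAL_FAILURE, OUTPUT_INSUFFICIENT_SIZE };
struct OutputShape { std::vector<uint32_t> dimensions; bool isSufficient; };
struct Timing { uint64_t timeOnDevice; uint64_t timeInDriver; };

int convertErrorStatusToResultCode(ErrorStatus status) {
    return status == ErrorStatus::NONE ? 0 : -1;  // placeholder mapping
}

// Shapes are only meaningful on success or OUTPUT_INSUFFICIENT_SIZE; otherwise drop them.
std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
    const int n = convertErrorStatusToResultCode(status);
    if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
        !outputShapes.empty()) {
        outputShapes.clear();
    }
    return {n, std::move(outputShapes), timing};
}
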
/packages/modules/NeuralNetworks/common/include/
Operations.h
113 const std::vector<Shape>& outputShapes);
117 const std::vector<Shape>& outputShapes);
121 const std::vector<Shape>& outputShapes);
125 const std::vector<Shape>& outputShapes);
129 const std::vector<Shape>& outputShapes);
ExecutionBurstServer.h
56 const std::vector<hardware::neuralnetworks::V1_2::OutputShape>& outputShapes,
164 const std::vector<hardware::neuralnetworks::V1_2::OutputShape>& outputShapes,
LegacyHalUtils.h
185 V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
367 const hardware::hidl_vec<V1_2::OutputShape>& outputShapes);
376 hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes);
LegacyUtils.h
260 ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
/packages/modules/NeuralNetworks/common/types/src/
Types.cpp
50 std::vector<OutputShape> outputShapes) in ExecutionError() argument
51 : message(std::move(message)), code(code), outputShapes(std::move(outputShapes)) {} in ExecutionError()
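
The Types.cpp constructor above, together with the Types.h lines further down, suggests that ExecutionError bundles a message, an error code, and the reported outputShapes. A self-contained sketch under those assumptions; the simplified OutputShape and ErrorStatus stand-ins are not the real definitions.

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-ins; the real definitions live in the NNAPI type headers.
struct OutputShape { std::vector<uint32_t> dimensions; bool isSufficient; };
enum class ErrorStatus { NONE, GENERAL_FAILURE, OUTPUT_INSUFFICIENT_SIZE };

// An execution error can carry the shapes the driver reported, so an
// OUTPUT_INSUFFICIENT_SIZE failure still tells the caller how big each output must be.
struct ExecutionError {
    ExecutionError(std::string message, ErrorStatus code,
                   std::vector<OutputShape> outputShapes = {})
        : message(std::move(message)), code(code), outputShapes(std::move(outputShapes)) {}

    std::string message;
    ErrorStatus code;
    std::vector<OutputShape> outputShapes;
};
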
/packages/modules/NeuralNetworks/driver/sample_aidl/
SampleDriverAidl.cpp
358 const std::vector<aidl_hal::OutputShape>& outputShapes) { in updateDeviceMemories() argument
364 const auto unsignedDimensions = toUnsigned(outputShapes[i].dimensions).value(); in updateDeviceMemories()
385 if (!outputShapes[i].isSufficient) { in updateDeviceMemories()
387 << ": actual shape = " << toString(outputShapes[i].dimensions); in updateDeviceMemories()
459 auto outputShapes = aidl_hal::utils::convert(executor.getOutputShapes()).value(); in executeSynchronously() local
463 updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); in executeSynchronously()
470 executionResult->outputShapes = std::move(outputShapes); in executeSynchronously()
/packages/modules/NeuralNetworks/shim_and_sl/
ShimPreparedModel.cpp
346 std::vector<OutputShape> outputShapes; in executeSynchronouslyInternal() local
347 outputShapes.reserve(numOutputs); in executeSynchronouslyInternal()
365 outputShapes.push_back(std::move(outputShape)); in executeSynchronouslyInternal()
390 std::move(outputShapes), in executeSynchronouslyInternal()
/packages/modules/NeuralNetworks/common/types/include/nnapi/
Types.h
230 std::vector<OutputShape> outputShapes = {});
238 std::vector<OutputShape> outputShapes; member
Validation.h
70 Result<Version> validate(const std::vector<OutputShape>& outputShapes);
/packages/modules/NeuralNetworks/tools/api/
Types.t
139 std::vector<OutputShape> outputShapes = {});
147 std::vector<OutputShape> outputShapes;