/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Conversions.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.1/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>
#include <variant>

#include "Utils.h"

namespace {

template <typename Type>
constexpr std::underlying_type_t<Type> underlyingType(Type value) {
    return static_cast<std::underlying_type_t<Type>>(value);
}

// V1_2 HAL timings are expressed as uint64_t microseconds.
using HalDuration = std::chrono::duration<uint64_t, std::micro>;

}  // namespace

namespace android::nn {
namespace {

using hardware::hidl_handle;
using hardware::hidl_vec;

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const hidl_vec<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
    }
    return canonical;
}

template <typename Type>
GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
    NN_TRY(hal::V1_2::utils::compliantVersion(canonical));
    return canonical;
}

template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const hidl_vec<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(validatedConvert(argument)));
    }
    return canonical;
}

}  // anonymous namespace

GeneralResult<OperandType> unvalidatedConvert(const hal::V1_2::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

GeneralResult<OperationType> unvalidatedConvert(const hal::V1_2::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

GeneralResult<DeviceType> unvalidatedConvert(const hal::V1_2::DeviceType& deviceType) {
    return static_cast<DeviceType>(deviceType);
}

GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_2::Capabilities& capabilities) {
    const bool validOperandTypes = std::all_of(
            capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
            [](const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
                return validatedConvert(operandPerformance.type).has_value();
            });
    if (!validOperandTypes) {
        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
               << "Invalid OperandType when converting OperandPerformance in Capabilities";
    }

    const auto relaxedFloat32toFloat16PerformanceScalar =
            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
    const auto relaxedFloat32toFloat16PerformanceTensor =
            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
    auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
    auto table = NN_TRY(hal::utils::makeGeneralFailure(
            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
            nn::ErrorStatus::GENERAL_FAILURE));

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
            .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
            .operandPerformance = std::move(table),
    };
}

GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
        const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
    };
}

GeneralResult<Operation> unvalidatedConvert(const hal::V1_2::Operation& operation) {
    return Operation{
            .type = NN_TRY(unvalidatedConvert(operation.type)),
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

GeneralResult<Operand::SymmPerChannelQuantParams> unvalidatedConvert(
        const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    return Operand::SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
            .channelDim = symmPerChannelQuantParams.channelDim,
    };
}

GeneralResult<Operand> unvalidatedConvert(const hal::V1_2::Operand& operand) {
    return Operand{
            .type = NN_TRY(unvalidatedConvert(operand.type)),
            .dimensions = operand.dimensions,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
            .location = NN_TRY(unvalidatedConvert(operand.location)),
            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
    };
}

GeneralResult<Operand::ExtraParams> unvalidatedConvert(
        const hal::V1_2::Operand::ExtraParams& extraParams) {
    using Discriminator = hal::V1_2::Operand::ExtraParams::hidl_discriminator;
    switch (extraParams.getDiscriminator()) {
        case Discriminator::none:
            return Operand::NoParams{};
        case Discriminator::channelQuant:
            return unvalidatedConvert(extraParams.channelQuant());
        case Discriminator::extension:
            return extraParams.extension();
    }
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "Unrecognized Operand::ExtraParams discriminator: "
           << underlyingType(extraParams.getDiscriminator());
}

GeneralResult<Model> unvalidatedConvert(const hal::V1_2::Model& model) {
    auto operations = NN_TRY(unvalidatedConvert(model.operations));

    // Verify number of consumers.
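    // The V1_2 HAL Operand carries an explicit numberOfConsumers field that the canonical Model
    // does not; recompute the counts from the operations and reject models whose declared values
    // disagree.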
    const auto numberOfConsumers =
            NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations));
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Invalid numberOfConsumers for operand " << i << ", expected "
                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
        }
    }

    auto main = Model::Subgraph{
            .operands = NN_TRY(unvalidatedConvert(model.operands)),
            .operations = std::move(operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
    };

    return Model{
            .main = std::move(main),
            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
            .pools = NN_TRY(unvalidatedConvert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
    };
}

GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
        const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
}

GeneralResult<OutputShape> unvalidatedConvert(const hal::V1_2::OutputShape& outputShape) {
    return OutputShape{
            .dimensions = outputShape.dimensions,
            .isSufficient = outputShape.isSufficient,
    };
}

GeneralResult<MeasureTiming> unvalidatedConvert(const hal::V1_2::MeasureTiming& measureTiming) {
    return static_cast<MeasureTiming>(measureTiming);
}

GeneralResult<Timing> unvalidatedConvert(const hal::V1_2::Timing& timing) {
    constexpr uint64_t kMaxTiming = std::chrono::floor<HalDuration>(Duration::max()).count();
    constexpr auto convertTiming = [](uint64_t halTiming) -> OptionalDuration {
        constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
        // The HAL encodes "no timing information" as the maximum uint64_t value.
        if (halTiming == kNoTiming) {
            return {};
        }
        // Saturate values that cannot be represented by the canonical Duration.
        if (halTiming > kMaxTiming) {
            return Duration::max();
        }
        return HalDuration{halTiming};
    };
    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
                  .timeInDriver = convertTiming(timing.timeInDriver)};
}

GeneralResult<Extension> unvalidatedConvert(const hal::V1_2::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
    };
}

GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
        const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation) {
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
            .isTensor = operandTypeInformation.isTensor,
            .byteSize = operandTypeInformation.byteSize,
    };
}

GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& hidlHandle) {
    if (hidlHandle.getNativeHandle() == nullptr) {
        return nullptr;
    }
    auto handle = NN_TRY(hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle()));
    return std::make_shared<const Handle>(std::move(handle));
}

GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
    return validatedConvert(deviceType);
}

GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

GeneralResult<Model> convert(const hal::V1_2::Model& model) {
    return validatedConvert(model);
}

GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {
    return validatedConvert(measureTiming);
}

GeneralResult<Timing> convert(const hal::V1_2::Timing& timing) {
    return validatedConvert(timing);
}

GeneralResult<SharedMemory> convert(const hardware::hidl_memory& memory) {
    return validatedConvert(memory);
}

GeneralResult<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
    return validatedConvert(extensions);
}

GeneralResult<std::vector<SharedHandle>> convert(
        const hidl_vec<hidl_handle>& handles) {
    return validatedConvert(handles);
}

GeneralResult<std::vector<OutputShape>> convert(
        const hidl_vec<hal::V1_2::OutputShape>& outputShapes) {
    return validatedConvert(outputShapes);
}

}  // namespace android::nn

namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {

using utils::unvalidatedConvert;

nn::GeneralResult<V1_0::OperandLifeTime> unvalidatedConvert(
        const nn::Operand::LifeTime& lifetime) {
    return V1_0::utils::unvalidatedConvert(lifetime);
}

nn::GeneralResult<V1_0::PerformanceInfo> unvalidatedConvert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::unvalidatedConvert(performanceInfo);
}

nn::GeneralResult<V1_0::DataLocation> unvalidatedConvert(const nn::DataLocation& location) {
    return V1_0::utils::unvalidatedConvert(location);
}

nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
        const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::unvalidatedConvert(operandValues);
}

nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
    return V1_0::utils::unvalidatedConvert(memory);
}

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
    }
    return halObject;
}

nn::GeneralResult<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
    return Operand::ExtraParams{};
}

nn::GeneralResult<Operand::ExtraParams> makeExtraParams(
        const nn::Operand::SymmPerChannelQuantParams& channelQuant) {
    Operand::ExtraParams ret;
    ret.channelQuant(NN_TRY(unvalidatedConvert(channelQuant)));
    return ret;
}

nn::GeneralResult<Operand::ExtraParams> makeExtraParams(
        const nn::Operand::ExtensionParams& extension) {
    Operand::ExtraParams ret;
    ret.extension(extension);
    return ret;
}

template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return unvalidatedConvert(canonical);
}

template <typename Type>
nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> validatedConvert(
        const std::vector<Type>& arguments) {
    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
    }
    return halObject;
}

}  // anonymous namespace

nn::GeneralResult<OperandType> unvalidatedConvert(const nn::OperandType& operandType) {
    return static_cast<OperandType>(operandType);
}

nn::GeneralResult<OperationType> unvalidatedConvert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType) {
    switch (deviceType) {
        case nn::DeviceType::UNKNOWN:
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid DeviceType UNKNOWN";
        case nn::DeviceType::OTHER:
        case nn::DeviceType::CPU:
        case nn::DeviceType::GPU:
        case nn::DeviceType::ACCELERATOR:
            return static_cast<DeviceType>(deviceType);
    }
    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
           << "Invalid DeviceType " << underlyingType(deviceType);
}

nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
    std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
    operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
    std::copy_if(capabilities.operandPerformance.asVector().begin(),
                 capabilities.operandPerformance.asVector().end(),
                 std::back_inserter(operandPerformance),
                 [](const nn::Capabilities::OperandPerformance& operandPerformance) {
                     return compliantVersion(operandPerformance.type).has_value();
                 });

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
            .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)),
    };
}

nn::GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
        const nn::Capabilities::OperandPerformance& operandPerformance) {
    return Capabilities::OperandPerformance{
            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
    };
}

nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
    return Operation{
            .type = NN_TRY(unvalidatedConvert(operation.type)),
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

nn::GeneralResult<SymmPerChannelQuantParams> unvalidatedConvert(
        const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
    return SymmPerChannelQuantParams{
            .scales = symmPerChannelQuantParams.scales,
            .channelDim = symmPerChannelQuantParams.channelDim,
    };
}

nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
    return Operand{
            .type = NN_TRY(unvalidatedConvert(operand.type)),
            .dimensions = operand.dimensions,
            .numberOfConsumers = 0,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
            .location = NN_TRY(unvalidatedConvert(operand.location)),
            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
    };
}

nn::GeneralResult<Operand::ExtraParams> unvalidatedConvert(
        const nn::Operand::ExtraParams& extraParams) {
    return std::visit([](const auto& x) { return makeExtraParams(x); }, extraParams);
}

nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be unvalidatedConverted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(unvalidatedConvert(model.main.operands));

    // Update number of consumers.
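    // The canonical Operand has no numberOfConsumers field (the converted operands above were
    // initialized with 0), so derive the counts from the main subgraph's operations before
    // assembling the HAL model.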
    const auto numberOfConsumers =
            NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations));
    CHECK(operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < operands.size(); ++i) {
        operands[i].numberOfConsumers = numberOfConsumers[i];
    }

    return Model{
            .operands = std::move(operands),
            .operations = NN_TRY(unvalidatedConvert(model.main.operations)),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
            .pools = NN_TRY(unvalidatedConvert(model.pools)),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
    };
}

nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
    return Model::ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
}

nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape) {
    return OutputShape{.dimensions = outputShape.dimensions,
                       .isSufficient = outputShape.isSufficient};
}

nn::GeneralResult<MeasureTiming> unvalidatedConvert(const nn::MeasureTiming& measureTiming) {
    return static_cast<MeasureTiming>(measureTiming);
}

nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
    constexpr auto convertTiming = [](nn::OptionalDuration canonicalTiming) -> uint64_t {
        constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
        if (!canonicalTiming.has_value()) {
            return kNoTiming;
        }
        // Round up to the next whole microsecond, the unit used by the V1_2 HAL.
        return std::chrono::ceil<HalDuration>(*canonicalTiming).count();
    };
    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
                  .timeInDriver = convertTiming(timing.timeInDriver)};
}

nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
    return Extension{
            .name = extension.name,
            .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes)),
    };
}

nn::GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
        const nn::Extension::OperandTypeInformation& operandTypeInformation) {
    return Extension::OperandTypeInformation{
            .type = operandTypeInformation.type,
            .isTensor = operandTypeInformation.isTensor,
            .byteSize = operandTypeInformation.byteSize,
    };
}

nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
    if (handle == nullptr) {
        return {};
    }
    return hal::utils::hidlHandleFromSharedHandle(*handle);
}

nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
    return validatedConvert(deviceType);
}

nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

nn::GeneralResult<Model> convert(const nn::Model& model) {
    return validatedConvert(model);
}

nn::GeneralResult<MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
    return validatedConvert(measureTiming);
}

nn::GeneralResult<Timing> convert(const nn::Timing& timing) {
    return validatedConvert(timing);
}

nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
    return validatedConvert(extensions);
}

nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles) {
    return validatedConvert(handles);
}

nn::GeneralResult<hidl_vec<OutputShape>> convert(
        const std::vector<nn::OutputShape>& outputShapes) {
    return validatedConvert(outputShapes);
}

nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
    return V1_1::utils::convert(deviceStatus);
}

nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
    return V1_1::utils::convert(request);
}

nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
    return V1_1::utils::convert(status);
}

nn::GeneralResult<V1_1::ExecutionPreference> convert(
        const nn::ExecutionPreference& executionPreference) {
    return V1_1::utils::convert(executionPreference);
}

}  // namespace android::hardware::neuralnetworks::V1_2::utils
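
// Illustrative usage sketch: the helper name roundTripTiming and the calling code below are
// hypothetical, and only demonstrate how the GeneralResult-returning convert() overloads defined
// in this file are typically consumed, using the Timing conversions as an example.
//
//     #include <nnapi/Result.h>
//     #include <nnapi/Types.h>
//     #include <nnapi/hal/1.2/Conversions.h>
//
//     using namespace android;
//
//     nn::GeneralResult<nn::Timing> roundTripTiming(const nn::Timing& canonical) {
//         // Canonical -> HAL: each OptionalDuration is encoded as uint64_t microseconds, with
//         // UINT64_MAX standing for "no timing information".
//         const auto halTiming =
//                 NN_TRY(hardware::neuralnetworks::V1_2::utils::convert(canonical));
//         // HAL -> canonical: UINT64_MAX maps back to an empty OptionalDuration, and values too
//         // large for the canonical Duration saturate to Duration::max().
//         return nn::convert(halTiming);
//     }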