
Searched refs:quant_specs (Results 1 – 16 of 16) sorted by relevance

/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
quantize_model.cc
81 TFL::QuantizationSpecs quant_specs; in QuantizeModel() local
82 quant_specs.inference_type = tflite::TflTypeToTfType(inference_type); in QuantizeModel()
83 quant_specs.post_training_quantization = true; in QuantizeModel()
84 quant_specs.disable_per_channel = disable_per_channel; in QuantizeModel()
93 quant_specs.inference_type = input_tf_type; in QuantizeModel()
96 quant_specs.verify_numeric = verify_numeric; in QuantizeModel()
97 quant_specs.legacy_float_scale = legacy_float_scale; in QuantizeModel()
99 pm.addPass(TFL::CreatePrepareQuantizePass(quant_specs)); in QuantizeModel()
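Read together, lines 81-99 show the post-training path end to end: a local QuantizationSpecs is filled in and handed to the prepare-quantize pass. A minimal sketch of that flow, restricted to the fields visible in the hits (pm is the surrounding mlir::PassManager):

TFL::QuantizationSpecs quant_specs;
quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
quant_specs.post_training_quantization = true;         // calibration-based PTQ
quant_specs.disable_per_channel = disable_per_channel; // per-tensor only if set
quant_specs.verify_numeric = verify_numeric;           // insert numeric-verify ops
quant_specs.legacy_float_scale = legacy_float_scale;
pm.addPass(TFL::CreatePrepareQuantizePass(quant_specs));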
/external/tensorflow/tensorflow/compiler/mlir/lite/
tf_tfl_passes.cc
45 void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs, in AddQuantizationPasses() argument
48 mlir::TFL::CreatePrepareQuantizePass(quant_specs)); in AddQuantizationPasses()
49 if (quant_specs.default_ranges.first.hasValue() || in AddQuantizationPasses()
50 quant_specs.default_ranges.second.hasValue()) { in AddQuantizationPasses()
53 quant_specs.default_ranges.first.getValueOr(0.0), in AddQuantizationPasses()
54 quant_specs.default_ranges.second.getValueOr(0.0), in AddQuantizationPasses()
55 quant_specs.IsSignedInferenceType())); in AddQuantizationPasses()
58 mlir::TFL::CreateQuantizePass(quant_specs.verify_numeric)); in AddQuantizationPasses()
60 quant_specs.inference_type != quant_specs.inference_input_type; in AddQuantizationPasses()
99 if (!pass_config.quant_specs.serialized_quant_stats.empty()) { in AddTFToTFLConversionPasses()
[all …]
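The default_ranges logic at lines 49-55 is the notable part: when either endpoint of the fallback range is set, a default-quant-params pass runs before the quantize pass. A sketch of that branch, assuming the factory is TFL's CreateDefaultQuantParamsPass (the pass name itself is outside the hits):

if (quant_specs.default_ranges.first.hasValue() ||
    quant_specs.default_ranges.second.hasValue()) {
  pass_manager->addPass(mlir::TFL::CreateDefaultQuantParamsPass(
      quant_specs.default_ranges.first.getValueOr(0.0),   // fallback min
      quant_specs.default_ranges.second.getValueOr(0.0),  // fallback max
      quant_specs.IsSignedInferenceType()));
}
pass_manager->addPass(mlir::TFL::CreateQuantizePass(quant_specs.verify_numeric));

Line 60 then compares inference_type against inference_input_type, which appears to decide whether input/output type-adaptor ops are emitted.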
tf_tfl_translate.cc
194 mlir::TFL::QuantizationSpecs quant_specs; in main() local
196 inference_type, &quant_specs)) { in main()
201 quant_specs.weight_quantization = true; in main()
203 quant_specs.inference_type = tensorflow::DT_QINT8; in main()
205 quant_specs.inference_type = tensorflow::DT_HALF; in main()
212 quant_specs.inference_input_type = quant_specs.inference_type; in main()
223 quant_specs.serialized_quant_stats = file->getBuffer().str(); in main()
226 mlir::TFL::PassConfig pass_config(quant_specs); in main()
245 /*select_user_tf_ops=*/{}, quant_specs, tags, &result, &pm); in main()
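Lines 201-212 show the weight-only setup in the standalone translator: raise weight_quantization, pick the storage type, and mirror it into the input type. A condensed sketch; the branch condition stands in for the actual command-line flag, which is outside the hits:

quant_specs.weight_quantization = true;
if (quantize_weights_to_int8) {                      // hypothetical flag name
  quant_specs.inference_type = tensorflow::DT_QINT8;
} else {
  quant_specs.inference_type = tensorflow::DT_HALF;  // fp16 weights
}
quant_specs.inference_input_type = quant_specs.inference_type;  // line 212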
tf_to_tfl_flatbuffer.cc
141 const mlir::TFL::QuantizationSpecs& quant_specs, in ConvertTFExecutorToTFLOrFlatbuffer() argument
174 if (!quant_specs.RunWeightQuantization()) { in ConvertTFExecutorToTFLOrFlatbuffer()
206 if (quant_specs.inference_type == tensorflow::DT_QINT8) { in ConvertTFExecutorToTFLOrFlatbuffer()
208 } else if (quant_specs.inference_type == tensorflow::DT_HALF) { in ConvertTFExecutorToTFLOrFlatbuffer()
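Lines 174-208 suggest how the conversion entry point dispatches on the specs; the control flow below is reconstructed from the hit line numbers alone, so treat it as approximate:

if (!quant_specs.RunWeightQuantization()) {
  // line 174: full (or no) quantization is handled by the pass pipeline
} else if (quant_specs.inference_type == tensorflow::DT_QINT8) {
  // line 206: weight-only quantization to int8 on the emitted flatbuffer
} else if (quant_specs.inference_type == tensorflow::DT_HALF) {
  // line 208: weight-only quantization to float16
}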
tf_tfl_passes.h
37 void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
tf_to_tfl_flatbuffer.h
67 const mlir::TFL::QuantizationSpecs& quant_specs,
/external/tensorflow/tensorflow/compiler/mlir/lite/python/
tf_tfl_flatbuffer_helpers.cc
196 mlir::TFL::QuantizationSpecs* quant_specs, std::vector<string>* node_names, in PopulateQuantizationSpecs() argument
201 quant_specs->inference_input_type = in PopulateQuantizationSpecs()
207 if (quant_specs->inference_input_type != tensorflow::DT_FLOAT) { in PopulateQuantizationSpecs()
208 inference_type = quant_specs->inference_input_type; in PopulateQuantizationSpecs()
245 inference_type, quant_specs)) { in PopulateQuantizationSpecs()
253 quant_specs->weight_quantization = true; in PopulateQuantizationSpecs()
255 quant_specs->inference_type = tensorflow::DT_HALF; in PopulateQuantizationSpecs()
256 quant_specs->inference_input_type = tensorflow::DT_HALF; in PopulateQuantizationSpecs()
258 quant_specs->inference_type = tensorflow::DT_QINT8; in PopulateQuantizationSpecs()
259 quant_specs->inference_input_type = tensorflow::DT_QINT8; in PopulateQuantizationSpecs()
[all …]
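PopulateQuantizationSpecs is where the Python converter's flags land; note lines 207-208, where a non-float inference_input_type overrides the inference type. The weight-only portion at lines 253-259 reduces to a small helper; this reconstruction names the fp16 condition hypothetically, since the real flag is outside the hits:

// Hypothetical condensation of lines 253-259.
void ConfigureWeightOnlyQuantization(bool quantize_to_float16,
                                     mlir::TFL::QuantizationSpecs* quant_specs) {
  quant_specs->weight_quantization = true;
  if (quantize_to_float16) {
    quant_specs->inference_type = tensorflow::DT_HALF;
    quant_specs->inference_input_type = tensorflow::DT_HALF;
  } else {
    quant_specs->inference_type = tensorflow::DT_QINT8;
    quant_specs->inference_input_type = tensorflow::DT_QINT8;
  }
}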
graphdef_to_tfl_flatbuffer.cc
53 mlir::TFL::QuantizationSpecs quant_specs; in ConvertGraphDefToTFLiteFlatBuffer() local
64 model_flags, toco_flags, &quant_specs, &node_names, &node_dtypes, in ConvertGraphDefToTFLiteFlatBuffer()
88 mlir::TFL::PassConfig pass_config(quant_specs); in ConvertGraphDefToTFLiteFlatBuffer()
saved_model_to_tfl_flatbuffer.cc
126 mlir::TFL::QuantizationSpecs quant_specs; in ConvertSavedModelToTFLiteFlatBuffer() local
137 model_flags, toco_flags, &quant_specs, &node_names, &node_dtypes, in ConvertSavedModelToTFLiteFlatBuffer()
173 mlir::TFL::PassConfig pass_config(quant_specs); in ConvertSavedModelToTFLiteFlatBuffer()
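Both converter entry points follow the same two steps: PopulateQuantizationSpecs fills a local quant_specs from the model and toco flags, then the result seeds the PassConfig. Boiled down (the call's remaining output parameters are truncated in the hits, so they are not reproduced here):

mlir::TFL::QuantizationSpecs quant_specs;
// PopulateQuantizationSpecs(model_flags, toco_flags, &quant_specs, ...) runs here.
mlir::TFL::PassConfig pass_config(quant_specs);  // specs copied into the config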
tf_tfl_flatbuffer_helpers.h
42 mlir::TFL::QuantizationSpecs* quant_specs, std::vector<string>* node_names,
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/
quantization_config.cc
46 QuantizationSpecs* quant_specs) { in ParseInputNodeQuantSpecs() argument
79 quant_specs); in ParseInputNodeQuantSpecs()
86 tensorflow::DataType inference_type, QuantizationSpecs* quant_specs) { in GetInputNodeQuantSpecs() argument
87 quant_specs->inference_type = inference_type; in GetInputNodeQuantSpecs()
100 quant_specs->input_ranges.push_back({node_mins[i], node_maxs[i]}); in GetInputNodeQuantSpecs()
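GetInputNodeQuantSpecs (lines 86-100) records the per-input calibration ranges. An illustrative reconstruction; the signature is simplified to the parameters visible in the hits, and the element type of node_mins/node_maxs is assumed to be double:

void SetInputNodeQuantSpecs(const std::vector<double>& node_mins,
                            const std::vector<double>& node_maxs,
                            tensorflow::DataType inference_type,
                            mlir::TFL::QuantizationSpecs* quant_specs) {
  quant_specs->inference_type = inference_type;  // line 87
  for (size_t i = 0; i < node_mins.size(); ++i) {
    // line 100: one {min, max} pair per input node
    quant_specs->input_ranges.push_back({node_mins[i], node_maxs[i]});
  }
}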
quantization_config.h
144 QuantizationSpecs* quant_specs);
153 tensorflow::DataType inference_type, QuantizationSpecs* quant_specs);
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/
prepare_quantize_helper.h
226 const QuantizationSpecs& quant_specs,
229 quant_specs(quant_specs) {}
232 QuantizationSpecs quant_specs;
315 /*narrow_range=*/true, quant_specs.legacy_float_scale) in processConstantOp()
389 if (quant_specs.legacy_float_scale) { in replaceStatsOp()
406 const QuantizationSpecs& quant_specs) in ConvertLstmStatsToQDQs()
408 : ConvertOpStatsToQDQs<SourceOp>(context, quant_specs) {} in ConvertLstmStatsToQDQs()
469 /*isSigned=*/this->quant_specs.IsSignedInferenceType()); in processIntermediates()
470 if (this->quant_specs.legacy_float_scale) { in processIntermediates()
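The transform side consumes the specs through rewrite patterns: ConvertOpStatsToQDQs keeps its own copy (lines 226-232) and consults legacy_float_scale and IsSignedInferenceType() when turning stats ops into quantize/dequantize pairs, while ConvertLstmStatsToQDQs just forwards the specs to that base (lines 406-408). A stripped-down skeleton of that shape; the OpRewritePattern base and the omitted match logic are assumptions:

template <typename SourceOp>
struct ConvertOpStatsToQDQsSketch : public mlir::OpRewritePattern<SourceOp> {
  ConvertOpStatsToQDQsSketch(mlir::MLIRContext* context,
                             const mlir::TFL::QuantizationSpecs& quant_specs)
      : mlir::OpRewritePattern<SourceOp>(context), quant_specs(quant_specs) {}
  // Consulted while rewriting (lines 389 and 469-470):
  //   legacy_float_scale      -> compute scales in float32 for old-quantizer parity
  //   IsSignedInferenceType() -> choose signed vs. unsigned storage
  mlir::TFL::QuantizationSpecs quant_specs;
};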
prepare_quantize.cc
116 explicit PrepareQuantizePass(const QuantizationSpecs& quant_specs) in PrepareQuantizePass() argument
117 : quant_specs_(quant_specs) {} in PrepareQuantizePass()
419 const QuantizationSpecs& quant_specs) { in CreatePrepareQuantizePass() argument
420 return std::make_unique<PrepareQuantizePass>(quant_specs); in CreatePrepareQuantizePass()
passes.h
56 const QuantizationSpecs& quant_specs);
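prepare_quantize.cc (lines 116-117 and 419-420) plus the passes.h declaration (line 56) give the factory shape: the pass takes the specs by const reference and copies them at construction. A sketch, with the return type assumed from TFL's other pass factories:

std::unique_ptr<mlir::OperationPass<mlir::FuncOp>> CreatePrepareQuantizePass(
    const QuantizationSpecs& quant_specs) {
  return std::make_unique<PrepareQuantizePass>(quant_specs);  // lines 419-420
}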
/external/tensorflow/tensorflow/compiler/mlir/lite/common/
tfl_pass_config.h
34 quant_specs(std::move(specs)),
52 QuantizationSpecs quant_specs; member
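Finally, tfl_pass_config.h is how the specs travel: PassConfig moves the caller's copy into its quant_specs member, which AddQuantizationPasses and AddTFToTFLConversionPasses then read. A condensed view, with PassConfig's other members elided:

struct PassConfig {
  explicit PassConfig(QuantizationSpecs specs)
      : quant_specs(std::move(specs)) {}  // line 34
  QuantizationSpecs quant_specs;          // line 52: read by the pass builders
};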