/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATOR_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATOR_H_

#include <memory>
#include <unordered_map>

#include "flatbuffers/flatbuffers.h"  // TF:flatbuffers
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"

namespace tflite {
namespace optimize {
namespace calibration {
// Warning: This is not a public API and is subject to change.

// Builds an interpreter that logs the calibration data in memory.
// The calibration data can be recovered using |calibration_reader|.
//
// Sample usage:
// std::unique_ptr<Interpreter> interpreter;
// std::unique_ptr<CalibrationReader> calibration_reader;
// BuiltinOpResolver resolver = ...
// FlatBufferModel model = ...
//
// BuildLoggingInterpreter(model, resolver, &interpreter,
//                         &calibration_reader);
//
// * Allocate tensors.
// * Call interpreter->Invoke() on the calibration dataset.
//
// Calibration data can be read either directly, by calling
// std::unordered_map<int, CalibrationStats> tensor_index_to_stats;
// calibration_reader->GetTensorStatsAsMap(&tensor_index_to_stats);
//
// or by adding the calibration data to the model itself:
// ModelT* original_floating_point_model = ...
// calibration_reader->AddCalibrationToModel(original_floating_point_model);
//
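// A fuller end-to-end sketch of the calibration loop, assuming a single float
// input tensor at index 0; kNumCalibrationBatches and FillCalibrationBatch()
// are hypothetical placeholders for the caller's own data-feeding logic:
//
//   std::unique_ptr<Interpreter> interpreter;
//   std::unique_ptr<CalibrationReader> calibration_reader;
//   if (BuildLoggingInterpreter(model, resolver, &interpreter,
//                               &calibration_reader) != kTfLiteOk) {
//     // Handle error.
//   }
//   interpreter->AllocateTensors();
//   for (int i = 0; i < kNumCalibrationBatches; ++i) {
//     float* input = interpreter->typed_input_tensor<float>(0);
//     FillCalibrationBatch(input, i);  // Copy one batch of calibration data.
//     interpreter->Invoke();
//   }
//   // Embed the collected per-tensor stats into the floating point model.
//   calibration_reader->AddCalibrationToModel(original_floating_point_model);
//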
TfLiteStatus BuildLoggingInterpreter(
    const FlatBufferModel& model, const OpResolver& op_resolver,
    std::unique_ptr<Interpreter>* interpreter,
    std::unique_ptr<CalibrationReader>* calibration_reader);

}  // namespace calibration
}  // namespace optimize
}  // namespace tflite

#endif  // TENSORFLOW_LITE_TOOLS_OPTIMIZE_CALIBRATOR_H_