/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_H_

#include "absl/container/flat_hash_map.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"

namespace tflite {
namespace gpu {

// Validates which operations are supported and returns an array of operations
// to replace with GPU kernels. The caller owns the returned TfLiteIntArray and
// must free it (e.g. with TfLiteIntArrayFree).
// 'max_delegated_partitions' limits the number of partitions to delegate, as a
// graph may contain multiple partitions (each consisting of a subset of ops)
// that can be replaced.
TfLiteIntArray* GetOpsToReplace(TfLiteContext* context,
                                bool allow_quant_ops = false,
                                int max_delegated_partitions = 1);
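
// A minimal usage sketch for GetOpsToReplace (illustrative only; the
// 'kernel_registration' and 'delegate' values are assumed to come from the
// surrounding delegate implementation):
//
//   TfLiteIntArray* ops_to_replace =
//       GetOpsToReplace(context, /*allow_quant_ops=*/false,
//                       /*max_delegated_partitions=*/1);
//   context->ReplaceNodeSubsetsWithDelegateKernels(
//       context, kernel_registration, ops_to_replace, delegate);
//   TfLiteIntArrayFree(ops_to_replace);  // The caller owns the array.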

// Extracts the TFLite delegate execution plan from the given TFLite context
// and converts it into the generic graph format.
//
// If the model is quantized, 'quant_conversion_map' maps the index of each
// dequantized (floating-point) tensor to that of the original (fixed-point)
// tensor, and vice versa.
// NOTE: not all of these new tensors need data or memory allocated for them;
// that is only required for the overall GPU graph inputs & outputs. The
// delegate should take care of it by setting the appropriate
// TfLiteNode->temporaries.
absl::Status BuildModel(
    TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
    GraphFloat32* graph,
    absl::flat_hash_map<int, int>* quant_conversion_map = nullptr);
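
// A minimal sketch of direct use (illustrative; most callers should prefer
// BuildFinalModel below, which additionally applies the graph
// transformations):
//
//   GraphFloat32 graph;
//   absl::flat_hash_map<int, int> quant_conversion_map;
//   absl::Status status =
//       BuildModel(context, delegate_params, &graph, &quant_conversion_map);
//   if (!status.ok()) {
//     // Report the error and fall back to the default TFLite path.
//   }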

// Same as above, but also applies all transformations to the final graph.
// Prefer this method over BuildModel.
//
// If the model is quantized, 'quant_conversion_map' maps the index of each
// dequantized (floating-point) tensor to that of the original TFLite
// (fixed-point) tensor, and vice versa.
// NOTE: not all of these new tensors need data or memory allocated for them;
// that is only required for the overall GPU graph inputs & outputs. The
// delegate should take care of it by setting the appropriate
// TfLiteNode->temporaries.
absl::Status BuildFinalModel(
    TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
    GraphFloat32* graph,
    absl::flat_hash_map<int, int>* quant_conversion_map = nullptr);
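
// A hedged usage sketch, e.g. from a delegate kernel's initialization step,
// where 'context' and 'delegate_params' are supplied by the TFLite runtime:
//
//   GraphFloat32 graph;
//   absl::flat_hash_map<int, int> quant_conversion_map;
//   absl::Status status = BuildFinalModel(context, delegate_params, &graph,
//                                         &quant_conversion_map);
//   if (!status.ok()) {
//     // Building the GPU graph failed; fall back to CPU execution.
//   }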

// Module-internal converter, exposed for unit testing purposes only.
absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
                                            TensorRef<BHWC>* tensor_ref);

}  // namespace gpu
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_H_