/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_C_C_API_INTERNAL_H_
#define TENSORFLOW_LITE_C_C_API_INTERNAL_H_

#include <stdarg.h>

#include <memory>
#include <vector>

#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/mutable_op_resolver.h"

// Internal structures and subroutines used by the C API. These are likely to
// change and should not be depended on directly by any C API clients.
//
// NOTE: This header does not follow C conventions and does not define a C API.
// It is effectively an (internal) implementation detail of the C API.

struct TfLiteModel {
  // Sharing is safe as FlatBufferModel is const.
  std::shared_ptr<const tflite::FlatBufferModel> impl;
};

// The `TfLiteOpResolverCallbacks` struct is an abstract callback interface
// that contains function pointers for callbacks that return a
// `TfLiteRegistration` given an op code or custom op name. This mechanism is
// used to map ops referenced in the flatbuffer model to executable function
// pointers (`TfLiteRegistration`s).
// This struct mirrors the tflite::OpResolver C++ abstract base class.
struct TfLiteOpResolverCallbacks {
  // Opaque data that gets passed down to the callback functions.
  void* user_data = nullptr;

  // Callback that finds the op registration for a builtin operator by enum
  // code. The `user_data` parameter will be set to the
  // `op_resolver_user_data` value that was passed to
  // `TfLiteInterpreterOptionsSetOpResolver`.
  const TfLiteRegistration* (*find_builtin_op)(void* user_data,
                                               TfLiteBuiltinOperator op,
                                               int version);
  // Callback that finds the op registration of a custom operator by op name.
  // The `user_data` parameter will be set to the `op_resolver_user_data` value
  // that was passed to `TfLiteInterpreterOptionsSetOpResolver`.
  const TfLiteRegistration* (*find_custom_op)(void* user_data, const char* op,
                                              int version);
};
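
// An illustrative sketch (not part of this header) of how a C API client
// could supply these callbacks through `TfLiteInterpreterOptionsSetOpResolver`,
// the entry point referenced in the comments above. The lookup logic, the
// registration objects (`kMyAddRegistration`, `kMyCustomOpRegistration`), and
// the exact parameter order of the setter are assumptions made for
// illustration only.
//
//   static const TfLiteRegistration* MyFindBuiltinOp(void* user_data,
//                                                    TfLiteBuiltinOperator op,
//                                                    int version) {
//     // `user_data` receives the `op_resolver_user_data` pointer passed
//     // below; this sketch does not use it.
//     if (op == kTfLiteBuiltinAdd && version == 1) return &kMyAddRegistration;
//     return nullptr;  // Unknown builtin: resolution fails.
//   }
//
//   static const TfLiteRegistration* MyFindCustomOp(void* user_data,
//                                                   const char* op,
//                                                   int version) {
//     if (strcmp(op, "MyCustomOp") == 0) return &kMyCustomOpRegistration;
//     return nullptr;  // Unknown custom op: resolution fails.
//   }
//
//   TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
//   TfLiteInterpreterOptionsSetOpResolver(options, MyFindBuiltinOp,
//                                         MyFindCustomOp,
//                                         /*op_resolver_user_data=*/nullptr);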

// This struct mirrors the tflite::ErrorReporter C++ abstract base class.
struct TfLiteErrorReporterCallback {
  // Opaque data that gets passed down to the callback function.
  void* user_data = nullptr;

  // Callback function that reports an error.
  void (*error_reporter)(void* user_data, const char* format,
                         va_list args) = nullptr;
};

struct TfLiteInterpreterOptions {
  enum {
    kDefaultNumThreads = -1,
  };
  int num_threads = kDefaultNumThreads;

  tflite::MutableOpResolver mutable_op_resolver;

  TfLiteOpResolverCallbacks op_resolver_callbacks = {};

  std::vector<TfLiteDelegate*> delegates;

  TfLiteErrorReporterCallback error_reporter_callback;

  bool use_nnapi = false;

  // Determines whether to allow automatic fallback to CPU.
  // If true and one or more delegates were set, then if Invoke with delegates
  // fails, it will be retried automatically without delegates.
  bool enable_delegate_fallback = false;
};

struct TfLiteInterpreter {
  // Taking a reference to the (const) model data avoids lifetime-related
  // issues and complexity with the TfLiteModel's existence.
  std::shared_ptr<const tflite::FlatBufferModel> model;

  // The interpreter does not take ownership of the provided ErrorReporter
  // instance, so we ensure its validity here. Note that the interpreter may
  // use the reporter in its destructor, so the reporter should be declared
  // first.
  std::unique_ptr<tflite::ErrorReporter> optional_error_reporter;

  std::unique_ptr<tflite::Interpreter> impl;

  bool enable_delegate_fallback;
};

namespace tflite {
namespace internal {

// This adds the builtin and/or custom operators specified in
// `optional_options` (if any) to `mutable_resolver`, and then returns a newly
// created TfLiteInterpreter that uses `mutable_resolver` as its OpResolver,
// applies any other settings in `optional_options`, and uses the provided
// `model`.
//
// * `model` must be a valid model instance. The caller retains ownership of
//   the object, and can destroy it immediately after creating the
//   interpreter; the interpreter will maintain its own reference to the
//   underlying model data.
// * `optional_options` may be null. The caller retains ownership of the
//   object, and can safely destroy it immediately after creating the
//   interpreter.
// * `mutable_resolver` must not be null. The caller retains ownership of the
//   MutableOpResolver object, and can safely destroy it immediately after
//   creating the interpreter.
//
// NOTE: The client *must* explicitly allocate tensors before attempting to
// access input tensor data or invoke the interpreter.
TfLiteInterpreter* InterpreterCreateWithOpResolver(
    const TfLiteModel* model, const TfLiteInterpreterOptions* optional_options,
    tflite::MutableOpResolver* mutable_resolver);

}  // namespace internal
}  // namespace tflite

#endif  // TENSORFLOW_LITE_C_C_API_INTERNAL_H_
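
// An illustrative sketch (not part of this header) of how a public entry
// point such as `TfLiteInterpreterCreate` might drive
// `InterpreterCreateWithOpResolver`. The use of
// `tflite::ops::builtin::BuiltinOpResolver` (from
// "tensorflow/lite/kernels/register.h") as the default resolver is an
// assumption for illustration; the actual wiring lives in the C API sources.
//
//   #include "tensorflow/lite/kernels/register.h"
//
//   TfLiteInterpreter* TfLiteInterpreterCreate(
//       const TfLiteModel* model,
//       const TfLiteInterpreterOptions* optional_options) {
//     // BuiltinOpResolver derives from MutableOpResolver, so it can be passed
//     // directly. Per the contract above, the caller retains ownership and
//     // may destroy the resolver right after creation, so a stack-allocated
//     // resolver is sufficient here.
//     tflite::ops::builtin::BuiltinOpResolver resolver;
//     return tflite::internal::InterpreterCreateWithOpResolver(
//         model, optional_options, &resolver);
//   }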