/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

#include <cstdlib>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

namespace {

// Copies the contents of the flatbuffer int vector `flat_vector` into the
// int array `buffer`. `flat_vector` and `buffer` represent the same
// configuration for a given operation.
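//
// A minimal usage sketch (mirrors the RESHAPE case below; `dims` and
// "example_op" are illustrative names, not part of this file):
//
//   int dims[8];
//   TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
//       sizeof(dims), flat_vector, dims, error_reporter, "example_op"));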
TfLiteStatus FlatBufferIntVectorToArray(
    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
  if (!flat_vector) {
    error_reporter->Report("Input array not provided for operation '%s'.\n",
                           op_name);
    return kTfLiteError;
  } else {
    int num_dimensions = flat_vector->Length();
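    // `max_size_of_buffer` is a size in bytes, so convert it to a maximum
    // element count before comparing.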
    if (num_dimensions > max_size_of_buffer / sizeof(int)) {
      error_reporter->Report(
          "Found too many dimensions in the input array of operation '%s'.\n",
          op_name);
      return kTfLiteError;
    } else {
      for (int i = 0; i < num_dimensions; ++i) {
        buffer[i] = flat_vector->Get(i);
      }
    }
  }
  return kTfLiteOk;
}

}  // namespace

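// Converts a schema-level TensorType enum into the runtime TfLiteType.
// A minimal usage sketch (assuming an `ErrorReporter*` named `error_reporter`
// is in scope):
//
//   TfLiteType type;
//   if (ConvertTensorType(TensorType_FLOAT32, &type, error_reporter) ==
//       kTfLiteOk) {
//     // `type` is now kTfLiteFloat32.
//   }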
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter) {
  switch (tensor_type) {
    case TensorType_FLOAT32:
      *type = kTfLiteFloat32;
      break;
    case TensorType_INT16:
      *type = kTfLiteInt16;
      break;
    case TensorType_INT32:
      *type = kTfLiteInt32;
      break;
    case TensorType_UINT8:
      *type = kTfLiteUInt8;
      break;
    case TensorType_INT8:
      *type = kTfLiteInt8;
      break;
    case TensorType_INT64:
      *type = kTfLiteInt64;
      break;
    case TensorType_STRING:
      *type = kTfLiteString;
      break;
    case TensorType_BOOL:
      *type = kTfLiteBool;
      break;
    case TensorType_COMPLEX64:
      *type = kTfLiteComplex64;
      break;
    default:
      error_reporter->Report("Unimplemented data type %s (%d) in tensor\n",
                             EnumNameTensorType(tensor_type), tensor_type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}

// Parse the appropriate data out of the op.
//
// This handles builtin data explicitly, as each builtin op has its own
// flatbuffer options schema.
// If it returns kTfLiteOk, it passes the data out with `builtin_data`, which
// needs to be released by calling `free`.
// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
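//
// A usage sketch (illustrative only; `MallocDataAllocator` stands in for any
// `BuiltinDataAllocator` whose allocations can be released with `free`):
//
//   void* builtin_data = nullptr;
//   MallocDataAllocator allocator;
//   if (ParseOpData(op, BuiltinOperator_CONV_2D, error_reporter, &allocator,
//                   &builtin_data) == kTfLiteOk) {
//     auto* params = static_cast<TfLiteConvParams*>(builtin_data);
//     // ... use params->padding, params->stride_width, etc. ...
//     free(builtin_data);
//   }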
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
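  // Local helpers that map flatbuffer schema enums onto their TfLite
  // equivalents. Unrecognized values fall back to a conservative default.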
  auto parse_padding = [](Padding padding) {
    switch (padding) {
      case Padding_SAME:
        return kTfLitePaddingSame;
      case Padding_VALID:
        return kTfLitePaddingValid;
    }
    return kTfLitePaddingUnknown;
  };
  auto parse_activation = [](ActivationFunctionType activation) {
    switch (activation) {
      case ActivationFunctionType_NONE:
        return kTfLiteActNone;
      case ActivationFunctionType_RELU:
        return kTfLiteActRelu;
      case ActivationFunctionType_RELU_N1_TO_1:
        return kTfLiteActRelu1;
      case ActivationFunctionType_RELU6:
        return kTfLiteActRelu6;
      case ActivationFunctionType_TANH:
        return kTfLiteActTanh;
      case ActivationFunctionType_SIGN_BIT:
        return kTfLiteActSignBit;
    }
    return kTfLiteActNone;
  };
  auto parseLSHProjectionType = [](LSHProjectionType type) {
    switch (type) {
      case LSHProjectionType_SPARSE:
        return kTfLiteLshProjectionSparse;
      case LSHProjectionType_DENSE:
        return kTfLiteLshProjectionDense;
      default:
        return kTfLiteLshProjectionUnknown;
    }
  };
  auto parseCombinerType = [](CombinerType type) {
    switch (type) {
      case CombinerType_MEAN:
        return kTfLiteCombinerTypeMean;
      case CombinerType_SQRTN:
        return kTfLiteCombinerTypeSqrtn;
      case CombinerType_SUM:
      default:
        return kTfLiteCombinerTypeSum;
    }
  };

  *builtin_data = nullptr;
  switch (op_type) {
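    // Each case below allocates a POD parameter struct, fills it from the
    // op's builtin options when they are present, and hands ownership to the
    // caller through `builtin_data`.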
    case BuiltinOperator_CONV_2D: {
      TfLiteConvParams* params = allocator->AllocatePOD<TfLiteConvParams>();
      if (auto* conv_params = op->builtin_options_as_Conv2DOptions()) {
        params->padding = parse_padding(conv_params->padding());
        params->stride_width = conv_params->stride_w();
        params->stride_height = conv_params->stride_h();
        params->activation =
            parse_activation(conv_params->fused_activation_function());

        params->dilation_width_factor = conv_params->dilation_w_factor();
        params->dilation_height_factor = conv_params->dilation_h_factor();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_CAST: {
      TfLiteCastParams* params = allocator->AllocatePOD<TfLiteCastParams>();
      if (auto* schema_params = op->builtin_options_as_CastOptions()) {
        auto in_status =
            ConvertTensorType(schema_params->in_data_type(),
                              &params->in_data_type, error_reporter);
        auto out_status =
            ConvertTensorType(schema_params->out_data_type(),
                              &params->out_data_type, error_reporter);
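        // On failure, release the partially initialized params so the caller
        // is never handed a dangling allocation.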
        if (in_status != kTfLiteOk || out_status != kTfLiteOk) {
          allocator->Deallocate(params);
          return kTfLiteError;
        }
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_LSH_PROJECTION: {
      TfLiteLSHProjectionParams* params =
          allocator->AllocatePOD<TfLiteLSHProjectionParams>();
      if (auto* lshParams = op->builtin_options_as_LSHProjectionOptions()) {
        params->type = parseLSHProjectionType(lshParams->type());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_AVERAGE_POOL_2D:
    case BuiltinOperator_MAX_POOL_2D:
    case BuiltinOperator_L2_POOL_2D: {
      TfLitePoolParams* params = allocator->AllocatePOD<TfLitePoolParams>();
      if (auto* pool_params = op->builtin_options_as_Pool2DOptions()) {
        params->padding = parse_padding(pool_params->padding());
        params->stride_width = pool_params->stride_w();
        params->stride_height = pool_params->stride_h();
        params->filter_width = pool_params->filter_width();
        params->filter_height = pool_params->filter_height();
        params->activation =
            parse_activation(pool_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      TfLiteDepthwiseConvParams* params =
          allocator->AllocatePOD<TfLiteDepthwiseConvParams>();
      if (auto* conv_params = op->builtin_options_as_DepthwiseConv2DOptions()) {
        params->padding = parse_padding(conv_params->padding());
        params->stride_width = conv_params->stride_w();
        params->stride_height = conv_params->stride_h();
        params->depth_multiplier = conv_params->depth_multiplier();
        params->activation =
            parse_activation(conv_params->fused_activation_function());

        params->dilation_width_factor = conv_params->dilation_w_factor();
        params->dilation_height_factor = conv_params->dilation_h_factor();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SVDF: {
      TfLiteSVDFParams* params = allocator->AllocatePOD<TfLiteSVDFParams>();
      if (auto* svdf_params = op->builtin_options_as_SVDFOptions()) {
        params->rank = svdf_params->rank();
        params->activation =
            parse_activation(svdf_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      auto params = allocator->AllocatePOD<TfLiteSequenceRNNParams>();
      if (auto* sequence_rnn_params =
              op->builtin_options_as_SequenceRNNOptions()) {
        params->activation =
            parse_activation(sequence_rnn_params->fused_activation_function());
        params->time_major = sequence_rnn_params->time_major();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      auto params =
          allocator->AllocatePOD<TfLiteBidirectionalSequenceRNNParams>();
      if (auto* bidi_sequence_rnn_params =
              op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
        params->activation = parse_activation(
            bidi_sequence_rnn_params->fused_activation_function());
        params->time_major = bidi_sequence_rnn_params->time_major();
        params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_RNN: {
      TfLiteRNNParams* params = allocator->AllocatePOD<TfLiteRNNParams>();
      if (auto* rnn_params = op->builtin_options_as_RNNOptions()) {
        params->activation =
            parse_activation(rnn_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
      TfLiteEmbeddingLookupSparseParams* params =
          allocator->AllocatePOD<TfLiteEmbeddingLookupSparseParams>();
      if (auto* embedding_params =
              op->builtin_options_as_EmbeddingLookupSparseOptions()) {
        params->combiner = parseCombinerType(embedding_params->combiner());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_FULLY_CONNECTED: {
      TfLiteFullyConnectedParams* params =
          allocator->AllocatePOD<TfLiteFullyConnectedParams>();
      if (auto* fully_connected_params =
              op->builtin_options_as_FullyConnectedOptions()) {
        params->activation = parse_activation(
            fully_connected_params->fused_activation_function());
        switch (fully_connected_params->weights_format()) {
          case FullyConnectedOptionsWeightsFormat_DEFAULT:
            params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
            break;
          case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
            params->weights_format =
                kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
            break;
          default:
            error_reporter->Report("Unhandled fully-connected weights format.");
            return kTfLiteError;
        }
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_HASHTABLE_LOOKUP:
      // no-op.
      break;
    case BuiltinOperator_SOFTMAX: {
      TfLiteSoftmaxParams* params =
          allocator->AllocatePOD<TfLiteSoftmaxParams>();
      if (auto* softmax_params = op->builtin_options_as_SoftmaxOptions()) {
        params->beta = softmax_params->beta();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_CONCATENATION: {
      TfLiteConcatenationParams* params =
          allocator->AllocatePOD<TfLiteConcatenationParams>();
      if (auto* concatenation_params =
              op->builtin_options_as_ConcatenationOptions()) {
        params->activation =
            parse_activation(concatenation_params->fused_activation_function());
        params->axis = concatenation_params->axis();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_MUL: {
      auto* params = allocator->AllocatePOD<TfLiteMulParams>();
      if (auto* schema_params = op->builtin_options_as_MulOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_ADD: {
      auto* params = allocator->AllocatePOD<TfLiteAddParams>();
      if (auto* schema_params = op->builtin_options_as_AddOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_DIV: {
      auto* params = allocator->AllocatePOD<TfLiteDivParams>();
      if (auto* schema_params = op->builtin_options_as_DivOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SUB: {
      auto* params = allocator->AllocatePOD<TfLiteSubParams>();
      if (auto* schema_params = op->builtin_options_as_SubOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_L2_NORMALIZATION: {
      auto* params = allocator->AllocatePOD<TfLiteL2NormParams>();
      if (auto* schema_params = op->builtin_options_as_L2NormOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
      auto* params = allocator->AllocatePOD<TfLiteLocalResponseNormParams>();
      if (auto* schema_params =
              op->builtin_options_as_LocalResponseNormalizationOptions()) {
        params->radius = schema_params->radius();
        params->bias = schema_params->bias();
        params->alpha = schema_params->alpha();
        params->beta = schema_params->beta();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_LSTM: {
      auto params = allocator->AllocatePOD<TfLiteLSTMParams>();
      if (auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
        params->activation =
            parse_activation(lstm_params->fused_activation_function());
        params->cell_clip = lstm_params->cell_clip();
        params->proj_clip = lstm_params->proj_clip();
        switch (lstm_params->kernel_type()) {
          case LSTMKernelType_FULL:
            params->kernel_type = kTfLiteLSTMFullKernel;
            break;
          case LSTMKernelType_BASIC:
            params->kernel_type = kTfLiteLSTMBasicKernel;
            break;
        }
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      auto* params =
          allocator->AllocatePOD<TfLiteUnidirectionalSequenceLSTMParams>();
      if (auto* seq_lstm_params =
              op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
        params->activation =
            parse_activation(seq_lstm_params->fused_activation_function());
        params->cell_clip = seq_lstm_params->cell_clip();
        params->proj_clip = seq_lstm_params->proj_clip();
        params->time_major = seq_lstm_params->time_major();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          allocator->AllocatePOD<TfLiteBidirectionalSequenceLSTMParams>();
      if (auto* bidi_lstm_params =
              op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
        params->activation =
            parse_activation(bidi_lstm_params->fused_activation_function());
        params->cell_clip = bidi_lstm_params->cell_clip();
        params->proj_clip = bidi_lstm_params->proj_clip();
        params->merge_outputs = bidi_lstm_params->merge_outputs();
        params->time_major = bidi_lstm_params->time_major();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_RESIZE_BILINEAR: {
      auto* params = allocator->AllocatePOD<TfLiteResizeBilinearParams>();
      if (auto* schema_params =
              op->builtin_options_as_ResizeBilinearOptions()) {
        params->align_corners = schema_params->align_corners();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      // Large functions confuse macOS builds with Xcode 8, so a lambda is
      // used to minimize function size. TODO(b/118447267): Simplify
      // ParseOpData function and reduce its length.
      [&]() {
        auto* params =
            allocator->AllocatePOD<TfLiteResizeNearestNeighborParams>();
        if (auto* schema_params =
                op->builtin_options_as_ResizeNearestNeighborOptions()) {
          params->align_corners = schema_params->align_corners();
        }
        *builtin_data = reinterpret_cast<void*>(params);
      }();
      break;
    }
    case BuiltinOperator_RESHAPE: {
      auto* params = allocator->AllocatePOD<TfLiteReshapeParams>();
      if (auto* schema_params = op->builtin_options_as_ReshapeOptions()) {
        auto* new_shape = schema_params->new_shape();
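        // `params->shape` is a fixed-size array, so the helper bounds-checks
        // the copy against sizeof(params->shape).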
        TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
            sizeof(params->shape), new_shape, params->shape, error_reporter,
            "reshape"));
        params->num_dimensions = new_shape->Length();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SKIP_GRAM: {
      TfLiteSkipGramParams* params =
          allocator->AllocatePOD<TfLiteSkipGramParams>();
      if (auto* skip_gram_params = op->builtin_options_as_SkipGramOptions()) {
        params->ngram_size = skip_gram_params->ngram_size();
        params->max_skip_size = skip_gram_params->max_skip_size();
        params->include_all_ngrams = skip_gram_params->include_all_ngrams();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SPACE_TO_DEPTH: {
      auto* params = allocator->AllocatePOD<TfLiteSpaceToDepthParams>();
      if (auto* schema_params = op->builtin_options_as_SpaceToDepthOptions()) {
        params->block_size = schema_params->block_size();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_GATHER: {
      TfLiteGatherParams* params = allocator->AllocatePOD<TfLiteGatherParams>();
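      // Default the axis to 0 in case the op carries no GatherOptions.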
      params->axis = 0;
      if (auto* gather_params = op->builtin_options_as_GatherOptions()) {
        params->axis = gather_params->axis();
      }

      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_MEAN:
    case BuiltinOperator_REDUCE_MAX:
    case BuiltinOperator_REDUCE_MIN:
    case BuiltinOperator_REDUCE_PROD:
    case BuiltinOperator_REDUCE_ANY:
    case BuiltinOperator_SUM: {
      auto* params = allocator->AllocatePOD<TfLiteReducerParams>();
      if (auto* schema_params = op->builtin_options_as_ReducerOptions()) {
        params->keep_dims = schema_params->keep_dims();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SPLIT: {
      auto* params = allocator->AllocatePOD<TfLiteSplitParams>();
      if (auto* schema_params = op->builtin_options_as_SplitOptions()) {
        params->num_splits = schema_params->num_splits();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SPLIT_V: {
      auto* params = allocator->AllocatePOD<TfLiteSplitParams>();
      if (auto* schema_params = op->builtin_options_as_SplitVOptions()) {
        params->num_splits = schema_params->num_splits();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SQUEEZE: {
      auto* params = allocator->AllocatePOD<TfLiteSqueezeParams>();
      if (auto* schema_params = op->builtin_options_as_SqueezeOptions()) {
        const auto& squeeze_dims = schema_params->squeeze_dims();
        TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
            sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
            error_reporter, "squeeze"));
        params->num_squeeze_dims = squeeze_dims->Length();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_STRIDED_SLICE: {
      auto* params = allocator->AllocatePOD<TfLiteStridedSliceParams>();
      if (auto* schema_params = op->builtin_options_as_StridedSliceOptions()) {
        params->begin_mask = schema_params->begin_mask();
        params->end_mask = schema_params->end_mask();
        params->ellipsis_mask = schema_params->ellipsis_mask();
        params->new_axis_mask = schema_params->new_axis_mask();
        params->shrink_axis_mask = schema_params->shrink_axis_mask();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_ARG_MAX: {
      auto* params = allocator->AllocatePOD<TfLiteArgMaxParams>();
      if (auto* schema_params = op->builtin_options_as_ArgMaxOptions()) {
        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
                                                &params->output_type,
                                                error_reporter));
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_ARG_MIN: {
      auto* params = allocator->AllocatePOD<TfLiteArgMinParams>();
      if (const auto* schema_params = op->builtin_options_as_ArgMinOptions()) {
        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
                                                &params->output_type,
                                                error_reporter));
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_TRANSPOSE_CONV: {
      TfLiteTransposeConvParams* params =
          allocator->AllocatePOD<TfLiteTransposeConvParams>();
      if (auto* transpose_conv_params =
              op->builtin_options_as_TransposeConvOptions()) {
        params->padding = parse_padding(transpose_conv_params->padding());
        params->stride_width = transpose_conv_params->stride_w();
        params->stride_height = transpose_conv_params->stride_h();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SPARSE_TO_DENSE: {
      TfLiteSparseToDenseParams* params =
          allocator->AllocatePOD<TfLiteSparseToDenseParams>();
      if (auto* sparse_to_dense_params =
              op->builtin_options_as_SparseToDenseOptions()) {
        params->validate_indices = sparse_to_dense_params->validate_indices();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_SHAPE: {
      auto* params = allocator->AllocatePOD<TfLiteShapeParams>();
      if (auto* schema_params = op->builtin_options_as_ShapeOptions()) {
        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
                                                &params->out_type,
                                                error_reporter));
      }
      *builtin_data = static_cast<void*>(params);
      break;
    }
    case BuiltinOperator_PACK: {
      TfLitePackParams* params = allocator->AllocatePOD<TfLitePackParams>();
      if (auto* pack_params = op->builtin_options_as_PackOptions()) {
        params->values_count = pack_params->values_count();
        params->axis = pack_params->axis();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_DELEGATE: {
      // TODO(ycling): Revisit when supporting saving delegated models.
      error_reporter->Report("DELEGATE op shouldn't exist in model.");
      return kTfLiteError;
    }
    case BuiltinOperator_FAKE_QUANT: {
      auto* params = allocator->AllocatePOD<TfLiteFakeQuantParams>();
      if (auto* schema_params = op->builtin_options_as_FakeQuantOptions()) {
        params->min = schema_params->min();
        params->max = schema_params->max();
        params->num_bits = schema_params->num_bits();
        params->narrow_range = schema_params->narrow_range();
      }
      *builtin_data = static_cast<void*>(params);
      break;
    }
    case BuiltinOperator_ONE_HOT: {
      auto* params = allocator->AllocatePOD<TfLiteOneHotParams>();
      if (auto* schema_params = op->builtin_options_as_OneHotOptions()) {
        params->axis = schema_params->axis();
      }
      *builtin_data = static_cast<void*>(params);
      break;
    }
    case BuiltinOperator_UNPACK: {
      TfLiteUnpackParams* params = allocator->AllocatePOD<TfLiteUnpackParams>();
      if (auto* unpack_params = op->builtin_options_as_UnpackOptions()) {
        params->num = unpack_params->num();
        params->axis = unpack_params->axis();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_LEAKY_RELU: {
      TfLiteLeakyReluParams* params =
          allocator->AllocatePOD<TfLiteLeakyReluParams>();
      if (auto* leaky_relu_params = op->builtin_options_as_LeakyReluOptions()) {
        params->alpha = leaky_relu_params->alpha();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_MIRROR_PAD: {
      TfLiteMirrorPaddingParams* params =
          allocator->AllocatePOD<TfLiteMirrorPaddingParams>();
      auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
      if (mirror_pad_params != nullptr) {
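        // REFLECT maps directly; any other mode is treated as SYMMETRIC.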
        params->mode =
            mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
                ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
                : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_UNIQUE: {
      TfLiteUniqueParams* params = allocator->AllocatePOD<TfLiteUniqueParams>();
      auto* unique_params = op->builtin_options_as_UniqueOptions();
      if (unique_params != nullptr) {
        params->index_out_type =
            unique_params->idx_out_type() == tflite::TensorType_INT64
                ? TfLiteType::kTfLiteInt64
                : TfLiteType::kTfLiteInt32;
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_REVERSE_SEQUENCE: {
      TfLiteReverseSequenceParams* params =
          allocator->AllocatePOD<TfLiteReverseSequenceParams>();
      if (auto* reverse_seq_params =
              op->builtin_options_as_ReverseSequenceOptions()) {
        params->seq_dim = reverse_seq_params->seq_dim();
        params->batch_dim = reverse_seq_params->batch_dim();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }

    // Below are the ops with no builtin_data structure.
    case BuiltinOperator_ABS:
    case BuiltinOperator_BATCH_TO_SPACE_ND:
    // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
    // ok for now, since there is no call implementation either.
    case BuiltinOperator_CALL:
    case BuiltinOperator_CONCAT_EMBEDDINGS:
    case BuiltinOperator_COS:
    case BuiltinOperator_CUSTOM:
    case BuiltinOperator_DEQUANTIZE:
    case BuiltinOperator_ELU:
    case BuiltinOperator_EMBEDDING_LOOKUP:
    case BuiltinOperator_EQUAL:
    case BuiltinOperator_EXP:
    case BuiltinOperator_EXPAND_DIMS:
    case BuiltinOperator_CEIL:
    case BuiltinOperator_FLOOR:
    case BuiltinOperator_GREATER:
    case BuiltinOperator_GREATER_EQUAL:
    case BuiltinOperator_LESS:
    case BuiltinOperator_LESS_EQUAL:
    case BuiltinOperator_LOG:
    case BuiltinOperator_LOGISTIC:
    case BuiltinOperator_LOG_SOFTMAX:
    case BuiltinOperator_MAXIMUM:
    case BuiltinOperator_MINIMUM:
    case BuiltinOperator_NEG:
    case BuiltinOperator_NOT_EQUAL:
    case BuiltinOperator_PAD:
    case BuiltinOperator_PADV2:
    case BuiltinOperator_PRELU:
    case BuiltinOperator_RELU:
    case BuiltinOperator_RELU6:
    case BuiltinOperator_RELU_N1_TO_1:
    case BuiltinOperator_RSQRT:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_SIN:
    case BuiltinOperator_SLICE:
    case BuiltinOperator_SPACE_TO_BATCH_ND:
    case BuiltinOperator_SQRT:
    case BuiltinOperator_TANH:
    case BuiltinOperator_TILE:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_TRANSPOSE:
    case BuiltinOperator_POW:
    case BuiltinOperator_LOGICAL_OR:
    case BuiltinOperator_LOGICAL_AND:
    case BuiltinOperator_LOGICAL_NOT:
    case BuiltinOperator_FLOOR_DIV:
    case BuiltinOperator_SQUARE:
    case BuiltinOperator_ZEROS_LIKE:
    case BuiltinOperator_FILL:
    case BuiltinOperator_FLOOR_MOD:
    case BuiltinOperator_RANGE:
    case BuiltinOperator_SQUARED_DIFFERENCE:
    case BuiltinOperator_REVERSE_V2:
    case BuiltinOperator_ADD_N:
    case BuiltinOperator_GATHER_ND:
    case BuiltinOperator_WHERE:
    case BuiltinOperator_RANK:
      break;
  }
  return kTfLiteOk;
}  // NOLINT[readability/fn_size]

}  // namespace tflite