/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

#include <cstddef>
#include <cstdint>
#include <memory>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

namespace {

// Utility class for safely allocating POD data. This is useful for avoiding
// leaks in cases where op params are allocated but fail to propagate to the
// parsed op data (e.g., when model parameters are invalid).
class SafeBuiltinDataAllocator {
 public:
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}

    void operator()(void* data) { allocator_->Deallocate(data); }

   private:
    BuiltinDataAllocator* allocator_;
  };

  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;

  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}

  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }

 private:
  BuiltinDataAllocator* allocator_;
};
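
// A minimal usage sketch (illustrative only; `allocator` and `builtin_data`
// are assumed to be the caller-supplied parse-function arguments):
//
//   SafeBuiltinDataAllocator safe_allocator(allocator);
//   auto params = safe_allocator.Allocate<TfLiteAddParams>();
//   if (params == nullptr) return kTfLiteError;
//   // ... populate *params; any early error return lets the deleter hand
//   // the allocation back to the allocator ...
//   *builtin_data = params.release();  // Transfer ownership to the caller.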

// All of the Parse functions take pointer parameters, and this function holds
// the common DCHECKs that catch any of those pointers being nullptr.
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
                             BuiltinDataAllocator* allocator,
                             void** builtin_data) {
  TFLITE_DCHECK(op != nullptr);
  TFLITE_DCHECK(error_reporter != nullptr);
  TFLITE_DCHECK(allocator != nullptr);
  TFLITE_DCHECK(builtin_data != nullptr);
}

// Copies the contents of the flatbuffer int vector `flat_vector` into the int
// array `buffer`. `flat_vector` and `buffer` hold the same configuration data
// for a given operation.
TfLiteStatus FlatBufferIntVectorToArray(
    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
  if (!flat_vector) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Input array not provided for operation '%s'.\n",
                         op_name);
    return kTfLiteError;
  } else {
    size_t num_dimensions = flat_vector->size();
    if (num_dimensions > max_size_of_buffer / sizeof(int)) {
      TF_LITE_REPORT_ERROR(
          error_reporter,
          "Found too many dimensions in the input array of operation '%s'.\n",
          op_name);
      return kTfLiteError;
    } else {
      for (size_t i = 0; i < num_dimensions; ++i) {
        buffer[i] = flat_vector->Get(i);
      }
    }
  }
  return kTfLiteOk;
}
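
// An illustrative call, mirroring how the SQUEEZE parsing below uses this
// helper (`params` is assumed to point at a TfLiteSqueezeParams):
//
//   TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
//       sizeof(params->squeeze_dims), schema_params->squeeze_dims(),
//       params->squeeze_dims, error_reporter, "squeeze"));
//
// Passing sizeof(params->squeeze_dims) as the byte capacity lets the helper
// reject flatbuffer vectors that would overflow the fixed-size array.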

// Converts the flatbuffer activation to what is used at runtime.
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
  switch (activation) {
    case ActivationFunctionType_NONE:
      return kTfLiteActNone;
    case ActivationFunctionType_RELU:
      return kTfLiteActRelu;
    case ActivationFunctionType_RELU_N1_TO_1:
      return kTfLiteActReluN1To1;
    case ActivationFunctionType_RELU6:
      return kTfLiteActRelu6;
    case ActivationFunctionType_TANH:
      return kTfLiteActTanh;
    case ActivationFunctionType_SIGN_BIT:
      return kTfLiteActSignBit;
  }
  return kTfLiteActNone;
}

// Converts the flatbuffer padding enum to what is used at runtime.
TfLitePadding ConvertPadding(Padding padding) {
  switch (padding) {
    case Padding_SAME:
      return kTfLitePaddingSame;
    case Padding_VALID:
      return kTfLitePaddingValid;
  }
  return kTfLitePaddingUnknown;
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  auto parseLSHProjectionType = [](LSHProjectionType type) {
    switch (type) {
      case LSHProjectionType_SPARSE:
        return kTfLiteLshProjectionSparse;
      case LSHProjectionType_DENSE:
        return kTfLiteLshProjectionDense;
      default:
        return kTfLiteLshProjectionUnknown;
    }
  };
  auto parseCombinerType = [](CombinerType type) {
    switch (type) {
      case CombinerType_MEAN:
        return kTfLiteCombinerTypeMean;
      case CombinerType_SQRTN:
        return kTfLiteCombinerTypeSqrtn;
      case CombinerType_SUM:
      default:
        return kTfLiteCombinerTypeSum;
    }
  };

  SafeBuiltinDataAllocator safe_allocator(allocator);
  *builtin_data = nullptr;
  switch (op_type) {
    case BuiltinOperator_ABS: {
      return ParseAbs(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ADD: {
      return ParseAdd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ADD_N: {
      return ParseAddN(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ARG_MAX: {
      return ParseArgMax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ARG_MIN: {
      return ParseArgMin(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_AVERAGE_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BATCH_MATMUL: {
      return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BATCH_TO_SPACE_ND: {
      return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CEIL: {
      return ParseCeil(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CONCATENATION: {
      return ParseConcatenation(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CONV_2D: {
      return ParseConv2D(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEPTH_TO_SPACE: {
      return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEQUANTIZE: {
      return ParseDequantize(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DIV: {
      return ParseDiv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ELU: {
      return ParseElu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_EXP: {
      return ParseExp(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_EXPAND_DIMS: {
      return ParseExpandDims(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FILL: {
      return ParseFill(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR: {
      return ParseFloor(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR_DIV: {
      return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR_MOD: {
      return ParseFloorMod(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FULLY_CONNECTED: {
      return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GATHER_ND: {
      return ParseGatherNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GREATER: {
      return ParseGreater(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GREATER_EQUAL: {
      return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_HARD_SWISH: {
      return ParseHardSwish(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_L2_NORMALIZATION: {
      return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_L2_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LEAKY_RELU: {
      return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LESS: {
      return ParseLess(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LESS_EQUAL: {
      return ParseLessEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOG: {
      return ParseLog(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_AND: {
      return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_NOT: {
      return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_OR: {
      return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGISTIC: {
      return ParseLogistic(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MAXIMUM: {
      return ParseMaximum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MAX_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MEAN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MINIMUM: {
      return ParseMinimum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MUL: {
      return ParseMul(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_NEG: {
      return ParseNeg(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_NOT_EQUAL: {
      return ParseNotEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PACK: {
      return ParsePack(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PAD: {
      return ParsePad(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PADV2: {
      return ParsePadV2(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_POW: {
      return ParsePow(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PRELU: {
      return ParsePrelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_QUANTIZE: {
      return ParseQuantize(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_ANY: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_MAX: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_MIN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_PROD: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RELU: {
      return ParseRelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RELU6: {
      return ParseRelu6(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESHAPE: {
      return ParseReshape(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESIZE_BILINEAR: {
      return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      return ParseResizeNearestNeighbor(op, error_reporter, allocator,
                                        builtin_data);
    }

    case BuiltinOperator_ROUND: {
      return ParseRound(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RSQRT: {
      return ParseRsqrt(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SHAPE: {
      return ParseShape(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SIN: {
      return ParseSin(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SOFTMAX: {
      return ParseSoftmax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPACE_TO_BATCH_ND: {
      return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPACE_TO_DEPTH: {
      return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPLIT: {
      return ParseSplit(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPLIT_V: {
      return ParseSplitV(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQRT: {
      return ParseSqrt(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQUARE: {
      return ParseSquare(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_STRIDED_SLICE: {
      return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SUB: {
      return ParseSub(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SUM: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SVDF: {
      return ParseSvdf(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_TANH: {
      return ParseTanh(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_TRANSPOSE_CONV: {
      return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_UNPACK: {
      return ParseUnpack(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ZEROS_LIKE: {
      return ParseZerosLike(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CAST: {
      return ParseCast(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LSH_PROJECTION: {
      auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* lshParams =
              op->builtin_options_as_LSHProjectionOptions()) {
        params->type = parseLSHProjectionType(lshParams->type());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sequence_rnn_params =
              op->builtin_options_as_SequenceRNNOptions()) {
        params->activation =
            ConvertActivation(sequence_rnn_params->fused_activation_function());
        params->time_major = sequence_rnn_params->time_major();
        params->asymmetric_quantize_inputs =
            sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_sequence_rnn_params =
              op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
        params->activation = ConvertActivation(
            bidi_sequence_rnn_params->fused_activation_function());
        params->time_major = bidi_sequence_rnn_params->time_major();
        params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
        params->asymmetric_quantize_inputs =
            bidi_sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RNN: {
      auto params = safe_allocator.Allocate<TfLiteRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
        params->activation =
            ConvertActivation(rnn_params->fused_activation_function());
        params->asymmetric_quantize_inputs =
            rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
      auto params =
          safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* embedding_params =
              op->builtin_options_as_EmbeddingLookupSparseOptions()) {
        params->combiner = parseCombinerType(embedding_params->combiner());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }

    case BuiltinOperator_HASHTABLE_LOOKUP:
      // no-op.
      return kTfLiteOk;

    case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
      auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_LocalResponseNormalizationOptions()) {
        params->radius = schema_params->radius();
        params->bias = schema_params->bias();
        params->alpha = schema_params->alpha();
        params->beta = schema_params->beta();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_LSTM: {
      auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
        params->activation =
            ConvertActivation(lstm_params->fused_activation_function());
        params->cell_clip = lstm_params->cell_clip();
        params->proj_clip = lstm_params->proj_clip();
        switch (lstm_params->kernel_type()) {
          case LSTMKernelType_FULL:
            params->kernel_type = kTfLiteLSTMFullKernel;
            break;
          case LSTMKernelType_BASIC:
            params->kernel_type = kTfLiteLSTMBasicKernel;
            break;
          default:
            TF_LITE_REPORT_ERROR(error_reporter,
                                 "Unhandled LSTM kernel type: %d",
                                 lstm_params->kernel_type());
            return kTfLiteError;
        }
        params->asymmetric_quantize_inputs =
            lstm_params->asymmetric_quantize_inputs();
      } else {
        TF_LITE_REPORT_ERROR(error_reporter,
                             "No valid LSTM builtin options exist");
        return kTfLiteError;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* seq_lstm_params =
              op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
        params->activation =
            ConvertActivation(seq_lstm_params->fused_activation_function());
        params->cell_clip = seq_lstm_params->cell_clip();
        params->proj_clip = seq_lstm_params->proj_clip();
        params->time_major = seq_lstm_params->time_major();
        params->asymmetric_quantize_inputs =
            seq_lstm_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_lstm_params =
              op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
        params->activation =
            ConvertActivation(bidi_lstm_params->fused_activation_function());
        params->cell_clip = bidi_lstm_params->cell_clip();
        params->proj_clip = bidi_lstm_params->proj_clip();
        params->merge_outputs = bidi_lstm_params->merge_outputs();
        params->time_major = bidi_lstm_params->time_major();
        params->asymmetric_quantize_inputs =
            bidi_lstm_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_SKIP_GRAM: {
      auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* skip_gram_params =
              op->builtin_options_as_SkipGramOptions()) {
        params->ngram_size = skip_gram_params->ngram_size();
        params->max_skip_size = skip_gram_params->max_skip_size();
        params->include_all_ngrams = skip_gram_params->include_all_ngrams();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }

    case BuiltinOperator_GATHER: {
      return ParseGather(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQUEEZE: {
      auto params = safe_allocator.Allocate<TfLiteSqueezeParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) {
        const auto* squeeze_dims = schema_params->squeeze_dims();
        if (squeeze_dims != nullptr) {
          TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
              sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
              error_reporter, "squeeze"));
          params->num_squeeze_dims = squeeze_dims->size();
        } else {
          params->num_squeeze_dims = 0;
        }
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_SPARSE_TO_DENSE: {
      auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sparse_to_dense_params =
              op->builtin_options_as_SparseToDenseOptions()) {
        params->validate_indices = sparse_to_dense_params->validate_indices();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_DELEGATE: {
      // TODO(ycling): Revisit when supporting saving delegated models.
      TF_LITE_REPORT_ERROR(error_reporter,
                           "DELEGATE op shouldn't exist in model.");
      return kTfLiteError;
    }
    case BuiltinOperator_FAKE_QUANT: {
      auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_FakeQuantOptions()) {
        params->min = schema_params->min();
        params->max = schema_params->max();
        params->num_bits = schema_params->num_bits();
        params->narrow_range = schema_params->narrow_range();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_ONE_HOT: {
      auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
        params->axis = schema_params->axis();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_MIRROR_PAD: {
      auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
      if (mirror_pad_params != nullptr) {
        params->mode =
            mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
                ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
                : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIQUE: {
      auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      const auto* unique_params = op->builtin_options_as_UniqueOptions();
      if (unique_params != nullptr) {
        params->index_out_type =
            unique_params->idx_out_type() == tflite::TensorType_INT64
                ? TfLiteType::kTfLiteInt64
                : TfLiteType::kTfLiteInt32;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_REVERSE_SEQUENCE: {
      auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* reverse_seq_params =
              op->builtin_options_as_ReverseSequenceOptions()) {
        params->seq_dim = reverse_seq_params->seq_dim();
        params->batch_dim = reverse_seq_params->batch_dim();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_IF: {
      auto params = safe_allocator.Allocate<TfLiteIfParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* if_params = op->builtin_options_as_IfOptions()) {
        params->then_subgraph_index = if_params->then_subgraph_index();
        params->else_subgraph_index = if_params->else_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_WHILE: {
      auto params = safe_allocator.Allocate<TfLiteWhileParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
        params->cond_subgraph_index = while_params->cond_subgraph_index();
        params->body_subgraph_index = while_params->body_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CALL_ONCE: {
      auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* call_once_params =
              op->builtin_options_as_CallOnceOptions()) {
        params->init_subgraph_index = call_once_params->init_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CUMSUM: {
      auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
        params->exclusive = cumsum_params->exclusive();
        params->reverse = cumsum_params->reverse();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CONV_3D: {
      auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
        params->padding = ConvertPadding(conv3d_params->padding());
        params->activation =
            ConvertActivation(conv3d_params->fused_activation_function());
        params->stride_depth = conv3d_params->stride_d();
        params->stride_height = conv3d_params->stride_h();
        params->stride_width = conv3d_params->stride_w();
        params->dilation_depth_factor = conv3d_params->dilation_d_factor();
        params->dilation_height_factor = conv3d_params->dilation_h_factor();
        params->dilation_width_factor = conv3d_params->dilation_w_factor();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    // Below are the ops with no builtin_data structure.
    // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
    // ok for now, since there is no call implementation either.
    case BuiltinOperator_CALL:
    case BuiltinOperator_CONCAT_EMBEDDINGS:
    case BuiltinOperator_COS:
    case BuiltinOperator_CUSTOM:
    case BuiltinOperator_EMBEDDING_LOOKUP:
    case BuiltinOperator_EQUAL:
    case BuiltinOperator_LOG_SOFTMAX:
    case BuiltinOperator_MATRIX_DIAG:
    case BuiltinOperator_MATRIX_SET_DIAG:
    case BuiltinOperator_RELU_N1_TO_1:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_SELECT_V2:
    case BuiltinOperator_SLICE:
    case BuiltinOperator_TILE:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_TRANSPOSE:
    case BuiltinOperator_RANGE:
    case BuiltinOperator_SQUARED_DIFFERENCE:
    case BuiltinOperator_REVERSE_V2:
    case BuiltinOperator_WHERE:
    case BuiltinOperator_RANK:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
    case BuiltinOperator_SCATTER_ND:
    case BuiltinOperator_DENSIFY:
    case BuiltinOperator_SEGMENT_SUM:
    case BuiltinOperator_BROADCAST_TO:
    case BuiltinOperator_RFFT2D:
    case BuiltinOperator_IMAG:
    case BuiltinOperator_REAL:
    case BuiltinOperator_COMPLEX_ABS:
      return kTfLiteOk;
    case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
      return kTfLiteError;
  }
  return kTfLiteError;
}  // NOLINT[readability/fn_size]
#endif  // !defined(TF_LITE_STATIC_MEMORY)
}  // namespace

TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter) {
  switch (tensor_type) {
    case TensorType_FLOAT16:
      *type = kTfLiteFloat16;
      return kTfLiteOk;
    case TensorType_FLOAT32:
      *type = kTfLiteFloat32;
      return kTfLiteOk;
    case TensorType_FLOAT64:
      *type = kTfLiteFloat64;
      return kTfLiteOk;
    case TensorType_INT16:
      *type = kTfLiteInt16;
      return kTfLiteOk;
    case TensorType_INT32:
      *type = kTfLiteInt32;
      return kTfLiteOk;
    case TensorType_UINT32:
      *type = kTfLiteUInt32;
      return kTfLiteOk;
    case TensorType_UINT8:
      *type = kTfLiteUInt8;
      return kTfLiteOk;
    case TensorType_INT8:
      *type = kTfLiteInt8;
      return kTfLiteOk;
    case TensorType_INT64:
      *type = kTfLiteInt64;
      return kTfLiteOk;
    case TensorType_UINT64:
      *type = kTfLiteUInt64;
      return kTfLiteOk;
    case TensorType_STRING:
      *type = kTfLiteString;
      return kTfLiteOk;
    case TensorType_BOOL:
      *type = kTfLiteBool;
      return kTfLiteOk;
    case TensorType_COMPLEX64:
      *type = kTfLiteComplex64;
      return kTfLiteOk;
    case TensorType_COMPLEX128:
      *type = kTfLiteComplex128;
      return kTfLiteOk;
    case TensorType_RESOURCE:
      *type = kTfLiteResource;
      return kTfLiteOk;
    case TensorType_VARIANT:
      *type = kTfLiteVariant;
      return kTfLiteOk;
    default:
      *type = kTfLiteNoType;
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Unsupported data type %d in tensor\n", tensor_type);
      return kTfLiteError;
  }
}
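
// Illustrative use (mirroring ParseArgMax below); on an unrecognized
// TensorType the out-parameter is set to kTfLiteNoType and kTfLiteError is
// returned, so TF_LITE_ENSURE_STATUS propagates the failure:
//
//   TF_LITE_ENSURE_STATUS(ConvertTensorType(
//       schema_params->output_type(), &params->output_type, error_reporter));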

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
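
// For example, an op resolver that does selective registration can pair a
// kernel registration with its parse function; a rough sketch (the exact
// registration API in micro may differ):
//
//   resolver.AddBuiltin(BuiltinOperator_ABS, Register_ABS(), ParseAbs);
//
// Keeping one Parse* function per op lets the linker drop the parsers for
// ops that a given binary never registers.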

TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteAddParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const AddOptions* schema_params = op->builtin_options_as_AddOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->pot_scale_int16 = schema_params->pot_scale_int16();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  return kTfLiteOk;
}

TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteArgMaxParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteArgMaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();

  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteArgMinParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteArgMinParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();

  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
                              BuiltinDataAllocator* allocator,
                              void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
    params->adj_x = bmm_params->adj_x();
    params->adj_y = bmm_params->adj_y();
    params->asymmetric_quantize_inputs =
        bmm_params->asymmetric_quantize_inputs();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCastParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->in_data_type(), &params->in_data_type, error_reporter));
    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
                                            &params->out_data_type,
                                            error_reporter));
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseConcatenation(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteConcatenationParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteConcatenationParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ConcatenationOptions* schema_params =
      op->builtin_options_as_ConcatenationOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();

  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());

    params->dilation_width_factor = schema_params->dilation_w_factor();
    params->dilation_height_factor = schema_params->dilation_h_factor();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseDepthToSpace(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteDepthToSpaceParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
  if (schema_params != nullptr) {
    params->block_size = schema_params->block_size();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteDepthwiseConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const DepthwiseConv2DOptions* schema_params =
      op->builtin_options_as_DepthwiseConv2DOptions();

  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->depth_multiplier = schema_params->depth_multiplier();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());

    params->dilation_width_factor = schema_params->dilation_w_factor();
    params->dilation_height_factor = schema_params->dilation_h_factor();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteDivParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseFullyConnected(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteFullyConnectedParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const FullyConnectedOptions* schema_params =
      op->builtin_options_as_FullyConnectedOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->keep_num_dims = schema_params->keep_num_dims();
    params->asymmetric_quantize_inputs =
        schema_params->asymmetric_quantize_inputs();

    switch (schema_params->weights_format()) {
      case FullyConnectedOptionsWeightsFormat_DEFAULT:
        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
        break;
      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
        params->weights_format =
            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
        break;
      default:
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Unhandled fully-connected weights format.");
        return kTfLiteError;
    }
  } else {
1294     // TODO(b/157480169): We should either return kTfLiteError or fill in some
1295     // reasonable defaults in the params struct. We are not doing so until we
1296     // better undertand the ramifications of changing the legacy behavior.
1297   }
1298 
1299   *builtin_data = params.release();
1300   return kTfLiteOk;
1301 }
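
// A minimal caller-side sketch (names assumed, not part of the library): the
// switch above maps the schema enum onto the TfLite enum, so a kernel can
// branch on the parsed value.
//
//   void* builtin_data = nullptr;
//   TF_LITE_ENSURE_STATUS(
//       ParseFullyConnected(op, error_reporter, allocator, &builtin_data));
//   auto* fc_params = static_cast<TfLiteFullyConnectedParams*>(builtin_data);
//   if (fc_params->weights_format ==
//       kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
//     // The kernel must expect the shuffled 4x16 int8 weight layout.
//   }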

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteGatherParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  params->axis = 0;
  if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
    params->axis = gather_params->axis();
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
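
// Note the pattern above: the axis is defaulted to 0 before the options table
// is consulted, so models without GatherOptions still yield well-defined
// params. Hedged caller-side sketch (names assumed):
//
//   void* builtin_data = nullptr;
//   TF_LITE_ENSURE_STATUS(
//       ParseGather(op, error_reporter, allocator, &builtin_data));
//   const auto* gather = static_cast<const TfLiteGatherParams*>(builtin_data);
//   // gather->axis is 0 unless the model supplied a different axis.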

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
                               BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseL2Normalization(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteL2NormParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteL2NormParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
                            BuiltinDataAllocator* allocator,
                            void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* leaky_relu_params =
          op->builtin_options_as_LeakyReluOptions()) {
    params->alpha = leaky_relu_params->alpha();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const MulOptions* schema_params = op->builtin_options_as_MulOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
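
// Ownership sketch (illustrative only): on success, *builtin_data points at
// memory obtained from `allocator`, so the caller must release it through the
// same allocator once the params are no longer needed. Caller-side names
// assumed:
//
//   void* builtin_data = nullptr;
//   TF_LITE_ENSURE_STATUS(
//       ParseMul(op, error_reporter, allocator, &builtin_data));
//   // ... read the TfLiteMulParams ...
//   allocator->Deallocate(builtin_data);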

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLitePackParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLitePackParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const PackOptions* schema_params = op->builtin_options_as_PackOptions();

  if (schema_params != nullptr) {
    params->values_count = schema_params->values_count();
    params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLitePoolParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLitePoolParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();

  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->filter_width = schema_params->filter_width();
    params->filter_height = schema_params->filter_height();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteReducerParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteReducerParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();

  if (schema_params != nullptr) {
    params->keep_dims = schema_params->keep_dims();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteReshapeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteReshapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();

  if (schema_params != nullptr) {
    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
    if (new_shape != nullptr) {
      TF_LITE_ENSURE_STATUS(
          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
                                     params->shape, error_reporter, "reshape"));
      params->num_dimensions = new_shape->size();
    } else {
      // TODO(b/157480169) TODO(b/147203660): We should either return
      // kTfLiteError or fill in some reasonable defaults in the params struct.
      // We are not doing so until we better understand the ramifications of
      // changing the legacy behavior.
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
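
// The sizeof(params->shape) bound above makes FlatBufferIntVectorToArray fail
// (rather than overflow the fixed-size array) when the model's new_shape has
// more dimensions than TfLiteReshapeParams::shape can hold. Hedged caller-side
// sketch (names assumed):
//
//   const auto* reshape =
//       static_cast<const TfLiteReshapeParams*>(builtin_data);
//   for (int i = 0; i < reshape->num_dimensions; ++i) {
//     // reshape->shape[i] mirrors the i-th entry of the flatbuffer
//     // new_shape vector.
//   }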

TfLiteStatus ParseResizeBilinear(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteResizeBilinearParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ResizeBilinearOptions* schema_params =
      op->builtin_options_as_ResizeBilinearOptions();

  if (schema_params != nullptr) {
    params->align_corners = schema_params->align_corners();
    params->half_pixel_centers = schema_params->half_pixel_centers();
  } else {
    params->align_corners = false;
    params->half_pixel_centers = false;
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
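
// Unlike most parsers in this file, the else branch above fills in explicit
// defaults instead of leaving the struct unspecified, so both flags are always
// initialized. Illustrative caller-side sketch (names assumed):
//
//   void* builtin_data = nullptr;
//   TF_LITE_ENSURE_STATUS(
//       ParseResizeBilinear(op, error_reporter, allocator, &builtin_data));
//   const auto* rb =
//       static_cast<const TfLiteResizeBilinearParams*>(builtin_data);
//   // rb->align_corners and rb->half_pixel_centers are false unless the
//   // model set them.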

TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
                                        ErrorReporter* error_reporter,
                                        BuiltinDataAllocator* allocator,
                                        void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteResizeNearestNeighborParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ResizeNearestNeighborOptions* schema_params =
      op->builtin_options_as_ResizeNearestNeighborOptions();

  if (schema_params != nullptr) {
    params->align_corners = schema_params->align_corners();
    params->half_pixel_centers = schema_params->half_pixel_centers();
  } else {
    params->align_corners = false;
    params->half_pixel_centers = false;
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteShapeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteShapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();

  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
                                            &params->out_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
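
// ConvertTensorType maps the schema-level TensorType (e.g. TensorType_INT32)
// onto the corresponding TfLiteType and reports unsupported types, with any
// failure propagated through TF_LITE_ENSURE_STATUS above. Hedged caller-side
// sketch (names assumed):
//
//   const auto* shape_params =
//       static_cast<const TfLiteShapeParams*>(builtin_data);
//   // shape_params->out_type holds a TfLiteType such as kTfLiteInt32.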

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSoftmaxParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();

  if (schema_params != nullptr) {
    params->beta = schema_params->beta();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseSpaceToDepth(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSpaceToDepthParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
  if (schema_params != nullptr) {
    params->block_size = schema_params->block_size();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSplitParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSplitParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();

  if (schema_params != nullptr) {
    params->num_splits = schema_params->num_splits();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteSplitVParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSplitVParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();

  if (schema_params != nullptr) {
    params->num_splits = schema_params->num_splits();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSquare(const Operator*, ErrorReporter*,
                         BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseStridedSlice(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteStridedSliceParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const StridedSliceOptions* schema_params =
      op->builtin_options_as_StridedSliceOptions();

  if (schema_params != nullptr) {
    params->begin_mask = schema_params->begin_mask();
    params->end_mask = schema_params->end_mask();
    params->ellipsis_mask = schema_params->ellipsis_mask();
    params->new_axis_mask = schema_params->new_axis_mask();
    params->shrink_axis_mask = schema_params->shrink_axis_mask();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
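
// Each mask above is a bit field with one bit per dimension; bit i set in
// begin_mask means the begin index for dimension i is ignored and the widest
// possible range is used instead. Illustrative caller-side sketch (names
// assumed):
//
//   const auto* ss =
//       static_cast<const TfLiteStridedSliceParams*>(builtin_data);
//   const bool ignore_begin_dim0 = (ss->begin_mask & (1 << 0)) != 0;
//   const bool shrink_dim1 = (ss->shrink_axis_mask & (1 << 1)) != 0;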

TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSubParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SubOptions* schema_params = op->builtin_options_as_SubOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->pot_scale_int16 = schema_params->pot_scale_int16();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSVDFParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSVDFParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
  if (schema_params != nullptr) {
    params->rank = schema_params->rank();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->asymmetric_quantize_inputs =
        schema_params->asymmetric_quantize_inputs();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
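
// Hedged reading sketch (names assumed; semantics paraphrased from the SVDF
// op): rank is the number of rank-1 factors in the SVDF weight factorization,
// and asymmetric_quantize_inputs selects asymmetric input quantization for
// hybrid kernels.
//
//   const auto* svdf = static_cast<const TfLiteSVDFParams*>(builtin_data);
//   // svdf->rank, svdf->activation and svdf->asymmetric_quantize_inputs now
//   // mirror the model's SVDFOptions table.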

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseTransposeConv(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteTransposeConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const TransposeConvOptions* transpose_conv_params =
      op->builtin_options_as_TransposeConvOptions();
  if (transpose_conv_params != nullptr) {
    params->padding = ConvertPadding(transpose_conv_params->padding());
    params->stride_width = transpose_conv_params->stride_w();
    params->stride_height = transpose_conv_params->stride_h();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteUnpackParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteUnpackParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();

  if (schema_params != nullptr) {
    params->num = schema_params->num();
    params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
// TODO(b/145762662): It would be preferable to have the build graph for TF
// Lite Micro not have the ParseOpData function at all. This would require
// splitting the current file into two separate files, one of which defines the
// ParseOpData function and the other that defines the operator specific parse
// functions (e.g. ParseAdd).
//
// Such a split was attempted but was not worth the effort at the time because
// of the following reasons:
//  * We could either duplicate the functions and the SafeBuiltinDataAllocator
//    class in the anonymous namespace of this file, or attempt to make a
//    common library with these helper functions and class.
//  * Making a common library with a separate build target was not feasible as
//    it introduced circular dependencies due to the ErrorReporter, and a
//    common .cc and .h within the same api build target also caused circular
//    dependencies due to the BuiltinDataAllocator class.
//  * If all the builtin operators were to have their own parse functions, or
//    we were ok with some amount of code duplication, then this split of the
//    .cc files would be a lot more feasible.
#ifdef TF_LITE_STATIC_MEMORY
  TF_LITE_REPORT_ERROR(
      error_reporter,
      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
      "specific parse functions (e.g. ParseAdd etc.).\n");
  return kTfLiteError;
#else
  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
                           builtin_data);
#endif
}
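
// Usage sketch (illustrative, caller-side names assumed): full TF Lite builds
// can dispatch through ParseOpData, while TF Lite Micro builds
// (TF_LITE_STATIC_MEMORY) must call the operator specific parsers directly.
//
//   void* builtin_data = nullptr;
//   #ifdef TF_LITE_STATIC_MEMORY
//   TF_LITE_ENSURE_STATUS(
//       ParseAdd(op, error_reporter, allocator, &builtin_data));
//   #else
//   TF_LITE_ENSURE_STATUS(ParseOpData(op, BuiltinOperator_ADD, error_reporter,
//                                     allocator, &builtin_data));
//   #endif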

}  // namespace tflite