/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CONTRIB_LITE_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_CONTRIB_LITE_KERNELS_KERNEL_UTIL_H_

#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/context.h"

namespace tflite {

inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
  return t->dims->data[dim];
}
inline TfLiteTensor* GetInput(TfLiteContext* context, TfLiteNode* node,
                              int index) {
  return &context->tensors[node->inputs->data[index]];
}
inline TfLiteTensor* GetOutput(TfLiteContext* context, TfLiteNode* node,
                               int index) {
  return &context->tensors[node->outputs->data[index]];
}
inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; }
inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; }

inline int64_t NumElements(const TfLiteTensor* t) {
  int64_t count = 1;
  for (int i = 0; i < NumDimensions(t); ++i) {
    count *= SizeOfDimension(t, i);
  }
  return count;
}
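
// A minimal usage sketch of the accessors above (hypothetical single-input
// kernel; the tensor indices are illustrative, not part of this header):
//
//   TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
//     TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
//     TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
//     TfLiteTensor* input = GetInput(context, node, 0);
//     TfLiteTensor* output = GetOutput(context, node, 0);
//     // Give the output the same shape as the input.
//     return context->ResizeTensor(context, output,
//                                  TfLiteIntArrayCopy(input->dims));
//   }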

// Returns the input tensor at the given index, or nullptr if that input is
// omitted (i.e. it is the kOptionalTensor placeholder).
inline TfLiteTensor* GetOptionalInputTensor(TfLiteContext* context,
                                            const TfLiteNode* node, int index) {
  const bool use_tensor = node->inputs->data[index] != kOptionalTensor;
  if (use_tensor) {
    return &context->tensors[node->inputs->data[index]];
  }
  return nullptr;
}
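
// Example (hypothetical kernel with an optional bias input; the input index
// is illustrative):
//
//   TfLiteTensor* bias = GetOptionalInputTensor(context, node, /*index=*/2);
//   if (bias != nullptr) {
//     // A bias tensor was provided; fold it into the computation.
//   }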

// Determines whether tensor is constant.
inline bool IsConstantTensor(TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteMmapRo;
}

// Determines whether tensor is dynamic. Note that a tensor can be non-constant
// without being dynamic; this function checks specifically for a dynamic
// tensor.
inline bool IsDynamicTensor(TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteDynamic;
}

// Sets tensor to dynamic.
inline void SetTensorToDynamic(TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic) {
    tensor->allocation_type = kTfLiteDynamic;
    tensor->data.raw = nullptr;
  }
}
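
// A minimal sketch of the usual dynamic-output pattern (hypothetical op whose
// output shape depends on runtime input values and so cannot be resized in
// Prepare):
//
//   TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
//     TfLiteTensor* output = GetOutput(context, node, 0);
//     SetTensorToDynamic(output);  // Defer output allocation to Eval.
//     return kTfLiteOk;
//   }
//
// In Eval, the kernel would then check IsDynamicTensor(output) and call
// context->ResizeTensor with the actual shape before writing any data.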

// Calculates the multiplication factor for a quantized convolution (or
// quantized depthwise convolution) involving the given tensors. Returns an
// error if the scales of the tensors are not compatible.
TfLiteStatus GetQuantizedConvolutionMultipler(
    TfLiteContext* context, TfLiteTensor* input, TfLiteTensor* filter,
    TfLiteTensor* bias, TfLiteTensor* output, double* multiplier);
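
// Typical use in a quantized conv kernel (a sketch; the resulting multiplier
// corresponds to input_scale * filter_scale / output_scale and is normally
// converted afterwards into a fixed-point multiplier and shift):
//
//   double real_multiplier = 0.0;
//   TF_LITE_ENSURE_OK(
//       context, GetQuantizedConvolutionMultipler(context, input, filter,
//                                                 bias, output,
//                                                 &real_multiplier));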

// Calculates the useful range of an activation layer given its activation
// function; the uint8 variant also takes the quantized output tensor so the
// range can be expressed in that tensor's quantized domain.
void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
                                   TfLiteTensor* output, int32_t* act_min,
                                   int32_t* act_max);
void CalculateActivationRangeFloat(TfLiteFusedActivation activation,
                                   float* activation_min,
                                   float* activation_max);
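
// Example (float kernel clamping results to the fused activation range;
// `params` is a hypothetical pointer to the op's builtin data):
//
//   float activation_min, activation_max;
//   CalculateActivationRangeFloat(params->activation, &activation_min,
//                                 &activation_max);
//   // ... clamp each output element to [activation_min, activation_max].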

// Returns true if the given tensors have the same shape.
bool HaveSameShapes(TfLiteTensor* input1, TfLiteTensor* input2);

// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the two input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        TfLiteTensor* input1,
                                        TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape);
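
// Example (sketch of a broadcasting binary op's Prepare; ResizeTensor takes
// ownership of the newly allocated shape array):
//
//   TfLiteIntArray* output_shape = nullptr;
//   TF_LITE_ENSURE_OK(context,
//                     CalculateShapeForBroadcast(context, input1, input2,
//                                                &output_shape));
//   return context->ResizeTensor(context, output, output_shape);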
}  // namespace tflite

#endif  // TENSORFLOW_CONTRIB_LITE_KERNELS_KERNEL_UTIL_H_