/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#include <tensorflow/lite/kernels/internal/common.h>
#pragma clang diagnostic pop

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>

#include "CpuOperationUtils.h"
#include "GroupedConv2D.h"
#include "Operations.h"
#include "Tracing.h"

namespace android {
namespace nn {

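// Extracts the NHWC dimensions shared by the grouped convolution kernels below from the
// input, filter, and output Shapes. filterDepth is the slice of input channels consumed by
// each group, and outputGroupDepth is the number of output channels each group produces.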
#define ANDROID_NN_GROUPED_CONV_PARAMETERS                            \
    uint32_t numBatches = getSizeOfDimension(inputShape, 0);          \
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);         \
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);          \
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);          \
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);       \
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);        \
    uint32_t filterDepth = getSizeOfDimension(filterShape, 3);        \
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);       \
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);        \
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);        \
    uint32_t outputGroupDepth = outputDepth / numGroups;

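// Reference float32 grouped convolution. The output channels are split into numGroups
// groups; group g convolves input channels [g * filterDepth, (g + 1) * filterDepth) with
// its own slice of the filter, then adds the bias and clamps to the fused activation range.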
bool groupedConvFloat32(const float* inputData, const Shape& inputShape, const float* filterData,
                        const Shape& filterShape, const float* biasData, const Shape& /*biasShape*/,
                        int32_t padding_left, int32_t /*padding_right*/, int32_t padding_top,
                        int32_t /*padding_bottom*/, int32_t stride_width, int32_t stride_height,
                        int32_t numGroups, int32_t activation, float* outputData,
                        const Shape& outputShape) {
    NNTRACE_TRANS("groupConvFloat32");
    ANDROID_NN_GROUPED_CONV_PARAMETERS

    float output_activation_min = 0.0f, output_activation_max = 0.0f;
    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);

    const float* inputBase = inputData;
    float* outPtr = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < outputHeight; h++) {
            for (uint32_t w = 0; w < outputWidth; w++) {
                const float* filterBase = filterData;
                for (int32_t g = 0; g < numGroups; g++) {
                    for (uint32_t d = 0; d < outputGroupDepth; d++) {
                        // Top-left corner of the filter window in the input; may fall in the
                        // padded (out-of-bounds) region.
                        int32_t wInputOrigin =
                                static_cast<int32_t>(w) * stride_width - padding_left;
                        int32_t hInputOrigin =
                                static_cast<int32_t>(h) * stride_height - padding_top;
                        float sum = 0.0f;
                        for (uint32_t i = 0; i < filterHeight; i++) {
                            for (uint32_t j = 0; j < filterWidth; j++) {
                                for (uint32_t k = 0; k < filterDepth; k++) {
                                    int32_t hInput = hInputOrigin + static_cast<int32_t>(i);
                                    int32_t wInput = wInputOrigin + static_cast<int32_t>(j);
                                    // Group g reads input channels
                                    // [g * filterDepth, (g + 1) * filterDepth).
                                    uint32_t dInput = filterDepth * g + k;
                                    // Padded positions contribute zero, so only in-bounds
                                    // samples are accumulated.
                                    if (hInput >= 0 && hInput < static_cast<int32_t>(inputHeight) &&
                                        wInput >= 0 && wInput < static_cast<int32_t>(inputWidth)) {
                                        uint32_t filterIndex =
                                                i * filterWidth * filterDepth + j * filterDepth + k;
                                        uint32_t inputIndex = hInput * inputWidth * inputDepth +
                                                              wInput * inputDepth + dInput;
                                        sum += filterBase[filterIndex] * inputBase[inputIndex];
                                    }
                                }
                            }
                        }
                        sum += biasData[g * outputGroupDepth + d];
                        sum = std::max(std::min(sum, output_activation_max), output_activation_min);
                        outPtr[d] = sum;
                        filterBase += filterHeight * filterWidth * filterDepth;
                    }
                    outPtr += outputGroupDepth;
                }
            }
        }
        inputBase += inputHeight * inputWidth * inputDepth;
    }

    return true;
}

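// Asymmetric 8-bit quantized grouped convolution. Products are accumulated in int32 with the
// input and filter zero points removed, then the accumulator is requantized with a single
// output multiplier/shift, offset by the output zero point, and clamped to the activation range.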
template <typename T>
bool groupedConvQuant8(const T* inputData, const Shape& inputShape, const T* filterData,
                       const Shape& filterShape, const int32_t* biasData, const Shape& biasShape,
                       int32_t padding_left, int32_t /*padding_right*/, int32_t padding_top,
                       int32_t /*padding_bottom*/, int32_t stride_width, int32_t stride_height,
                       int32_t numGroups, int32_t activation, T* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("groupConvQuant8");
    ANDROID_NN_GROUPED_CONV_PARAMETERS

    int32_t inputOffset = -inputShape.offset;
    int32_t filterOffset = -filterShape.offset;
    int32_t outputOffset = outputShape.offset;

    double realMultiplier = 0.0;
    int32_t outputMultiplier = 0;
    int32_t outputShift = 0;
    NN_RET_CHECK(GetQuantizedConvolutionMultiplier(inputShape, filterShape, biasShape, outputShape,
                                                   &realMultiplier));
    int exponent;
    NN_RET_CHECK(QuantizeMultiplier(realMultiplier, &outputMultiplier, &exponent));
    outputShift = -exponent;

    int32_t output_activation_min = 0, output_activation_max = 0;
    CalculateActivationRange<T>(activation, outputShape, &output_activation_min,
                                &output_activation_max);

    const T* inputBase = inputData;
    T* outPtr = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < outputHeight; h++) {
            for (uint32_t w = 0; w < outputWidth; w++) {
                const T* filterBase = filterData;
                for (int32_t g = 0; g < numGroups; g++) {
                    for (uint32_t d = 0; d < outputGroupDepth; d++) {
                        int32_t wInputOrigin =
                                static_cast<int32_t>(w) * stride_width - padding_left;
                        int32_t hInputOrigin =
                                static_cast<int32_t>(h) * stride_height - padding_top;
                        int32_t sum = 0;
                        for (uint32_t i = 0; i < filterHeight; i++) {
                            for (uint32_t j = 0; j < filterWidth; j++) {
                                for (uint32_t k = 0; k < filterDepth; k++) {
                                    int32_t hInput = hInputOrigin + static_cast<int32_t>(i);
                                    int32_t wInput = wInputOrigin + static_cast<int32_t>(j);
                                    uint32_t dInput = filterDepth * g + k;
                                    if (hInput >= 0 && hInput < static_cast<int32_t>(inputHeight) &&
                                        wInput >= 0 && wInput < static_cast<int32_t>(inputWidth)) {
                                        uint32_t filterIndex =
                                                i * filterWidth * filterDepth + j * filterDepth + k;
                                        uint32_t inputIndex = hInput * inputWidth * inputDepth +
                                                              wInput * inputDepth + dInput;
                                        sum += (static_cast<int32_t>(filterBase[filterIndex]) +
                                                filterOffset) *
                                               (static_cast<int32_t>(inputBase[inputIndex]) +
                                                inputOffset);
                                    }
                                }
                            }
                        }
                        sum += biasData[g * outputGroupDepth + d];
                        sum = tflite::MultiplyByQuantizedMultiplier(sum, outputMultiplier,
                                                                    -outputShift);
                        sum += outputOffset;
                        sum = std::max(std::min(sum, output_activation_max), output_activation_min);
                        outPtr[d] = static_cast<T>(sum);
                        filterBase += filterHeight * filterWidth * filterDepth;
                    }
                    outPtr += outputGroupDepth;
                }
            }
        }
        inputBase += inputHeight * inputWidth * inputDepth;
    }

    return true;
}

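// Explicit instantiations for the signed and unsigned 8-bit quantized element types.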
template bool groupedConvQuant8<int8_t>(const int8_t* inputData, const Shape& inputShape,
                                        const int8_t* filterData, const Shape& filterShape,
                                        const int32_t* biasData, const Shape& biasShape,
                                        int32_t padding_left, int32_t padding_right,
                                        int32_t padding_top, int32_t padding_bottom,
                                        int32_t stride_width, int32_t stride_height,
                                        int32_t numGroups, int32_t activation, int8_t* outputData,
                                        const Shape& outputShape);

template bool groupedConvQuant8<uint8_t>(const uint8_t* inputData, const Shape& inputShape,
                                         const uint8_t* filterData, const Shape& filterShape,
                                         const int32_t* biasData, const Shape& biasShape,
                                         int32_t padding_left, int32_t padding_right,
                                         int32_t padding_top, int32_t padding_bottom,
                                         int32_t stride_width, int32_t stride_height,
                                         int32_t numGroups, int32_t activation, uint8_t* outputData,
                                         const Shape& outputShape);

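// Per-channel quantized grouped convolution: the filter provides one scale per output channel
// (filterScales) and is assumed to have a zero point of 0, so a separate output
// multiplier/shift is derived per channel and no filter offset is applied during accumulation.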
template <typename T>
bool groupedConvQuant8PerChannel(const T* inputData, const Shape& inputShape,
                                 const int8_t* filterData, const Shape& filterShape,
                                 const float* filterScales, const int32_t* biasData,
                                 const Shape& biasShape, int32_t padding_left,
                                 int32_t /*padding_right*/, int32_t padding_top,
                                 int32_t /*padding_bottom*/, int32_t stride_width,
                                 int32_t stride_height, int32_t numGroups, int32_t activation,
                                 T* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("groupConvQuant8");
    ANDROID_NN_GROUPED_CONV_PARAMETERS

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

    auto realMultiplier = std::vector<double>(outputDepth, .0f);
    auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
    auto outputShift = std::vector<int32_t>(outputDepth, 0);

    for (uint32_t i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;

        NN_RET_CHECK(GetQuantizedConvolutionMultiplier(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        int exponent;
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));
        outputShift[i] = -exponent;
    }

    int32_t output_activation_min = 0, output_activation_max = 0;
    CalculateActivationRange<T>(activation, outputShape, &output_activation_min,
                                &output_activation_max);

    const T* inputBase = inputData;
    T* outPtr = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < outputHeight; h++) {
            for (uint32_t w = 0; w < outputWidth; w++) {
                const int8_t* filterBase = filterData;
                for (int32_t g = 0; g < numGroups; g++) {
                    for (uint32_t d = 0; d < outputGroupDepth; d++) {
                        int32_t wInputOrigin =
                                static_cast<int32_t>(w) * stride_width - padding_left;
                        int32_t hInputOrigin =
                                static_cast<int32_t>(h) * stride_height - padding_top;
                        int32_t sum = 0;
                        for (uint32_t i = 0; i < filterHeight; i++) {
                            for (uint32_t j = 0; j < filterWidth; j++) {
                                for (uint32_t k = 0; k < filterDepth; k++) {
                                    int32_t hInput = hInputOrigin + static_cast<int32_t>(i);
                                    int32_t wInput = wInputOrigin + static_cast<int32_t>(j);
                                    uint32_t dInput = filterDepth * g + k;
                                    if (hInput >= 0 && hInput < static_cast<int32_t>(inputHeight) &&
                                        wInput >= 0 && wInput < static_cast<int32_t>(inputWidth)) {
                                        uint32_t filterIndex =
                                                i * filterWidth * filterDepth + j * filterDepth + k;
                                        uint32_t inputIndex = hInput * inputWidth * inputDepth +
                                                              wInput * inputDepth + dInput;
                                        sum += (static_cast<int32_t>(filterBase[filterIndex])) *
                                               (static_cast<int32_t>(inputBase[inputIndex]) +
                                                inputOffset);
                                    }
                                }
                            }
                        }
                        int channelIndex = g * outputGroupDepth + d;
                        sum += biasData[channelIndex];
                        sum = tflite::MultiplyByQuantizedMultiplier(
                                sum, outputMultiplier[channelIndex], -outputShift[channelIndex]);
                        sum += outputOffset;
                        sum = std::max(std::min(sum, output_activation_max), output_activation_min);
                        outPtr[d] = static_cast<T>(sum);
                        filterBase += filterHeight * filterWidth * filterDepth;
                    }
                    outPtr += outputGroupDepth;
                }
            }
        }
        inputBase += inputHeight * inputWidth * inputDepth;
    }

    return true;
}

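// Float16 grouped convolution, computed by converting the operands to float32, running the
// float32 kernel, and converting the result back to float16.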
bool groupedConvFloat16(const _Float16* inputData, const Shape& inputShape,
                        const _Float16* filterData, const Shape& filterShape,
                        const _Float16* biasData, const Shape& biasShape, int32_t padding_left,
                        int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
                        int32_t stride_width, int32_t stride_height, int32_t numGroups,
                        int32_t activation, _Float16* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("groupConvFloat16");

    std::vector<float> inputData_float32(getNumberOfElements(inputShape));
    std::vector<float> filterData_float32(getNumberOfElements(filterShape));
    std::vector<float> biasData_float32(getNumberOfElements(biasShape));
    std::vector<float> outputData_float32(getNumberOfElements(outputShape));

    convertFloat16ToFloat32(inputData, &inputData_float32);
    convertFloat16ToFloat32(filterData, &filterData_float32);
    convertFloat16ToFloat32(biasData, &biasData_float32);

    groupedConvFloat32(inputData_float32.data(), inputShape, filterData_float32.data(), filterShape,
                       biasData_float32.data(), biasShape, padding_left, padding_right, padding_top,
                       padding_bottom, stride_width, stride_height, numGroups, activation,
                       outputData_float32.data(), outputShape);
    convertFloat32ToFloat16(outputData_float32, outputData);

    return true;
}

template bool groupedConvQuant8PerChannel<uint8_t>(
        const uint8_t* inputData, const Shape& inputShape, const int8_t* filterData,
        const Shape& filterShape, const float* filterScales, const int32_t* biasData,
        const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top,
        int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t numGroups,
        int32_t activation, uint8_t* outputData, const Shape& outputShape);

template bool groupedConvQuant8PerChannel<int8_t>(
        const int8_t* inputData, const Shape& inputShape, const int8_t* filterData,
        const Shape& filterShape, const float* filterScales, const int32_t* biasData,
        const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top,
        int32_t padding_bottom, int32_t stride_width, int32_t stride_height, int32_t numGroups,
        int32_t activation, int8_t* outputData, const Shape& outputShape);

#undef ANDROID_NN_GROUPED_CONV_PARAMETERS
}  // namespace nn
}  // namespace android