/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORK_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURE_UTILS_H
#define ANDROID_FRAMEWORK_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURE_UTILS_H

#include <functional>
#include <string>
#include <vector>

#include "TestNeuralNetworksWrapper.h"
#include "fuzzing/OperationManager.h"
#include "fuzzing/RandomGraphGenerator.h"
#include "fuzzing/RandomGraphGeneratorUtils.h"

namespace android {
namespace nn {
namespace fuzzing_test {

namespace {

// Maps a Type to the corresponding C++ type.
template <Type type>
struct CppType;
template <>
struct CppType<Type::TENSOR_FLOAT32> {
    using type = float;
};
template <>
struct CppType<Type::FLOAT32> {
    using type = float;
};
template <>
struct CppType<Type::TENSOR_INT32> {
    using type = int32_t;
};
template <>
struct CppType<Type::INT32> {
    using type = int32_t;
};
template <>
struct CppType<Type::TENSOR_QUANT8_ASYMM> {
    using type = uint8_t;
};
template <>
struct CppType<Type::TENSOR_QUANT8_SYMM> {
    using type = int8_t;
};
template <>
struct CppType<Type::TENSOR_QUANT16_ASYMM> {
    using type = uint16_t;
};
template <>
struct CppType<Type::TENSOR_QUANT16_SYMM> {
    using type = int16_t;
};
template <>
struct CppType<Type::TENSOR_BOOL8> {
    using type = bool8;
};
template <>
struct CppType<Type::BOOL> {
    using type = bool8;
};
template <>
struct CppType<Type::TENSOR_FLOAT16> {
    using type = _Float16;
};
template <>
struct CppType<Type::FLOAT16> {
    using type = _Float16;
};

// The buffer value X is chosen uniformly in the range [kMinFloat32, kMaxFloat32]. kMinFloat32 and
// kMaxFloat32 are selected so that:
// * E[X] = 0, making sums less likely to overflow or underflow;
// * E[abs(X)] = 1, making products less likely to overflow or underflow.
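// For X uniformly distributed on [-a, a], E[X] = 0 and E[abs(X)] = a / 2, so a = 2 satisfies both
// constraints, hence kMaxFloat32 = 2.0f.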
constexpr float kMaxFloat32 = 2.0f;
constexpr float kMinFloat32 = -kMaxFloat32;

template <typename T>
inline void uniform(T low, T up, RandomOperand* op) {
    T* data = reinterpret_cast<T*>(op->buffer.data());
    uint32_t len = op->getNumberOfElements();
    for (uint32_t i = 0; i < len; i++) data[i] = getUniform(low, up);
}
template <>
inline void uniform<bool8>(bool8, bool8, RandomOperand* op) {
    bool8* data = reinterpret_cast<bool8*>(op->buffer.data());
    uint32_t len = op->getNumberOfElements();
    for (uint32_t i = 0; i < len; i++) data[i] = getBernoulli(0.5f);
}

// Generates random buffer values with a uniform distribution.
// Dispatches to a type-specific generator based on the operand's dataType.
inline void uniformFinalizer(RandomOperand* op) {
    switch (op->dataType) {
        case Type::TENSOR_FLOAT32:
            uniform<float>(kMinFloat32, kMaxFloat32, op);
            break;
        case Type::TENSOR_INT32:
            uniform<int32_t>(0, 255, op);
            break;
        case Type::TENSOR_QUANT8_ASYMM:
            uniform<uint8_t>(0, 255, op);
            break;
        case Type::TENSOR_QUANT8_SYMM:
            uniform<int8_t>(-128, 127, op);
            break;
        case Type::TENSOR_BOOL8:
            uniform<bool8>(true, false, op);
            break;
        case Type::TENSOR_FLOAT16:
            uniform<_Float16>(kMinFloat32, kMaxFloat32, op);
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unsupported data type.";
    }
}

// A helper struct for the DEFINE_OPERATION_SIGNATURE macro.
struct OperationSignatureHelper {
    std::string name;
    OperationSignatureHelper(const std::string& name) : name(name) {}
    int operator+(const OperationSignature& op) {
        OperationManager::get()->addSignature(name, op);
        return 0;
    }
};

}  // namespace

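// Computes the output dimension of a convolution given an implicit padding scheme. With integer
// ceiling division, SAME padding yields ceil(input / stride) and VALID padding yields
// ceil((input - (filter - 1) * dilation) / stride).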
inline void implicitPadding(const RandomVariable& input, const RandomVariable& filter,
                            const RandomVariable& stride, const RandomVariable& dilation,
                            int32_t paddingScheme, RandomVariable* output) {
    switch (paddingScheme) {
        case ANEURALNETWORKS_PADDING_SAME:
            *output = (input + (stride - 1)) / stride;
            break;
        case ANEURALNETWORKS_PADDING_VALID:
            *output = (input - filter * dilation + (dilation + stride - 1)) / stride;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unknown padding scheme";
    }
}

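// Computes the output dimension of a convolution with explicit paddings:
//   output = (input + paddingHead + paddingTail - effectiveFilter) / stride + 1,
// where effectiveFilter = (filter - 1) * dilation + 1.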
inline void explicitPadding(const RandomVariable& input, const RandomVariable& filter,
                            const RandomVariable& stride, const RandomVariable& dilation,
                            const RandomVariable& paddingHead, const RandomVariable& paddingTail,
                            RandomVariable* output) {
    auto effectiveFilter = (filter - 1) * dilation + 1;
    *output = (input - effectiveFilter + (stride + paddingHead + paddingTail)) / stride;
    // TFLite will crash if the effective filter size is less than or equal to either padding.
    effectiveFilter.setGreaterThan(paddingHead);
    effectiveFilter.setGreaterThan(paddingTail);
}

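// Computes the output dimension of a transposed convolution given an implicit padding scheme:
// SAME padding yields input * stride, and VALID padding yields (input - 1) * stride + filter.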
inline void implicitPaddingTranspose(const RandomVariable& input, const RandomVariable& filter,
                                     const RandomVariable& stride, int32_t paddingScheme,
                                     RandomVariable* output) {
    switch (paddingScheme) {
        case ANEURALNETWORKS_PADDING_SAME:
            *output = input * stride;
            break;
        case ANEURALNETWORKS_PADDING_VALID:
            *output = (input - 1) * stride + filter;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unknown padding scheme";
    }
}

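// Computes the output dimension of a transposed convolution with explicit paddings:
//   output = (input - 1) * stride + filter - (paddingHead + paddingTail).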
inline void explicitPaddingTranspose(const RandomVariable& input, const RandomVariable& filter,
                                     const RandomVariable& stride,
                                     const RandomVariable& paddingHead,
                                     const RandomVariable& paddingTail, RandomVariable* output) {
    *output = stride * input + filter - (stride + paddingHead + paddingTail);
}

inline void setSameQuantization(const std::shared_ptr<RandomOperand>& to,
                                const std::shared_ptr<RandomOperand>& from) {
    NN_FUZZER_CHECK(to->dataType == from->dataType);
    to->scale = from->scale;
    to->zeroPoint = from->zeroPoint;
}

inline void setFreeDimensions(const std::shared_ptr<RandomOperand>& op, uint32_t rank) {
    op->dimensions.resize(rank);
    for (uint32_t i = 0; i < rank; i++) op->dimensions[i] = RandomVariableType::FREE;
}

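// For quantized convolution and fully connected ops, NNAPI requires the bias scale to equal
// inputScale * filterScale. When applyOutputScaleBound is set, the output scale is additionally
// drawn from [biasScale, 5 * biasScale], since older NNAPI versions require outputScale >
// inputScale * filterScale.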
inline void setConvFCScale(bool applyOutputScaleBound, RandomOperation* op) {
    if (op->inputs[0]->dataType == Type::TENSOR_QUANT8_ASYMM) {
        float biasScale = op->inputs[0]->scale * op->inputs[1]->scale;
        op->inputs[2]->scale = biasScale;
        if (applyOutputScaleBound) {
            op->outputs[0]->scale = getUniform(biasScale, biasScale * 5);
        }
    }
}

// For ops where input0 and output0 have the same dimensions.
inline void sameDimensionOpConstructor(Type, uint32_t rank, RandomOperation* op) {
    setFreeDimensions(op->inputs[0], rank);
    op->outputs[0]->dimensions = op->inputs[0]->dimensions;
}

// For ops where input0 and output0 have the same shape, including scale and zeroPoint.
inline void sameShapeOpConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
    sameDimensionOpConstructor(dataType, rank, op);
    setSameQuantization(op->outputs[0], op->inputs[0]);
}

inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op) {
    op->dataType = dataType;
    if (dataType == Type::TENSOR_QUANT8_ASYMM) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = getUniform<int32_t>(0, 255);
    } else if (dataType == Type::TENSOR_QUANT8_SYMM) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = 0;
    } else {
        op->scale = 0.0f;
        op->zeroPoint = 0;
    }
}

// An INPUT operand with uniformly distributed buffer values. The operand's data type is set to the
// operation's primary data type. For quantized data types, the quantization parameters are chosen
// uniformly at random.
#define INPUT_DEFAULT                                                               \
    {                                                                               \
        .type = RandomOperandType::INPUT, .constructor = defaultOperandConstructor, \
        .finalizer = uniformFinalizer                                               \
    }

// An INPUT operand with a specified data type and uniformly distributed buffer values. For
// quantized data types, the quantization parameters are chosen uniformly at random.
#define INPUT_TYPED(opType)                                                                      \
    {                                                                                            \
        .type = RandomOperandType::INPUT,                                                        \
        .constructor = [](Type, uint32_t rank,                                                   \
                          RandomOperand* op) { defaultOperandConstructor((opType), rank, op); }, \
        .finalizer = uniformFinalizer                                                            \
    }

// For the bias tensor in convolution and fully connected operations.
// An INPUT operand with uniformly distributed buffer values. The operand's data type is set to
// TENSOR_INT32 if the operation's primary data type is TENSOR_QUANT8_ASYMM. Otherwise, it is the
// same as INPUT_DEFAULT.
#define INPUT_BIAS                                                    \
    {                                                                 \
        .type = RandomOperandType::INPUT,                             \
        .constructor =                                                \
                [](Type dataType, uint32_t rank, RandomOperand* op) { \
                    if (dataType == Type::TENSOR_QUANT8_ASYMM) {      \
                        dataType = Type::TENSOR_INT32;                \
                    }                                                 \
                    defaultOperandConstructor(dataType, rank, op);    \
                },                                                    \
        .finalizer = uniformFinalizer                                 \
    }

// A helper macro for the common code that fills an operand buffer with the given random method.
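// Note that this macro is intended to be expanded inside an operand constructor lambda: it
// assumes a RandomOperand* named "op" is in scope and declares local variables, so it may appear
// at most once per scope.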
#define PARAMETER_FILL_BUFFER_HELPER(opType, len, method, ...)               \
    op->dataType = opType;                                                   \
    int length = (len);                                                      \
    if (kScalarDataType[static_cast<int>(opType)]) {                         \
        NN_FUZZER_CHECK(length == 1);                                        \
    } else {                                                                 \
        op->dimensions = {length};                                           \
    }                                                                        \
    op->resizeBuffer<CppType<opType>::type>(length);                         \
    auto data = reinterpret_cast<CppType<opType>::type*>(op->buffer.data()); \
    for (int i = 0; i < length; i++) {                                       \
        data[i] = method<CppType<opType>::type>(__VA_ARGS__);                \
    }

// A 1-D vector of CONST parameters of length len, each uniformly selected within range [low, up].
#define PARAMETER_VEC_RANGE(opType, len, low, up)                                                \
    {                                                                                            \
        .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
            PARAMETER_FILL_BUFFER_HELPER(opType, len, getUniform, low, up);                      \
        }                                                                                        \
    }

// A CONST scalar uniformly selected within range [low, up].
#define PARAMETER_RANGE(opType, low, up) PARAMETER_VEC_RANGE(opType, 1, low, up)

// A CONST floating point scalar uniformly selected within range [low, up]. The operand's data type
// is set to FLOAT16 if the operation's primary data type is TENSOR_FLOAT16. Otherwise, the data
// type is set to FLOAT32.
#define PARAMETER_FLOAT_RANGE(low, up)                                               \
    {                                                                                \
        .type = RandomOperandType::CONST,                                            \
        .constructor = [](Type dataType, uint32_t, RandomOperand* op) {              \
            if (dataType == Type::TENSOR_FLOAT16) {                                  \
                PARAMETER_FILL_BUFFER_HELPER(Type::FLOAT16, 1, getUniform, low, up); \
            } else {                                                                 \
                PARAMETER_FILL_BUFFER_HELPER(Type::FLOAT32, 1, getUniform, low, up); \
            }                                                                        \
        }                                                                            \
    }

// A CONST scalar uniformly selected from the provided choices.
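// For illustration, PARAMETER_CHOICE(Type::INT32, 1, 2) produces a scalar that randomly selects
// between the two implicit padding schemes, ANEURALNETWORKS_PADDING_SAME (1) and
// ANEURALNETWORKS_PADDING_VALID (2).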
#define PARAMETER_CHOICE(opType, ...)                                                            \
    {                                                                                            \
        .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
            const std::vector<CppType<opType>::type> choices = {__VA_ARGS__};                    \
            PARAMETER_FILL_BUFFER_HELPER(opType, 1, getRandomChoice, choices);                   \
        }                                                                                        \
    }

// A CONST scalar with an uninitialized buffer value. The buffer value is expected to be filled in
// the operation constructor or finalizer.
#define PARAMETER_NONE(opType)                                                          \
    {                                                                                   \
        .type = RandomOperandType::CONST,                                               \
        .constructor = [](Type, uint32_t, RandomOperand* op) { op->dataType = opType; } \
    }

// A CONST integer scalar with value set as a FREE RandomVariable within default range.
#define RANDOM_INT_FREE                                                                          \
    {                                                                                            \
        .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
            op->dataType = Type::INT32;                                                          \
            op->randomBuffer = {RandomVariableType::FREE};                                       \
        }                                                                                        \
    }

// A CONST integer scalar with value set as a FREE RandomVariable within range [low, up].
#define RANDOM_INT_RANGE(low, up)                                                                \
    {                                                                                            \
        .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
            op->dataType = Type::INT32;                                                          \
            op->randomBuffer = {RandomVariable((low), (up))};                                    \
        }                                                                                        \
    }

// An OUTPUT operand with its data type set to the operation's primary data type. For quantized
// data types, the quantization parameters are chosen uniformly at random.
#define OUTPUT_DEFAULT \
    { .type = RandomOperandType::OUTPUT, .constructor = defaultOperandConstructor }

// An OUTPUT operand with a specified data type. For quantized data types, the quantization
// parameters are chosen uniformly at random.
#define OUTPUT_TYPED(opType)                                        \
    {                                                               \
        .type = RandomOperandType::OUTPUT,                          \
        .constructor = [](Type, uint32_t rank, RandomOperand* op) { \
            defaultOperandConstructor((opType), rank, op);          \
        }                                                           \
    }

// An OUTPUT operand with its data type set to the operation's primary data type. For quantized
// data types, the quantization parameters are set to the specified fixed values.
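// For illustration, an operation such as LOGISTIC, whose quantized output is required to use
// scale = 1/256 and zeroPoint = 0, could declare its output as OUTPUT_QUANT(1.f / 256, 0).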
#define OUTPUT_QUANT(fixedScale, fixedZeroPoint)                             \
    {                                                                        \
        .type = RandomOperandType::OUTPUT,                                   \
        .constructor = [](Type dataType, uint32_t rank, RandomOperand* op) { \
            defaultOperandConstructor(dataType, rank, op);                   \
            if (op->dataType == Type::TENSOR_QUANT8_ASYMM ||                 \
                op->dataType == Type::TENSOR_QUANT8_SYMM) {                  \
                op->scale = (fixedScale);                                    \
                op->zeroPoint = (fixedZeroPoint);                            \
            }                                                                \
        }                                                                    \
    }

// DEFINE_OPERATION_SIGNATURE creates an OperationSignature by aggregate initialization and adds
// it to the global OperationManager singleton.
//
// Usage:
//   DEFINE_OPERATION_SIGNATURE(name) { aggregate_initialization };
//
// Example:
//   DEFINE_OPERATION_SIGNATURE(RELU_V1_0) {
//       .opType = ANEURALNETWORKS_RELU,
//       .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
//       .supportedRanks = {1, 2, 3, 4},
//       .version = HalVersion::V1_0,
//       .inputs = {INPUT_DEFAULT},
//       .outputs = {OUTPUT_DEFAULT},
//       .constructor = sameShapeOpConstructor};
//
#define DEFINE_OPERATION_SIGNATURE(name) \
    const int dummy_##name = OperationSignatureHelper(#name) + OperationSignature

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android

#endif  // ANDROID_FRAMEWORK_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURE_UTILS_H