/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H

#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "fuzzing/OperationManager.h"
#include "fuzzing/RandomGraphGenerator.h"
#include "fuzzing/RandomGraphGeneratorUtils.h"

namespace android {
namespace nn {
namespace fuzzing_test {

namespace {

using namespace test_helper;

// From TestOperandType to cpp type.
template <TestOperandType type>
struct CppType;
template <>
struct CppType<TestOperandType::TENSOR_FLOAT32> {
    using type = float;
};
template <>
struct CppType<TestOperandType::FLOAT32> {
    using type = float;
};
template <>
struct CppType<TestOperandType::TENSOR_INT32> {
    using type = int32_t;
};
template <>
struct CppType<TestOperandType::INT32> {
    using type = int32_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT8_ASYMM> {
    using type = uint8_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT8_SYMM> {
    using type = int8_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED> {
    using type = int8_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT16_ASYMM> {
    using type = uint16_t;
};
template <>
struct CppType<TestOperandType::TENSOR_QUANT16_SYMM> {
    using type = int16_t;
};
template <>
struct CppType<TestOperandType::TENSOR_BOOL8> {
    using type = bool8;
};
template <>
struct CppType<TestOperandType::BOOL> {
    using type = bool8;
};
template <>
struct CppType<TestOperandType::TENSOR_FLOAT16> {
    using type = _Float16;
};
template <>
struct CppType<TestOperandType::FLOAT16> {
    using type = _Float16;
};

// The buffer value X is chosen uniformly in the range [kMinFloat32, kMaxFloat32]. kMinFloat32 and
// kMaxFloat32 are selected by setting:
// * E[X] = 0, so that a sum is less likely to overflow or underflow;
// * E[abs(X)] = 1, so that a product is less likely to overflow or underflow.
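// (For X uniform on [-a, a], E[X] = 0 and E[abs(X)] = a / 2, so a = 2 satisfies both constraints.)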
constexpr float kMaxFloat32 = 2.0f;
constexpr float kMinFloat32 = -kMaxFloat32;

template <typename T>
inline T getUniformValue(int valueProperties, T low, T up, T zeroPoint) {
    if (valueProperties & RandomOperand::NON_NEGATIVE) {
        NN_FUZZER_CHECK(up >= zeroPoint);
        low = std::max(low, zeroPoint);
    }
    if (valueProperties & RandomOperand::NON_ZERO) {
        return getUniformNonZero(low, up, zeroPoint);
    } else {
        return getUniform(low, up);
    }
}
template <>
inline bool8 getUniformValue(int, bool8, bool8, bool8) {
    return getBernoulli(0.5f);
}

template <typename T>
inline void uniform(T low, T up, T zeroPoint, RandomOperand* op) {
    T* data = reinterpret_cast<T*>(op->buffer.data());
    uint32_t len = op->getNumberOfElements();
    for (uint32_t i = 0; i < len; i++) {
        data[i] = getUniformValue<T>(op->valueProperties, low, up, zeroPoint);
    }
}

// Generate random buffer values with uniform distribution.
// Dispatch to different generators by operand dataType.
inline void uniformFinalizer(RandomOperand* op) {
    switch (op->dataType) {
        case TestOperandType::TENSOR_FLOAT32:
        case TestOperandType::FLOAT32:
            uniform<float>(kMinFloat32, kMaxFloat32, 0.0f, op);
            break;
        case TestOperandType::TENSOR_INT32:
        case TestOperandType::INT32:
            uniform<int32_t>(0, 255, op->zeroPoint, op);
            break;
        case TestOperandType::TENSOR_QUANT8_ASYMM:
            uniform<uint8_t>(0, 255, op->zeroPoint, op);
            break;
        case TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            uniform<int8_t>(-128, 127, op->zeroPoint, op);
            break;
        case TestOperandType::TENSOR_QUANT8_SYMM:
            uniform<int8_t>(-128, 127, op->zeroPoint, op);
            break;
        case TestOperandType::TENSOR_QUANT16_ASYMM:
            uniform<uint16_t>(0, 65535, op->zeroPoint, op);
            break;
        case TestOperandType::TENSOR_QUANT16_SYMM:
            uniform<int16_t>(-32768, 32767, op->zeroPoint, op);
            break;
        case TestOperandType::TENSOR_BOOL8:
            uniform<bool8>(true, false, false, op);
            break;
        case TestOperandType::TENSOR_FLOAT16:
        case TestOperandType::FLOAT16:
            uniform<_Float16>(kMinFloat32, kMaxFloat32, 0.0f, op);
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unsupported data type.";
    }
}
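
// A minimal sketch of a custom finalizer written in the same style as uniformFinalizer above.
// This is illustrative only (it is not used by any signature in this file) and assumes the
// operand is a TENSOR_FLOAT32 whose buffer values must stay strictly positive.
inline void positiveFloat32Finalizer(RandomOperand* op) {
    NN_FUZZER_CHECK(op->dataType == TestOperandType::TENSOR_FLOAT32);
    // Reuse the uniform helper with a positive lower bound so that no element is zero or negative.
    uniform<float>(0.1f, kMaxFloat32, /*zeroPoint=*/0.0f, op);
}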

// A helper struct for the DEFINE_OPERATION_SIGNATURE macro.
struct OperationSignatureHelper {
    std::string name;
    OperationSignatureHelper(const std::string& name) : name(name) {}
    int operator+(const OperationSignature& op) {
        OperationManager::get()->addSignature(name, op);
        return 0;
    }
};

}  // namespace

// Computes the output spatial dimension of a convolution-style operation with an implicit padding
// scheme. For example, with input = 7, filter = 3, stride = 2, dilation = 1: SAME padding gives
// output = (7 + 1) / 2 = 4, and VALID padding gives output = (7 - 3 + 2) / 2 = 3.
inline void implicitPadding(const RandomVariable& input, const RandomVariable& filter,
                            const RandomVariable& stride, const RandomVariable& dilation,
                            int32_t paddingScheme, RandomVariable* output) {
    switch (paddingScheme) {
        case ANEURALNETWORKS_PADDING_SAME:
            *output = (input + (stride - 1)) / stride;
            break;
        case ANEURALNETWORKS_PADDING_VALID:
            *output = (input - filter * dilation + (dilation + stride - 1)) / stride;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unknown padding scheme";
    }
}

// Computes the output spatial dimension of a convolution-style operation with explicit paddings.
inline void explicitPadding(const RandomVariable& input, const RandomVariable& filter,
                            const RandomVariable& stride, const RandomVariable& dilation,
                            const RandomVariable& paddingHead, const RandomVariable& paddingTail,
                            RandomVariable* output) {
    auto effectiveFilter = (filter - 1) * dilation + 1;
    *output = (input - effectiveFilter + (stride + paddingHead + paddingTail)) / stride;
    // TFLite will crash if the filter size is less than or equal to the paddings.
    effectiveFilter.setGreaterThan(paddingHead);
    effectiveFilter.setGreaterThan(paddingTail);
}

// Computes the output spatial dimension of a transposed convolution with an implicit padding
// scheme.
inline void implicitPaddingTranspose(const RandomVariable& input, const RandomVariable& filter,
                                     const RandomVariable& stride, int32_t paddingScheme,
                                     RandomVariable* output) {
    switch (paddingScheme) {
        case ANEURALNETWORKS_PADDING_SAME:
            *output = input * stride;
            break;
        case ANEURALNETWORKS_PADDING_VALID:
            *output = (input - 1) * stride + filter;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Unknown padding scheme";
    }
}

// Computes the output spatial dimension of a transposed convolution with explicit paddings.
inline void explicitPaddingTranspose(const RandomVariable& input, const RandomVariable& filter,
                                     const RandomVariable& stride,
                                     const RandomVariable& paddingHead,
                                     const RandomVariable& paddingTail, RandomVariable* output) {
    *output = stride * input + filter - (stride + paddingHead + paddingTail);
}

// Copies the quantization parameters (scale and zeroPoint) from one operand to another operand of
// the same data type.
inline void setSameQuantization(const std::shared_ptr<RandomOperand>& to,
                                const std::shared_ptr<RandomOperand>& from) {
    NN_FUZZER_CHECK(to->dataType == from->dataType);
    to->scale = from->scale;
    to->zeroPoint = from->zeroPoint;
}

// Sets the operand to the given rank, with every dimension a FREE RandomVariable.
inline void setFreeDimensions(const std::shared_ptr<RandomOperand>& op, uint32_t rank) {
    op->dimensions.resize(rank);
    for (uint32_t i = 0; i < rank; i++) op->dimensions[i] = RandomVariableType::FREE;
}

// For quantized convolution and fully connected operations: sets the bias scale to
// inputScale * filterScale, and optionally bounds the output scale to [biasScale, biasScale * 5].
inline void setConvFCScale(bool applyOutputScaleBound, RandomOperation* op) {
    if (isQuantizedType(op->inputs[0]->dataType)) {
        float biasScale = op->inputs[0]->scale * op->inputs[1]->scale;
        op->inputs[2]->scale = biasScale;
        if (applyOutputScaleBound) {
            op->outputs[0]->scale = getUniform(biasScale, biasScale * 5);
        }
    }
}

// For ops with input0 and output0 of the same dimensions.
inline void sameDimensionOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    setFreeDimensions(op->inputs[0], rank);
    op->outputs[0]->dimensions = op->inputs[0]->dimensions;
}

// For ops with input0 and output0 of the same shape, including scale and zeroPoint.
inline void sameShapeOpConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
    sameDimensionOpConstructor(dataType, rank, op);
    setSameQuantization(op->outputs[0], op->inputs[0]);
}

inline void defaultOperandConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) {
    op->dataType = dataType;
    if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = getUniform<int32_t>(0, 255);
    } else if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = getUniform<int32_t>(-128, 127);
    } else if (dataType == TestOperandType::TENSOR_QUANT8_SYMM) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = 0;
    } else if (dataType == TestOperandType::TENSOR_QUANT16_ASYMM) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = getUniform<int32_t>(0, 65535);
    } else if (dataType == TestOperandType::TENSOR_QUANT16_SYMM) {
        op->scale = getUniform<float>(0.1, 2.0);
        op->zeroPoint = 0;
    } else {
        op->scale = 0.0f;
        op->zeroPoint = 0;
    }
}

inline void defaultScalarOperandConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) {
    switch (dataType) {
        case TestOperandType::TENSOR_FLOAT32:
            op->dataType = TestOperandType::FLOAT32;
            op->scale = 0.0f;
            op->zeroPoint = 0;
            break;
        case TestOperandType::TENSOR_FLOAT16:
            op->dataType = TestOperandType::FLOAT16;
            op->scale = 0.0f;
            op->zeroPoint = 0;
            break;
        case TestOperandType::TENSOR_INT32:
            op->dataType = TestOperandType::INT32;
            op->scale = 0.0f;
            op->zeroPoint = 0;
            break;
        default:
            NN_FUZZER_CHECK(false) << "Data type " << toString(dataType)
                                   << " is not supported in defaultScalarOperandConstructor.";
    }
}

// An INPUT operand with uniformly distributed buffer values. The operand's data type is set the
// same as the operation's primary data type. In the case of a quantized data type, the
// quantization parameters are chosen randomly and uniformly.
#define INPUT_DEFAULT                                                                \
    {                                                                                \
        .type = RandomOperandType::INPUT, .constructor = defaultOperandConstructor,  \
        .finalizer = uniformFinalizer                                                \
    }

// A scalar operand with a uniformly distributed value. The operand's data type is set to the
// corresponding scalar type of the operation's primary data type (which is always a tensor type).
#define INPUT_SCALAR                                                                       \
    {                                                                                      \
        .type = RandomOperandType::INPUT, .constructor = defaultScalarOperandConstructor,  \
        .finalizer = uniformFinalizer                                                      \
    }

// An INPUT operand with a specified data type and uniformly distributed buffer values. In the case
// of a quantized data type, the quantization parameters are chosen randomly and uniformly.
#define INPUT_TYPED(opType)                                                                       \
    {                                                                                             \
        .type = RandomOperandType::INPUT,                                                         \
        .constructor = [](TestOperandType, uint32_t rank,                                         \
                          RandomOperand* op) { defaultOperandConstructor((opType), rank, op); }, \
        .finalizer = uniformFinalizer                                                             \
    }
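
// For illustration only (a hypothetical use, not a signature defined in this header): an operation
// whose first input follows the primary data type and whose second input must always be an INT32
// tensor could declare its inputs as
//   .inputs = {INPUT_DEFAULT, INPUT_TYPED(TestOperandType::TENSOR_INT32)},
// inside its DEFINE_OPERATION_SIGNATURE block (see the macro at the end of this file).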

// For the bias tensor in convolution and fully connected operations.
// An INPUT operand with uniformly distributed buffer values. The operand's data type is set to
// TENSOR_INT32 if the operation's primary data type is TENSOR_QUANT8_ASYMM or
// TENSOR_QUANT8_ASYMM_SIGNED. Otherwise, it is the same as INPUT_DEFAULT.
#define INPUT_BIAS                                                                  \
    {                                                                               \
        .type = RandomOperandType::INPUT,                                           \
        .constructor =                                                              \
                [](TestOperandType dataType, uint32_t rank, RandomOperand* op) {    \
                    if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM ||         \
                        dataType == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) {  \
                        dataType = TestOperandType::TENSOR_INT32;                   \
                    }                                                               \
                    defaultOperandConstructor(dataType, rank, op);                  \
                },                                                                  \
        .finalizer = uniformFinalizer                                               \
    }

// A helper macro for the common code block that fills an operand buffer with a random method.
#define PARAMETER_FILL_BUFFER_HELPER(opType, len, method, ...)                \
    op->dataType = opType;                                                    \
    int length = (len);                                                       \
    if (kScalarDataType[static_cast<int>(opType)]) {                          \
        NN_FUZZER_CHECK(length == 1);                                         \
    } else {                                                                  \
        op->dimensions = {length};                                            \
    }                                                                         \
    op->resizeBuffer<CppType<opType>::type>(length);                          \
    auto data = reinterpret_cast<CppType<opType>::type*>(op->buffer.data());  \
    for (int i = 0; i < length; i++) {                                        \
        data[i] = method<CppType<opType>::type>(__VA_ARGS__);                 \
    }

// A 1-D vector of CONST parameters of length len, each uniformly selected within range [low, up].
#define PARAMETER_VEC_RANGE(opType, len, low, up)                            \
    {                                                                        \
        .type = RandomOperandType::CONST,                                    \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {    \
            PARAMETER_FILL_BUFFER_HELPER(opType, len, getUniform, low, up);  \
        }                                                                    \
    }

// A CONST scalar uniformly selected within range [low, up].
#define PARAMETER_RANGE(opType, low, up) PARAMETER_VEC_RANGE(opType, 1, low, up)

// A CONST floating point scalar uniformly selected within range [low, up]. The operand's data type
// is set to FLOAT16 if the operation's primary data type is TENSOR_FLOAT16. Otherwise, the data
// type is set to FLOAT32.
#define PARAMETER_FLOAT_RANGE(low, up)                                                            \
    {                                                                                             \
        .type = RandomOperandType::CONST,                                                         \
        .constructor = [](TestOperandType dataType, uint32_t, RandomOperand* op) {                \
            if (dataType == TestOperandType::TENSOR_FLOAT16) {                                    \
                PARAMETER_FILL_BUFFER_HELPER(TestOperandType::FLOAT16, 1, getUniform, low, up);   \
            } else {                                                                              \
                PARAMETER_FILL_BUFFER_HELPER(TestOperandType::FLOAT32, 1, getUniform, low, up);   \
            }                                                                                     \
        }                                                                                         \
    }

// A CONST scalar uniformly selected from the provided choices.
#define PARAMETER_CHOICE(opType, ...)                                           \
    {                                                                           \
        .type = RandomOperandType::CONST,                                       \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {       \
            const std::vector<CppType<opType>::type> choices = {__VA_ARGS__};   \
            PARAMETER_FILL_BUFFER_HELPER(opType, 1, getRandomChoice, choices);  \
        }                                                                       \
    }

// A CONST scalar with uninitialized buffer value. The buffer values are expected to be filled in
// the operation constructor or finalizer.
#define PARAMETER_NONE(opType)                                                                      \
    {                                                                                               \
        .type = RandomOperandType::CONST,                                                           \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { op->dataType = opType; }  \
    }

// A CONST omitted operand.
#define PARAMETER_NO_VALUE(opType)                                                                  \
    {                                                                                               \
        .type = RandomOperandType::NO_VALUE,                                                        \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { op->dataType = opType; }  \
    }
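
// Illustrative (hypothetical) uses of the parameter macros above, in the style of the operation
// signature files that include this header:
//   PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3)  // e.g. a fused activation code
//   PARAMETER_RANGE(TestOperandType::INT32, 1, 3)         // e.g. a small positive integer attribute
//   PARAMETER_FLOAT_RANGE(0.1, 10.0)                      // e.g. a positive floating point attribute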

// A CONST integer scalar with value set as a FREE RandomVariable within the default range.
#define RANDOM_INT_FREE                                                    \
    {                                                                      \
        .type = RandomOperandType::CONST,                                  \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {  \
            op->dataType = TestOperandType::INT32;                         \
            op->randomBuffer = {RandomVariableType::FREE};                 \
        }                                                                  \
    }

// A CONST integer scalar with value set as a FREE RandomVariable within range [low, up].
#define RANDOM_INT_RANGE(low, up)                                          \
    {                                                                      \
        .type = RandomOperandType::CONST,                                  \
        .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {  \
            op->dataType = TestOperandType::INT32;                         \
            op->randomBuffer = {RandomVariable((low), (up))};              \
        }                                                                  \
    }

// An OUTPUT operand with data type set the same as the operation's primary data type. In the case
// of a quantized data type, the quantization parameters are chosen randomly and uniformly.
#define OUTPUT_DEFAULT \
    { .type = RandomOperandType::OUTPUT, .constructor = defaultOperandConstructor }

// An OUTPUT operand with a specified data type. In the case of a quantized data type, the
// quantization parameters are chosen randomly and uniformly.
#define OUTPUT_TYPED(opType)                                                    \
    {                                                                           \
        .type = RandomOperandType::OUTPUT,                                      \
        .constructor = [](TestOperandType, uint32_t rank, RandomOperand* op) {  \
            defaultOperandConstructor((opType), rank, op);                      \
        }                                                                       \
    }

// An OUTPUT operand with data type set the same as the operation's primary data type. In the case
// of a quantized data type, the quantization parameters are set to the specified values.
#define OUTPUT_QUANT(fixedScale, fixedZeroPoint)                                         \
    {                                                                                    \
        .type = RandomOperandType::OUTPUT,                                               \
        .constructor = [](TestOperandType dataType, uint32_t rank, RandomOperand* op) {  \
            defaultOperandConstructor(dataType, rank, op);                               \
            if (isQuantizedType(op->dataType)) {                                         \
                op->scale = (fixedScale);                                                \
                op->zeroPoint = (fixedZeroPoint);                                        \
            }                                                                            \
        }                                                                                \
    }

// DEFINE_OPERATION_SIGNATURE creates an OperationSignature by aggregate initialization and adds it
// to the global OperationManager singleton.
//
// Usage:
//   DEFINE_OPERATION_SIGNATURE(name) { aggregate_initialization };
//
// Example:
//   DEFINE_OPERATION_SIGNATURE(RELU_V1_0) {
//           .opType = TestOperationType::RELU,
//           .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
//                                  TestOperandType::TENSOR_QUANT8_ASYMM},
//           .supportedRanks = {1, 2, 3, 4},
//           .version = TestHalVersion::V1_0,
//           .inputs = {INPUT_DEFAULT},
//           .outputs = {OUTPUT_DEFAULT},
//           .constructor = sameShapeOpConstructor};
//
#define DEFINE_OPERATION_SIGNATURE(name) \
    const int dummy_##name = OperationSignatureHelper(#name) + OperationSignature

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android

#endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H