/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <vector>

#include "TestHarness.h"
#include "fuzzing/RandomGraphGeneratorUtils.h"
#include "fuzzing/operation_signatures/OperationSignatureUtils.h"

namespace android {
namespace nn {
namespace fuzzing_test {

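// Constructs the RoI location tensor operand. For quantized signatures, the RoI tensor is always
// TENSOR_QUANT16_ASYMM with scale = 0.125 and zeroPoint = 0; otherwise it keeps the operation's
// data type.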
static void roiTensorConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) {
    op->dataType = dataType;
    if (isQuantizedType(dataType)) {
        op->dataType = TestOperandType::TENSOR_QUANT16_ASYMM;
        op->scale = 0.125f;
        op->zeroPoint = 0;
    }
}

static const OperandSignature kInputRoiTensor = {.type = RandomOperandType::INPUT,
                                                 .constructor = roiTensorConstructor};
static const OperandSignature kOutputRoiTensor = {.type = RandomOperandType::OUTPUT,
                                                  .constructor = roiTensorConstructor};

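// Shape constructor shared by ROI_ALIGN and ROI_POOLING. It sets the dimensions of the feature
// map, RoI, and batch index inputs, and derives the output shape from the output height/width
// parameters and the layout (NCHW/NHWC) flag.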
static void roiConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);
    bool useNchw;
    if (op->opType == TestOperationType::ROI_ALIGN) {
        useNchw = op->inputs[9]->value<bool8>();
    } else {
        useNchw = op->inputs[7]->value<bool8>();
    }

    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
                                 RandomVariableType::FREE, RandomVariableType::FREE};
    op->inputs[1]->dimensions = {RandomVariableType::FREE, 4};
    op->inputs[2]->dimensions = {op->inputs[1]->dimensions[0]};
    auto outBatch = op->inputs[1]->dimensions[0];
    auto outDepth = op->inputs[0]->dimensions[useNchw ? 1 : 3];
    auto outHeight = op->inputs[3]->value<RandomVariable>();
    auto outWidth = op->inputs[4]->value<RandomVariable>();
    if (useNchw) {
        op->outputs[0]->dimensions = {outBatch, outDepth, outHeight, outWidth};
    } else {
        op->outputs[0]->dimensions = {outBatch, outHeight, outWidth, outDepth};
    }

    if (op->opType == TestOperationType::ROI_POOLING) {
        setSameQuantization(op->outputs[0], op->inputs[0]);
    }

    // The values of the RoI tensor have a special format and cannot be generated by another
    // operation.
    op->inputs[1]->doNotConnect = true;
}

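// Fills the RoI tensor with numRois boxes in the format [x1, y1, x2, y2], drawing coordinates
// uniformly such that 0 <= x1 <= x2 <= maxW and 0 <= y1 <= y2 <= maxH.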
template <typename T>
inline void fillRoiTensor(uint32_t numRois, T maxH, T maxW, RandomOperand* op) {
    NN_FUZZER_CHECK(!op->buffer.empty())
            << "Trying to fill ROI tensor but the underlying buffer has not been allocated";
    for (uint32_t i = 0; i < numRois; i++) {
        T low = getUniform<T>(0, maxW);
        op->value<T>(i * 4) = low;
        op->value<T>(i * 4 + 2) = getUniform<T>(low, maxW);
        low = getUniform<T>(0, maxH);
        op->value<T>(i * 4 + 1) = low;
        op->value<T>(i * 4 + 3) = getUniform<T>(low, maxH);
    }
}

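// Finalizer shared by ROI_ALIGN and ROI_POOLING. Once the input dimensions are resolved, it fills
// the RoI tensor with valid boxes scaled by the height/width ratio parameters, and fills the
// batch index tensor with sorted batch indices.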
static void roiFinalizer(RandomOperation* op) {
    bool useNchw;
    if (op->opType == TestOperationType::ROI_ALIGN) {
        useNchw = op->inputs[9]->value<bool8>();
    } else {
        useNchw = op->inputs[7]->value<bool8>();
    }

    uint32_t batch = op->inputs[0]->dimensions[0].getValue();
    uint32_t height = op->inputs[0]->dimensions[useNchw ? 2 : 1].getValue();
    uint32_t width = op->inputs[0]->dimensions[useNchw ? 3 : 2].getValue();
    uint32_t numRois = op->inputs[1]->dimensions[0].getValue();
    // Fill the RoI tensor with values in the format [x1, y1, x2, y2].
    switch (op->inputs[1]->dataType) {
        case TestOperandType::TENSOR_FLOAT32: {
            float maxH = static_cast<float>(height) * op->inputs[5]->value<float>();
            float maxW = static_cast<float>(width) * op->inputs[6]->value<float>();
            fillRoiTensor<float>(numRois, maxH, maxW, op->inputs[1].get());
        } break;
        case TestOperandType::TENSOR_QUANT16_ASYMM: {
            uint16_t maxH = static_cast<float>(height) * op->inputs[5]->value<float>();
            uint16_t maxW = static_cast<float>(width) * op->inputs[6]->value<float>();
            fillRoiTensor<uint16_t>(numRois, maxH, maxW, op->inputs[1].get());
        } break;
        default:
            NN_FUZZER_CHECK(false) << "Unsupported data type.";
    }

    // Fill the batch index tensor.
    std::vector<int32_t> batchIndex(numRois);
    for (uint32_t i = 0; i < numRois; i++) batchIndex[i] = getUniform<int32_t>(0, batch - 1);
    // RoIs with the same batch index are grouped together.
    std::sort(batchIndex.begin(), batchIndex.end());
    for (uint32_t i = 0; i < numRois; i++) op->inputs[2]->value<int32_t>(i) = batchIndex[i];
}

// TestOperandType::TENSOR_FLOAT16 is intentionally excluded for all bounding box ops because
// 1. It has limited precision for computation on bounding box indices, which leads to poor
//    accuracy evaluation.
// 2. There is no actual graph that uses this data type on bounding boxes.

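// ROI_ALIGN inputs: feature map, RoI tensor, batch index tensor, output height, output width,
// height ratio, width ratio, height sampling points, width sampling points, and NCHW layout flag.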
#define DEFINE_ROI_ALIGN_SIGNATURE(ver, ...)                                      \
    DEFINE_OPERATION_SIGNATURE(ROI_ALIGN_##ver){                                  \
            .opType = TestOperationType::ROI_ALIGN,                               \
            .supportedDataTypes = {__VA_ARGS__},                                  \
            .supportedRanks = {4},                                                \
            .version = TestHalVersion::ver,                                       \
            .inputs =                                                             \
                    {                                                             \
                            INPUT_DEFAULT,                                        \
                            kInputRoiTensor,                                      \
                            PARAMETER_NONE(TestOperandType::TENSOR_INT32),        \
                            RANDOM_INT_FREE,                                      \
                            RANDOM_INT_FREE,                                      \
                            PARAMETER_FLOAT_RANGE(0.1f, 10.0f),                   \
                            PARAMETER_FLOAT_RANGE(0.1f, 10.0f),                   \
                            PARAMETER_RANGE(TestOperandType::INT32, 0, 10),       \
                            PARAMETER_RANGE(TestOperandType::INT32, 0, 10),       \
                            PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
                    },                                                            \
            .outputs = {OUTPUT_DEFAULT},                                          \
            .constructor = roiConstructor,                                        \
            .finalizer = roiFinalizer};

DEFINE_ROI_ALIGN_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                           TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_ROI_ALIGN_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);

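// ROI_POOLING inputs: feature map, RoI tensor, batch index tensor, output height, output width,
// height ratio, width ratio, and NCHW layout flag.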
#define DEFINE_ROI_POOLING_SIGNATURE(ver, ...)                                    \
    DEFINE_OPERATION_SIGNATURE(ROI_POOLING_##ver){                                \
            .opType = TestOperationType::ROI_POOLING,                             \
            .supportedDataTypes = {__VA_ARGS__},                                  \
            .supportedRanks = {4},                                                \
            .version = TestHalVersion::ver,                                       \
            .inputs =                                                             \
                    {                                                             \
                            INPUT_DEFAULT,                                        \
                            kInputRoiTensor,                                      \
                            PARAMETER_NONE(TestOperandType::TENSOR_INT32),        \
                            RANDOM_INT_FREE,                                      \
                            RANDOM_INT_FREE,                                      \
                            PARAMETER_FLOAT_RANGE(0.1f, 10.0f),                   \
                            PARAMETER_FLOAT_RANGE(0.1f, 10.0f),                   \
                            PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
                    },                                                            \
            .outputs = {OUTPUT_DEFAULT},                                          \
            .constructor = roiConstructor,                                        \
            .finalizer = roiFinalizer};

DEFINE_ROI_POOLING_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                             TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_ROI_POOLING_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);

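// Shape constructor for HEATMAP_MAX_KEYPOINT. The heatmap input is [numRois, numKeypoints,
// heatmapSize, heatmapSize] (NCHW) or [numRois, heatmapSize, heatmapSize, numKeypoints] (NHWC),
// with heatmapSize >= 2; the outputs are the keypoint scores and keypoint coordinates.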
static void heatmapMaxKeypointConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);

    bool useNchw = op->inputs[2]->value<bool8>();
    RandomVariable heatmapSize = RandomVariableType::FREE;
    RandomVariable numRois = RandomVariableType::FREE;
    RandomVariable numKeypoints = RandomVariableType::FREE;
    heatmapSize.setRange(2, kInvalidValue);

    if (useNchw) {
        op->inputs[0]->dimensions = {numRois, numKeypoints, heatmapSize, heatmapSize};
    } else {
        op->inputs[0]->dimensions = {numRois, heatmapSize, heatmapSize, numKeypoints};
    }
    op->inputs[1]->dimensions = {numRois, 4};
    op->outputs[0]->dimensions = {numRois, numKeypoints};
    op->outputs[1]->dimensions = {numRois, numKeypoints, 2};

    // The values of the RoI tensor have a special format and cannot be generated by another
    // operation.
    op->inputs[1]->doNotConnect = true;

    // TODO: This is an ugly fix due to a limitation of the current generator, which cannot handle
    // dimension dependencies within a single input. Without the following line, most of the
    // generated HEATMAP_MAX_KEYPOINT graphs would be invalid and trigger a retry.
    RandomVariableNetwork::get()->addDimensionProd(
            {numRois, numKeypoints, heatmapSize * heatmapSize});
}

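// Finalizer for HEATMAP_MAX_KEYPOINT: fills the RoI tensor with valid boxes once the heatmap
// dimensions are resolved. For the quantized case, the coordinates are scaled by 8 to account for
// the RoI tensor's 0.125 quantization scale.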
static void heatmapMaxKeypointFinalizer(RandomOperation* op) {
    uint32_t numRois = op->inputs[0]->dimensions[0].getValue();
    uint32_t heatmapSize = op->inputs[0]->dimensions[2].getValue();
    // Fill the RoI tensor with values in the format [x1, y1, x2, y2].
    switch (op->inputs[1]->dataType) {
        case TestOperandType::TENSOR_FLOAT32: {
            float maxSize = heatmapSize;
            fillRoiTensor<float>(numRois, maxSize, maxSize, op->inputs[1].get());
        } break;
        case TestOperandType::TENSOR_QUANT16_ASYMM: {
            uint16_t maxSize = static_cast<uint16_t>(heatmapSize * 8);
            fillRoiTensor<uint16_t>(numRois, maxSize, maxSize, op->inputs[1].get());
        } break;
        default:
            NN_FUZZER_CHECK(false) << "Unsupported data type.";
    }
}

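// HEATMAP_MAX_KEYPOINT inputs: heatmap, RoI tensor, and NCHW layout flag; outputs: keypoint
// scores and keypoint coordinates (the coordinate output reuses the RoI tensor constructor).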
#define DEFINE_HEATMAP_MAX_KEYPOINT_SIGNATURE(ver, ...)                       \
    DEFINE_OPERATION_SIGNATURE(HEATMAP_MAX_KEYPOINT_##ver){                   \
            .opType = TestOperationType::HEATMAP_MAX_KEYPOINT,                \
            .supportedDataTypes = {__VA_ARGS__},                              \
            .supportedRanks = {4},                                            \
            .version = TestHalVersion::ver,                                   \
            .inputs = {INPUT_DEFAULT, kInputRoiTensor,                        \
                       PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
            .outputs = {OUTPUT_DEFAULT, kOutputRoiTensor},                    \
            .constructor = heatmapMaxKeypointConstructor,                     \
            .finalizer = heatmapMaxKeypointFinalizer};

DEFINE_HEATMAP_MAX_KEYPOINT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
                                      TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_HEATMAP_MAX_KEYPOINT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android