/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "Comparisons.h"

#include <functional>
#include <vector>

#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsExecutionUtils.h"

namespace android {
namespace nn {
namespace comparisons {
namespace {

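// Computes an element-wise comparison of two tensors, broadcasting each input
// against the output shape. Quantized inputs are dequantized with their scale
// and zero point so that the comparison is performed on real values.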
template <typename DataType, typename ComparisonType>
bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData,
             const Shape& aShape, const DataType* bData, const Shape& bShape, bool8* outputData,
             const Shape& outputShape) {
    IndexedShapeWrapper aShapeIndexed(aShape);
    IndexedShapeWrapper bShapeIndexed(bShape);
    IndexedShapeWrapper outputShapeIndexed(outputShape);
    std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);
    bool lastIndex = false;
    do {
        uint32_t outputFlatIndex;
        NN_RET_CHECK(outputShapeIndexed.indexToFlatIndex(curIndex, &outputFlatIndex));
        uint32_t aFlatIndex;
        NN_RET_CHECK(aShapeIndexed.broadcastedIndexToFlatIndex(curIndex, &aFlatIndex));
        uint32_t bFlatIndex;
        NN_RET_CHECK(bShapeIndexed.broadcastedIndexToFlatIndex(curIndex, &bFlatIndex));

        if (aShape.type == OperandType::TENSOR_QUANT8_ASYMM ||
            aShape.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            const float realA = (aData[aFlatIndex] - aShape.offset) * aShape.scale;
            const float realB = (bData[bFlatIndex] - bShape.offset) * bShape.scale;
            outputData[outputFlatIndex] = func(realA, realB);
        } else {
            outputData[outputFlatIndex] = func(aData[aFlatIndex], bData[bFlatIndex]);
        }

        NN_RET_CHECK(outputShapeIndexed.nextIndexInplace(&curIndex, &lastIndex));
    } while (!lastIndex);
    return true;
}

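// Typed wrappers that bind a standard library comparison functor to compute().
// DataType is the storage type of the tensors; ComparisonType is the type the
// values are converted to before being compared.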
template <typename DataType, typename ComparisonType>
bool executeLessTyped(IOperationExecutionContext* context) {
    return compute<DataType, ComparisonType>(
            std::less<ComparisonType>(), context->getInputBuffer<DataType>(kInputTensor1),
            context->getInputShape(kInputTensor1), context->getInputBuffer<DataType>(kInputTensor2),
            context->getInputShape(kInputTensor2), context->getOutputBuffer<bool8>(kOutputTensor),
            context->getOutputShape(kOutputTensor));
}

template <typename DataType, typename ComparisonType>
bool executeLessEqualTyped(IOperationExecutionContext* context) {
    return compute<DataType, ComparisonType>(
            std::less_equal<ComparisonType>(), context->getInputBuffer<DataType>(kInputTensor1),
            context->getInputShape(kInputTensor1), context->getInputBuffer<DataType>(kInputTensor2),
            context->getInputShape(kInputTensor2), context->getOutputBuffer<bool8>(kOutputTensor),
            context->getOutputShape(kOutputTensor));
}

template <typename DataType, typename ComparisonType>
bool executeEqualTyped(IOperationExecutionContext* context) {
    return compute<DataType, ComparisonType>(
            std::equal_to<ComparisonType>(), context->getInputBuffer<DataType>(kInputTensor1),
            context->getInputShape(kInputTensor1), context->getInputBuffer<DataType>(kInputTensor2),
            context->getInputShape(kInputTensor2), context->getOutputBuffer<bool8>(kOutputTensor),
            context->getOutputShape(kOutputTensor));
}

template <typename DataType, typename ComparisonType>
bool executeNotEqualTyped(IOperationExecutionContext* context) {
    return compute<DataType, ComparisonType>(
            std::not_equal_to<ComparisonType>(), context->getInputBuffer<DataType>(kInputTensor1),
            context->getInputShape(kInputTensor1), context->getInputBuffer<DataType>(kInputTensor2),
            context->getInputShape(kInputTensor2), context->getOutputBuffer<bool8>(kOutputTensor),
            context->getOutputShape(kOutputTensor));
}

template <typename DataType, typename ComparisonType>
bool executeGreaterEqualTyped(IOperationExecutionContext* context) {
    return compute<DataType, ComparisonType>(
            std::greater_equal<ComparisonType>(), context->getInputBuffer<DataType>(kInputTensor1),
            context->getInputShape(kInputTensor1), context->getInputBuffer<DataType>(kInputTensor2),
            context->getInputShape(kInputTensor2), context->getOutputBuffer<bool8>(kOutputTensor),
            context->getOutputShape(kOutputTensor));
}

template <typename DataType, typename ComparisonType>
bool executeGreaterTyped(IOperationExecutionContext* context) {
    return compute<DataType, ComparisonType>(
            std::greater<ComparisonType>(), context->getInputBuffer<DataType>(kInputTensor1),
            context->getInputShape(kInputTensor1), context->getInputBuffer<DataType>(kInputTensor2),
            context->getInputShape(kInputTensor2), context->getOutputBuffer<bool8>(kOutputTensor),
            context->getOutputShape(kOutputTensor));
}

}  // namespace

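// Computes the broadcasted output shape from the two input shapes and commits
// it to the output operand.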
bool prepare(IOperationExecutionContext* context) {
    Shape input1 = context->getInputShape(kInputTensor1);
    Shape input2 = context->getInputShape(kInputTensor2);
    Shape output = context->getOutputShape(kOutputTensor);
    NN_RET_CHECK(calculateBroadcastedShape(input1, input2, &output));
    return context->setOutputShape(kOutputTensor, output);
}

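// The execute* entry points dispatch on the input tensor type. Quantized
// tensors use float as the comparison type so that values are compared after
// dequantization; all other types are compared as stored.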
bool executeLess(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputTensor1)) {
        case OperandType::TENSOR_FLOAT16:
            return executeLessTyped<_Float16, _Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return executeLessTyped<float, float>(context);
        case OperandType::TENSOR_INT32:
            return executeLessTyped<int32_t, int32_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return executeLessTyped<uint8_t, float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return executeLessTyped<int8_t, float>(context);
        case OperandType::TENSOR_BOOL8:
            return executeLessTyped<bool8, bool8>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
    }
}

bool executeLessEqual(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputTensor1)) {
        case OperandType::TENSOR_FLOAT16:
            return executeLessEqualTyped<_Float16, _Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return executeLessEqualTyped<float, float>(context);
        case OperandType::TENSOR_INT32:
            return executeLessEqualTyped<int32_t, int32_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return executeLessEqualTyped<uint8_t, float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return executeLessEqualTyped<int8_t, float>(context);
        case OperandType::TENSOR_BOOL8:
            return executeLessEqualTyped<bool8, bool8>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
    }
}

bool executeEqual(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputTensor1)) {
        case OperandType::TENSOR_FLOAT16:
            return executeEqualTyped<_Float16, _Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return executeEqualTyped<float, float>(context);
        case OperandType::TENSOR_INT32:
            return executeEqualTyped<int32_t, int32_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return executeEqualTyped<uint8_t, float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return executeEqualTyped<int8_t, float>(context);
        case OperandType::TENSOR_BOOL8:
            return executeEqualTyped<bool8, bool8>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
    }
}

bool executeNotEqual(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputTensor1)) {
        case OperandType::TENSOR_FLOAT16:
            return executeNotEqualTyped<_Float16, _Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return executeNotEqualTyped<float, float>(context);
        case OperandType::TENSOR_INT32:
            return executeNotEqualTyped<int32_t, int32_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return executeNotEqualTyped<uint8_t, float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return executeNotEqualTyped<int8_t, float>(context);
        case OperandType::TENSOR_BOOL8:
            return executeNotEqualTyped<bool8, bool8>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
    }
}

bool executeGreaterEqual(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputTensor1)) {
        case OperandType::TENSOR_FLOAT16:
            return executeGreaterEqualTyped<_Float16, _Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return executeGreaterEqualTyped<float, float>(context);
        case OperandType::TENSOR_INT32:
            return executeGreaterEqualTyped<int32_t, int32_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return executeGreaterEqualTyped<uint8_t, float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return executeGreaterEqualTyped<int8_t, float>(context);
        case OperandType::TENSOR_BOOL8:
            return executeGreaterEqualTyped<bool8, bool8>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
    }
}

bool executeGreater(IOperationExecutionContext* context) {
    switch (context->getInputType(kInputTensor1)) {
        case OperandType::TENSOR_FLOAT16:
            return executeGreaterTyped<_Float16, _Float16>(context);
        case OperandType::TENSOR_FLOAT32:
            return executeGreaterTyped<float, float>(context);
        case OperandType::TENSOR_INT32:
            return executeGreaterTyped<int32_t, int32_t>(context);
        case OperandType::TENSOR_QUANT8_ASYMM:
            return executeGreaterTyped<uint8_t, float>(context);
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            return executeGreaterTyped<int8_t, float>(context);
        case OperandType::TENSOR_BOOL8:
            return executeGreaterTyped<bool8, bool8>(context);
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for comparison";
    }
}

}  // namespace comparisons

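// Register each comparison operation with the operation resolver, using the
// framework-provided default validation.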
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(LESS, comparisons::prepare, comparisons::executeLess);
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(LESS_EQUAL, comparisons::prepare,
                                         comparisons::executeLessEqual);
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(EQUAL, comparisons::prepare, comparisons::executeEqual);
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(NOT_EQUAL, comparisons::prepare,
                                         comparisons::executeNotEqual);
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(GREATER_EQUAL, comparisons::prepare,
                                         comparisons::executeGreaterEqual);
NN_REGISTER_OPERATION_DEFAULT_VALIDATION(GREATER, comparisons::prepare,
                                         comparisons::executeGreater);

}  // namespace nn
}  // namespace android