1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_TYPES_NNAPI_OPERATION_TYPES_H
18 #define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_TYPES_NNAPI_OPERATION_TYPES_H
19 
20 namespace android::nn {
21 
22 /**
23  * Operation types.
24  *
25  * The type of an operation in a model.
26  */
27 enum class OperationType {
28     /**
29      * Adds two tensors, element-wise.
30      *
31      * Takes two input tensors of identical {@link OperandType} and compatible
32      * dimensions. The output is the sum of both input tensors, optionally
33      * modified by an activation function.
34      *
35      * Two dimensions are compatible when:
36      *     1. they are equal, or
37      *     2. one of them is 1
38      *
39      * The size of the output is the maximum size along each dimension of the
40      * input operands. It starts with the trailing dimensions, and works its
41      * way forward.
42      *
43      * Example:
44      *
45      *     input1.dimension = {4, 1, 2}
46      *     input2.dimension = {5, 4, 3, 1}
47      *     output.dimension = {5, 4, 3, 2}
48      *
49      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
50      * dimension is only compatible with 0 or 1. The size of the output
51      * dimension is zero if either of corresponding input dimension is zero.
52      *
53      * Supported tensor {@link OperandType}:
54      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
55      * * {@link OperandType::TENSOR_FLOAT32}
56      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
57      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
58      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
59      *
60      * Supported tensor rank: up to 4
61      *
62      * Inputs:
63      * * 0: A tensor.
64      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
65      *      as input0.
66      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
67      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
68      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
69      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
70      *      {@link FusedActivationFunc} values. Specifies the activation to
71      *      invoke on the result.
72      *      For a {@link OperandType::TENSOR_INT32} tensor,
73      *      the {@link FusedActivationFunc} must be "NONE".
74      *
75      * Outputs:
76      * * 0: The sum, a tensor of the same {@link OperandType} as input0.
77      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
78      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
79      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
80      */
81     ADD = 0,
82 
83     /**
84      * Performs a 2-D average pooling operation.
85      *
86      * The output dimensions are functions of the filter dimensions, stride, and
87      * padding.
88      *
89      * The values in the output tensor are computed as:
90      *
91      *     output[b, i, j, channel] =
92      *         sum_{di, dj}(
93      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
94      *         ) / sum(1)
95      *
96      * Supported tensor {@link OperandType}:
97      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
98      * * {@link OperandType::TENSOR_FLOAT32}
99      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
100      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
101      *
102      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
103      * With the default data layout NHWC, the data is stored in the order of:
104      * [batch, height, width, channels]. Alternatively, the data layout could
105      * be NCHW, the data storage order of: [batch, channels, height, width].
106      * NCHW is supported since HAL version 1.2.
107      *
108      * Both explicit padding and implicit padding are supported.
109      *
110      * Inputs (explicit padding):
111      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
112      *      the input.
113      *      Since HAL version 1.2, zero batches is supported for this tensor.
114      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
115      *      the left, in the ‘width’ dimension.
116      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
117      *      the right, in the ‘width’ dimension.
118      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
119      *      the top, in the ‘height’ dimension.
120      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
121      *      the bottom, in the ‘height’ dimension.
122      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
123      *      walking through input in the ‘width’ dimension.
124      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
125      *      walking through input in the ‘height’ dimension.
126      * * 7: An {@link OperandType::INT32} scalar, specifying the filter
127      *      width.
128      * * 8: An {@link OperandType::INT32} scalar, specifying the filter
129      *      height.
130      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
131      *      {@link FusedActivationFunc} values. Specifies the activation to
132      *      invoke on the result.
133      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
134      *       Set to true to specify NCHW data layout for input0 and output0.
135      *       Available since HAL version 1.2.
136      *
137      * Inputs (implicit padding):
138      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
139      *      the input.
140      *      Since HAL version 1.2, zero batches is supported for this tensor.
141      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
142      *      padding scheme, has to be one of the
143      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
144      * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
145      *      walking through input in the ‘width’ dimension.
146      * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
147      *      walking through input in the ‘height’ dimension.
148      * * 4: An {@link OperandType::INT32} scalar, specifying the filter
149      *      width.
150      * * 5: An {@link OperandType::INT32} scalar, specifying the filter
151      *      height.
152      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
153      *      {@link FusedActivationFunc} values. Specifies the activation to
154      *      invoke on the result.
155      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
156      *      Set to true to specify NCHW data layout for input0 and output0.
157      *      Available since HAL version 1.2.
158      *
159      * Outputs:
160      * * 0: The output 4-D tensor, of shape
161      *      [batches, out_height, out_width, depth].
162      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
163      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
164      *      the scale and zeroPoint must be the same as input0.
165      */
166     AVERAGE_POOL_2D = 1,
167 
168     /**
169      * Concatenates the input tensors along the given dimension.
170      *
171      * The input tensors must have identical {@link OperandType} and the same
172      * dimensions except the dimension along the concatenation axis.
173      *
174      * Supported tensor {@link OperandType}:
175      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
176      * * {@link OperandType::TENSOR_FLOAT32}
177      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
178      *   (full support since HAL version 1.2, see the input section)
179      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
180      *
181      * Supported tensor rank: up to 4
182      *
183      * Inputs:
184      * * 0 ~ n-1: The list of n input tensors, of shape
185      *            [D0, D1, ..., Daxis(i), ..., Dm].
186      *            Before HAL version 1.2, all input tensors of
187      *            {@link OperandType::TENSOR_QUANT8_ASYMM}
188      *            must have the same scale and zeroPoint as the output tensor.
189      *            Input tensors of
190      *            {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
191      *            are allowed to have different scale and zeroPoint.
192      *            Since HAL version 1.2, zero-sized tensors are supported.
193      * * n: An {@link OperandType::INT32} scalar, specifying the
194      *      concatenation axis.
195      *
196      * Outputs:
197      * * 0: The output, a tensor of the same {@link OperandType} as the input
198      *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
199      *      Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
200      *      the scale and zeroPoint values can be different from
201      *      input tensors. Before HAL version 1.2 they have to be the same as for the
202      *      input tensors.
203      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
204      *      the scale and zeroPoint values can be different from input tensors.
205      */
206     CONCATENATION = 2,
207 
208     /**
209      * Performs a 2-D convolution operation.
210      *
211      * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
212      * batch of images, applying the filter to each window of each image of the
213      * appropriate size.
214      *
215      * The output dimensions are functions of the filter dimensions, stride, and
216      * padding.
217      *
218      * The values in the output tensor are computed as:
219      *
220      *     output[b, i, j, channel] =
221      *         sum_{di, dj, k} (
222      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
223      *             filter[channel, di, dj, k]
224      *         ) + bias[channel]
225      *
226      * Supported tensor {@link OperandType} configurations:
227      * * 32 bit floating point:
228      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
229      *
230      * * Quantized:
231      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
232      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
233      * * * input.scale * filter.scale).
234      *
235      * Available since HAL version 1.2:
236      * * 16 bit floating point:
237      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
238      *
239      * * Quantized with symmetric per channel quantization for the filter:
240      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
241      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
242      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
243      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
244      *
245      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
246      * With the default data layout NHWC, the data is stored in the order of:
247      * [batch, height, width, channels]. Alternatively, the data layout could
248      * be NCHW, the data storage order of: [batch, channels, height, width].
249      * NCHW is supported since HAL version 1.2.
250      *
251      * Both explicit padding and implicit padding are supported.
252      *
253      * Inputs (explicit padding):
254      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
255      *      specifying the input.
256      *      Since HAL version 1.2, zero batches is supported for this tensor.
257      * * 1: A 4-D tensor, of shape
258      *      [depth_out, filter_height, filter_width, depth_in], specifying the
259      *      filter.
260      *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
261      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
262      *      must be set to 0.
263      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
264      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
265      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
266      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
267      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
268      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
269      *      of 0 and bias_scale == input_scale * filter_scale.
270      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
271      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
272      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
273      *      bias_scale[i] = input_scale * filter_scale[i].
274      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
275      *      the left, in the ‘width’ dimension.
276      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
277      *      the right, in the ‘width’ dimension.
278      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
279      *      the top, in the ‘height’ dimension.
280      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
281      *      the bottom, in the ‘height’ dimension.
282      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
283      *      walking through input in the ‘width’ dimension.
284      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
285      *      walking through input in the ‘height’ dimension.
286      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
287      *      {@link FusedActivationFunc} values. Specifies the activation to
288      *      invoke on the result.
289      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
290      *      Set to true to specify NCHW data layout for input0 and output0.
291      *      Available since HAL version 1.2.
292      * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
293      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
294      *      cells between each filter element on width dimension. If this input is set,
295      *      input 12 (dilation factor for height) must be specified as well.
296      *      Available since HAL version 1.2.
297      * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
298      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
299      *      cells between each filter element on height dimension. If this input is set,
300      *      input 11 (dilation factor for width) must be specified as well.
301      *      Available since HAL version 1.2.
302      *
303      * Inputs (implicit padding):
304      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
305      *      specifying the input.
306      *      Since HAL version 1.2, zero batches is supported for this tensor.
307      * * 1: A 4-D tensor, of shape
308      *      [depth_out, filter_height, filter_width, depth_in], specifying the
309      *      filter.
310      *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
311      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
312      *      must be set to 0.
313      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
314      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
315      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
316      *      type.
317      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
318      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
319      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
320      *      of 0 and bias_scale == input_scale * filter_scale.
321      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
322      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
323      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
324      *      bias_scale[i] = input_scale * filter_scale[i].
325      * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
326      *      padding scheme, has to be one of the
327      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
328      * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
329      *      walking through input in the ‘width’ dimension.
330      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
331      *      walking through input in the ‘height’ dimension.
332      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
333      *      {@link FusedActivationFunc} values. Specifies the activation to
334      *      invoke on the result.
335      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
336      *      Set to true to specify NCHW data layout for input0 and output0.
337      *      Available since HAL version 1.2.
338      * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
339      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
340      *      cells between each filter element on width dimension. If this input is set,
341      *      input 9 (dilation factor for height) must be specified as well.
342      *      Available since HAL version 1.2.
343      * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
344      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
345      *      cells between each filter element on height dimension. If this input is set,
346      *      input 8 (dilation factor for width) must be specified as well.
347      *      Available since HAL version 1.2.
348      *
349      * Outputs:
350      * * 0: The output 4-D tensor, of shape
351      *      [batches, out_height, out_width, depth_out].
352      *      Before HAL version 1.2, for output tensor of
353      *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
354      *      be satisfied: output_scale > input_scale * filter_scale
355      */
356     CONV_2D = 3,
357 
358     /**
359      * Performs a depthwise 2-D convolution operation.
360      *
361      * Given an input tensor of shape [batches, height, width, depth_in] and a
362      * filter tensor of shape [1, filter_height, filter_width, depth_out]
363      * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
364      * applies a different filter to each input channel (expanding from 1
365      * channel to channel_multiplier channels for each), then concatenates the
366      * results together.
367      *
368      * The output has depth_out = depth_in * depth_multiplier channels.
369      * The output dimensions are functions of the filter dimensions, stride, and
370      * padding.
371      *
372      * The values in the output tensor are computed as:
373      *
374      *     output[b, i, j, k * channel_multiplier + q] =
375      *         sum_{di, dj} (
376      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
377      *             filter[1, di, dj, k * channel_multiplier + q]
378      *         ) + bias[k * channel_multiplier + q]
379      *
380      * Supported tensor {@link OperandType} configurations:
381      * * 32 bit floating point:
382      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
383      *
384      * * Quantized:
385      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
386      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
387      * * * input.scale * filter.scale).
388      *
389      * Available since HAL version 1.2:
390      * * 16 bit floating point:
391      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
392      *
393      * * Quantized with symmetric per channel quantization for the filter:
394      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
395      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
396      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
397      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
398      *
399      * Available since HAL version 1.3:
400      * * Quantized signed (since HAL version 1.3):
401      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
402      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
403      * * * input.scale * filter.scale).
404      *
405      * * Quantized signed with filter symmetric per channel quantization
406      *   (since HAL version 1.3):
407      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
408      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
409      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
410      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
411      *
412      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
413      * With the default data layout NHWC, the data is stored in the order of:
414      * [batch, height, width, channels]. Alternatively, the data layout could
415      * be NCHW, the data storage order of: [batch, channels, height, width].
416      * NCHW is supported since HAL version 1.2.
417      *
418      * Both explicit padding and implicit padding are supported.
419      *
420      * Inputs (explicit padding):
421      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
422      *      specifying the input.
423      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
424      *      specifying the filter.
425      *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
426      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
427      *      must be set to 3.
428      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
429      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
430      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
431      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
432      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
433      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
434      *      of 0 and bias_scale == input_scale * filter_scale.
435      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
436      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
437      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
438      *      bias_scale[i] = input_scale * filter_scale[i].
439      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
440      *      the left, in the ‘width’ dimension.
441      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
442      *      the right, in the ‘width’ dimension.
443      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
444      *      the top, in the ‘height’ dimension.
445      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
446      *      the bottom, in the ‘height’ dimension.
447      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
448      *      walking through input in the ‘width’ dimension.
449      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
450      *      walking through input in the ‘height’ dimension.
451      * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
452      *      multiplier.
453      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
454      *       {@link FusedActivationFunc} values. Specifies the activation to
455      *       invoke on the result.
456      * * 11: An optional {@link OperandType::BOOL} scalar, default to false.
457      *       Set to true to specify NCHW data layout for input0 and output0.
458      *       Available since HAL version 1.2.
459      * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
460      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
461      *      cells between each filter element on width dimension. If this input is set,
462      *      input 13 (dilation factor for height) must be specified as well.
463      *      Available since HAL version 1.2.
464      * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
465      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
466      *      cells between each filter element on height dimension. If this input is set,
467      *      input 12 (dilation factor for width) must be specified as well.
468      *      Available since HAL version 1.2.
469      *
470      * Inputs (implicit padding):
471      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
472      *      specifying the input.
473      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
474      *      specifying the filter.
475      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
476      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
477      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
478      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
479      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
480      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
481      *      of 0 and bias_scale == input_scale * filter_scale.
482      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
483      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
484      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
485      *      bias_scale[i] = input_scale * filter_scale[i].
486      * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
487      *      padding scheme, has to be one of the
488      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
489      * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
490      *      walking through input in the ‘width’ dimension.
491      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
492      *      walking through input in the ‘height’ dimension.
493      * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
494      *      multiplier.
495      * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
496      *      {@link FusedActivationFunc} values. Specifies the activation to
497      *      invoke on the result.
498      * * 8: An optional {@link OperandType::BOOL} scalar, default to false.
499      *      Set to true to specify NCHW data layout for input0 and output0.
500      *      Available since HAL version 1.2.
501      * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
502      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
503      *      cells between each filter element on width dimension. If this input is set,
504      *      input 10 (dilation factor for height) must be specified as well.
505      *      Available since HAL version 1.2.
506      * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
507      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
508      *      cells between each filter element on height dimension. If this input is set,
509      *      input 9 (dilation factor for width) must be specified as well.
510      *      Available since HAL version 1.2.
511      *
512      * Outputs:
513      * * 0: The output 4-D tensor, of shape
514      *      [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
515      *      output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
516      *      the following condition must be satisfied:
517      *      output_scale > input_scale * filter_scale
518      */
519     DEPTHWISE_CONV_2D = 4,
520 
521     /**
522      * Rearranges data from depth into blocks of spatial data.
523      *
524      * More specifically, this op outputs a copy of the input tensor where
525      * values from the depth dimension are moved in spatial blocks to the height
526      * and width dimensions. The value block_size indicates the input block size
527      * and how the data is moved.
528      *
529      * Chunks of data of size block_size * block_size from depth are rearranged
530      * into non-overlapping blocks of size block_size x block_size.
531      *
532      * The width of the output tensor is input_depth * block_size, whereas the
533      * height is input_height * block_size. The depth of the input tensor must
534      * be divisible by block_size * block_size
535      *
536      * Supported tensor {@link OperandType}:
537      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
538      * * {@link OperandType::TENSOR_FLOAT32}
539      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
540      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
541      *
542      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
543      * With the default data layout NHWC, the data is stored in the order of:
544      * [batch, height, width, channels]. Alternatively, the data layout could
545      * be NCHW, the data storage order of: [batch, channels, height, width].
546      * NCHW is supported since HAL version 1.2.
547      *
548      * Inputs:
549      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
550      *      specifying the input.
551      * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
552      *      block_size must be >=1 and block_size * block_size must be a divisor
553      *      of the input depth.
554      * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
555      *      Set to true to specify NCHW data layout for input0 and output0.
556      *      Available since HAL version 1.2.
557      *
558      * Outputs:
559      * * 0: The output 4-D tensor, of shape [batch, height*block_size,
560      *      width*block_size, depth/(block_size*block_size)].
561      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
562      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
563      *      the scale and zeroPoint must be the same as input0.
564      */
565     DEPTH_TO_SPACE = 5,
566 
567     /**
568      * Dequantizes the input tensor.
569      *
570      * The formula is:
571      *
572      *     output = (input - zeroPoint) * scale.
573      *
574      * Supported input tensor {@link OperandType}:
575      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
576      * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
577      * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
578      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
579      *
580      * Supported output tensor {@link OperandType}:
581      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
582      * * {@link OperandType::TENSOR_FLOAT32}.
583      *
584      * Supported tensor rank: up to 4
585      *
586      * Inputs:
587      * * 0: A tensor.
588      *      Since HAL version 1.2, this tensor may be zero-sized.
589      *
590      * Outputs:
591      * * 0: A tensor with the same shape as input0.
592      */
593     DEQUANTIZE = 6,
594 
595     /**
596      * Looks up sub-tensors in the input tensor.
597      *
598      * This operator takes for input a tensor of values (Values) and
599      * a one-dimensional tensor of selection indices (Lookups).
600      * The output tensor is the concatenation of sub-tensors of Values as
601      * selected by Lookups.
602      *
603      * Think of Values as being sliced along its first dimension:
604      * The entries in Lookups select which slices are concatenated together
605      * to create the output tensor.
606      *
607      * For example, if Values has shape of [40, 200, 300] and
608      * Lookups has shape of [3], all three values found in Lookups are
609      * expected to be between 0 and 39. The resulting tensor must
610      * have shape of [3, 200, 300].
611      *
612      * If a value in Lookups is out of bounds, the operation must fail
613      * and an error must be reported.
614      *
615      * Supported value tensor {@link OperandType}:
616      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.3)
617      * * {@link OperandType::TENSOR_FLOAT32}
618      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.2)
619      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
620      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
621      *
622      * Supported value tensor rank: from 2
623      *
624      * Inputs:
625      * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
626      *      The values are indices into the first dimension of Values.
627      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
628      *      extracted.
629      *
630      * Output:
631      * * 0: A n-D tensor with the same rank and shape as the Values
632      *      tensor, except for the first dimension which has the same size
633      *      as Lookups' only dimension.
634      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
635      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
636      *      the scale and zeroPoint must be the same as input1.
637      */
638     EMBEDDING_LOOKUP = 7,
639 
640     /**
641      * Computes element-wise floor() on the input tensor.
642      *
643      * Supported tensor {@link OperandType}:
644      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
645      * * {@link OperandType::TENSOR_FLOAT32}
646      *
647      * Supported tensor rank: up to 4
648      *
649      * Inputs:
650      * * 0: A tensor.
651      *
652      * Outputs:
653      * * 0: The output tensor, of the same {@link OperandType} and dimensions as
654      *      the input tensor.
655      */
656     FLOOR = 8,
657 
658     /**
659      * Denotes a fully (densely) connected layer, which connects all elements
660      * in the input tensor with each element in the output tensor.
661      *
662      * This layer implements the operation:
663      *
664      *     outputs = activation(inputs * weights’ + bias)
665      *
666      * Supported tensor {@link OperandType}:
667      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
668      * * {@link OperandType::TENSOR_FLOAT32}
669      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
670      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
671      *
672      * Supported tensor rank: up to 4.
673      *
674      * Inputs:
675      * * 0: A tensor of at least rank 2, specifying the input. If rank is
676      *      greater than 2, then it gets flattened to a 2-D Tensor. The
677      *      (flattened) 2-D Tensor is reshaped (if necessary) to
678      *      [batch_size, input_size], where "input_size" corresponds to the
679      *      number of inputs to the layer, matching the second dimension of
680      *      weights, and "batch_size" is calculated by dividing the number of
681      *      elements by "input_size".
682      *      Since HAL version 1.2, zero batch_size is supported for this tensor.
683      * * 1: A 2-D tensor, specifying the weights, of shape
684      *      [num_units, input_size], where "num_units" corresponds to the number
685      *      of output nodes.
686      * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
687      *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
688      *      also be of {@link OperandType::TENSOR_FLOAT32}.
689      *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
690      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
691      *      the bias should be of {@link OperandType::TENSOR_INT32},
692      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
693      * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
694      *      {@link FusedActivationFunc} values. Specifies the activation to
695      *      invoke on the result.
696      *
697      * Outputs:
698      * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
699      *      output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
700      *      condition must be satisfied: output_scale > input_scale * filter_scale.
701      */
702     FULLY_CONNECTED = 9,
703 
704     /**
705      * Looks up sub-tensors in the input tensor using a key-value map.
706      *
707      * This operator takes for input a tensor of values (Values),
708      * a one-dimensional tensor of selection values (Lookups) and
709      * a one-dimensional tensor that maps these values to Values
710      * indexes. The output tensor is the concatenation of sub-tensors of
711      * Values as selected by Lookups via Keys.
712      *
713      * Think of Values as being sliced along its outer-most dimension.
714      * The output is a concatenation of selected slices, with one slice
715      * for each entry of Lookups. The slice selected is the one at the
716      * same index as the Maps entry that matches the value in Lookups.
717      *
718      * For a hit, the corresponding sub-tensor of Values is included
719      * in the Output tensor. For a miss, the corresponding sub-tensor in
720      * Output must have zero values.
721      *
722      * For example, if Values has shape of [40, 200, 300],
723      * Keys should have a shape of [40]. If Lookups tensor has shape
724      * of [3], three slices are being concatenated, so the resulting tensor
725      * must have the shape of [3, 200, 300]. If the first entry in Lookups
726      * has the value 123456, that value must be located in Keys tensor.
727      * If the sixth entry of Keys contains 123456, the sixth slice of Values
728      * must be selected. If no entry in Keys has 123456, a slice of zeroes
729      * must be concatenated.
730      *
731      * Supported value tensor {@link OperandType}:
732      * * {@link OperandType::TENSOR_FLOAT32}
733      * * {@link OperandType::TENSOR_INT32}
734      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
735      *
736      * Supported value tensor rank: from 2
737      *
738      * Inputs:
739      * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
740      *      shape [ k ].
741      * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
742      *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
743      *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
744      *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
745      *      ascending order.
746      * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
747      *      must be n.
748      *
749      * Outputs:
750      * * 0: Output. A tensor with shape [ k …].
751      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
752      *      the scale and zeroPoint must be the same as input2.
753      * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
754      *      hits (True) or not (False).
755      *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
756      *      and scale 1.0f.
757      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
758      */
759     HASHTABLE_LOOKUP = 10,
760 
761     /**
762      * Applies L2 normalization along the axis dimension.
763      *
764      * The values in the output tensor are computed as:
765      *
766      *     output[batch, row, col, channel] =
767      *         input[batch, row, col, channel] /
768      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
769      *
770      * By default the axis dimension is the last dimension of the input tensor.
771      *
772      * Supported tensor {@link OperandType}:
773      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
774      * * {@link OperandType::TENSOR_FLOAT32}
775      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
776      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
777      *
778      * Supported tensor rank: up to 4
779      * Tensors with rank less than 4 are only supported since HAL version 1.2.
780      *
781      * Inputs:
782      * * 0: An n-D tensor, specifying the tensor to be normalized.
783      * * 1: An optional {@link OperandType::INT32} scalar, default to -1,
784      *      specifying the dimension normalization would be performed on.
785      *      Negative index is used to specify axis from the end (e.g. -1 for
786      *      the last axis). Must be in the range [-n, n).
787      *      Available since HAL version 1.2.
788      *
789      * Outputs:
790      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
791      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
792      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
793      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
794      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
795      *
796      *      NOTE: Before HAL version 1.3, if the elements along an axis are all zeros,
797      *      the result is undefined. Since HAL version 1.3, if the elements along an axis
798      *      are all zeros, the result is logical zero.
799      */
800     L2_NORMALIZATION = 11,
801 
802     /**
803      * Performs an 2-D L2 pooling operation.
804      *
805      * The output dimensions are functions of the filter dimensions, stride, and
806      * padding.
807      *
808      * The values in the output tensor are computed as:
809      *
810      *     output[b, i, j, c] =
811      *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
812      *              sum(1))
813      *
814      * Supported tensor {@link OperandType}:
815      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
816      * * {@link OperandType::TENSOR_FLOAT32}
817      *
818      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
819      * With the default data layout NHWC, the data is stored in the order of:
820      * [batch, height, width, channels]. Alternatively, the data layout could
821      * be NCHW, the data storage order of: [batch, channels, height, width].
822      * NCHW is supported since HAL version 1.2.
823      *
824      * Both explicit padding and implicit padding are supported.
825      *
826      * Inputs (explicit padding):
827      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
828      *      the input.
829      *      Since HAL version 1.2, zero batches is supported for this tensor.
830      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
831      *      the left, in the ‘width’ dimension.
832      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
833      *      the right, in the ‘width’ dimension.
834      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
835      *      the top, in the ‘height’ dimension.
836      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
837      *      the bottom, in the ‘height’ dimension.
838      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
839      *      walking through input in the ‘width’ dimension.
840      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
841      *      walking through input in the ‘height’ dimension.
842      * * 7: An {@link OperandType::INT32} scalar, specifying the filter
843      *      width.
844      * * 8: An {@link OperandType::INT32} scalar, specifying the filter
845      *      height.
846      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
847      *      {@link FusedActivationFunc} values. Specifies the activation to
848      *      invoke on the result.
849      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
850      *       Set to true to specify NCHW data layout for input0 and output0.
851      *       Available since HAL version 1.2.
852      *
853      * Inputs (implicit padding):
854      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
855      *      the input.
856      *      Since HAL version 1.2, zero batches is supported for this tensor.
857      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
858      *      padding scheme, has to be one of the
859      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
860      * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
861      *      walking through input in the ‘width’ dimension.
862      * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
863      *      walking through input in the ‘height’ dimension.
864      * * 4: An {@link OperandType::INT32} scalar, specifying the filter
865      *      width.
866      * * 5: An {@link OperandType::INT32} scalar, specifying the filter
867      *      height.
868      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
869      *      {@link FusedActivationFunc} values. Specifies the activation to
870      *      invoke on the result.
871      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
872      *      Set to true to specify NCHW data layout for input0 and output0.
873      *      Available since HAL version 1.2.
874      *
875      * Outputs:
876      * * 0: The output 4-D tensor, of shape
877      *      [batches, out_height, out_width, depth].
878      */
879     L2_POOL_2D = 12,
880 
881     /**
882      * Applies Local Response Normalization along the depth dimension.
883      *
884      * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
885      * last dimension), and each vector is normalized independently. Within a
886      * given vector, each component is divided by the weighted, squared sum of
887      * inputs within depth_radius.
888      *
889      * The output is calculated using this formula:
890      *
891      *     sqr_sum[a, b, c, d] = sum(
892      *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
893      *     output = input / pow((bias + alpha * sqr_sum), beta)
894      *
895      * For input tensor with rank less than 4, independently normalizes each
896      * 1-D slice along specified dimension.
897      *
898      * Supported tensor {@link OperandType}:
899      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
900      * * {@link OperandType::TENSOR_FLOAT32}
901      *
902      * Supported tensor rank: up to 4
903      * Tensors with rank less than 4 are only supported since HAL version 1.2.
904      *
905      * Inputs:
906      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
907      *      the input.
908      * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
909      *      the normalization window.
910      * * 2: A scalar, specifying the bias, must not be zero.
911      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias
912      *      value must be of {@link OperandType::FLOAT16}.
913      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
914      *      value must be of {@link OperandType::FLOAT32}.
915      * * 3: A scalar, specifying the scale factor, alpha.
916      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
917      *      alpha value must be of {@link OperandType::FLOAT16}.
918      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
919      *      alpha value must be of {@link OperandType::FLOAT32}.
920      * * 4: A scalar, specifying the exponent, beta.
921      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
922      *      value must be of {@link OperandType::FLOAT16}.
923      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
924      *      value must be of {@link OperandType::FLOAT32}.
925      * * 5: An optional {@link OperandType::INT32} scalar, default to -1,
926      *      specifying the dimension normalization would be performed on.
927      *      Negative index is used to specify axis from the end (e.g. -1 for
928      *      the last axis). Must be in the range [-n, n).
929      *      Available since HAL version 1.2.
930      *
931      * Outputs:
932      * * 0: The output tensor of same shape as input0.
933      */
934     LOCAL_RESPONSE_NORMALIZATION = 13,
935 
936     /**
937      * Computes sigmoid activation on the input tensor element-wise.
938      *
939      * The output is calculated using this formula:
940      *
941      *     output = 1 / (1 + exp(-input))
942      *
943      * Supported tensor {@link OperandType}:
944      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
945      * * {@link OperandType::TENSOR_FLOAT32}
946      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
947      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
948      *
949      * Supported tensor rank: up to 4.
950      *
951      * Inputs:
952      * * 0: A tensor, specifying the input.
953      *      Since HAL version 1.2, this tensor may be zero-sized.
954      *
955      * Outputs:
956      * * 0: The output tensor of same shape as input0.
957      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
958      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
959      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
960      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
961      */
962     LOGISTIC = 14,
963 
964     /**
965      * Projects an input to a bit vector via locality senstive hashing.
966      *
967      * Supported input tensor {@link OperandType}:
968      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
969      * * {@link OperandType::TENSOR_FLOAT32}
970      * * {@link OperandType::TENSOR_INT32}
971      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
972      *
973      * Supported input tensor rank: from 1
974      *
975      * Inputs:
976      * * 0: Hash functions. Dim.size == 2, DataType: Float.
977      *      Tensor[0].Dim[0]: Number of hash functions.
978      *      Tensor[0].Dim[1]: Number of projected output bits generated by each
979      *      hash function.
980      *      If the projection type is Sparse:
981      *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
982      *
983      * * 1: Input. Dim.size >= 1, no restriction on DataType.
984      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
985      *      If not set, each input element is considered to have the same weight
986      *      of 1.0.
987      *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
988      * * 3: Type:
989      *        Sparse:
990      *          Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
991      *          Computed bit vector is considered to be sparse.
992      *          Each output element is an int32 made up of multiple bits
993      *          computed from hash functions.
994      *
995      *          NOTE: To avoid collisions across hash functions, an offset value
996      *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
997      *          where k is the index of the hash function.
998      *
999      *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1000      *          Legacy behavior that does not include the offset value.
1001      *
1002      *        Dense:
1003      *          Value LSHProjectionType_DENSE(=2).
1004      *          Computed bit vector is considered to be dense. Each output
1005      *          element represents a bit and can take the value of either
1006      *          0 or 1.
1007      *
1008      * Outputs:
1009      * * 0: If the projection type is Sparse:
1010      *      Output.Dim == { Tensor[0].Dim[0] }
1011      *      A tensor of int32 that represents hash signatures.
1012      *
1013      *      If the projection type is Dense:
1014      *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1015      *      A flattened tensor that represents projected bit vectors.
1016      * The offset value for sparse projections was added in HAL version 1.2.
1017      */
1018     LSH_PROJECTION = 15,
1019 
1020     /**
1021      * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1022      *
1023      * The LSTM operation is described by the following equations.
1024      *
1025      * \f{eqnarray*}{
1026      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1027      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1028      * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1029      *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1030      * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1031      *      & & \\
1032      *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1033      *      & if\ there\ is\ a\ projection; \\
1034      * h_t =& & \\
1035      *      & o_t \odot g(C_t) & otherwise. \\
1036      * \f}
1037      * Where:
1038      * * \f$x_t\f$ is the input,
1039      * * \f$i_t\f$ is the input gate,
1040      * * \f$f_t\f$ is the forget gate,
1041      * * \f$C_t\f$ is the cell state,
1042      * * \f$o_t\f$ is the output,
1043      * * \f$h_t\f$ is the output state,
1044      * * \f$\sigma\f$ is the logistic sigmoid function,
1045      * * \f$g\f$ is the cell input and cell output activation function, usually
1046      *   \f$tahn\f$,
1047      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1048      * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
1049      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1050      * * \f$b_i\f$ is the input gate bias,
1051      * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1052      * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1053      * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1054      * * \f$b_f\f$ is the forget gate bias,
1055      * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1056      * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1057      * * \f$b_c\f$ is the cell bias,
1058      * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1059      * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1060      * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1061      * * \f$b_o\f$ is the output gate bias,
1062      * * \f$W_{proj}\f$ is the projection weight matrix,
1063      * * \f$b_{proj}\f$ is the projection bias,
1064      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1065      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1066      * * \f$\odot\f$ is the
1067      *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1068      *   Hadamard product</a> that takes two matrices and produces another
1069      *   matrix, each element of which is the product of the corresponding
1070      *   elements of the input matrices.
1071      *
1072      * Since HAL version 1.2 LSTM supports layer normalization.
1073      * In case layer normalization is used, the inputs to internal activation
1074      * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1075      * following an approach from section 3.1 from
1076      * https://arxiv.org/pdf/1607.06450.pdf
1077      *
1078      * The operation has the following independently optional inputs:
1079      * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1080      *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1081      *   have values or neither of them have values (i.e., all set to null). If
1082      *   they have values, the peephole optimization is used.
1083      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1084      *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1085      *   or none of them have values. If they have no values, coupling of input
1086      *   and forget gates (CIFG) is used, in which case the input gate
1087      *   (\f$i_t\f$) is calculated using the following equation instead.
1088      *   \f{eqnarray*}{
1089      *   i_t = 1 - f_t
1090      *   \f}
1091      *   In case peephole optimization is used and CIFG is not used
1092      *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1093      *   cell-to-input weights must have no value.
1094      * * The projection weights (\f$W_{proj}\f$) is required only for the
1095      *   recurrent projection layer, and should otherwise have no value.
1096      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1097      *   value if the recurrent projection layer exists, and should otherwise
1098      *   have no value.
1099      * * (HAL version 1.2 or later) The four layer normalization weights either all have
1100      *   values or none of them have values. Additionally, if CIFG is used,
1101      *   input layer normalization weights tensor is omitted and the other layer
1102      *   normalization weights either all have values or none of them have
1103      *   values. Layer normalization is used when the values of all the layer
1104      *   normalization weights are present.
1105      *
1106      * References:
1107      *
1108      * The default non-peephole non-CIFG implementation is based on:
1109      * http://www.bioinf.jku.at/publications/older/2604.pdf
1110      * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1111      * Computation, 9(8):1735-1780, 1997.
1112      *
1113      * The peephole implementation and projection layer is based on:
1114      * https://research.google.com/pubs/archive/43905.pdf
1115      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1116      * recurrent neural network architectures for large scale acoustic
1117      * modeling." INTERSPEECH, 2014.
1118      * (However, the concept of peephole optimization was introduced in work
1119      * prior to this paper.)
1120      *
1121      * The coupling of input and forget gate (CIFG) is based on:
1122      * http://arxiv.org/pdf/1503.04069.pdf
1123      * Greff et al. "LSTM: A Search Space Odyssey"
1124      *
1125      * The layer normalization is based on:
1126      * https://arxiv.org/pdf/1607.06450.pdf
1127      * Jimmy Ba et al. "Layer Normalization"
1128      *
1129      * Supported tensor {@link OperandType}:
1130      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1131      * * {@link OperandType::TENSOR_FLOAT32}
1132      *
1133      * All input and output tensors must be of the same type.
1134      *
1135      * Inputs:
1136      * * 0: The input (\f$x_t\f$).
1137      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1138      *      corresponds to the batching dimension, and “input_size” is the size
1139      *      of the input.
1140      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1141      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1142      *      corresponds to the number of cell units.
1143      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1144      *      A 2-D tensor of shape [num_units, input_size].
1145      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1146      *      A 2-D tensor of shape [num_units, input_size].
1147      * * 4: The input-to-output weights (\f$W_{xo}\f$).
1148      *      A 2-D tensor of shape [num_units, input_size].
1149      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1150      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1151      *      corresponds to either the number of cell units (i.e., “num_units”),
1152      *      or the second dimension of the “projection_weights”, if defined.
1153      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1154      *      A 2-D tensor of shape [num_units, output_size].
1155      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1156      *      A 2-D tensor of shape [num_units, output_size].
1157      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1158      *      A 2-D tensor of shape [num_units, output_size].
1159      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1160      *      A 1-D tensor of shape [num_units].
1161      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1162      *      A 1-D tensor of shape [num_units].
1163      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1164      *      A 1-D tensor of shape [num_units].
1165      * * 12:The input gate bias (\f$b_i\f$). Optional.
1166      *      A 1-D tensor of shape [num_units].
1167      * * 13:The forget gate bias (\f$b_f\f$).
1168      *      A 1-D tensor of shape [num_units].
1169      * * 14:The cell bias (\f$b_c\f$).
1170      *      A 1-D tensor of shape [num_units].
1171      * * 15:The output gate bias (\f$b_o\f$).
1172      *      A 1-D tensor of shape [num_units].
1173      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1174      *      A 2-D tensor of shape [output_size, num_units].
1175      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1176      *      A 1-D tensor of shape [output_size].
1177      * * 18:The output state (in) (\f$h_{t-1}\f$).
1178      *      A 2-D tensor of shape [batch_size, output_size].
1179      * * 19:The cell state (in) (\f$C_{t-1}\f$).
1180      *      A 2-D tensor of shape [batch_size, num_units].
1181      * * 20:The activation function (\f$g\f$).
1182      *      A value indicating the activation function:
1183      *      <ul>
1184      *      <li>0: None;
1185      *      <li>1: Relu;
1186      *      <li>3: Relu6;
1187      *      <li>4: Tanh;
1188      *      <li>6: Sigmoid.
1189      *      </ul>
1190      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1191      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1192      *      then clipping is disabled.
1193      *      Until HAL version 1.2 this scalar must be of type {@link
1194      *      OperandType::FLOAT32}. Since HAL version 1.2, if all the input
1195      *      tensors have type {@link OperandType::TENSOR_FLOAT32}, this
1196      *      scalar must be of the type {@link OperandType::FLOAT32},
1197      *      otherwise if all the input tensors have the type {@link
1198      *      OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
1199      *      OperandType::FLOAT16}.
1200      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1201      *      projection layer, such that values are bound within
1202      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1203      *      Until HAL version 1.2 this scalar must be of type {@link
1204      *      OperandType::FLOAT32}. Since HAL version 1.2, if all the input
1205      *      tensors have type {@link OperandType::TENSOR_FLOAT32}, this
1206      *      scalar must be of the type {@link OperandType::FLOAT32},
1207      *      otherwise if all the input tensors have the type {@link
1208      *      OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
1209      *      OperandType::FLOAT16}.
1210      * Since HAL version 1.2 there are additional inputs to this op:
1211      * * 23:The input layer normalization weights.
1212      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1213      *      to activation at input gate.
1214      * * 24:The forget layer normalization weights.
1215      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1216      *      to activation at forget gate.
1217      * * 25:The cell layer normalization weights.
1218      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1219      *      to activation at cell gate.
1220      * * 26:The output layer normalization weights.
1221      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1222      *      to activation at output gate.
1223      *
1224      * Outputs:
1225      * * 0: The scratch buffer.
1226      *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1227      *      [batch_size, num_units * 4] without CIFG.
1228      * * 1: The output state (out) (\f$h_t\f$).
1229      *      A 2-D tensor of shape [batch_size, output_size].
1230      * * 2: The cell state (out) (\f$C_t\f$).
1231      *      A 2-D tensor of shape [batch_size, num_units].
1232      * * 3: The output (\f$o_t\f$).
1233      *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1234      *      the same as the current “output state (out)” value.
1235      */
1236     LSTM = 16,
1237 
1238     /**
1239      * Performs an 2-D max pooling operation.
1240      *
1241      * The output dimensions are functions of the filter dimensions, stride, and
1242      * padding.
1243      *
1244      * The values in the output tensor are computed as:
1245      *
1246      *     output[b, i, j, channel] =
1247      *         max_{di, dj} (
1248      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1249      *         )
1250      *
1251      * Supported tensor {@link OperandType}:
1252      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1253      * * {@link OperandType::TENSOR_FLOAT32}
1254      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1255      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1256      *
1257      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1258      * With the default data layout NHWC, the data is stored in the order of:
1259      * [batch, height, width, channels]. Alternatively, the data layout could
1260      * be NCHW, the data storage order of: [batch, channels, height, width].
1261      * NCHW is supported since HAL version 1.2.
1262      *
1263      * Both explicit padding and implicit padding are supported.
1264      *
1265      * Inputs (explicit padding):
1266      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1267      *      the input.
1268      *      Since HAL version 1.2, zero batches is supported for this tensor.
1269      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
1270      *      the left, in the ‘width’ dimension.
1271      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
1272      *      the right, in the ‘width’ dimension.
1273      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
1274      *      the top, in the ‘height’ dimension.
1275      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
1276      *      the bottom, in the ‘height’ dimension.
1277      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
1278      *      walking through input in the ‘width’ dimension.
1279      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
1280      *      walking through input in the ‘height’ dimension.
1281      * * 7: An {@link OperandType::INT32} scalar, specifying the filter
1282      *      width.
1283      * * 8: An {@link OperandType::INT32} scalar, specifying the filter
1284      *      height.
1285      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
1286      *      {@link FusedActivationFunc} values. Specifies the activation to
1287      *      invoke on the result.
1288      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
1289      *       Set to true to specify NCHW data layout for input0 and output0.
1290      *       Available since HAL version 1.2.
1291      *
1292      * Inputs (implicit padding):
1293      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1294      *      the input.
1295      *      Since HAL version 1.2, zero batches is supported for this tensor.
1296      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
1297      *      padding scheme, has to be one of the
1298      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
1299      * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
1300      *      walking through input in the ‘width’ dimension.
1301      * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
1302      *      walking through input in the ‘height’ dimension.
1303      * * 4: An {@link OperandType::INT32} scalar, specifying the filter
1304      *      width.
1305      * * 5: An {@link OperandType::INT32} scalar, specifying the filter
1306      *      height.
1307      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
1308      *      {@link FusedActivationFunc} values. Specifies the activation to
1309      *      invoke on the result.
1310      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
1311      *      Set to true to specify NCHW data layout for input0 and output0.
1312      *      Available since HAL version 1.2.
1313      *
1314      * Outputs:
1315      * * 0: The output 4-D tensor, of shape
1316      *      [batches, out_height, out_width, depth].
1317      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1318      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1319      *      the scale and zeroPoint must be the same as input0.
1320      */
1321     MAX_POOL_2D = 17,
1322 
1323     /**
1324      * Multiplies two tensors, element-wise.
1325      *
1326      * Takes two input tensors of identical {@link OperandType} and compatible
1327      * dimensions. The output is the product of both input tensors, optionally
1328      * modified by an activation function.
1329      *
1330      * Two dimensions are compatible when:
1331      *     1. they are equal, or
1332      *     2. one of them is 1
1333      *
1334      * The size of the resulting output is the maximum size along each dimension
1335      * of the input operands. It starts with the trailing dimensions, and works
1336      * its way forward.
1337      *
1338      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
1339      * dimension is only compatible with 0 or 1. The size of the output
1340      * dimension is zero if either of corresponding input dimension is zero.
1341      *
1342      * Supported tensor {@link OperandType}:
1343      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1344      * * {@link OperandType::TENSOR_FLOAT32}
1345      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1346      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1347      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
1348      *
1349      * Supported tensor rank: up to 4
1350      *
1351      * Inputs:
1352      * * 0: A tensor.
1353      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
1354      *      as input0.
1355      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
1356      *      {@link FusedActivationFunc} values. Specifies the activation to
1357      *      invoke on the result.
1358      *      For a {@link OperandType::TENSOR_INT32} tensor,
1359      *      the {@link FusedActivationFunc} must be "NONE".
1360      *
1361      * Outputs:
1362      * * 0: The product, a tensor of the same {@link OperandType} as input0.
1363      *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
1364      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
1365      *      the following condition must be satisfied:
1366      *      output_scale > input1_scale * input2_scale.
1367      */
1368     MUL = 18,
1369 
1370     /**
1371      * Computes rectified linear activation on the input tensor element-wise.
1372      *
1373      * The output is calculated using this formula:
1374      *
1375      *     output = max(0, input)
1376      *
1377      * Supported tensor {@link OperandType}:
1378      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1379      * * {@link OperandType::TENSOR_FLOAT32}
1380      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1381      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1382      *
1383      * Supported tensor rank: up to 4.
1384      *
1385      * Inputs:
1386      * * 0: A tensor, specifying the input.
1387      *      Since HAL version 1.2, this tensor may be zero-sized.
1388      *
1389      * Outputs:
1390      * * 0: The output tensor of same shape as input0.
1391      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1392      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1393      *      the scale and zeroPoint must be the same as input0.
1394      */
1395     RELU = 19,
1396 
1397     /**
1398      * Computes rectified linear 1 activation on the input tensor element-wise.
1399      *
1400      * The output is calculated using this formula:
1401      *
1402      *     output = min(1.f, max(-1.f, input))
1403      *
1404      * Supported tensor {@link OperandType}:
1405      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1406      * * {@link OperandType::TENSOR_FLOAT32}
1407      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1408      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1409      *
1410      * Supported tensor rank: up to 4.
1411      *
1412      * Inputs:
1413      * * 0: A tensor, specifying the input.
1414      *      Since HAL version 1.2, this tensor may be zero-sized.
1415      *
1416      * Outputs:
1417      * * 0: The output tensor of the same shape as input0.
1418      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1419      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1420      *      the scale and zeroPoint must be the same as input0.
1421      */
1422     RELU1 = 20,
1423 
1424     /**
1425      * Computes rectified linear 6 activation on the input tensor element-wise.
1426      *
1427      * The output is calculated using this formula:
1428      *
1429      *     output = min(6, max(0, input))
1430      *
1431      * Supported tensor {@link OperandType}:
1432      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1433      * * {@link OperandType::TENSOR_FLOAT32}
1434      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1435      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1436      *
1437      * Supported tensor rank: up to 4.
1438      *
1439      * Inputs:
1440      * * 0: A tensor, specifying the input.
1441      *      Since HAL version 1.2, this tensor may be zero-sized.
1442      *
1443      * Outputs:
1444      * * 0: The output tensor of same shape as input0.
1445      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1446      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1447      *      the scale and zeroPoint must be the same as input0.
1448      */
1449     RELU6 = 21,
1450 
1451     /**
1452      * Reshapes a tensor.
1453      *
1454      * Given tensor, this operation returns a tensor that has the same values as
1455      * tensor, but with a newly specified shape.
1456      *
1457      * Supported tensor {@link OperandType}:
1458      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1459      * * {@link OperandType::TENSOR_FLOAT32}
1460      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1461      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1462      * * {@link OperandType::TENSOR_INT32} (since NNAPI feature level 6)
1463      *
1464      * Supported tensor rank: up to 4.
1465      *
1466      * Inputs:
1467      * * 0: A tensor, specifying the tensor to be reshaped.
1468      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
1469      *      shape of the output tensor. The number of elements implied by shape
1470      *      must be the same as the number of elements in the input tensor.
1471      *
1472      *      If one component of shape is the special value -1, the size of that
1473      *      dimension is computed so that the total size remains constant. In
1474      *      particular, a shape of [-1] flattens into 1-D. At most one component
1475      *      of shape can be -1.
1476      *
1477      * Outputs:
1478      * * 0: The output tensor, of shape specified by the input shape.
1479      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1480      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1481      *      the scale and zeroPoint must be the same as input0.
1482      */
1483     RESHAPE = 22,
1484 
1485     /**
1486      * Resizes images to given size using the bilinear interpretation.
1487      *
1488      * Resized images must be distorted if their output aspect ratio is not the
1489      * same as input aspect ratio. The corner pixels of output may not be the
1490      * same as corner pixels of input.
1491      *
1492      * Supported tensor {@link OperandType}:
1493      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1494      * * {@link OperandType::TENSOR_FLOAT32}
1495      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
1496      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1497      *
1498      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1499      * With the default data layout NHWC, the data is stored in the order of:
1500      * [batch, height, width, channels]. Alternatively, the data layout could
1501      * be NCHW, the data storage order of: [batch, channels, height, width].
1502      * NCHW is supported since HAL version 1.2.
1503      *
1504      * Both resizing by shape and resizing by scale are supported.
1505      *
1506      * Inputs (resizing by shape):
1507      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1508      *      the input.
1509      *      Since HAL version 1.2, zero batches is supported for this tensor.
1510      * * 1: An {@link OperandType::INT32} scalar, specifying the output
1511      *      width of the output tensor.
1512      * * 2: An {@link OperandType::INT32} scalar, specifying the output
1513      *      height of the output tensor.
1514      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
1515      *      Set to true to specify NCHW data layout for input0 and output0.
1516      *      Available since HAL version 1.2.
1517      * * 4: Align corners. An optional {@link OperandType::BOOL}
1518      *      scalar, default to false.  If True, the centers of the 4 corner
1519      *      pixels of the input and output tensors are aligned, preserving the
1520      *      values at the corner pixels.
1521      *      Available since HAL version 1.3.
1522      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
1523      *      scalar, default to false. If True, the pixel centers are assumed to
1524      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1525      *      TF 2.0. If this parameter is True, then align_corners parameter
1526      *      must be False.
1527      *      Available since HAL version 1.3.
1528      *
1529      * Inputs (resizing by scale, since HAL version 1.2):
1530      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1531      *      the input. Zero batches is supported for this tensor.
1532      * * 1: A scalar, specifying width_scale, the scaling factor of the width
1533      *      dimension from the input tensor to the output tensor. The output
1534      *      width is calculated as new_width = floor(width * width_scale).
1535      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
1536      *      of {@link OperandType::TENSOR_FLOAT16} and of
1537      *      {@link OperandType::FLOAT32} otherwise.
1538      * * 2: A scalar, specifying height_scale, the scaling factor of the height
1539      *      dimension from the input tensor to the output tensor. The output
1540      *      height is calculated as new_height = floor(height * height_scale).
1541      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
1542      *      of {@link OperandType::TENSOR_FLOAT16} and of
1543      *      {@link OperandType::FLOAT32} otherwise.
1544      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
1545      *      Set to true to specify NCHW data layout for input0 and output0.
1546      * * 4: Align corners. An optional {@link OperandType::BOOL}
1547      *      scalar, default to false.  If True, the centers of the 4 corner
1548      *      pixels of the input and output tensors are aligned, preserving the
1549      *      values at the corner pixels.
1550      *      Available since HAL version 1.3.
1551      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
1552      *      scalar, default to false. If True, the pixel centers are assumed to
1553      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1554      *      TF 2.0. If this parameter is True, then align_corners parameter
1555      *      must be False.
1556      *      Available since HAL version 1.3.
1557      *
1558      * Outputs:
1559      * * 0: The output 4-D tensor, of shape
1560      *      [batches, new_height, new_width, depth].
1561      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1562      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1563      *      the scale and zeroPoint must be the same as input0.
1564      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
1565      *      the scale and zeroPoint must be the same as input0.
1566      */
1567     RESIZE_BILINEAR = 23,
1568 
1569     /**
1570      * A basic recurrent neural network layer.
1571      *
1572      * This layer implements the operation:
1573      * outputs = state = activation(inputs * input_weights +
1574      *                              state * recurrent_weights + bias)
1575      *
1576      * Where:
1577      * * “input_weights” is a weight matrix that multiplies the inputs;
1578      * * “recurrent_weights” is a weight matrix that multiplies the current
1579      *    “state” which itself is the output from the previous time step
1580      *    computation;
1581      * * “bias” is a bias vector (added to each output vector in the batch);
1582      * * “activation” is the function passed as the “fused_activation_function”
1583      *   argument (if not “NONE”).
1584      *
1585      * Supported tensor {@link OperandType}:
1586      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1587      * * {@link OperandType::TENSOR_FLOAT32}
1588      *
1589      * The input tensors must all be the same type.
1590      *
1591      * Inputs:
1592      * * 0: input.
1593      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1594      *      corresponds to the batching dimension, and “input_size” is the size
1595      *      of the input.
1596      * * 1: weights.
1597      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1598      *      corresponds to the number of units.
1599      * * 2: recurrent_weights.
1600      *      A 2-D tensor of shape [num_units, num_units], with columns
1601      *      corresponding to the weights from each unit.
1602      * * 3: bias.
1603      *      A 1-D tensor of shape [num_units].
1604      * * 4: hidden state (in).
1605      *      A 2-D tensor of shape [batch_size, num_units].
1606      * * 5: fused_activation_function.
1607      *      An optional {@link FusedActivationFunc} value indicating the
1608      *      activation function. If “NONE” is specified then it results in a
1609      *      linear activation.
1610      *
1611      * Outputs:
1612      * * 0: hidden state (out).
1613      *      A 2-D tensor of shape [batch_size, num_units].
1614      *
1615      * * 1: output.
1616      *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1617      *      the same as the current state value.
1618      */
1619     RNN = 24,
1620 
1621     /**
1622      * Computes the softmax activation on the input tensor element-wise, per
1623      * batch, by normalizing the input vector so the maximum coefficient is
1624      * zero.
1625      *
1626      * The output is calculated using this formula:
1627      *
1628      *     output[batch, i] =
1629      *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1630      *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1631      *
1632      * For input tensor with rank other than 2, the activation will be applied
1633      * independently on each 1-D slice along specified dimension.
1634      *
1635      * Supported tensor {@link OperandType}:
1636      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1637      * * {@link OperandType::TENSOR_FLOAT32}
1638      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1639      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1640      *
1641      * Supported tensor rank: up to 4.
1642      * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
1643      *
1644      * Inputs:
1645      * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
1646      *      Since HAL version 1.2, this tensor may be zero-sized.
1647      * * 1: A scalar, specifying the positive scaling factor for the exponent,
1648      *      beta. If input0 is of {@link OperandType::TENSOR_FLOAT32},
1649      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or
1650      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1651      *      must be of {@link OperandType::FLOAT32}.
1652      *      If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
1653      *      scalar must be of {@link OperandType::FLOAT16}.
1654      * * 2: An optional {@link OperandType::INT32} scalar, default to -1,
1655      *      specifying the dimension the activation would be performed on.
1656      *      Negative index is used to specify axis from the end (e.g. -1 for
1657      *      the last axis). Must be in the range [-n, n).
1658      *      Available since HAL version 1.2.
1659      *
1660      * Outputs:
1661      * * 0: The output tensor of same shape as input0.
1662      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
1663      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1664      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
1665      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1666      */
1667     SOFTMAX = 25,
1668 
1669     /**
1670      * Rearranges blocks of spatial data, into depth.
1671      *
1672      * More specifically, this op outputs a copy of the input tensor where
1673      * values from the height and width dimensions are moved to the depth
1674      * dimension. The value block_size indicates the input block size and how
1675      * the data is moved.
1676      *
1677      * Chunks of data of size block_size * block_size from depth are rearranged
1678      * into non-overlapping blocks of size block_size x block_size.
1679      *
1680      * The depth of the output tensor is input_depth * block_size * block_size.
1681      * The input tensor's height and width must be divisible by block_size.
1682      *
1683      * Supported tensor {@link OperandType}:
1684      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1685      * * {@link OperandType::TENSOR_FLOAT32}
1686      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1687      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1688      *
1689      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1690      * With the default data layout NHWC, the data is stored in the order of:
1691      * [batch, height, width, channels]. Alternatively, the data layout could
1692      * be NCHW, the data storage order of: [batch, channels, height, width].
1693      * NCHW is supported since HAL version 1.2.
1694      *
1695      * Inputs:
1696      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1697      *      specifying the input.
1698      * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
1699      *      block_size must be >=1 and block_size must be a divisor of both the
1700      *      input height and width.
1701      * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
1702      *      Set to true to specify NCHW data layout for input0 and output0.
1703      *      Available since HAL version 1.2.
1704      *
1705      * Outputs:
1706      * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1707      *      width/block_size, depth_in*block_size*block_size].
1708      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1709      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1710      *      the scale and zeroPoint must be the same as input0.
1711      */
1712     SPACE_TO_DEPTH = 26,
1713 
1714     /**
1715      * SVDF op is a kind of stateful layer derived from the notion that a
1716      * densely connected layer that's processing a sequence of input frames can
1717      * be approximated by using a singular value decomposition of each of its
1718      * nodes. The implementation is based on:
1719      *
1720      * https://research.google.com/pubs/archive/43813.pdf
1721      *
1722      * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1723      * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1724      * INTERSPEECH, 2015.
1725      *
1726      * It processes the incoming input using a 2-stage filtering mechanism:
1727      * * stage 1 performs filtering on the "features" dimension, whose outputs
1728      *   get pushed into a memory of fixed-size memory_size.
1729      * * stage 2 performs filtering on the "time" dimension of the memory_size
1730      *   memoized outputs of stage 1.
1731      *
1732      * Specifically, for rank 1, this layer implements the operation:
1733      *
1734      *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1735      *                          "PADDING_VALID"));
1736      *     outputs = activation(memory * weights_time + bias);
1737      *
1738      * Where:
1739      * * “weights_feature” is a weights matrix that processes the inputs (by
1740      *   convolving the input with every “feature filter”), and whose outputs
1741      *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
1742      *   entry gets dropped);
1743      * * “weights_time” is a weights matrix that processes the “memory” (by a
1744      *   batched matrix multiplication on the num_units);
1745      * * “bias” is an optional bias vector (added to each output vector in the
1746      *   batch); and
1747      * * “activation” is the function passed as the “fused_activation_function”
1748      *   argument (if not “NONE”).
1749      *
1750      * Each rank adds a dimension to the weights matrices by means of stacking
1751      * the filters.
1752      *
1753      * Supported tensor {@link OperandType}:
1754      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1755      * * {@link OperandType::TENSOR_FLOAT32}
1756      *
1757      * All input tensors must be the same type.
1758      *
1759      * Inputs:
1760      * * 0: input.
1761      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1762      *      corresponds to the batching dimension, and “input_size” is the size
1763      *      of the input.
1764      * * 1: weights_feature.
1765      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1766      *      corresponds to the number of units.
1767      * * 2: weights_time.
1768      *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
1769      *      corresponds to the fixed-size of the memory.
1770      * * 3: bias.
1771      *      An optional 1-D tensor of shape [num_units].
1772      * * 4: state (in).
1773      *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
1774      * * 5: rank.
1775      *      The rank of the SVD approximation.
1776      * * 6: fused_activation_function.
1777      *      An optional {@link FusedActivationFunc} value indicating the
1778      *      activation function. If “NONE” is specified then it results in a
1779      *      linear activation.
1780      *
1781      * Outputs:
1782      * * 0: state (out).
1783      *      A 2-D tensor of the same {@link OperandType} as the inputs, with shape
1784      *      [batch_size, (memory_size - 1) * num_units * rank].
1785      * * 1: output.
1786      *      A 2-D tensor of the same {@link OperandType} as the inputs, with shape
1787      *      [batch_size, num_units].
1788      */
1789     SVDF = 27,
1790 
1791     /**
1792      * Computes hyperbolic tangent of input tensor element-wise.
1793      *
1794      * The output is calculated using this formula:
1795      *
1796      *     output = tanh(input)
1797      *
1798      * Supported tensor {@link OperandType}:
1799      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1800      * * {@link OperandType::TENSOR_FLOAT32}
1801      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
1802      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1803      *
1804      * Supported tensor rank: up to 4.
1805      *
1806      * Inputs:
1807      * * 0: A tensor, specifying the input.
1808      *      Since HAL version 1.2, this tensor may be zero-sized.
1809      *
1810      * Outputs:
1811      * * 0: The output tensor of same shape as input0.
1812      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
1813      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1814      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
1815      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1816      */
1817     TANH = 28,
1818 
1819     /**
1820      * BatchToSpace for N-dimensional tensors.
1821      *
1822      * This operation reshapes the batch dimension (dimension 0) into M + 1
1823      * dimensions of shape block_shape + [batch], interleaves these blocks back
1824      * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
1825      * result with the same rank as the input.
1826      *
1827      * This is the reverse of SpaceToBatch.
1828      *
1829      * Supported tensor {@link OperandType}:
1830      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1831      * * {@link OperandType::TENSOR_FLOAT32}
1832      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1833      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1834      *
1835      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1836      * With the default data layout NHWC, the data is stored in the order of:
1837      * [batch, height, width, channels]. Alternatively, the data layout could
1838      * be NCHW, the data storage order of: [batch, channels, height, width].
1839      * NCHW is supported since HAL version 1.2.
1840      *
1841      * Inputs:
1842      * * 0: An n-D tensor, specifying the tensor to be reshaped
1843      * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
1844      *      sizes for each spatial dimension of the input tensor. All values
1845      *      must be >= 1.
1846      * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
1847      *      Set to true to specify NCHW data layout for input0 and output0.
1848      *      Available since API level 29.
1849      *
1850      * Outputs:
1851      * * 0: A tensor of the same {@link OperandType} as input0.
1852      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1853      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1854      *      the scale and zeroPoint must be the same as input0.
1855      */
1856     BATCH_TO_SPACE_ND = 29,
1857 
1858     /**
1859      * Element-wise division of two tensors.
1860      *
1861      * Takes two input tensors of identical {@link OperandType} and compatible
1862      * dimensions. The output is the result of dividing the first input tensor
1863      * by the second, optionally modified by an activation function.
1864      *
1865      * For inputs of {@link OperandType::TENSOR_INT32}, performs
1866      * "floor division" ("//" in Python). For example,
1867      *     5 // 2 = 2
1868      *    -5 // 2 = -3
1869      *
1870      * Two dimensions are compatible when:
1871      *     1. they are equal, or
1872      *     2. one of them is 1
1873      *
1874      * The size of the output is the maximum size along each dimension of the
1875      * input operands. It starts with the trailing dimensions, and works its way
1876      * forward.
1877      *
1878      * Example:
1879      *     input1.dimension =    {4, 1, 2}
1880      *     input2.dimension = {5, 4, 3, 1}
1881      *     output.dimension = {5, 4, 3, 2}
1882      *
1883      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
1884      * dimension is only compatible with 0 or 1. The size of the output
1885      * dimension is zero if either of corresponding input dimension is zero.
1886      *
1887      * Supported tensor {@link OperandType}:
1888      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1889      * * {@link OperandType::TENSOR_FLOAT32}
1890      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
1891      *
1892      * Supported tensor rank: up to 4
1893      *
1894      * Inputs:
1895      * * 0: An n-D tensor, specifying the first input.
1896      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
1897      *      as input0.
1898      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
1899      *      {@link FusedActivationFunc} values. Specifies the activation to
1900      *      invoke on the result.
1901      *      For a {@link OperandType::TENSOR_INT32} tensor,
1902      *      the {@link FusedActivationFunc} must be "NONE".
1903      *
1904      * Outputs:
1905      * * 0: A tensor of the same {@link OperandType} as input0.
1906      */
1907     DIV = 30,
1908 
1909     /**
1910      * Computes the mean of elements across dimensions of a tensor.
1911      *
1912      * Reduces the input tensor along the given dimensions to reduce. Unless
1913      * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
1914      * in axis. If keep_dims is true, the reduced dimensions are retained with
1915      * length 1.
1916      *
1917      * Supported tensor {@link OperandType}:
1918      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1919      * * {@link OperandType::TENSOR_FLOAT32}
1920      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1921      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1922      *
1923      * Supported tensor rank: up to 4
1924      *
1925      * Inputs:
1926      * * 0: A tensor, specifying the input.
1927      * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
1928      *      to reduce. Must be in the range
1929      *      [-rank(input_tensor), rank(input_tensor)).
1930      *
1931      *      NOTE: When the operation was introduced, the documentation
1932      *      incorrectly stated that if dimensions were empty, the operation
1933      *      would reduce across all dimensions. This behavior was never
1934      *      implemented.
1935      *
1936      * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
1937      *      retains reduced dimensions with length 1.
1938      *
1939      * Outputs:
1940      * * 0: A tensor of the same {@link OperandType} as input0.
1941      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1942      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1943      *      the scale and zeroPoint must be the same as input0.
1944      *      If all dimensions are reduced and keep_dims is false, the output
1945      *      shape is [1].
1946      */
1947     MEAN = 31,
1948 
1949     /**
1950      * Pads a tensor.
1951      *
1952      * This operation pads a tensor according to the specified paddings.
1953      *
1954      * Supported tensor {@link OperandType}:
1955      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1956      * * {@link OperandType::TENSOR_FLOAT32}
1957      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1958      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1959      *   (full support since HAL version 1.2, see the output section)
1960      *
1961      * Supported tensor rank: up to 4
1962      *
1963      * Inputs:
1964      * * 0: An n-D tensor, specifying the tensor to be padded.
1965      * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
1966      *      for each spatial dimension of the input tensor. The shape of the
1967      *      tensor must be {rank(input0), 2}.
1968      *      padding[i, 0] specifies the number of elements to be padded in the
1969      *      front of dimension i.
1970      *      padding[i, 1] specifies the number of elements to be padded after the
1971      *      end of dimension i.
1972      *
1973      * Outputs:
1974      * * 0: A tensor of the same {@link OperandType} as input0. The
1975      *      output tensor has the same rank as input0, and each
1976      *      dimension of the output tensor has the same size as the
1977      *      corresponding dimension of the input tensor plus the size
1978      *      of the padding:
1979      *          output0.dimension[i] =
1980      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
1981      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1982      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1983      *      the scale and zeroPoint must be the same as input0.
1984      *
1985      *      NOTE: Before HAL version 1.2, the pad value for
1986      *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
1987      *      Since HAL version 1.2, the pad value is always the logical zero.
1988      */
1989     PAD = 32,
1990 
1991     /**
1992      * SpaceToBatch for N-Dimensional tensors.
1993      *
1994      * This operation divides "spatial" dimensions [1, ..., M] of the input into
1995      * a grid of blocks of shape block_shape, and interleaves these blocks with
1996      * the "batch" dimension (0) such that in the output, the spatial dimensions
1997      * [1, ..., M] correspond to the position within the grid, and the batch
1998      * dimension combines both the position within a spatial block and the
1999      * original batch position. Prior to division into blocks, the spatial
2000      * dimensions of the input are optionally zero padded according to paddings.
2001      *
2002      * Supported tensor {@link OperandType}:
2003      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2004      * * {@link OperandType::TENSOR_FLOAT32}
2005      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2006      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2007      *   (full support since HAL version 1.2, see the output section)
2008      *
2009      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2010      * With the default data layout NHWC, the data is stored in the order of:
2011      * [batch, height, width, channels]. Alternatively, the data layout could
2012      * be NCHW, the data storage order of: [batch, channels, height, width].
2013      * NCHW is supported since HAL version 1.2.
2014      *
2015      * Inputs:
2016      * * 0: An n-D tensor, specifying the input.
2017      * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
2018      *      sizes for each spatial dimension of the input tensor. All values
2019      *      must be >= 1.
2020      * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
2021      *      for each spatial dimension of the input tensor. All values must be
2022      *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2023      *      of spatial dimensions.
2024      *      padding[i, 0] specifies the number of element to be padded in the
2025      *      front of dimension i.
2026      *      padding[i, 1] specifies the number of element to be padded after the
2027      *      end of dimension i.
2028      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
2029      *      Set to true to specify NCHW data layout for input0 and output0.
2030      *      Available since HAL version 1.2.
2031      *
2032      * Outputs:
2033      * * 0: A tensor of the same {@link OperandType} as input0.
2034      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2035      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2036      *      the scale and zeroPoint must be the same as input0.
2037      *
2038      *      NOTE: Before HAL version 1.2, the pad value for
2039      *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
2040      *      Since HAL version 1.2, the pad value is always the logical zero.
2041      */
2042     SPACE_TO_BATCH_ND = 33,
2043 
2044     /**
2045      * Removes dimensions of size 1 from the shape of a tensor.
2046      *
2047      * Given a tensor input, this operation returns a tensor of the same
2048      * {@link OperandType} with all dimensions of size 1 removed. If you don't
2049      * want to remove all size 1 dimensions, you can remove specific size 1
2050      * dimensions by specifying the axes (input1).
2051      *
2052      * Supported tensor {@link OperandType}:
2053      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2054      * * {@link OperandType::TENSOR_FLOAT32}
2055      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2056      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2057      *
2058      * Supported tensor rank: up to 4
2059      *
2060      * Inputs:
2061      * * 0: An n-D tensor, the tensor to be squeezed.
2062      * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2063      *      dimensions to squeeze. If specified only squeezes the dimensions
2064      *      listed. Otherwise, squeezes all dimensions. The dimension index
2065      *      starts at 0. An error must be reported if squeezing a dimension that
2066      *      is not 1.
2067      *
2068      * Outputs:
2069      * * 0: A tensor of the same {@link OperandType} as input0. Contains the
2070      *      same data as input, but has one or more dimensions of size 1
2071      *      removed.
2072      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2073      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2074      *      the scale and zeroPoint must be the same as input0.
2075      *      If all input dimensions are equal to 1 and are to be squeezed, the
2076      *      output shape is [1].
2077      */
2078     SQUEEZE = 34,
2079 
2080     /**
2081      * Extracts a strided slice of a tensor.
2082      *
2083      * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2084      * from the given input tensor. Starting at the location specified by begin
2085      * the slice continues by adding stride to the index until all dimensions
2086      * are not less than end. Note that a stride can be negative, which causes a
2087      * reverse slice.
2088      *
2089      * Supported tensor {@link OperandType}:
2090      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2091      * * {@link OperandType::TENSOR_FLOAT32}
2092      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2093      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2094      *
2095      * Supported tensor rank: up to 4
2096      *
2097      * Inputs:
2098      * * 0: An n-D tensor, specifying the tensor to be sliced.
2099      * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2100      *      starts of the dimensions of the input tensor to be sliced. The
2101      *      length must be of rank(input0).
2102      * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2103      *      ends of the dimensions of the input tensor to be sliced. The length
2104      *      must be of rank(input0).
2105      * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2106      *      strides of the dimensions of the input tensor to be sliced. The
2107      *      length must be of rank(input0). The entries must be non-zero.
2108      * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
2109      *      of begin_mask is set, begin[i] is ignored and the fullest possible
2110      *      range in that dimension is used instead.
2111      * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
2112      *      end_mask is set, end[i] is ignored and the fullest possible range in
2113      *      that dimension is used instead.
2114      * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
2115      *      ith bit of shrink_axis_mask is set, the ith dimension specification
2116      *      shrinks the dimensionality by 1, taking on the value at index
2117      *      begin[i]. In this case, the ith specification must define a
2118      *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2119      *
2120      * Outputs:
2121      * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
2122      *      where k is the number of bits set in shrink_axis_mask.
2123      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2124      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2125      *      the scale and zeroPoint must be the same as input0.
2126      *      If shrink_axis_mask is true for all input dimensions, the output
2127      *      shape is [1].
2128      */
2129     STRIDED_SLICE = 35,
2130 
2131     /**
2132      * Element-wise subtraction of two tensors.
2133      *
2134      * Takes two input tensors of identical {@link OperandType} and compatible
2135      * dimensions. The output is the result of subtracting the second input
2136      * tensor from the first one, optionally modified by an activation function.
2137      *
2138      * Two dimensions are compatible when:
2139      *     1. they are equal, or
2140      *     2. one of them is 1
2141      *
2142      * The size of the output is the maximum size along each dimension of the
2143      * input operands. It starts with the trailing dimensions, and works its way
2144      * forward.
2145      *
2146      * Example:
2147      *     input1.dimension =    {4, 1, 2}
2148      *     input2.dimension = {5, 4, 3, 1}
2149      *     output.dimension = {5, 4, 3, 2}
2150      *
2151      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
2152      * dimension is only compatible with 0 or 1. The size of the output
2153      * dimension is zero if either of corresponding input dimension is zero.
2154      *
2155      * Supported tensor {@link OperandType}:
2156      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2157      * * {@link OperandType::TENSOR_FLOAT32}
2158      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
2159      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2160      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
2161      *
2162      * Supported tensor rank: up to 4
2163      *
2164      * Inputs:
2165      * * 0: An n-D tensor, specifying the first input.
2166      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
2167      *      as input0.
2168      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
2169      *      {@link FusedActivationFunc} values. Specifies the activation to
2170      *      invoke on the result.
2171      *      For a {@link OperandType::TENSOR_INT32} tensor,
2172      *      the {@link FusedActivationFunc} must be "NONE".
2173      *
2174      * Outputs:
2175      * * 0: A tensor of the same {@link OperandType} as input0.
2176      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2177      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2178      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2179      */
2180     SUB = 36,
2181 
2182     /**
2183      * Transposes the input tensor, permuting the dimensions according to the
2184      * perm tensor.
2185      *
2186      * The returned tensor's dimension i corresponds to the input dimension
2187      * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2188      * rank of the input tensor. Hence by default, this operation performs a
2189      * regular matrix transpose on 2-D input Tensors.
2190      *
2191      * Supported tensor {@link OperandType}:
2192      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2193      * * {@link OperandType::TENSOR_FLOAT32}
2194      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2195      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2196      *
2197      * Supported tensor rank: up to 4
2198      *
2199      * Inputs:
2200      * * 0: An n-D tensor, specifying the tensor to be transposed.
2201      *      Since HAL version 1.2, this tensor may be zero-sized.
2202      * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
2203      *      the permutation of the dimensions of the input tensor.
2204      *
2205      * Outputs:
2206      * * 0: A tensor of the same {@link OperandType} as input0.
2207      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2208      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2209      *      the scale and zeroPoint must be the same as input0.
2210      */
2211     TRANSPOSE = 37,
2212 
2213     /**
2214      * Computes the absolute value of a tensor, element-wise.
2215      *
2216      * Supported tensor {@link OperandType}:
2217      * * {@link OperandType::TENSOR_FLOAT16}
2218      * * {@link OperandType::TENSOR_FLOAT32}
2219      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
2220      *
2221      * Supported tensor rank: from 1.
2222      *
2223      * Inputs:
2224      * * 0: A tensor.
2225      *
2226      * Outputs:
2227      * * 0: The output tensor of same shape as input0.
2228      */
2229     ABS = 38,
2230 
2231     /**
2232      * Returns the index of the largest element along an axis.
2233      *
2234      * Supported tensor {@link OperandType}:
2235      * * {@link OperandType::TENSOR_FLOAT16}
2236      * * {@link OperandType::TENSOR_FLOAT32}
2237      * * {@link OperandType::TENSOR_INT32}
2238      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2239      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2240      *
2241      * Supported tensor rank: from 1
2242      *
2243      * Inputs:
2244      * * 0: An n-D tensor specifying the input. Must be non-empty.
2245      * * 1: An {@link OperandType::INT32} scalar specifying the axis to
2246      *      reduce across. Negative index is used to specify axis from the
2247      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2248      *
2249      * Outputs:
2250      * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
2251      *      If input is 1-dimensional, the output shape is [1].
2252      */
2253     // There is no underscore in ARG_MAX to avoid name conflict with
2254     // the macro defined in libc/kernel/uapi/linux/limits.h.
2255     ARGMAX = 39,
2256 
2257     /**
2258      * Returns the index of the smallest element along an axis.
2259      *
2260      * Supported tensor {@link OperandType}:
2261      * * {@link OperandType::TENSOR_FLOAT16}
2262      * * {@link OperandType::TENSOR_FLOAT32}
2263      * * {@link OperandType::TENSOR_INT32}
2264      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2265      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2266      *
2267      * Supported tensor rank: from 1
2268      *
2269      * Inputs:
2270      * * 0: An n-D tensor specifying the input. Must be non-empty.
2271      * * 1: An {@link OperandType::INT32} scalar specifying the axis to
2272      *      reduce across. Negative index is used to specify axis from the
2273      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2274      *
2275      * Outputs:
2276      * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
2277      *      If input is 1-dimensional, the output shape is [1].
2278      */
2279     ARGMIN = 40,  // See ARGMAX for naming discussion.
2280 
2281     /**
2282      * Transform axis-aligned bounding box proposals using bounding box deltas.
2283      *
2284      * Given the positions of bounding box proposals and the corresponding
2285      * bounding box deltas for each class, return the refined bounding box
2286      * regions. The resulting bounding boxes are cliped against the edges of
2287      * the image.
2288      *
2289      * Supported tensor {@link OperandType}:
2290      * * {@link OperandType::TENSOR_FLOAT16}
2291      * * {@link OperandType::TENSOR_FLOAT32}
2292      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
2293      *
2294      * Inputs:
2295      * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2296      *      bounding box proposals, each line with format [x1, y1, x2, y2].
2297      *      For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
2298      *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2299      *      is supported for this tensor.
2300      * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2301      *      bounding box delta for each region of interest and each class. The
2302      *      bounding box deltas are organized in the following order
2303      *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
2304      *      for the center position of the bounding box with respect to the width
2305      *      and height, dw and dh is the log-scale relative correction factor
2306      *      for the width and height. For input0 of type
2307      *      {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
2308      *      of {@link OperandType::TENSOR_QUANT8_ASYMM} or
2309      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2310      *      supported for this tensor.
2311      * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2312      *      [num_rois], specifying the batch index of each box. Boxes with
2313      *      the same batch index are grouped together. Zero num_rois is
2314      *      supported for this tensor.
2315      * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2316      *      each image in the batch, each line with format
2317      *      [image_height, image_width].
2318      *
2319      * Outputs:
2320      * * 0: A tensor of the same {@link OperandType} as input0, with shape
2321      *      [num_rois, num_classes * 4], specifying the coordinates of each
2322      *      output bounding box for each class, with format [x1, y1, x2, y2].
2323      *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
2324      *      scale must be 0.125 and the zero point must be 0.
2325      */
2326     AXIS_ALIGNED_BBOX_TRANSFORM = 41,
2327 
2328     /**
2329      * A recurrent neural network layer that applies an LSTM cell to a
2330      * sequence of inputs in forward and backward directions.
2331      *
2332      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2333      * one input into the two RNN cells in the following way:
2334      *
2335      *       INPUT  (INPUT_REVERSED)
2336      *         |         |
2337      *    ---------------------
2338      *    | FW_LSTM   BW_LSTM |
2339      *    ---------------------
2340      *         |         |
2341      *      FW_OUT     BW_OUT
2342      *
2343      * An op with cross-linking takes two inputs and feeds them into the RNN
2344      * cells in the following way:
2345      *
2346      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2347      *           |             |
2348      *     INPUT | (INPUT_R'D.)|
2349      *       |   |       |     |
2350      *    -----------------------
2351      *    |  \  /        \    / |
2352      *    | FW_LSTM     BW_LSTM |
2353      *    -----------------------
2354      *         |           |
2355      *      FW_OUT      BW_OUT
2356      *
2357      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2358      * weights are present. While stacking this op on top of itself, this
2359      * allows to connect both forward and backward outputs from previous cell
2360      * to the next cell's input.
2361      *
2362      * Since HAL version 1.3 parallel linking mode is supported. The mode is
2363      * enabled if auxiliary input is present but auxiliary weights are omitted.
2364      * In this case, the cell feeds inputs into the RNN in the following way:
2365      *
2366      *       INPUT (AUX_INPUT_REVERSED)
2367      *         |         |
2368      *    ---------------------
2369      *    | FW_LSTM   BW_LSTM |
2370      *    ---------------------
2371      *         |         |
2372      *      FW_OUT     BW_OUT
2373      *
2374      * While stacking this op on top of itself, this allows to connect both
2375      * forward and backward outputs from previous cell to the next cell's
2376      * corresponding inputs.
2377      *
2378      * Supported tensor {@link OperandType}:
2379      * * {@link OperandType::TENSOR_FLOAT16}
2380      * * {@link OperandType::TENSOR_FLOAT32}
2381      *
2382      * Supported tensor rank: 3, either time-major or batch-major.
2383      *
2384      * All input and output tensors must be of the same type.
2385      *
2386      * Inputs:
2387      * * 0: The input.
2388      *      A 3-D tensor of shape:
2389      *        If time-major: [max_time, batch_size, input_size]
2390      *        If batch-major: [batch_size, max_time, input_size]
2391      *      where "max_time" is the number of timesteps (sequence length),
2392      *      "batch_size" corresponds to the batching dimension, and
2393      *      "input_size" is the size of the input.
2394      * * 1: The forward input-to-input weights. Optional.
2395      *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
2396      *      corresponds to the number of forward cell units.
2397      * * 2: The forward input-to-forget weights.
2398      *      A 2-D tensor of shape [fw_num_units, input_size].
2399      * * 3: The forward input-to-cell weights.
2400      *      A 2-D tensor of shape [fw_num_units, input_size].
2401      * * 4: The forward input-to-output weights.
2402      *      A 2-D tensor of shape [fw_num_units, input_size].
2403      * * 5: The forward recurrent-to-input weights. Optional.
2404      *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
2405      *      corresponds to either the number of cell units (i.e., fw_num_units),
2406      *      or the second dimension of the “fw_projection_weights”, if defined.
2407      * * 6: The forward recurrent-to-forget weights.
2408      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2409      * * 7: The forward recurrent-to-cell weights.
2410      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2411      * * 8: The forward recurrent-to-output weights.
2412      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2413      * * 9: The forward cell-to-input weights. Optional.
2414      *      A 1-D tensor of shape [fw_num_units].
2415      * * 10: The forward cell-to-forget weights. Optional.
2416      *       A 1-D tensor of shape [fw_num_units].
2417      * * 11: The forward cell-to-output weights. Optional.
2418      *       A 1-D tensor of shape [fw_num_units].
2419      * * 12: The forward input gate bias. Optional.
2420      *       A 1-D tensor of shape [fw_num_units].
2421      * * 13: The forward forget gate bias.
2422      *       A 1-D tensor of shape [fw_num_units].
2423      * * 14: The forward cell gate bias.
2424      *       A 1-D tensor of shape [fw_num_units].
2425      * * 15: The forward output gate bias.
2426      *       A 1-D tensor of shape [fw_num_units].
2427      * * 16: The forward projection weights. Optional.
2428      *       A 2-D tensor of shape [fw_output_size, fw_num_units].
2429      * * 17: The forward projection bias. Optional.
2430      *       A 1-D tensor of shape [fw_output_size].
2431      * * 18: The backward input-to-input weights. Optional.
2432      *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
2433      *       corresponds to the number of backward cell units.
2434      * * 19: The backward input-to-forget weights.
2435      *       A 2-D tensor of shape [bw_num_units, input_size].
2436      * * 20: The backward input-to-cell weights.
2437      *       A 2-D tensor of shape [bw_num_units, input_size].
2438      * * 21: The backward input-to-output weights.
2439      *       A 2-D tensor of shape [bw_num_units, input_size].
2440      * * 22: The backward recurrent-to-input weights. Optional.
2441      *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
2442      *       corresponds to either the number of cell units (i.e., “bw_num_units”),
2443      *       or the second dimension of the “bw_projection_weights”, if defined.
2444      * * 23: The backward recurrent-to-forget weights.
2445      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2446      * * 24: The backward recurrent-to-cell weights.
2447      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2448      * * 25: The backward recurrent-to-output weights.
2449      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2450      * * 26: The backward cell-to-input weights. Optional.
2451      *       A 1-D tensor of shape [bw_num_units].
2452      * * 27: The backward cell-to-forget weights. Optional.
2453      *       A 1-D tensor of shape [bw_num_units].
2454      * * 28: The backward cell-to-output weights. Optional.
2455      *       A 1-D tensor of shape [bw_num_units].
2456      * * 29: The backward input gate bias. Optional.
2457      *       A 1-D tensor of shape [bw_num_units].
2458      * * 30: The backward forget gate bias.
2459      *       A 1-D tensor of shape [bw_num_units].
2460      * * 31: The backward cell gate bias.
2461      *       A 1-D tensor of shape [bw_num_units].
2462      * * 32: The backward output gate bias.
2463      *       A 1-D tensor of shape [bw_num_units].
2464      * * 33: The backward projection weights. Optional.
2465      *       A 2-D tensor of shape [bw_output_size, bw_num_units].
2466      * * 34: The backward projection bias. Optional.
2467      *       A 1-D tensor of shape [bw_output_size].
2468      * * 35: The forward input activation state.
2469      *       A 2-D tensor of shape [batch_size, bw_output_size].
2470      * * 36: The forward input cell state.
2471      *       A 2-D tensor of shape [batch_size, bw_num_units].
2472      * * 37: The backward input activation state.
2473      *       A 2-D tensor of shape [batch_size, bw_output_size].
2474      * * 38: The backward input cell state.
2475      *       A 2-D tensor of shape [batch_size, bw_num_units].
2476      * * 39: The auxiliary input. Optional.
2477      *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
2478      *       where “batch_size” corresponds to the batching dimension, and
2479      *       “aux_input_size” is the size of the auxiliary input. Optional. See
2480      *       the docs above for the usage modes explanation.
2481      * * 40: The forward auxiliary input-to-input weights.
2482      *       Optional. See the docs above for the usage modes explanation.
2483      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2484      * * 41: The forward auxiliary input-to-forget weights.
2485      *       Optional. See the docs above for the usage modes explanation.
2486      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2487      * * 42: The forward auxiliary input-to-cell weights.
2488      *       Optional. See the docs above for the usage modes explanation.
2489      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2490      * * 43: The forward auxiliary input-to-output weights.
2491      *       Optional. See the docs above for the usage modes explanation.
2492      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2493      * * 44: The backward auxiliary input-to-input weights.
2494      *       Optional. See the docs above for the usage modes explanation.
2495      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2496      * * 45: The backward auxiliary input-to-forget weights.
2497      *       Optional. See the docs above for the usage modes explanation.
2498      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2499      * * 46: The backward auxiliary input-to-cell weights.
2500      *       Optional. See the docs above for the usage modes explanation.
2501      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2502      * * 47: The backward auxiliary input-to-output weights.
2503      *       Optional. See the docs above for the usage modes explanation.
2504      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2505      * * 48: The activation function.
2506      *       A value indicating the activation function:
2507      *       <ul>
2508      *       <li>0: None;
2509      *       <li>1: Relu;
2510      *       <li>3: Relu6;
2511      *       <li>4: Tanh;
2512      *       <li>6: Sigmoid.
2513      *       </ul>
2514      * * 49: The clipping threshold for the cell state, such
2515      *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
2516      *       then clipping is disabled.
2517      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
2518      *       this scalar must be of the type {@link OperandType::FLOAT32},
2519      *       otherwise if all the input tensors have the type
2520      *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
2521      *       of type {@link OperandType::FLOAT16}.
2522      * * 50: The clipping threshold for the output from the
2523      *       projection layer, such that values are bound within
2524      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2525      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
2526      *       this scalar must be of the type {@link OperandType::FLOAT32},
2527      *       otherwise if all the input tensors have the type
2528      *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
2529      *       of type {@link OperandType::FLOAT16}.
2530      * * 51: merge_outputs
2531      *       An {@link OperandType::BOOL} scalar specifying if the outputs
2532      *       from forward and backward cells should be merged.
2533      * * 52: time_major
2534      *       An {@link OperandType::BOOL} scalar specifying the shape format
2535      *       of input and output tensors.
2536      * * 53: The forward input layer normalization weights. Optional.
2537      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2538      *       to activation at input gate.
2539      * * 54: The forward forget layer normalization weights. Optional.
2540      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2541      *       to activation at forget gate.
2542      * * 55: The forward cell layer normalization weights. Optional.
2543      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2544      *       to activation at cell gate.
2545      * * 56: The forward output layer normalization weights. Optional.
2546      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2547      *       to activation at output gate.
2548      * * 57: The backward input layer normalization weights. Optional.
2549      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2550      *       to activation at input gate.
2551      * * 58: The backward forget layer normalization weights. Optional.
2552      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2553      *       to activation at forget gate.
2554      * * 59: The backward cell layer normalization weights. Optional.
2555      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2556      *       to activation at cell gate.
2557      * * 60: The backward output layer normalization weights. Optional.
2558      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2559      *       to activation at output gate.
2560      *
2561      * Outputs:
2562      * * 0: The forward output.
2563      *      A 3-D tensor of shape:
2564      *        If time-major and not merge_outputs:
2565      *          [max_time, batch_size, fw_output_size]
2566      *        If time-major and merge_outputs:
2567      *          [max_time, batch_size, fw_output_size + bw_output_size]
2568      *        If batch-major and not merge_outputs:
2569      *          [batch_size, max_time, fw_output_size]
2570      *        If batch-major and merge_outputs:
2571      *          [batch_size, max_time, fw_output_size + bw_output_size]
2572      * * 1: The backward output.  Unused if merge_outputs is true.
2573      *      A 3-D tensor of shape:
2574      *        If time-major: [max_time, batch_size, bw_output_size]
2575      *        If batch-major: [batch_size, max_time, bw_output_size]
2576      * * 2: The forward activation state output.
2577      *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
2578      *      activation state from the last time step in the sequence. This
2579      *      output is optional and can be omitted. If this output is present
2580      *      then outputs 3-5 must be present as well.
2581      *      Available since HAL version 1.3.
2582      * * 3: The forward cell state output.
2583      *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
2584      *      from the last time step in the sequence. This output is optional
2585      *      and can be omitted. If this output is present
2586      *      then outputs 2, 4, 5 must be present as well.
2587      *      Available since HAL version 1.3.
2588      * * 4: The backward activation state output.
2589      *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
2590      *      activation state from the last time step in the sequence. This
2591      *      output is optional and can be omitted. If this output is present
2592      *      then outputs 2, 3, 5 must be present as well.
2593      *      Available since HAL version 1.3.
2594      * * 5: The backward cell state output.
2595      *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
2596      *      from the last time step in the sequence. This output is optional
2597      *      and can be omitted. If this output is present
2598      *      then outputs 2-4 must be present as well.
2599      *      Available since HAL version 1.3.
2600      */
2601     BIDIRECTIONAL_SEQUENCE_LSTM = 42,
2602 
2603     /**
2604      * A recurrent neural network layer that applies a basic RNN cell to a
2605      * sequence of inputs in forward and backward directions.
2606      *
2607      * This Op unrolls the input along the sequence dimension, and implements
2608      * the following operation for each element in the sequence s =
2609      * 1...sequence_length:
2610      *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
2611      *          fw_state * fw_recurrent_weights’ + fw_bias)
2612      *
2613      * And for each element in sequence t = sequence_length : 1
2614      *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
2615      *          bw_state * bw_recurrent_weights’ + bw_bias)
2616      *
2617      * Where:
2618      * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
2619      * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
2620      *    current “state” which itself is the output from the previous time step
2621      *    computation;
2622      * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
2623      *    batch);
2624      * * “activation” is the function passed as the “fused_activation_function”
2625      *   argument (if not “NONE”).
2626      *
2627      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2628      * one input into the two RNN cells in the following way:
2629      *
2630      *       INPUT  (INPUT_REVERSED)
2631      *         |         |
2632      *    ---------------------
2633      *    | FW_RNN     BW_RNN |
2634      *    ---------------------
2635      *         |         |
2636      *      FW_OUT     BW_OUT
2637      *
2638      * An op with cross-linking takes two inputs and feeds them into the RNN
2639      * cells in the following way:
2640      *
2641      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2642      *           |             |
2643      *     INPUT | (INPUT_R'D.)|
2644      *       |   |       |     |
2645      *    -----------------------
2646      *    |  \  /        \    / |
2647      *    | FW_RNN       BW_RNN |
2648      *    -----------------------
2649      *         |           |
2650      *      FW_OUT      BW_OUT
2651      *
2652      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2653      * weights are present. While stacking this op on top of itself, this
2654      * allows to connect both forward and backward outputs from previous cell
2655      * to the next cell's input.
2656      *
2657      * Since HAL version 1.3 parallel linking mode is supported. The mode is
2658      * enabled if auxiliary input is present but auxiliary weights are omitted.
2659      * In this case, the cell feeds inputs into the RNN in the following way:
2660      *
2661      *       INPUT (AUX_INPUT_REVERSED)
2662      *         |         |
2663      *    ---------------------
2664      *    | FW_RNN     BW_RNN |
2665      *    ---------------------
2666      *         |         |
2667      *      FW_OUT     BW_OUT
2668      *
2669      * While stacking this op on top of itself, this allows to connect both
2670      * forward and backward outputs from previous cell to the next cell's
2671      * corresponding inputs.
2672      *
2673      * Supported tensor {@link OperandType}:
2674      * * {@link OperandType::TENSOR_FLOAT16}
2675      * * {@link OperandType::TENSOR_FLOAT32}
2676      *
2677      * The input tensors must all be the same type.
2678      *
2679      * Inputs:
2680      * * 0: input.
2681      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2682      *      it is set to true, then the input has a shape [maxTime, batchSize,
2683      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
2684      *      inputSize].
2685      * * 1: fwWeights.
2686      *      A 2-D tensor of shape [fwNumUnits, inputSize].
2687      * * 2: fwRecurrentWeights.
2688      *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
2689      * * 3: fwBias.
2690      *      A 1-D tensor of shape [fwNumUnits].
2691      * * 4: fwHiddenState.
2692      *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
2693      *      state input for the first time step of the computation.
2694      * * 5: bwWeights.
2695      *      A 2-D tensor of shape [bwNumUnits, inputSize].
2696      * * 6: bwRecurrentWeights.
2697      *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
2698      * * 7: bwBias.
2699      *      A 1-D tensor of shape [bwNumUnits].
2700      * * 8: bwHiddenState
2701      *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
2702      *      state input for the first time step of the computation.
2703      * * 9: auxInput.
2704      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2705      *      it is set to true, then the input has a shape [maxTime, batchSize,
2706      *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
2707      *      auxInputSize]. Can be omitted. See the docs above for the usage
2708      *      modes explanation.
2709      * * 10:fwAuxWeights.
2710      *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
2711      *      See the docs above for the usage modes explanation.
2712      * * 11:bwAuxWeights.
2713      *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
2714      *      See the docs above for the usage modes explanation.
2715      * * 12:fusedActivationFunction.
2716      *      A {@link FusedActivationFunc} value indicating the activation function. If
2717      *      “NONE” is specified then it results in a linear activation.
2718      * * 13:timeMajor
2719      *      An {@link OperandType::BOOL} scalar specifying the shape format
2720      *      of input and output tensors.
2721      * * 14:mergeOutputs
2722      *      An {@link OperandType::BOOL} scalar specifying if the outputs
2723      *      from forward and backward cells are separate (if set to false) or
2724      *      concatenated (if set to true).
2725      * Outputs:
2726      * * 0: fwOutput.
2727      *      A 3-D tensor. The first two dimensions of the shape are defined by
2728      *      the input 6 (timeMajor) and the third dimension is defined by the
2729      *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
2730      *      two dimensions are [maxTime, batchSize], otherwise they are set to
2731      *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
2732      *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
2733      *      to fwNumUnits.
2734      * * 1: bwOutput.
2735      *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
2736      *      this tensor is not produced. The shape is defined by the input 6
2737      *      (timeMajor). If it is set to true, then the shape is set to
2738      *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
2739      *      [batchSize, maxTime, bwNumUnits].
2740      * * 2: The forward hidden state output.
2741      *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
2742      *      state from the last time step in the sequence. This output is
2743      *      optional and can be omitted. If this output is present then output
2744      *      3 must be present as well.
2745      *      Available since HAL version 1.3.
2746      * * 3: The backward hidden state output.
2747      *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
2748      *      state from the last time step in the sequence. This output is
2749      *      optional and can be omitted. If this output is present then output
2750      *      2 must be present as well.
2751      *      Available since HAL version 1.3.
2752      */
2753     BIDIRECTIONAL_SEQUENCE_RNN = 43,
2754 
2755     /**
2756      * Greedily selects a subset of bounding boxes in descending order of score.
2757      *
2758      * This op applies NMS algorithm to each class. In each loop of execution,
2759      * the box with maximum score gets selected and removed from the pending set.
2760      * The scores of the rest of boxes are lowered according to the
2761      * intersection-over-union (IOU) overlapping with the previously selected
2762      * boxes and a specified NMS kernel method. Any boxes with score less
2763      * than a threshold are removed from the pending set.
2764      *
2765      * Three NMS kernels are supported:
2766      * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
2767      * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
2768      * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
2769      *
2770      * Axis-aligned bounding boxes are represented by its upper-left corner
2771      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
2772      * bounding box should satisfy x1 <= x2 and y1 <= y2.
2773      *
2774      * Supported tensor {@link OperandType}:
2775      * * {@link OperandType::TENSOR_FLOAT16}
2776      * * {@link OperandType::TENSOR_FLOAT32}
2777      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2778      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2779      *
2780      * Inputs:
2781      * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
2782      *      of each bounding box proposal. The boxes are grouped by batches in the
2783      *      first dimension. Zero num_rois is supported for this tensor.
2784      * * 1: A 2-D Tensor specifying the bounding boxes of shape
2785      *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
2786      *      The boxes are grouped by batches in the first dimension. The sequential
2787      *      order of the boxes corresponds with input0. For input0 of type
2788      *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
2789      *      {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
2790      *      scale of 0.125.
2791      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
2792      *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
2793      *      with zeroPoint of -128 and scale of 0.125.
2794      *      Zero num_rois is supported for this tensor.
2795      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2796      *      [num_rois], specifying the batch index of each box. Boxes with
2797      *      the same batch index are grouped together.
2798      * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
2799      *      with scores lower than the threshold are filtered before sending
2800      *      to the NMS algorithm.
2801      * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
2802      *      number of selected bounding boxes for each image. Set to a negative
2803      *      value for unlimited number of output bounding boxes.
2804      * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
2805      *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
2806      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
2807      *      threshold in hard and linear NMS kernel. This field is ignored if
2808      *      gaussian kernel is selected.
2809      * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
2810      *      gaussian NMS kernel. This field is ignored if gaussian kernel is
2811      *      not selected.
2812      * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
2813      *      Boxes with scores lower than the threshold are dropped during the
2814      *      score updating phase in soft NMS.
2815      *
2816      * Outputs:
2817      * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
2818      *      [num_output_rois], specifying the score of each output box. The boxes
2819      *      are grouped by batches, but the sequential order in each batch is not
2820      *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
2821      *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM}
2822      *      or {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
2823      *      the scale and zero point must be the same as input0.
2824      * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
2825      *      [num_output_rois, 4], specifying the coordinates of each
2826      *      output bounding box with the same format as input1. The sequential
2827      *      order of the boxes corresponds with output0. For type of
2828      *      {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
2829      *      0.125 and the zero point must be 0.
2830      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2831      *      [num_output_rois], specifying the class of each output box. The
2832      *      sequential order of the boxes corresponds with output0.
2833      * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2834      *      [num_output_rois], specifying the batch index of each box. Boxes
2835      *      with the same batch index are grouped together.
2836      */
2837     BOX_WITH_NMS_LIMIT = 44,
2838 
2839     /**
2840      * Casts a tensor to a type.
2841      *
2842      * This operation ignores the scale and zeroPoint of quanized tensors,
2843      * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
2844      * as a tensor of uint8 values.
2845      *
2846      * Supported tensor {@link OperandType}:
2847      * * {@link OperandType::TENSOR_FLOAT16}
2848      * * {@link OperandType::TENSOR_FLOAT32}
2849      * * {@link OperandType::TENSOR_INT32}
2850      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2851      * Since HAL version 1.3, casting tensors of the following
2852      * {@link OperandType} to the same {@link OperandType} is supported:
2853      * * {@link OperandType::TENSOR_BOOL8}
2854      * * {@link OperandType::TENSOR_INT32}
2855      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
2856      * * {@link OperandType::TENSOR_QUANT16_SYMM}
2857      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
2858      * * {@link OperandType::TENSOR_QUANT8_SYMM}
2859      *
2860      * Supported tensor rank: from 1
2861      *
2862      * Inputs:
2863      * * 0: A tensor.
2864      *
2865      * Outputs:
2866      * * 0: A tensor with the same shape as input0.
2867      */
2868     CAST = 45,
2869 
2870     /**
2871      * Shuffle the channels of the input tensor.
2872      *
2873      * Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE
2874      * divide the channel dimension into num_groups groups, and reorganize the
2875      * channels by grouping channels with the same index in each group.
2876      *
2877      * Along the channel dimension, the output is calculated using this formula:
2878      *
2879      *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
2880      *
2881      * where group_size = num_channels / num_groups
2882      *
2883      * The number of channels must be divisible by num_groups.
2884      *
2885      * Supported tensor {@link OperandType}:
2886      * * {@link OperandType::TENSOR_FLOAT16}
2887      * * {@link OperandType::TENSOR_FLOAT32}
2888      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2889      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2890      *
2891      * Supported tensor rank: up to 4
2892      *
2893      * Inputs:
2894      * * 0: An n-D tensor, specifying the tensor to be shuffled.
2895      * * 1: An {@link OperandType::INT32} scalar, specifying the number of
2896      *      groups.
2897      * * 2: An {@link OperandType::INT32} scalar, specifying the dimension
2898      *      channel shuffle would be performed on. Negative index is used to
2899      *      specify axis from the end (e.g. -1 for the last axis). Must be in
2900      *      the range [-n, n).
2901      *
2902      * Outputs:
2903      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
2904      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2905      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2906      *      the scale and zeroPoint must be the same as input0.
2907      */
2908     CHANNEL_SHUFFLE = 46,
2909 
2910     /**
2911      * Apply postprocessing steps to bounding box detections.
2912      *
2913      * Bounding box detections are generated by applying transformation on a set
2914      * of predefined anchors with the bounding box deltas from bounding box
2915      * regression. A final step of hard NMS is applied to limit the number of
2916      * returned boxes.
2917      *
2918      * Supported tensor {@link OperandType}:
2919      * * {@link OperandType::TENSOR_FLOAT16}
2920      * * {@link OperandType::TENSOR_FLOAT32}
2921      *
2922      * Inputs:
2923      * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
2924      *      the score of each anchor with each class. Class 0 for each
2925      *      [batches, num_anchors, 0] is background and will be ignored.
2926      * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
2927      *      the first four values in length_box_encoding specifying the bounding
2928      *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
2929      *      where dy and dx is the linear-scale relative correction factor for the
2930      *      center position of the bounding box with respect to the width and height,
2931      *      dh and dw is the log-scale relative correction factor for the width and
2932      *      height. All the entries in length_box_encoding beyond the first four
2933      *      values are ignored in this operation.
2934      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
2935      *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
2936      *      ctr_x are the center position of the box, and h and w are the height
2937      *      and the width.
2938      * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2939      *      factor for dy in bounding box deltas.
2940      * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2941      *      factor for dx in bounding box deltas.
2942      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2943      *      factor for dh in bounding box deltas.
2944      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2945      *      factor for dw in bounding box deltas.
2946      * * 7: An {@link OperandType::BOOL} scalar, set to true to use regular
2947      *      multi-class NMS algorithm that do NMS separately for each class,
2948      *      set to false for a faster algorithm that only do one single NMS
2949      *      using the highest class score..
2950      * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying
2951      *      the maximum number of boxes for the output. Boxes with the lowest
2952      *      scores are discarded to meet the limit.
2953      * * 9: An {@link OperandType::INT32} scalar, only used when input7 is
2954      *      set to false, specifying the maximum number of classes per detection.
2955      * * 10: An {@link OperandType::INT32} scalar, only used when input7 is
2956      *       set to true, specifying the maximum number of detections when
2957      *       applying NMS algorithm for each single class.
2958      * * 11: A scalar, score_threshold. Boxes with scores lower than the
2959      *       threshold are filtered before sending to the NMS algorithm. The
2960      *       scalar must be of {@link OperandType::FLOAT16} if input0 is of
2961      *       {@link OperandType::TENSOR_FLOAT16} and of
2962      *       {@link OperandType::FLOAT32} if input0 is of
2963      *       {@link OperandType::TENSOR_FLOAT32}.
2964      * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
2965      *       must be of {@link OperandType::FLOAT16} if input0 is of
2966      *       {@link OperandType::TENSOR_FLOAT16} and of
2967      *       {@link OperandType::FLOAT32} if input0 is of
2968      *       {@link OperandType::TENSOR_FLOAT32}.
2969      * * 13: An {@link OperandType::BOOL} scalar, set to true to include
2970      *       background class in the list of label map for the output, set
2971      *       to false to not include the background. When the background
2972      *       class is included, it has label 0 and the output classes start
2973      *       at 1 in the label map, otherwise, the output classes start at 0.
2974      *
2975      * Outputs:
2976      * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape
2977      *      [batches, max_num_detections], specifying the score of each output
2978      *      detections.
2979      * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
2980      *      coordinates of each output bounding box, with format
2981      *      [y1, x1, y2, x2].
2982      * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape
2983      *      [batches, max_num_detections], specifying the class label for each
2984      *      output detection.
2985      * * 3: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
2986      *      specifying the number of valid output detections for each batch.
2987      */
2988     DETECTION_POSTPROCESSING = 47,
2989 
2990     /**
2991      * For input tensors x and y, computes x == y elementwise.
2992      *
2993      * Supported tensor {@link OperandType}:
2994      * * {@link OperandType::TENSOR_BOOL8}
2995      * * {@link OperandType::TENSOR_FLOAT16}
2996      * * {@link OperandType::TENSOR_FLOAT32}
2997      * * {@link OperandType::TENSOR_INT32}
2998      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2999      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3000      *
3001      * Supported tensor rank: from 1
3002      *
3003      * This operation supports broadcasting.
3004      *
3005      * Inputs:
3006      * * 0: A tensor.
3007      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3008      *      with input0.
3009      *
3010      * Outputs:
3011      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3012      */
3013     EQUAL = 48,
3014 
3015     /**
3016      * Computes exponential of x element-wise.
3017      *
3018      * Supported tensor {@link OperandType}:
3019      * * {@link OperandType::TENSOR_FLOAT16}
3020      * * {@link OperandType::TENSOR_FLOAT32}
3021      *
3022      * Supported tensor rank: from 1.
3023      *
3024      * Inputs:
3025      * * 0: A tensor.
3026      *
3027      * Outputs:
3028      * * 0: The output tensor of same shape as input0.
3029      */
3030     EXP = 49,
3031 
3032     /**
3033      * Inserts a dimension of 1 into a tensor's shape.
3034      *
3035      * Given a tensor input, this operation inserts a dimension of 1 at the
3036      * given dimension index of input's shape. The dimension index starts at
3037      * zero; if you specify a negative dimension index, it is counted backward
3038      * from the end.
3039      *
3040      * Supported tensor {@link OperandType}:
3041      * * {@link OperandType::TENSOR_FLOAT16}
3042      * * {@link OperandType::TENSOR_FLOAT32}
3043      * * {@link OperandType::TENSOR_INT32}
3044      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3045      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3046      *
3047      * Supported tensor rank: from 1
3048      *
3049      * Inputs:
3050      * * 0: An n-D tensor.
3051      * * 1: An {@link OperandType::INT32} scalar specifying the dimension
3052      *      index to expand. Must be in the range [-(n + 1), (n + 1)).
3053      *
3054      * Outputs:
3055      * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
3056      *      input0.
3057      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3058      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3059      *      the scale and zeroPoint must be the same as input0.
3060      */
3061     EXPAND_DIMS = 50,
3062 
3063     /**
3064      * Gathers values along an axis.
3065      *
3066      * Produces an output tensor with shape
3067      *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
3068      * where:
3069      *     # Vector indices (output is rank(input0)).
3070      *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
3071      *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
3072      *
3073      *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
3074      *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
3075      *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
3076      *
3077      * Supported tensor {@link OperandType}:
3078      * * {@link OperandType::TENSOR_FLOAT16}
3079      * * {@link OperandType::TENSOR_FLOAT32}
3080      * * {@link OperandType::TENSOR_INT32}
3081      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3082      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3083      *
3084      * Supported tensor rank: from 1
3085      *
3086      * Inputs:
3087      * * 0: An n-D tensor from which to gather values.
3088      * * 1: An {@link OperandType::INT32} scalar specifying the axis.
3089      *      Negative index is used to specify axis from the end
3090      *      (e.g. -1 for the last axis). Must be in the range [-n, n).
3091      * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
3092      *      The values must be in the bounds of the corresponding dimensions
3093      *      of input0.
3094      *
3095      * Outputs:
3096      * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
3097      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3098      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3099      *      the scale and zeroPoint must be the same as input0.
3100      */
3101     GATHER = 51,
3102 
3103     /**
3104      * Generate aixs-aligned bounding box proposals.
3105      *
3106      * Bounding box proposals are generated by applying transformation on a set
3107      * of predefined anchors with the bounding box deltas from bounding box
3108      * regression. A final step of hard NMS is applied to limit the number of
3109      * returned boxes.
3110      *
3111      * Axis-aligned bounding boxes are represented by its upper-left corner
3112      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3113      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3114      *
3115      * Supported tensor {@link OperandType}:
3116      * * {@link OperandType::TENSOR_FLOAT16}
3117      * * {@link OperandType::TENSOR_FLOAT32}
3118      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3119      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3120      *
3121      * Inputs:
3122      * * 0: A 4-D Tensor specifying the score of each anchor at each
3123      *      location. With "NHWC" data layout, the tensor shape is
3124      *      [batches, height, width, num_anchors]. With "NCHW" data layout,
3125      *      the tensor shape is [batches, num_anchors, height, width].
3126      * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
3127      *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
3128      *      With "NCHW" data layout, the tensor shape is
3129      *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
3130      *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
3131      *      relative correction factor for the center position of the bounding box
3132      *      with respect to the width and height, dw and dh is the log-scale
3133      *      relative correction factor for the width and height. The last
3134      *      dimensions is the channel dimension.
3135      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3136      *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
3137      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or
3138      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
3139      *      {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
3140      * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
3141      *      each image in the batch, with format [image_height, image_width].
3142      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} or
3143      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this
3144      *      tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
3145      *      scale of 0.125.
3146      * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
3147      *      from the height of original image to the height of feature map.
3148      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
3149      *      from the width of original image to the width of feature map.
3150      * * 6: An {@link OperandType::INT32} scalar, specifying the maximum
3151      *      number of boxes before going into the hard NMS algorithm. Boxes
3152      *      with the lowest scores are discarded to meet the limit. Set to
3153      *      a non-positive value for unlimited number.
3154      * * 7: An {@link OperandType::INT32} scalar, specifying the maximum
3155      *      number of boxes returning from the hard NMS algorithm. Boxes
3156      *      with the lowest scores are discarded to meet the limit. Set to
3157      *      a non-positive value for unlimited number.
3158      * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU
3159      *      threshold for hard NMS.
3160      * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with
3161      *      height or width lower than the absolute threshold are filtered out.
3162      * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
3163      *       NCHW data layout for input0 and input1. Set to false for NHWC.
3164      *
3165      * Outputs:
3166      * * 0: A tensor of the same {@link OperandType} as input0, of shape
3167      *      [num_output_rois], specifying the score of each output box.
3168      *      The boxes are grouped by batches, but the sequential order in
3169      *      each batch is not guaranteed. For type of
3170      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or
3171      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
3172      *      point must be the same as input0.
3173      * * 1: A tensor of the same {@link OperandType} as input3, of shape
3174      *      [num_output_rois, 4], specifying the coordinates of each output
3175      *      bounding box for each class, with format [x1, y1, x2, y2].
3176      *      The sequential order of the boxes corresponds with output0.
3177      *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
3178      *      scale must be 0.125 and the zero point must be 0.
3179      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
3180      *      [num_output_rois], specifying the batch index of each box. Boxes
3181      *      with the same batch index are grouped together.
3182      */
3183     GENERATE_PROPOSALS = 52,
3184 
3185     /**
3186      * For input tensors x and y, computes x > y elementwise.
3187      *
3188      * Supported tensor {@link OperandType}:
3189      * * {@link OperandType::TENSOR_BOOL8}
3190      * * {@link OperandType::TENSOR_FLOAT16}
3191      * * {@link OperandType::TENSOR_FLOAT32}
3192      * * {@link OperandType::TENSOR_INT32}
3193      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3194      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3195      *
3196      * Supported tensor rank: from 1
3197      *
3198      * This operation supports broadcasting.
3199      *
3200      * Inputs:
3201      * * 0: A tensor.
3202      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3203      *      with input0.
3204      *
3205      * Outputs:
3206      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3207      */
3208     GREATER = 53,
3209     /**
3210      * For input tensors x and y, computes x >= y elementwise.
3211      *
3212      * Supported tensor {@link OperandType}:
3213      * * {@link OperandType::TENSOR_BOOL8}
3214      * * {@link OperandType::TENSOR_FLOAT16}
3215      * * {@link OperandType::TENSOR_FLOAT32}
3216      * * {@link OperandType::TENSOR_INT32}
3217      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3218      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3219      *
3220      * Supported tensor rank: from 1
3221      *
3222      * This operation supports broadcasting.
3223      *
3224      * Inputs:
3225      * * 0: A tensor.
3226      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3227      *      with input0.
3228      *
3229      * Outputs:
3230      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3231      */
3232     GREATER_EQUAL = 54,
3233 
3234     /**
3235      * Performs a grouped 2-D convolution operation.
3236      *
3237      * Given an input tensor of shape [batches, height, width, depth_in] and a
3238      * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
3239      * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
3240      * applies a group of different filters to each input channel group, then
3241      * concatenates the results together.
3242      *
3243      * Specifically, the input channels are divided into num_groups groups, each with
3244      * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
3245      * filters are also divided into num_groups groups, i.e. depth_out is divisible
3246      * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
3247      * input channel group, and the result are concatenated together.
3248      *
3249      * The output dimensions are functions of the filter dimensions, stride, and
3250      * padding.
3251      *
3252      * The values in the output tensor are computed as:
3253      *
3254      *     output[b, i, j, g * channel_multiplier + q] =
3255      *         sum_{di, dj, dk} (
3256      *             input[b, strides[1] * i + di, strides[2] * j + dj,
3257      *                   g * depth_group + dk] *
3258      *             filter[g * channel_multiplier + q, di, dj, dk]
3259      *         ) + bias[channel]
3260      *
3261      * where channel_multiplier = depth_out / num_groups
3262      *
3263      * Supported tensor {@link OperandType} configurations:
3264      * * 16 bit floating point:
3265      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
3266      *
3267      * * 32 bit floating point:
3268      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
3269      *
3270      * * Quantized:
3271      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
3272      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
3273      * * * input.scale * filter.scale).
3274      *
3275      * * Quantized signed (since HAL version 1.3):
3276      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
3277      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
3278      * * * input.scale * filter.scale).
3279      *
3280      * * Quantized with symmetric per channel quantization for the filter:
3281      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
3282      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3283      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
3284      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3285      *
3286      * * Quantized signed with filter symmetric per channel quantization
3287      *   (since HAL version 1.3):
3288      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
3289      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3290      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
3291      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3292      *
3293      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3294      * With the default data layout NHWC, the data is stored in the order of:
3295      * [batch, height, width, channels]. Alternatively, the data layout could
3296      * be NCHW, the data storage order of: [batch, channels, height, width].
3297      *
3298      * Both explicit padding and implicit padding are supported.
3299      *
3300      * Inputs (explicit padding):
3301      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3302      *      specifying the input, where depth_in = num_groups * depth_group.
3303      * * 1: A 4-D tensor, of shape
3304      *      [depth_out, filter_height, filter_width, depth_group], specifying
3305      *      the filter, where depth_out must be divisible by num_groups.  For
3306      *      tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
3307      *      the channel dimension (channelDim at
3308      *      {@link SymmPerChannelQuantParams}) must be set to 0.
3309      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3310      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
3311      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
3312      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
3313      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
3314      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
3315      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3316      *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3317      *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
3318      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3319      *      bias_scale[i] = input_scale * filter_scale[i].
3320      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
3321      *      the left, in the ‘width’ dimension.
3322      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
3323      *      the right, in the ‘width’ dimension.
3324      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
3325      *      the top, in the ‘height’ dimension.
3326      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
3327      *      the bottom, in the ‘height’ dimension.
3328      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
3329      *      walking through input in the ‘width’ dimension.
3330      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
3331      *      walking through input in the ‘height’ dimension.
3332      * * 9: An {@link OperandType::INT32} scalar, specifying the number of
3333      *      groups.
3334      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
3335      *       {@link FusedActivationFunc} values. Specifies the activation to
3336      *       invoke on the result.
3337      * * 11: An {@link OperandType::BOOL} scalar, set to true to specify
3338      *       NCHW data layout for input0 and output0. Set to false for NHWC.
3339      *
3340      * Inputs (implicit padding):
3341      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3342      *      specifying the input, where depth_in = num_groups * depth_group.
3343      * * 1: A 4-D tensor, of shape
3344      *      [depth_out, filter_height, filter_width, depth_group], specifying
3345      *      the filter, where depth_out must be divisible by num_groups.  For
3346      *      tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
3347      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
3348      *      must be set to 0.
3349      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3350      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
3351      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
3352      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
3353      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
3354      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
3355      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
3356      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3357      *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3358      *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
3359      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3360      *      bias_scale[i] = input_scale * filter_scale[i].
3361      * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
3362      *      padding scheme, has to be one of the
3363      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
3364      * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
3365      *      walking through input in the ‘width’ dimension.
3366      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
3367      *      walking through input in the ‘height’ dimension.
3368      * * 6: An {@link OperandType::INT32} scalar, specifying the number of
3369      *      groups.
3370      * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
3371      *      {@link FusedActivationFunc} values. Specifies the activation to
3372      *      invoke on the result.
3373      * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
3374      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3375      *
3376      * Outputs:
3377      * * 0: The output 4-D tensor, of shape
3378      *      [batches, out_height, out_width, depth_out].
3379      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3380      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3381      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3382      */
3383     GROUPED_CONV_2D = 55,
3384 
3385     /**
3386      * Localize the maximum keypoints from heatmaps.
3387      *
3388      * This operation approximates the accurate maximum keypoint scores and
3389      * indices after bicubic upscaling by using Taylor expansion up to the
3390      * quadratic term.
3391      *
3392      * The bounding box is represented by its upper-left corner coordinate
3393      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
3394      * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
3395      *
3396      * Supported tensor {@link OperandType}:
3397      * * {@link OperandType::TENSOR_FLOAT16}
3398      * * {@link OperandType::TENSOR_FLOAT32}
3399      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3400      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3401      *
3402      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3403      * With the default data layout NHWC, the data is stored in the order of:
3404      * [batch, height, width, channels]. Alternatively, the data layout could
3405      * be NCHW, the data storage order of: [batch, channels, height, width].
3406      *
3407      * Inputs:
3408      * * 0: A 4-D Tensor of shape
3409      *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
3410      *      specifying the heatmaps, the height and width of heatmaps should
3411      *      be the same, and must be greater than or equal to 2.
3412      * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
3413      *      each with format [x1, y1, x2, y2]. For input0 of type
3414      *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
3415      *      be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
3416      *      of 0 and scale of 0.125.
3417      *      For input0 of type
3418      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
3419      *      should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with
3420      *      zeroPoint of -128 and scale of 0.125.
3421      * * 2: An {@link OperandType::BOOL} scalar, set to true to specify
3422      *      NCHW data layout for input0. Set to false for NHWC.
3423      *
3424      * Outputs:
3425      * * 0: A tensor of the same {@link OperandType} as input0, with shape
3426      *      [num_boxes, num_keypoints], specifying score of the keypoints.
3427      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} or
3428      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3429      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
3430      * * 1: A tensor of the same {@link OperandType} as input1, with shape
3431      *      [num_boxes, num_keypoints, 2], specifying the location of
3432      *      the keypoints, the second dimension is organized as
3433      *      [keypoint_x, keypoint_y].
3434      *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
3435      *      scale must be 0.125 and the zero point must be 0.
3436      */
3437     HEATMAP_MAX_KEYPOINT = 56,
3438 
3439     /**
3440      * Applies instance normalization to the input tensor.
3441      *
3442      * The values in the output tensor are computed as:
3443      *
3444      *     output[b, h, w, c] =
3445      *         (input[b, h, w, c] - mean[b, c]) * gamma /
3446      *         sqrt(var[b, c] + epsilon) + beta
3447      *
3448      * Where the mean and variance are computed across the spatial dimensions:
3449      *
3450      *     mean[b, c] =
3451      *         sum_{h, w}(input[b, h, w, c]) / sum(1)
3452      *
3453      *     var[b, c] =
3454      *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
3455      *
3456      * Supported tensor {@link OperandType}:
3457      * * {@link OperandType::TENSOR_FLOAT16}
3458      * * {@link OperandType::TENSOR_FLOAT32}
3459      *
3460      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3461      * With the default data layout NHWC, the data is stored in the order of:
3462      * [batch, height, width, channels]. Alternatively, the data layout could
3463      * be NCHW, the data storage order of: [batch, channels, height, width].
3464      *
3465      * Inputs:
3466      * * 0: An n-D tensor, specifying the tensor to be normalized.
3467      * * 1: A scalar, specifying gamma, the scale applied to the normalized
3468      *      tensor. The scalar must be of {@link OperandType::FLOAT16} if
3469      *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
3470      *      {@link OperandType::FLOAT32} if input0 is of
3471      *      {@link OperandType::TENSOR_FLOAT32}.
3472      * * 2: A scalar, specifying beta, the offset applied to the normalized
3473      *      tensor. The scalar must be of {@link OperandType::FLOAT16} if
3474      *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
3475      *      {@link OperandType::FLOAT32} if input0 is of
3476      *      {@link OperandType::TENSOR_FLOAT32}.
3477      * * 3: A scalar, specifying epsilon, the small value added to variance to
3478      *      avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
3479      *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
3480      *      {@link OperandType::FLOAT32} if input0 is of
3481      *      {@link OperandType::TENSOR_FLOAT32}.
3482      * * 4: An {@link OperandType::BOOL} scalar, set to true to specify
3483      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3484      *
3485      * Outputs:
3486      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
3487      */
3488     INSTANCE_NORMALIZATION = 57,
3489 
3490     /**
3491      * For input tensors x and y, computes x < y elementwise.
3492      *
3493      * Supported tensor {@link OperandType}:
3494      * * {@link OperandType::TENSOR_BOOL8}
3495      * * {@link OperandType::TENSOR_FLOAT16}
3496      * * {@link OperandType::TENSOR_FLOAT32}
3497      * * {@link OperandType::TENSOR_INT32}
3498      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3499      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3500      *
3501      * Supported tensor rank: from 1
3502      *
3503      * This operation supports broadcasting.
3504      *
3505      * Inputs:
3506      * * 0: A tensor.
3507      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3508      *      with input0.
3509      *
3510      * Outputs:
3511      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3512      */
3513     LESS = 58,
3514 
3515     /**
3516      * For input tensors x and y, computes x <= y elementwise.
3517      *
3518      * Supported tensor {@link OperandType}:
3519      * * {@link OperandType::TENSOR_BOOL8}
3520      * * {@link OperandType::TENSOR_FLOAT16}
3521      * * {@link OperandType::TENSOR_FLOAT32}
3522      * * {@link OperandType::TENSOR_INT32}
3523      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3524      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3525      *
3526      * Supported tensor rank: from 1
3527      *
3528      * This operation supports broadcasting.
3529      *
3530      * Inputs:
3531      * * 0: A tensor.
3532      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3533      *      with input0.
3534      *
3535      * Outputs:
3536      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3537      */
3538     LESS_EQUAL = 59,
3539 
3540     /**
3541      * Computes natural logarithm of x element-wise.
3542      *
3543      * Supported tensor {@link OperandType}:
3544      * * {@link OperandType::TENSOR_FLOAT16}
3545      * * {@link OperandType::TENSOR_FLOAT32}
3546      *
3547      * Supported tensor rank: from 1.
3548      *
3549      * Inputs:
3550      * * 0: A tensor.
3551      *
3552      * Outputs:
3553      * * 0: The output tensor of same shape as input0.
3554      */
3555     LOG = 60,
3556 
3557     /**
3558      * Returns the truth value of x AND y element-wise.
3559      *
3560      * Supported tensor {@link OperandType}:
3561      * * {@link OperandType::TENSOR_BOOL8}
3562      *
3563      * Supported tensor rank: from 1
3564      *
3565      * This operation supports broadcasting.
3566      *
3567      * Inputs:
3568      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3569      * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
3570      *      compatible with input0.
3571      *
3572      * Outputs:
3573      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3574      */
3575     LOGICAL_AND = 61,
3576 
3577     /**
3578      * Computes the truth value of NOT x element-wise.
3579      *
3580      * Supported tensor {@link OperandType}:
3581      * * {@link OperandType::TENSOR_BOOL8}
3582      *
3583      * Supported tensor rank: from 1.
3584      *
3585      * Inputs:
3586      * * 0: A tensor.
3587      *
3588      * Outputs:
3589      * * 0: The output tensor of same shape as input0.
3590      */
3591     LOGICAL_NOT = 62,
3592 
3593     /**
3594      * Returns the truth value of x OR y element-wise.
3595      *
3596      * Supported tensor {@link OperandType}:
3597      * * {@link OperandType::TENSOR_BOOL8}
3598      *
3599      * Supported tensor rank: from 1
3600      *
3601      * This operation supports broadcasting.
3602      *
3603      * Inputs:
3604      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3605      * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
3606      *      compatible with input0.
3607      *
3608      * Outputs:
3609      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3610      */
3611     LOGICAL_OR = 63,
3612 
3613     /**
3614      * Computes the log softmax activations given logits.
3615      *
3616      * The output is calculated using this formula:
3617      *
3618      *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3619      *
3620      * Supported tensor {@link OperandType}:
3621      * * {@link OperandType::TENSOR_FLOAT16}
3622      * * {@link OperandType::TENSOR_FLOAT32}
3623      *
3624      * Supported tensor rank: from 1.
3625      *
3626      * Inputs:
3627      * * 0: A tensor specifying the input logits.
3628      * * 1: A scalar, specifying the positive scaling factor for the exponent,
3629      *      beta.
3630      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
3631      *      value must be of {@link OperandType::FLOAT16}.
3632      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
3633      *      value must be of {@link OperandType::FLOAT32}.
3634      * * 2: An {@link OperandType::INT32} scalar specifying the axis to
3635      *      reduce across. Negative index is used to specify axis from the
3636      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3637      *
3638      * Outputs:
3639      * * 0: The output tensor of the same {@link OperandType} and shape as
3640      *      input0.
3641      */
3642     LOG_SOFTMAX = 64,
3643 
3644     /**
3645      * Returns the element-wise maximum of two tensors.
3646      *
3647      * Supported tensor {@link OperandType}:
3648      * * {@link OperandType::TENSOR_FLOAT16}
3649      * * {@link OperandType::TENSOR_FLOAT32}
3650      * * {@link OperandType::TENSOR_INT32}
3651      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3652      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3653      *
3654      * Supported tensor rank: from 1.
3655      *
3656      * Inputs:
3657      * * 0: A tensor.
3658      * * 1: A tensor of the same {@link OperandType} and compatible dimensions
3659      *      with input0.
3660      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3661      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
3662      *
3663      * Outputs:
3664      * * 0: A tensor of the same {@link OperandType} as input0.
3665      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3666      *      {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3667      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3668      */
3669     MAXIMUM = 65,
3670 
3671     /**
3672      * Returns the element-wise minimum of two tensors.
3673      *
3674      * Supported tensor {@link OperandType}:
3675      * * {@link OperandType::TENSOR_FLOAT16}
3676      * * {@link OperandType::TENSOR_FLOAT32}
3677      * * {@link OperandType::TENSOR_INT32}
3678      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3679      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3680      *
3681      * Supported tensor rank: from 1.
3682      *
3683      * Inputs:
3684      * * 0: A tensor.
3685      * * 1: A tensor of the same {@link OperandType} and compatible dimensions
3686      *      with input0.
3687      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3688      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
3689      *
3690      * Outputs:
3691      * * 0: A tensor of the same {@link OperandType} as input0.
3692      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3693      *      {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3694      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3695      */
3696     MINIMUM = 66,
3697 
3698     /**
3699      * Computes numerical negative value element-wise.
3700      *
3701      * Supported tensor {@link OperandType}:
3702      * * {@link OperandType::TENSOR_FLOAT16}
3703      * * {@link OperandType::TENSOR_FLOAT32}
3704      * * {@link OperandType::TENSOR_INT32}
3705      *
3706      * Supported tensor rank: from 1.
3707      *
3708      * Inputs:
3709      * * 0: A tensor.
3710      *
3711      * Outputs:
3712      * * 0: The output tensor of same shape as input0.
3713      */
3714     NEG = 67,
3715 
3716     /**
3717      * For input tensors x and y, computes x != y elementwise.
3718      *
3719      * Supported tensor {@link OperandType}:
3720      * * {@link OperandType::TENSOR_BOOL8}
3721      * * {@link OperandType::TENSOR_FLOAT16}
3722      * * {@link OperandType::TENSOR_FLOAT32}
3723      * * {@link OperandType::TENSOR_INT32}
3724      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3725      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3726      *
3727      * Supported tensor rank: from 1
3728      *
3729      * This operation supports broadcasting.
3730      *
3731      * Inputs:
3732      * * 0: A tensor.
3733      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3734      *      with input0.
3735      *
3736      * Outputs:
3737      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3738      */
3739     NOT_EQUAL = 68,
3740 
3741     /**
3742      * Pads a tensor with the given constant value according to the specified
3743      * paddings.
3744      *
3745      * Supported tensor {@link OperandType}:
3746      * * {@link OperandType::TENSOR_FLOAT16}
3747      * * {@link OperandType::TENSOR_FLOAT32}
3748      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3749      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3750      *
3751      * Supported tensor rank: up to 4
3752      *
3753      * Inputs:
3754      * * 0: An n-D tensor, specifying the tensor to be padded.
3755      * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
3756      *      for each spatial dimension of the input tensor. The shape of the
3757      *      tensor must be {rank(input0), 2}.
3758      *      padding[i, 0] specifies the number of elements to be padded in the
3759      *      front of dimension i.
3760      *      padding[i, 1] specifies the number of elements to be padded after
3761      *      the end of dimension i.
3762      * * 2: A scalar specifying the value to use for padding input0.
3763      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
3764      *      pad value must be of {@link OperandType::FLOAT16}.
3765      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
3766      *      pad value must be of {@link OperandType::FLOAT32}.
3767      *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
3768      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
3769      *      the pad value must be of {@link OperandType::INT32}. The
3770      *      scale and zeroPoint are assumed to be the same as in input0.
3771      *
3772      * Outputs:
3773      * * 0: A tensor of the same {@link OperandType} as input0. The
3774      *      output tensor has the same rank as input0, and each
3775      *      dimension of the output tensor has the same size as the
3776      *      corresponding dimension of the input tensor plus the size
3777      *      of the padding:
3778      *          output0.dimension[i] =
3779      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
3780      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3781      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3782      *      the scale and zeroPoint must be the same as input0.
3783      */
3784     PAD_V2 = 69,
3785 
3786     /**
3787      * Computes the power of one value to another.
3788      *
3789      * Given a tensor base and a tensor exponent, this operation computes
3790      * base^exponent elementwise.
3791      *
3792      * This operations supports broadcasting. The size of the output is the
3793      * maximum size along each dimension of the input operands. It starts with
3794      * the trailing dimensions, and works its way forward.
3795      *
3796      * For example:
3797      *     base.dimension     =    {4, 1, 2}
3798      *     exponent.dimension = {5, 4, 3, 1}
3799      *     output.dimension   = {5, 4, 3, 2}
3800      *
3801      * Supported tensor {@link OperandType}:
3802      * * {@link OperandType::TENSOR_FLOAT16}
3803      * * {@link OperandType::TENSOR_FLOAT32}
3804      *
3805      * Supported tensor rank: from 1
3806      *
3807      * Inputs:
3808      * * 0: A tensor specifying the base.
3809      * * 1: A tensor specifying the exponent.
3810      *
3811      * Outputs:
3812      * * 0: An output tensor.
3813      */
3814     POW = 70,
3815 
3816     /**
3817      * Parametric Rectified Linear Unit.
3818      *
3819      * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
3820      * is a learned array with the same {@link OperandType} and compatible
3821      * dimensions as input x.
3822      *
3823      * Two dimensions are compatible when:
3824      *     1. they are equal, or
3825      *     2. one of them is 1
3826      *
3827      * The size of the output is the maximum size along each dimension of the
3828      * input operands. It starts with the trailing dimensions, and works its way
3829      * forward.
3830      *
3831      * Example:
3832      *     input.dimension  =    {4, 1, 2}
3833      *     alpha.dimension  = {5, 4, 3, 1}
3834      *     output.dimension = {5, 4, 3, 2}
3835      *
3836      * Supported tensor {@link OperandType}:
3837      * * {@link OperandType::TENSOR_FLOAT16}
3838      * * {@link OperandType::TENSOR_FLOAT32}
3839      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3840      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3841      *
3842      * Supported tensor rank: from 1
3843      *
3844      * Inputs:
3845      * * 0: A tensor, specifying the input.
3846      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
3847      *      as input0, specifying the alpha.
3848      *
3849      * Outputs:
3850      * * 0: A tensor of the same {@link OperandType} as input0.
3851      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3852      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3853      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
3854      */
3855     PRELU = 71,
3856 
3857     /**
3858      * Quantizes the input tensor.
3859      *
3860      * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
3861      *
3862      *     output = max(0, min(255, round(input / scale) + zeroPoint)
3863      *
3864      * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} output
3865      * tensor is:
3866      *
3867      *     output = max(-128, min(127, round(input / scale) + zeroPoint)
3868      *
3869      * Supported input tensor {@link OperandType}:
3870      * * {@link OperandType::TENSOR_FLOAT16}
3871      * * {@link OperandType::TENSOR_FLOAT32}
3872      *
3873      * Supported output tensor {@link OperandType}:
3874      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3875      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3876      *
3877      * Supported tensor rank: from 1
3878      *
3879      * Inputs:
3880      * * 0: A tensor, may be zero-sized.
3881      *
3882      * Outputs:
3883      * * 0: The output tensor of same shape as input0, but with
3884      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or.
3885      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}.
3886      */
3887     QUANTIZE = 72,
3888 
3889     /**
3890      * A version of quantized LSTM, using 16 bit quantization for internal
3891      * state.
3892      *
3893      * There is no projection layer, so cell state size is equal to the output
3894      * size.
3895      *
3896      * Inputs:
3897      * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3898      *      and shape [numBatches, inputSize] specifying the input to the LSTM
3899      *      cell. Tensor is quantized with a fixed quantization range of
3900      *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
3901      * * 1: The input-to-input weights.
3902      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3903      *      and shape [outputSize, inputSize] specifying input-to-input part of
3904      *      weights for fully-connected layer inside the LSTM cell.
3905      *      Quantization zero point and scale must be the same across all the
3906      *      weights.
3907      * * 2: The input-to-forget weights.
3908      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3909      *      and shape [outputSize, inputSize] specifying input-to-forget part of
3910      *      weights for fully-connected layer inside the LSTM cell.
3911      *      Quantization zero point and scale must be the same across all the
3912      *      weights.
3913      * * 3: The input-to-cell weights.
3914      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3915      *      and shape [outputSize, inputSize] specifying input-to-cell part of
3916      *      weights for fully-connected layer inside the LSTM cell.
3917      *      Quantization zero point and scale must be the same across all the
3918      *      weights.
3919      * * 4: The input-to-output weights.
3920      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3921      *      and shape [outputSize, inputSize] specifying input-to-output part of
3922      *      weights for fully-connected layer inside the LSTM cell.
3923      *      Quantization zero point and scale must be the same across all the
3924      *      weights.
3925      * * 5: The recurrent-to-input weights.
3926      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3927      *      and shape [outputSize, outputSize] specifying recurrent-to-input part
3928      *      of weights for fully-connected layer inside the LSTM cell.
3929      *      Quantization zero point and scale must be the same across all the
3930      *      weights.
3931      * * 6: The recurrent-to-forget weights.
3932      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3933      *      and shape [outputSize, outputSize] specifying recurrent-to-forget
3934      *      part of weights for fully-connected layer inside the LSTM cell.
3935      *      Quantization zero point and scale must be the same across all the
3936      *      weights.
3937      * * 7: The recurrent-to-cell weights.
3938      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3939      *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
3940      *      of weights for fully-connected layer inside the LSTM cell.
3941      *      Quantization zero point and scale must be the same across all the
3942      *      weights.
3943      * * 8: The recurrent-to-output weights.
3944      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3945      *      and shape [outputSize, outputSize] specifying recurrent-to-output
3946      *      part of weights for fully-connected layer inside the LSTM cell.
3947      *      Quantization zero point and scale must be the same across all the
3948      *      weights.
3949      * * 9: The input gate bias.
3950      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3951      *      [outputSize] specifying the bias for the fully-connected layer
3952      *      inside the LSTM cell. Bias is quantized with scale being a product
3953      *      of input and weights scales and zeroPoint equal to 0.
3954      * * 10:The forget gate bias.
3955      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3956      *      [outputSize] specifying the bias for the fully-connected layer
3957      *      inside the LSTM cell. Bias is quantized with scale being a product
3958      *      of input and weights scales and zeroPoint equal to 0.
3959      * * 11:The cell bias.
3960      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3961      *      [outputSize] specifying the bias for the fully-connected layer
3962      *      inside the LSTM cell. Bias is quantized with scale being a product
3963      *      of input and weights scales and zeroPoint equal to 0.
3964      * * 12:The output gate bias.
3965      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3966      *      [outputSize] specifying the bias for the fully-connected layer
3967      *      inside the LSTM cell. Bias is quantized with scale being a product
3968      *      of input and weights scales and zeroPoint equal to 0.
3969      * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
3970      *       and shape [numBatches, outputSize] specifying the cell state from the
3971      *       previous time step of the LSTM cell. It is quantized using a
3972      *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
3973      *       32768, zeroPoint = 0).
3974      * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3975      *       and shape [numBathes, outputSize] specifying the output of the LSTM
3976      *       cell from previous time-step. Tensor is quantized with a fixed
3977      *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
3978      *       128).
3979      *
3980      *
3981      * Outputs:
3982      * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
3983      *      and shape [numBatches, outputSize] which contains a cell state from
3984      *      the current time step. Tensor is quantized using a quantization
3985      *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
3986      *      0).
3987      * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3988      *      and shape [numBathes, outputSize] which contains the output value.
3989      *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
3990      *      (scale = 1/128, zeroPoint = 128).
3991      */
3992     QUANTIZED_16BIT_LSTM = 73,
3993 
3994     /**
3995      * Draws samples from a multinomial distribution.
3996      *
3997      * Supported tensor {@link OperandType}:
3998      * * {@link OperandType::TENSOR_FLOAT16}
3999      * * {@link OperandType::TENSOR_FLOAT32}
4000      *
4001      * Inputs:
4002      * * 0: A 2-D tensor with shape [batches, classes], specifying the
4003      *      unnormalized log-probabilities for all classes.
4004      * * 1: A scalar {@link OperandType::INT32}, specifying the number of
4005      *      independent samples to draw for each row slice.
4006      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
4007      *      specifying seeds used to initialize the random distribution. If both
4008      *      provided seeds are 0, both will be randomly generated.
4009      * Outputs:
4010      * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
4011      *      [batches, samples], containing the drawn samples.
4012      */
4013     RANDOM_MULTINOMIAL = 74,
4014 
4015     /**
4016      * Reduces a tensor by computing the "logical and" of elements along given
4017      * dimensions.
4018      *
4019      * If keep_dims is true, the reduced dimensions are
4020      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4021      * 1 for each entry in dimensions.
4022      *
4023      * Supported tensor {@link OperandType}:
4024      * * {@link OperandType::TENSOR_BOOL8}
4025      *
4026      * Supported tensor rank: up to 4
4027      *
4028      * Inputs:
4029      * * 0: An n-D tensor.
4030      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4031      *      to reduce. Dimension values must be in the range [-n, n).
4032      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4033      *      retains reduced dimensions with length 1.
4034      *
4035      * Outputs:
4036      * * 0: A tensor of the same {@link OperandType} as input0.
4037      *      If all dimensions are reduced and keep_dims is false, the output
4038      *      shape is [1].
4039      */
4040     REDUCE_ALL = 75,
4041 
4042     /**
4043      * Reduces a tensor by computing the "logical or" of elements along given
4044      * dimensions.
4045      *
4046      * If keep_dims is true, the reduced dimensions are
4047      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4048      * 1 for each entry in dimensions.
4049      *
4050      * Supported tensor {@link OperandType}:
4051      * * {@link OperandType::TENSOR_BOOL8}
4052      *
4053      * Supported tensor rank: up to 4
4054      *
4055      * Inputs:
4056      * * 0: An n-D tensor.
4057      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4058      *      to reduce. Dimension values must be in the range [-n, n).
4059      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4060      *      retains reduced dimensions with length 1.
4061      *
4062      * Outputs:
4063      * * 0: A tensor of the same {@link OperandType} as input0.
4064      *      If all dimensions are reduced and keep_dims is false, the output
4065      *      shape is [1].
4066      */
4067     REDUCE_ANY = 76,
4068 
4069     /**
4070      * Reduces a tensor by computing the maximum of elements along given
4071      * dimensions.
4072      *
4073      * If keep_dims is true, the reduced dimensions are
4074      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4075      * 1 for each entry in dimensions.
4076      *
4077      * Supported tensor {@link OperandType}:
4078      * * {@link OperandType::TENSOR_FLOAT16}
4079      * * {@link OperandType::TENSOR_FLOAT32}
4080      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4081      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4082      *
4083      * Supported tensor rank: up to 4
4084      *
4085      * Inputs:
4086      * * 0: An n-D tensor.
4087      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4088      *      to reduce. Dimension values must be in the range [-n, n).
4089      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4090      *      retains reduced dimensions with length 1.
4091      *
4092      * Outputs:
4093      * * 0: A tensor of the same {@link OperandType} as input0.
4094      *      If all dimensions are reduced and keep_dims is false, the output
4095      *      shape is [1].
4096      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4097      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4098      *      the scale and zeroPoint must be the same as input0.
4099      */
4100     REDUCE_MAX = 77,
4101 
4102     /**
4103      * Reduces a tensor by computing the minimum of elements along given
4104      * dimensions.
4105      *
4106      * If keep_dims is true, the reduced dimensions are
4107      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4108      * 1 for each entry in dimensions.
4109      *
4110      * Supported tensor {@link OperandType}:
4111      * * {@link OperandType::TENSOR_FLOAT16}
4112      * * {@link OperandType::TENSOR_FLOAT32}
4113      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4114      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4115      *
4116      * Supported tensor rank: up to 4
4117      *
4118      * Inputs:
4119      * * 0: An n-D tensor.
4120      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4121      *      to reduce. Dimension values must be in the range [-n, n).
4122      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4123      *      retains reduced dimensions with length 1.
4124      *
4125      * Outputs:
4126      * * 0: A tensor of the same {@link OperandType} as input0.
4127      *      If all dimensions are reduced and keep_dims is false, the output
4128      *      shape is [1].
4129      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4130      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4131      *      the scale and zeroPoint must be the same as input0.
4132      */
4133     REDUCE_MIN = 78,
4134 
4135     /**
4136      * Reduces a tensor by multiplying elements along given dimensions.
4137      *
4138      * If keep_dims is true, the reduced dimensions are
4139      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4140      * 1 for each entry in dimensions.
4141      *
4142      * Supported tensor {@link OperandType}:
4143      * * {@link OperandType::TENSOR_FLOAT16}
4144      * * {@link OperandType::TENSOR_FLOAT32}
4145      *
4146      * Supported tensor rank: up to 4
4147      *
4148      * Inputs:
4149      * * 0: An n-D tensor.
4150      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4151      *      to reduce. Dimension values must be in the range [-n, n).
4152      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4153      *      retains reduced dimensions with length 1.
4154      *
4155      * Outputs:
4156      * * 0: A tensor of the same {@link OperandType} as input0.
4157      *      If all dimensions are reduced and keep_dims is false, the output
4158      *      shape is [1].
4159      */
4160     REDUCE_PROD = 79,
4161 
4162     /**
4163      * Reduces a tensor by summing elements along given dimensions.
4164      *
4165      * If keep_dims is true, the reduced dimensions are
4166      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4167      * 1 for each entry in dimensions.
4168      *
4169      * Supported tensor {@link OperandType}:
4170      * * {@link OperandType::TENSOR_FLOAT16}
4171      * * {@link OperandType::TENSOR_FLOAT32}
4172      *
4173      * Supported tensor rank: up to 4
4174      *
4175      * Inputs:
4176      * * 0: An n-D tensor.
4177      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4178      *      to reduce. Dimension values must be in the range [-n, n).
4179      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4180      *      retains reduced dimensions with length 1.
4181      *
4182      * Outputs:
4183      * * 0: A tensor of the same {@link OperandType} as input0.
4184      *      If all dimensions are reduced and keep_dims is false, the output
4185      *      shape is [1].
4186      */
4187     REDUCE_SUM = 80,
4188 
4189     /**
4190      * Select and scale the feature map of each region of interest to a unified
4191      * output size by average pooling sampling points from bilinear interpolation.
4192      *
4193      * The region of interest is represented by its upper-left corner coordinate
4194      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4195      * A spatial scaling factor is applied to map into feature map coordinate.
4196      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4197      *
4198      * No rounding is applied in this operation. The sampling points are unified
4199      * distributed in the pooling bin and their values are calculated by bilinear
4200      * interpolation.
4201      *
4202      * Supported tensor {@link OperandType}:
4203      * * {@link OperandType::TENSOR_FLOAT16}
4204      * * {@link OperandType::TENSOR_FLOAT32}
4205      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4206      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4207      *
4208      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4209      * With the default data layout NHWC, the data is stored in the order of:
4210      * [batch, height, width, channels]. Alternatively, the data layout could
4211      * be NCHW, the data storage order of: [batch, channels, height, width].
4212      *
4213      * Inputs:
4214      * * 0: A 4-D tensor, specifying the feature map.
4215      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4216      *      the regions of interest, each line with format [x1, y1, x2, y2].
4217      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
4218      *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
4219      *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
4220      *      supported for this tensor.
4221      * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
4222      *      [num_rois], specifying the batch index of each box. Boxes with
4223      *      the same batch index are grouped together. Zero num_rois is
4224      *      supported for this tensor.
4225      * * 3: An {@link OperandType::INT32} scalar, specifying the output
4226      *      height of the output tensor.
4227      * * 4: An {@link OperandType::INT32} scalar, specifying the output
4228      *      width of the output tensor.
4229      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4230      *      from the height of original image to the height of feature map.
4231      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4232      *      from the width of original image to the width of feature map.
4233      * * 7: An {@link OperandType::INT32} scalar, specifying the number of
4234      *      sampling points in height dimension used to compute the output.
4235      *      Set to 0 for adaptive value of ceil(roi_height/out_height).
4236      * * 8: An {@link OperandType::INT32} scalar, specifying the number of
4237      *      sampling points in width dimension used to compute the output.
4238      *      Set to 0 for adaptive value of ceil(roi_width/out_width).
4239      * * 9: An {@link OperandType::BOOL} scalar, set to true to specify
4240      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4241      *
4242      * Outputs:
4243      * * 0: A tensor of the same {@link OperandType} as input0. The output
4244      *      shape is [num_rois, out_height, out_width, depth].
4245      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4246      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4247      *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4248      */
4249     ROI_ALIGN = 81,
4250 
4251     /**
4252      * Select and scale the feature map of each region of interest to a unified
4253      * output size by max-pooling.
4254      *
4255      * The region of interest is represented by its upper-left corner coordinate
4256      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4257      * A spatial scaling factor is applied to map into feature map coordinate.
4258      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4259      *
4260      * Rounding is applied in this operation to ensure integer boundary for
4261      * regions of interest and pooling bins.
4262      *
4263      * Supported tensor {@link OperandType}:
4264      * * {@link OperandType::TENSOR_FLOAT16}
4265      * * {@link OperandType::TENSOR_FLOAT32}
4266      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4267      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4268      *
4269      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4270      * With the default data layout NHWC, the data is stored in the order of:
4271      * [batch, height, width, channels]. Alternatively, the data layout could
4272      * be NCHW, the data storage order of: [batch, channels, height, width].
4273      *
4274      * Inputs:
4275      * * 0: A 4-D tensor, specifying the feature map.
4276      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4277      *      the regions of interest, each line with format [x1, y1, x2, y2].
4278      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} and
4279      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4280      *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
4281      *      with zeroPoint of 0 and scale of 0.125.
4282      * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
4283      *      [num_rois], specifying the batch index of each box. Boxes with
4284      *      the same batch index are grouped together.
4285      * * 3: An {@link OperandType::INT32} scalar, specifying the output
4286      *      height of the output tensor.
4287      * * 4: An {@link OperandType::INT32} scalar, specifying the output
4288      *      width of the output tensor.
4289      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4290      *      from the height of original image to the height of feature map.
4291      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4292      *      from the width of original image to the width of feature map.
4293      * * 7: An {@link OperandType::BOOL} scalar, set to true to specify
4294      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4295      *
4296      * Outputs:
4297      * * 0: A tensor of the same {@link OperandType} as input0. The output
4298      *      shape is [num_rois, out_height, out_width, depth].
4299      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} and
4300      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4301      *      the scale and zeroPoint must be the same as input0.
4302      */
4303     ROI_POOLING = 82,
4304 
4305     /**
4306      * Computes reciprocal of square root of x element-wise.
4307      *
4308      * Supported tensor {@link OperandType}:
4309      * * {@link OperandType::TENSOR_FLOAT16}
4310      * * {@link OperandType::TENSOR_FLOAT32}
4311      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since NNAPI feature level 7)
4312      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 7)
4313      *
4314      * Supported tensor rank: from 1.
4315      *
4316      * Inputs:
4317      * * 0: A tensor.
4318      *
4319      * Outputs:
4320      * * 0: The output tensor of same shape as input0.
4321      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4322      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4323      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4324      */
4325     RSQRT = 83,
4326 
4327     /**
4328      * Using a tensor of booleans c and input tensors x and y select values
4329      * elementwise from both input tensors:
4330      *
4331      * O[i] = C[i] ? x[i] : y[i].
4332      *
4333      * Supported tensor {@link OperandType}:
4334      * * {@link OperandType::TENSOR_FLOAT16}
4335      * * {@link OperandType::TENSOR_FLOAT32}
4336      * * {@link OperandType::TENSOR_INT32}
4337      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4338      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4339      *
4340      * Supported tensor rank: from 1
4341      *
4342      * Inputs:
4343      * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a
4344      *      mask that chooses, based on the value at each element, whether the
4345      *      corresponding element in the output should be taken from input1 (if
4346      *      true) or input2 (if false).
4347      * * 1: An input tensor of the same shape as input0.
4348      * * 2: An input tensor of the same shape and type as input1.
4349      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM}
4350      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4351      *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
4352      *
4353      * Outputs:
4354      * * 0: A tensor of the same type and shape as input1 and input2.
4355      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
4356      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4357      */
4358     SELECT = 84,
4359 
4360     /**
4361      * Computes sin of x element-wise.
4362      *
4363      * Supported tensor {@link OperandType}:
4364      * * {@link OperandType::TENSOR_FLOAT16}
4365      * * {@link OperandType::TENSOR_FLOAT32}
4366      *
4367      * Supported tensor rank: from 1.
4368      *
4369      * Inputs:
4370      * * 0: A tensor.
4371      *
4372      * Outputs:
4373      * * 0: The output tensor of same shape as input0.
4374      */
4375     SIN = 85,
4376 
4377     /**
4378      * Extracts a slice of specified size from the input tensor starting at a
4379      * specified location.
4380      *
4381      * The starting location is specified as a 1-D tensor containing offsets
4382      * for each dimension. The size is specified as a 1-D tensor containing
4383      * either size of a slice along corresponding dimension or -1. In the latter
4384      * case, all the remaining elements in dimension are included in the slice.
4385      *
4386      * A sum of begin offset and a size of a slice must not exceed size of a
4387      * corresponding dimension.
4388      *
4389      * Supported tensor {@link OperandType}:
4390      * * {@link OperandType::TENSOR_FLOAT16}
4391      * * {@link OperandType::TENSOR_FLOAT32}
4392      * * {@link OperandType::TENSOR_INT32}
4393      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4394      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4395      *
4396      * Supported tensor rank: from 1
4397      *
4398      * Inputs:
4399      * * 0: An n-D tensor to take slice from, may be zero-sized.
4400      * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
4401      *      the beginning indices of the slice in each dimension.
4402      * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
4403      *      the size of the slice in each dimension.
4404      *
4405      * Outputs:
4406      * * 0: An n-D tensor of the same type as the input containing the slice.
4407      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4408      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4409      *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
4410      */
4411     SLICE = 86,
4412 
4413     /**
4414      * Splits a tensor along a given axis into num_splits subtensors.
4415      *
4416      * Supported tensor {@link OperandType}:
4417      * * {@link OperandType::TENSOR_FLOAT16}
4418      * * {@link OperandType::TENSOR_FLOAT32}
4419      * * {@link OperandType::TENSOR_INT32}
4420      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4421      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4422      *
4423      * Supported tensor rank: from 1
4424      *
4425      * Inputs:
4426      * * 0: An n-D tensor to split.
4427      * * 1: An {@link OperandType::INT32} scalar specifying the axis along
4428      *      which to split.
4429      * * 2: An {@link OperandType::INT32} scalar indicating the number of
4430      *      splits along given axis. Must evenly divide axis size.
4431      *
4432      * Outputs:
4433      * * 0 ~ (num_splits - 1): Resulting subtensors.
4434      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4435      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4436      *      the scale and zeroPoint must be the same as input0.
4437      */
4438     SPLIT = 87,
4439 
4440     /**
4441      * Computes square root of x element-wise.
4442      *
4443      * Supported tensor {@link OperandType}:
4444      * * {@link OperandType::TENSOR_FLOAT16}
4445      * * {@link OperandType::TENSOR_FLOAT32}
4446      *
4447      * Supported tensor rank: from 1.
4448      *
4449      * Inputs:
4450      * * 0: A tensor.
4451      *
4452      * Outputs:
4453      * * 0: The output tensor of same shape as input0.
4454      */
4455     SQRT = 88,
4456 
4457     /**
4458      * Constructs a tensor by tiling a given tensor.
4459      *
4460      * This operation creates a new tensor by replicating `input` `multiples`
4461      * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4462      * elements, and the values of `input` are replicated `multiples[i]` times
4463      * along the i-th dimension.
4464      * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
4465      *
4466      * Supported tensor {@link OperandType}:
4467      * * {@link OperandType::TENSOR_FLOAT16}
4468      * * {@link OperandType::TENSOR_FLOAT32}
4469      * * {@link OperandType::TENSOR_INT32}
4470      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4471      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4472      *
4473      * Supported tensor rank: from 1
4474      *
4475      * Inputs:
4476      * * 0: input, an n-D tensor specifying the input.
4477      * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}.
4478      *      The length of multiples must be n.
4479      *
4480      * Outputs:
4481      * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
4482      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4483      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4484      *      the scale and zeroPoint must be the same as input0.
4485      */
4486     TILE = 89,
4487 
4488     /**
4489      * Finds values and indices of the k largest entries for the last dimension.
4490      *
4491      * Resulting values in each dimensions are sorted in descending order. If
4492      * two values are equal, the one with larger index appears first.
4493      *
4494      * Supported tensor {@link OperandType}:
4495      * * {@link OperandType::TENSOR_FLOAT16}
4496      * * {@link OperandType::TENSOR_FLOAT32}
4497      * * {@link OperandType::TENSOR_INT32}
4498      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4499      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4500      *
4501      * Supported tensor rank: from 1
4502      *
4503      * Inputs:
4504      * * 0: input, an n-D tensor specifying the input.
4505      * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of
4506      *      top elements to look for along the last dimension.
4507      *
4508      * Outputs:
4509      * * 0: An n-D tensor of the same type as the input, containing the k
4510      *      largest elements along each last dimensional slice.
4511      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4512      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4513      *      the scale and zeroPoint must be the same as input0.
4514      * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
4515      *      containing the indices of values within the last dimension of input.
4516      */
4517     TOPK_V2 = 90,
4518 
4519     /**
4520      * Performs the transpose of 2-D convolution operation.
4521      *
4522      * This operation is sometimes called "deconvolution" after Deconvolutional
4523      * Networks, but is actually the transpose (gradient) of
4524      * {@link OperandType::CONV_2D} rather than an actual deconvolution.
4525      *
4526      * The output dimensions are functions of the filter dimensions, stride, and
4527      * padding.
4528      *
4529      * Supported tensor {@link OperandType} configurations:
4530      * * 16 bit floating point:
4531      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
4532      *
4533      * * 32 bit floating point:
4534      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
4535      *
4536      * * Quantized:
4537      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
4538      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
4539      * * * input.scale * filter.scale).
4540      *
4541      * * Quantized with symmetric per channel quantization for the filter:
4542      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
4543      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4544      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
4545      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4546      *
4547      * Available since HAL version 1.3:
4548      * * Quantized signed (since HAL version 1.3):
4549      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4550      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
4551      * * * input.scale * filter.scale).
4552      *
4553      * * Quantized signed with filter symmetric per channel quantization
4554      *   (since HAL version 1.3):
4555      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4556      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4557      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
4558      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4559      *
4560      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4561      * With the default data layout NHWC, the data is stored in the order of:
4562      * [batch, height, width, channels]. Alternatively, the data layout could
4563      * be NCHW, the data storage order of: [batch, channels, height, width].
4564      *
4565      * Both explicit padding and implicit padding are supported.
4566      *
4567      * Inputs (explicit padding):
4568      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4569      *      specifying the input.
4570      * * 1: A 4-D tensor, of shape
4571      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4572      *      filter. For tensor of type
4573      *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4574      *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
4575      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4576      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
4577      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the
4578      *      same type.
4579      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
4580      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
4581      *      the bias should be of {@link OperandType::TENSOR_INT32},
4582      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4583      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
4584      *      the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
4585      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4586      *      bias_scale[i] = input_scale * filter_scale[i].
4587      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
4588      *      the left, in the ‘width’ dimension.
4589      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
4590      *      the right, in the ‘width’ dimension.
4591      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
4592      *      the top, in the ‘height’ dimension.
4593      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
4594      *      the bottom, in the ‘height’ dimension.
4595      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
4596      *      walking through input in the ‘width’ dimension.
4597      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
4598      *      walking through input in the ‘height’ dimension.
4599      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
4600      *      {@link FusedActivationFunc} values. Specifies the activation to
4601      *      invoke on the result.
4602      * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
4603      *       NCHW data layout for input0 and output0. Set to false for NHWC.
4604      *
4605      * Inputs (implicit padding):
4606      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4607      *      specifying the input.
4608      * * 1: A 4-D tensor, of shape
4609      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4610      *      filter. For tensor of type
4611      *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4612      *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
4613      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4614      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
4615      *      {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
4616      *      same type.
4617      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
4618      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
4619      *      the bias should be of {@link OperandType::TENSOR_INT32},
4620      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4621      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
4622      *      the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
4623      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4624      *      bias_scale[i] = input_scale * filter_scale[i].
4625      * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
4626      *      tensor shape.
4627      * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
4628      *      padding scheme, has to be one of the
4629      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
4630      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
4631      *      walking through input in the ‘width’ dimension.
4632      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
4633      *      walking through input in the ‘height’ dimension.
4634      * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
4635      *      {@link FusedActivationFunc} values. Specifies the activation to
4636      *      invoke on the result.
4637      * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
4638      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4639      *
4640      * Outputs:
4641      * * 0: The output 4-D tensor, of shape
4642      *      [batches, out_height, out_width, depth_out].
4643      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4644      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4645      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4646      */
4647     TRANSPOSE_CONV_2D = 91,
4648 
4649     /**
4650      * A recurrent neural network specified by an LSTM cell.
4651      *
4652      * Performs (fully) dynamic unrolling of input.
4653      *
4654      * This Op unrolls the input along the time dimension, and implements the
4655      * following operation for each element in the sequence
4656      * s = 1...sequence_length:
4657      *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
4658      *
4659      * Where LSTMOp is the LSTM op as in {@link OperandType::LSTM},
4660      * the "projection" is an optional projection layer from state and output
4661      * and the “activation” is the function passed as the
4662      * “fused_activation_function” argument (if not “NONE”).
4663      *
4664      * Supported tensor {@link OperandType}:
4665      * * {@link OperandType::TENSOR_FLOAT16}
4666      * * {@link OperandType::TENSOR_FLOAT32}
4667      *
4668      * Supported tensor rank: 3, either time-major or batch-major.
4669      *
4670      * All input and output tensors must be of the same type.
4671      *
4672      * Inputs:
4673      * * 0: The input (\f$x_t\f$).
4674      *      A 3-D tensor of shape:
4675      *        If time-major: [max_time, batch_size, input_size]
4676      *        If batch-major: [batch_size, max_time, input_size]
4677      *      where “max_time” is the number of timesteps (sequence length),
4678      *      “batch_size” corresponds to the batching dimension, and
4679      *      “input_size” is the size of the input.
4680      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
4681      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
4682      *      corresponds to the number of cell units.
4683      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
4684      *      A 2-D tensor of shape [num_units, input_size].
4685      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
4686      *      A 2-D tensor of shape [num_units, input_size].
4687      * * 4: The input-to-output weights (\f$W_{xo}\f$).
4688      *      A 2-D tensor of shape [num_units, input_size].
4689      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
4690      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
4691      *      corresponds to either the number of cell units (i.e., “num_units”),
4692      *      or the second dimension of the “projection_weights”, if defined.
4693      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
4694      *      A 2-D tensor of shape [num_units, output_size].
4695      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
4696      *      A 2-D tensor of shape [num_units, output_size].
4697      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
4698      *      A 2-D tensor of shape [num_units, output_size].
4699      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
4700      *      A 1-D tensor of shape [num_units].
4701      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
4702      *      A 1-D tensor of shape [num_units].
4703      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
4704      *      A 1-D tensor of shape [num_units].
4705      * * 12:The input gate bias (\f$b_i\f$). Optional.
4706      *      A 1-D tensor of shape [num_units].
4707      * * 13:The forget gate bias (\f$b_f\f$).
4708      *      A 1-D tensor of shape [num_units].
4709      * * 14:The cell bias (\f$b_c\f$).
4710      *      A 1-D tensor of shape [num_units].
4711      * * 15:The output gate bias (\f$b_o\f$).
4712      *      A 1-D tensor of shape [num_units].
4713      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
4714      *      A 2-D tensor of shape [output_size, num_units].
4715      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
4716      *      A 1-D tensor of shape [output_size].
4717      * * 18:The output state (in) (\f$h_{t-1}\f$).
4718      *      A 2-D tensor of shape [batch_size, output_size].
4719      * * 19:The cell state (in) (\f$C_{t-1}\f$).
4720      *      A 2-D tensor of shape [batch_size, num_units].
4721      * * 20:The activation function (\f$g\f$).
4722      *      A value indicating the activation function:
4723      *      <ul>
4724      *      <li>0: None;
4725      *      <li>1: Relu;
4726      *      <li>3: Relu6;
4727      *      <li>4: Tanh;
4728      *      <li>6: Sigmoid.
4729      *      </ul>
4730      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
4731      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
4732      *      then clipping is disabled.
4733      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
4734      *      projection layer, such that values are bound within
4735      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
4736      * * 23:Time-major if true, batch-major if false.
4737      * * 24:The input layer normalization weights. Optional.
4738      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4739      *      to activation at input gate.
4740      * * 25:The forget layer normalization weights. Optional.
4741      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4742      *      to activation at forget gate.
4743      * * 26:The cell layer normalization weights. Optional.
4744      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4745      *      to activation at cell gate.
4746      * * 27:The output layer normalization weights. Optional.
4747      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4748      *      to activation at output gate.
4749      *
4750      * Outputs:
4751      * * 0: The output (\f$o_t\f$).
4752      *      A 3-D tensor of shape:
4753      *        If time-major: [max_time, batch_size, output_size]
4754      *        If batch-major: [batch_size, max_time, output_size]
4755      * * 1: A tensor of shape [batch_size, output_size] containing a hidden
4756      *      state from the last time step in the sequence. This output is
4757      *      optional and can be omitted. If this output is present then
4758      *      output #2 must be present as well.
4759      *      Available since HAL version 1.3.
4760      * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
4761      *      from the last time step in the sequence. This output is optional
4762      *      and can be omitted.
4763      *      Available since HAL version 1.3.
4764      */
4765     UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
4766 
4767     /**
4768      * A recurrent neural network layer that applies a basic RNN cell to a
4769      * sequence of inputs.
4770      *
4771      * This layer unrolls the input along the sequence dimension, and implements
4772      * the following operation
4773      * for each element in the sequence s = 1...sequence_length:
4774      *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
4775      *   recurrent_weights’ + bias)
4776      *
4777      * Where:
4778      * * “input_weights” is a weight matrix that multiplies the inputs;
4779      * * “recurrent_weights” is a weight matrix that multiplies the current
4780      *    “state” which itself is the output from the previous time step
4781      *    computation;
4782      * * “bias” is a bias vector (added to each output vector in the batch);
4783      * * “activation” is the function passed as the “fused_activation_function”
4784      *   argument (if not “NONE”).
4785      *
4786      * Supported tensor {@link OperandType}:
4787      * * {@link OperandType::TENSOR_FLOAT16}
4788      * * {@link OperandType::TENSOR_FLOAT32}
4789      *
4790      * The input tensors must all be the same type.
4791      *
4792      * Inputs:
4793      * * 0: input.
4794      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
4795      *      it is set to 1, then the input has a shape [maxTime, batchSize,
4796      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
4797      *      inputSize].
4798      * * 1: weights.
4799      *      A 2-D tensor of shape [numUnits, inputSize].
4800      * * 2: recurrent_weights.
4801      *      A 2-D tensor of shape [numUnits, numUnits].
4802      * * 3: bias.
4803      *      A 1-D tensor of shape [numUnits].
4804      * * 4: hidden state
4805      *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
4806      *      state input for the first time step of the computation.
4807      * * 5: fusedActivationFunction.
4808      *      A {@link FusedActivationFunc} value indicating the activation function. If
4809      *      “NONE” is specified then it results in a linear activation.
4810      * * 6: timeMajor
4811      *      An {@link OperandType::INT32} scalar specifying the shape format
4812      *      of input and output tensors. Must be set to either 0 or 1.
4813      * Outputs:
4814      * * 0: output.
4815      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
4816      *      it is set to 1, then the output has a shape [maxTime, batchSize,
4817      *      numUnits], otherwise the output has a shape [batchSize, maxTime,
4818      *      numUnits].
4819      */
4820     UNIDIRECTIONAL_SEQUENCE_RNN = 93,
4821 
4822     /**
4823      * Resizes images to given size using the nearest neighbor interpretation.
4824      *
4825      * Resized images must be distorted if their output aspect ratio is not the
4826      * same as input aspect ratio. The corner pixels of output may not be the
4827      * same as corner pixels of input.
4828      *
4829      * Supported tensor {@link OperandType}:
4830      * * {@link OperandType::TENSOR_FLOAT16}
4831      * * {@link OperandType::TENSOR_FLOAT32}
4832      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4833      *
4834      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4835      * With the default data layout NHWC, the data is stored in the order of:
4836      * [batch, height, width, channels]. Alternatively, the data layout could
4837      * be NCHW, the data storage order of: [batch, channels, height, width].
4838      *
4839      * Both resizing by shape and resizing by scale are supported.
4840      *
4841      * Inputs (resizing by shape):
4842      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
4843      *      the input. Zero batches is supported for this tensor.
4844      * * 1: An {@link OperandType::INT32} scalar, specifying the output
4845      *      width of the output tensor.
4846      * * 2: An {@link OperandType::INT32} scalar, specifying the output
4847      *      height of the output tensor.
4848      * * 3: An {@link OperandType::BOOL} scalar, default to false.
4849      *      Set to true to specify NCHW data layout for input0 and output0.
4850      * * 4: Align corners. An optional {@link OperandType::BOOL}
4851      *      scalar, default to false.  If True, the centers of the 4 corner
4852      *      pixels of the input and output tensors are aligned, preserving the
4853      *      values at the corner pixels.
4854      *      Available since HAL version 1.3.
4855      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
4856      *      scalar, default to false. If True, the pixel centers are assumed to
4857      *      be at (0.5, 0.5). This is the default behavior of image.resize in
4858      *      TF 2.0. If this parameter is True, then align_corners parameter
4859      *      must be False.
4860      *      Available since HAL version 1.3.
4861      *
4862      * Inputs (resizing by scale):
4863      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
4864      *      the input. Zero batches is supported for this tensor.
4865      * * 1: A scalar, specifying width_scale, the scaling factor of the width
4866      *      dimension from the input tensor to the output tensor. The output
4867      *      width is calculated as new_width = floor(width * width_scale).
4868      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
4869      *      of {@link OperandType::TENSOR_FLOAT16} and of
4870      *      {@link OperandType::FLOAT32} otherwise.
4871      * * 2: A scalar, specifying height_scale, the scaling factor of the height
4872      *      dimension from the input tensor to the output tensor. The output
4873      *      height is calculated as new_height = floor(height * height_scale).
4874      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
4875      *      of {@link OperandType::TENSOR_FLOAT16} and of
4876      *      {@link OperandType::FLOAT32} otherwise.
4877      * * 3: An {@link OperandType::BOOL} scalar, default to false.
4878      *      Set to true to specify NCHW data layout for input0 and output0.
4879      * * 4: Align corners. An optional {@link OperandType::BOOL}
4880      *      scalar, default to false.  If True, the centers of the 4 corner
4881      *      pixels of the input and output tensors are aligned, preserving the
4882      *      values at the corner pixels.
4883      *      Available since HAL version 1.3.
4884      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
4885      *      scalar, default to false. If True, the pixel centers are assumed to
4886      *      be at (0.5, 0.5). This is the default behavior of image.resize in
4887      *      TF 2.0. If this parameter is True, then align_corners parameter
4888      *      must be False.
4889      *      Available since HAL version 1.3.
4890      *
4891      * Outputs:
4892      * * 0: The output 4-D tensor, of shape
4893      *      [batches, new_height, new_width, depth].
4894      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4895      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4896      *      the scale and zeroPoint must be the same as input0.
4897      */
4898     RESIZE_NEAREST_NEIGHBOR = 94,
4899 
4900     /**
4901      * Quantized version of {@link OperationType::LSTM}.
4902      *
4903      * The input and the output use asymmetric quantized types, while the rest
4904      * use symmetric ones.
4905      *
4906      * Inputs:
4907      * * 0: The input to the LSTM cell.
4908      *      Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
4909      *      Shape: [batchSize, inputSize]
4910      * * 1: The input-to-input weights. Optional.
4911      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4912      *      Shape: [numUnits, inputSize]
4913      * * 2: The input-to-forget weights.
4914      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4915      *      Shape: [numUnits, inputSize]
4916      * * 3: The input-to-cell weights.
4917      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4918      *      Shape: [numUnits, inputSize]
4919      * * 4: The input-to-output weights.
4920      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4921      *      Shape: [numUnits, inputSize]
4922      * * 5: The recurrent-to-input weights. Optional.
4923      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4924      *      Shape: [numUnits, outputSize]
4925      * * 6: The recurrent-to-forget weights.
4926      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4927      *      Shape: [numUnits, outputSize]
4928      * * 7: The recurrent-to-cell weights.
4929      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4930      *      Shape: [numUnits, outputSize]
4931      * * 8: The recurrent-to-output weights.
4932      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4933      *      Shape: [numUnits, outputSize]
4934      * * 9: The cell-to-input weights (for peephole). Optional.
4935      *      Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4936      *      Shape: [numUnits]
4937      * * 10: The cell-to-forget weights (for peephole). Optional.
4938      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4939      *       Shape: [numUnits]
4940      * * 11: The cell-to-output weights (for peephole). Optional.
4941      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4942      *       Shape: [numUnits]
4943      * * 12: The input gate bias. Quantized with scale being the
4944      *       product of input and weights scales and zeroPoint equal to 0.
4945      *       Optional.
4946      *       Type: {@link OperandType::TENSOR_INT32}
4947      *       Shape: [numUnits]
4948      * * 13: The forget gate bias. Quantized with scale being the
4949      *       product of input and weights scales and zeroPoint equal to 0.
4950      *       Type: {@link OperandType::TENSOR_INT32}
4951      *       Shape: [numUnits]
4952      * * 14: The cell bias. Quantized with scale being the
4953      *       product of input and weights scales and zeroPoint equal to 0.
4954      *       Type: {@link OperandType::TENSOR_INT32}
4955      *       Shape: [numUnits]
4956      * * 15: The output gate bias. Quantized with scale being the
4957      *       product of input and weights scales and zeroPoint equal to 0.
4958      *       Type: {@link OperandType::TENSOR_INT32}
4959      *       Shape: [numUnits]
4960      * * 16: The projection weights. Optional.
4961      *       Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4962      *       Shape: [outputSize, numUnits]
4963      * * 17: The projection bias. Quantized with scale being the
4964      *       product of input and weights scales and zeroPoint equal to 0.
4965      *       Optional.
4966      *       Type: {@link OperandType::TENSOR_INT32}
4967      *       Shape: [outputSize]
4968      * * 18: The output from the previous time step.
4969      *       Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
4970      *       Shape: [batchSize, outputSize]
4971      * * 19: The cell state from the previous time step.
4972      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4973      *       Shape: [batchSize, numUnits]
4974      * * 20: The input layer normalization weights. Used to rescale
4975      *       normalized inputs to activation at input gate. Optional.
4976      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4977      *       Shape: [numUnits]
4978      * * 21: The forget layer normalization weights. Used to
4979      *       rescale normalized inputs to activation at forget gate. Optional.
4980      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4981      *       Shape: [numUnits]
4982      * * 22: The cell layer normalization weights. Used to rescale
4983      *       normalized inputs to activation at cell gate. Optional.
4984      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4985      *       Shape: [numUnits]
4986      * * 23: The output layer normalization weights. Used to
4987      *       rescale normalized inputs to activation at output gate. Optional.
4988      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4989      *       Shape: [numUnits]
4990      * * 24: The cell clip. If provided the cell state is clipped
4991      *       by this value prior to the cell output activation. Optional.
4992      *       Type: {@link OperandType::FLOAT32}.
4993      * * 25: The projection clip. If provided and projection is enabled,
4994      *       this is used for clipping the projected values. Optional.
4995      *       Type: {@link OperandType::FLOAT32}.
4996      * * 26: The scale of the intermediate result of matmul,
4997      *       i.e. input to layer normalization, at input gate.
4998      *       Type: {@link OperandType::FLOAT32}.
4999      * * 27: The scale of the intermediate result of matmul,
5000      *       i.e. input to layer normalization, at forget gate.
5001      *       Type: {@link OperandType::FLOAT32}.
5002      * * 28: The scale of the intermediate result of matmul,
5003      *       i.e. input to layer normalization, at cell gate.
5004      *       Type: {@link OperandType::FLOAT32}.
5005      * * 29: The scale of the intermediate result of matmul,
5006      *       i.e. input to layer normalization, at output gate.
5007      *       Type: {@link OperandType::FLOAT32}.
5008      * * 30: The zero point of the hidden state, i.e. input to
5009      *       projection.
5010      *       Type: {@link OperandType::INT32}.
5011      * * 31: The scale of the hidden state, i.e. input to
5012      *       projection.
5013      *       Type: {@link OperandType::FLOAT32}.
5014      *
5015      * Outputs:
5016      * * 0: The output state (out).
5017      *      Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5018      *      Shape: [batchSize, outputSize]
5019      * * 1: The cell state (out).
5020      *      Type: {@link OperandType::TENSOR_QUANT16_SYMM}
5021      *      Shape: [batchSize, numUnits]
5022      * * 2: The output. This is effectively the same as the current
5023      *      "output state (out)" value.
5024      *      Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5025      *      Shape: [batchSize, outputSize]
5026      */
5027     QUANTIZED_LSTM = 95,
5028 
5029     /**
5030      * Executes one of the two referenced subgraphs as determined by a boolean
5031      * value.
5032      *
5033      * The inputs and outputs of the two referenced subgraphs must agree with the
5034      * signature of this operation. That is, if the operation has (3 + n) inputs
5035      * and m outputs, both subgraphs must have n inputs and m outputs with the same
5036      * types, ranks, dimensions, scales,
5037      * zeroPoints, and extraParams as the corresponding operation
5038      * inputs and outputs.
5039      * All of the operands mentioned must have fully specified dimensions.
5040      *
5041      * Inputs:
5042      * * 0: A value of type {@link OperandType::TENSOR_BOOL8} and shape [1]
5043      *      that determines which of the two referenced subgraphs to execute.
5044      *      The operand must have fully specified dimensions.
5045      * * 1: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
5046      *      executed if the condition is true.
5047      * * 2: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
5048      *      executed if the condition is false.
5049      * * 3 ~ (n + 2): Inputs to be passed to the subgraph selected for execution.
5050      *
5051      * Outputs:
5052      * * 0 ~ (m - 1): Outputs produced by the selected subgraph.
5053      */
5054     IF = 96,
5055 
5056     /**
5057      * Executes the body subgraph until the condition subgraph outputs false.
5058      *
5059      * The inputs to this operation are the condition subgraph, the body subgraph,
5060      * and operand values for the first iteration of the loop. The values are
5061      * implicitly split into three groups of input-output, state-only, and
5062      * input-only values, as described below.
5063      *
5064      * The outputs of this operation are the final values of input-output
5065      * operands.
5066      *
5067      * Both the condition and body subgraph receive (m + k + n) inputs.
5068      * * The first m (m >= 1) inputs are input-output operands. For the first
5069      *   iteration, these are initialized from the corresponding inputs of the
5070      *   WHILE operation. In subsequent iterations, their values come from the
5071      *   corresponding outputs of the body subgraph produced during the previous
5072      *   iteration.
5073      * * The next k (k >= 0) inputs are state-only operands. They are similar to
5074      *   the input-output operands, except that their values are no longer
5075      *   available after the loop terminates.
5076      * * The last n (n >= 0) inputs are input-only operands. Their values come
5077      *   from the corresponding inputs of the WHILE operation.
5078      *
5079      * The body subgraph produces (m + k) outputs.
5080      * * The first m outputs are input-output operands. They become the outputs
5081      *   of the WHILE operation when a termination condition is reached.
5082      * * The last k outputs are state-only operands. Their values are no longer
5083      *   available after the loop terminates.
5084      *
5085      * The numbers m, k, and n are inferred by the driver as follows:
5086      *     m = (WHILE operation output count)
5087      *     k = (body subgraph output count) - m
5088      *     n = (body subgraph input count) - m - k
5089      *
5090      * The pseudo-code below illustrates the flow of a WHILE operation with
5091      * inputs condition, body, initial_input_output, initial_state, input_only
5092      * (m = 1, k = 1, n = 1):
5093      *
5094      *     input_output = initial_input_output
5095      *     state = initial_state
5096      *     while condition(input_output, state, input_only):
5097      *         input_output, state = body(input_output, state, input_only)
5098      *     return input_output
5099      *
5100      * Inputs:
5101      * * 0: A {@link OperandType::SUBGRAPH} reference to the condition
5102      *      subgraph. The subgraph must have (m + k + n) inputs with
5103      *      the same types, ranks, dimensions,
5104      *      scales, zeroPoints, and extraParams as the
5105      *      corresponding inputs of the WHILE operation and exactly one output
5106      *      of {@link OperandType::TENSOR_BOOL8} and shape [1].
5107      *      All of the operands mentioned must have fully specified dimensions.
5108      * * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
5109      *      The subgraph must have (m + k + n) inputs and (m + k) outputs with
5110      *      the same types, ranks, dimensions,
5111      *      scales, zeroPoints, and extraParams as the
5112      *      corresponding inputs and outputs of the WHILE operation.
5113      *      All of the operands mentioned must have fully specified dimensions.
5114      * * (m inputs): Initial values for input-output operands.
5115      * * (k inputs): Initial values for state-only operands.
5116      * * (n inputs): Values for input-only operands.
5117      *
5118      * Outputs:
5119      * * 0 ~ (m - 1): Outputs produced by the loop.
5120      */
5121     WHILE = 97,
5122 
5123     /**
5124      * Computes exponential linear activation on the input tensor element-wise.
5125      *
5126      * The output is calculated using the following formula:
5127      *
5128      *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
5129      *
5130      * Supported tensor {@link OperandType}:
5131      * * {@link OperandType::TENSOR_FLOAT16}
5132      * * {@link OperandType::TENSOR_FLOAT32}
5133      *
5134      * Supported tensor rank: from 1.
5135      *
5136      * Inputs:
5137      * * 0: A tensor, specifying the input. May be zero-sized.
5138      * * 1: A scalar, specifying the alpha parameter.
5139      *      For input tensor of {@link OperandType::TENSOR_FLOAT16},
5140      *      the alpha value must be of {@link OperandType::FLOAT16}.
5141      *      For input tensor of {@link OperandType::TENSOR_FLOAT32},
5142      *      the alpha value must be of {@link OperandType::FLOAT32}.
5143      *
5144      * Outputs:
5145      * * 0: The output tensor of same shape and type as input0.
5146      */
5147     ELU = 98,
5148 
5149     /**
5150      * Computes hard-swish activation on the input tensor element-wise.
5151      *
5152      * Hard swish activation is introduced in
5153      * https://arxiv.org/pdf/1905.02244.pdf
5154      *
5155      * The output is calculated using the following formula:
5156      *
5157      *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
5158      *
5159      * Supported tensor {@link OperandType}:
5160      * * {@link OperandType::TENSOR_FLOAT16}
5161      * * {@link OperandType::TENSOR_FLOAT32}
5162      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5163      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5164      *
5165      * Supported tensor rank: from 1.
5166      *
5167      * Inputs:
5168      * * 0: A tensor, specifying the input. May be zero-sized.
5169      *
5170      * Outputs:
5171      * * 0: The output tensor of same shape and type as input0.
5172      *      Scale and zero point of this tensor may be different from the input
5173      *      tensor's parameters.
5174      */
5175     HARD_SWISH = 99,
5176 
5177     /**
5178      * Creates a tensor filled with a scalar value.
5179      *
5180      * Supported output tensor {@link OperandType}:
5181      * * {@link OperandType::TENSOR_FLOAT16}
5182      * * {@link OperandType::TENSOR_FLOAT32}
5183      * * {@link OperandType::TENSOR_INT32}
5184      *
5185      * Supported tensor rank: from 1.
5186      *
5187      * Inputs:
5188      * * 0: A 1-D tensor, specifying the desired output tensor shape.
5189      * * 1: A scalar, specifying the value to fill the output tensors with.
5190      *      For output tensor of {@link OperandType::TENSOR_FLOAT16},
5191      *      the scalar must be of {@link OperandType::FLOAT16}.
5192      *      For output tensor of {@link OperandType::TENSOR_FLOAT32},
5193      *      the scalar must be of {@link OperandType::FLOAT32}.
5194      *      For output tensor of {@link OperandType::TENSOR_INT32},
5195      *      the scalar must be of {@link OperandType::INT32}.
5196      *
5197      * Outputs:
5198      * * 0: The output tensor.
5199      */
5200     FILL = 100,
5201 
5202     /**
5203      * Returns the rank of a tensor.
5204      *
5205      * The rank of a tensor is the number of dimensions in it. Also known as
5206      * "order", "degree", "ndims".
5207      *
5208      * Supported tensor {@link OperandType}:
5209      * * {@link OperandType::TENSOR_FLOAT16}
5210      * * {@link OperandType::TENSOR_FLOAT32}
5211      * * {@link OperandType::TENSOR_INT32}
5212      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5213      * * {@link OperandType::TENSOR_QUANT16_SYMM}
5214      * * {@link OperandType::TENSOR_BOOL8}
5215      * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
5216      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
5217      * * {@link OperandType::TENSOR_QUANT8_SYMM}
5218      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5219      *
5220      * Supported tensor rank: from 1.
5221      *
5222      * Inputs:
5223      * * 0: The input tensor.
5224      *
5225      * Outputs:
5226      * * 0: A scalar of {@link OperandType::INT32}, specifying the rank
5227      *      of the input tensor.
5228      */
5229     RANK = 101,
5230 
5231     /**
5232      * Performs multiplication of two tensors in batches.
5233      *
5234      * Multiplies all slices of two input tensors and arranges the individual
5235      * results in a single output tensor of the same batch size. Each pair of
5236      * slices in the same batch have identical {@link OperandType}. Each
5237      * slice can optionally be adjointed (transpose and conjugate) before
5238      * multiplication.
5239      *
5240      * The two input tensors and the output tensor must be 2-D or higher and
5241      * have the same batch size.
5242      *
5243      * Supported tensor {@link OperandType}:
5244      * * {@link OperandType::TENSOR_FLOAT16}
5245      * * {@link OperandType::TENSOR_FLOAT32}
5246      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5247      * * {@link OperandType::TENSOR_INT32}
5248      *
5249      * Supported tensor rank: at least 2 and up to 4
5250      *
5251      * Inputs:
5252      * * 0: A tensor with 2-D or higher shape [..., r_x, c_x].
5253      * * 1: A tensor with 2-D or higher shape [..., r_y, c_y]. It has the same
5254      *      {@link OperandType} and batch size as input0.
5255      * * 2: An optional {@link OperandType::BOOL} scalar adj_x, default
5256      *      to false. Set to true to adjoint the slices of input0.
5257      * * 3: An optional {@link OperandType::BOOL} scalar adj_y, default
5258      *      to false. Set to true to adjoint the slices of input1.
5259      *
5260      * Outputs:
5261      * * 0: A tensor with 2-D or higher shape [..., r_o, c_o], where
5262      *      r_o = c_x if adj_x else r_x
5263      *      c_o = r_y if adj_y else c_y
5264      */
5265     BATCH_MATMUL = 102,
5266 
5267     /**
5268      * Packs N input tensors (N >= 1) of rank R into one output tensor of rank R+1.
5269      * The tensors are packed along a given axis.
5270      *
5271      * The input tensors must have identical {@link OperandType} and dimensions.
5272      *
5273      * For example, suppose there are N input tensors of shape (A, B, C).
5274      * If axis is 0, the output tensor will have shape (N, A, B, C).
5275      * If axis is 1, the output tensor will have shape (A, N, B, C).
5276      *
5277      * All dimensions through the axis dimension determine the output tile count;
5278      * the remaining dimensions determine the tile shape.
5279      *
5280      * Return to the example of N input tensors of shape (A, B, C).
5281      * If axis is 0, there are N tiles in the output, each of shape (A, B, C).
5282      * If axis is 1, there are A*N tiles in the output, each of shape (B, C).
5283      *
5284      * The coordinates of a tile within the output tensor are (t[0],...,t[axis]).
5285      * The coordinates of a tile within an input tensor are (t[0],...,t[axis-1]).
5286      * (If axis is 0, an input tensor consists of a single tile.)
5287      * If we index input tensors starting with 0 (rather than by operand number),
5288      * then output_tile[t[0],...,t[axis]] = input_tile[t[axis]][t[0],...,t[axis-1]].
5289      * That is, all output tile coordinates except for the axis coordinate select
5290      * the corresponding location within some input tensor; and the axis coordinate
5291      * selects the input tensor.
5292      *
5293      * Supported tensor {@link OperandType}:
5294      * * {@link OperandType::TENSOR_FLOAT16}
5295      * * {@link OperandType::TENSOR_FLOAT32}
5296      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5297      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5298      * * {@link OperandType::TENSOR_INT32}
5299      *
5300      * Supported input tensor rank: from 1
5301      *
5302      * Inputs:
5303      * * 0: A scalar of type {@link OperandType::INT32}, specifying
5304      *      the axis along which to pack.  The valid range is [0, R+1).
5305      * * 1 ~ N: Input tensors to be packed together.
5306      *          For {@link OperandType::TENSOR_QUANT8_ASYMM} and
5307      *          {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensors,
5308      *          the scales and zeroPoint must be the same for all input tensors,
5309      *          and will be the same for the output tensor.
5310      *
5311      * Outputs:
5312      * * 0: The packed tensor.
5313      */
5314     PACK = 103,
5315 
5316     /**
5317      * Pads a tensor with mirrored values.
5318      *
5319      * This operator specifies one of two padding modes: REFLECT or SYMMETRIC.
5320      * In the case of REFLECT mode, the mirroring excludes the border element
5321      * on the padding side.
5322      * In the case of SYMMETRIC mode, the mirroring includes the border element
5323      * on the padding side.
5324      *
5325      * For example, if the input is the 1-D tensor `[1, 2, 3]` and the padding
5326      * is `[0, 2]` (i.e., pad no elements before the first (and only) dimension,
5327      * and two elements after the first (and only) dimension), then:
5328      *     - REFLECT mode produces the output `[1, 2, 3, 2, 1]`
5329      *     - SYMMETRIC mode produces the output `[1, 2, 3, 3, 2]`
5330      *
5331      * Supported tensor {@link OperandType}:
5332      * * {@link OperandType::TENSOR_FLOAT16}
5333      * * {@link OperandType::TENSOR_FLOAT32}
5334      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5335      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5336      * * {@link OperandType::TENSOR_INT32}
5337      *
5338      * Supported tensor rank: from 1.
5339      *
5340      * Inputs:
5341      * * 0: An n-D tensor, specifying the tensor to be padded.
5342      * * 1: A 2-D tensor of {@link OperandType::TENSOR_INT32}, the paddings
5343      *      for each spatial dimension of the input tensor. The shape of the
5344      *      tensor must be {rank(input0), 2}.
5345      *      padding[i, 0] specifies the number of elements to be padded in the
5346      *      front of dimension i.
5347      *      padding[i, 1] specifies the number of elements to be padded after the
5348      *      end of dimension i.
5349      *      Each padding value must be nonnegative.
5350      *      In the case of REFLECT mode, each padding value must be less than the
5351      *      corresponding dimension.
5352      *      In the case of SYMMETRIC mode, each padding value must be less than or
5353      *      equal to the corresponding dimension.
5354      * * 2: An {@link OperandType::INT32} scalar, specifying the mode.
5355      *      Options are 0:REFLECT and 1:SYMMETRIC.
5356      *
5357      * Outputs:
5358      * * 0: A tensor of the same {@link OperandType} as input0. The
5359      *      output tensor has the same rank as input0, and each
5360      *      dimension of the output tensor has the same size as the
5361      *      corresponding dimension of the input tensor plus the size
5362      *      of the padding:
5363      *          output0.dimension[i] =
5364      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
5365      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
5366      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5367      *      the scale and zeroPoint must be the same as input0.
5368      */
5369     MIRROR_PAD = 104,
5370 
5371     /**
5372      * Reverses a specified dimension of a tensor.
5373      *
5374      * Supported tensor {@link OperandType}:
5375      * * {@link OperandType::TENSOR_FLOAT16}
5376      * * {@link OperandType::TENSOR_FLOAT32}
5377      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5378      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5379      * * {@link OperandType::TENSOR_INT32}
5380      *
5381      * Supported tensor rank: up to 8.
5382      *
5383      * Inputs:
5384      * * 0: Input tensor of rank n.
5385      * * 1: Axis tensor of type {@link OperandType::TENSOR_INT32} and shape [1],
5386      *      specifying which dimension of the input tensor is to be reversed. The dimension
5387      *      must be in the range [0, n).
5388      *
5389      * Outputs:
5390      * * 0: The reversed tensor of the same shape as the input tensor.
5391      *      For {@link OperandType::TENSOR_QUANT8_ASYMM} and
5392      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensors,
5393      *      the scales and zeroPoint must be the same as input0.
5394      */
5395     REVERSE = 105,
5396 
5397     /**
5398      * DEPRECATED. Since HAL version 1.2, extensions are the preferred
5399      * alternative to OEM operation and data types.
5400      *
5401      * This operation is OEM specific. It should only be used for OEM
5402      * applications.
5403      */
5404     OEM_OPERATION = 10000,
5405 
5406 #ifdef NN_EXPERIMENTAL_FEATURE
5407     /**
5408      * Expands a representation of a sparse tensor to a dense tensor.
5409      *
5410      * To encode a conceptual n-dimensional dense tensor with dims [D0, ..., Dn-1], potentially with
5411      * a k-dimensional block (0 <= k <= n) with dims [Dn, ..., Dn+k-1], the format specifies:
5412      * * 1: In what order to traverse these dimensions. For example, to store a 2-D matrix in row
5413      *      major order, the traversal order would be [D0, D1], whereas to store it in column major
5414      *      order, the traversal order would be [D1, D0]. If the 2-D matrix has a 2-D inner block,
5415      *      the traversal order could be [D0, D1, D2, D3].
5416      * * 2: How each block dimension in [Dn, ..., Dn+k-1] maps to the original tensor dimension in
5417      *      [D0, ..., Dn-1].
5418      * * 3: In the traversal order defined above, the format (dense vs. sparse) and index metadata
5419      *      for each dimension. For a dense dimension, this is just the size of that dimension. For
5420      *      a sparse dimension, it's the same as the compressed index defined in the Compressed
5421      *      Sparse Row (CSR) format.
5422      *      (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
5423      *
5424      * The number of inputs to this operation is determined by the number of dimensions (including
5425      * the block dimensions) of the sparsity parameters. Currently, the only formats supported are
5426      * DENSE and SPARSE_CSR, but additional sparsity formats may be added in later versions of this
5427      * operation.
5428      *
5429      * Supported tensor {@link OperandType}:
5430      * * {@link OperandType::TENSOR_FLOAT16}
5431      * * {@link OperandType::TENSOR_FLOAT32}
5432      * * {@link OperandType::TENSOR_QUANT8_SYMM}
5433      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5434      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5435      * * {@link OperandType::TENSOR_BOOL8}
5436      * * {@link OperandType::TENSOR_INT32}
5437      * * {@link OperandType::TENSOR_QUANT16_SYMM}
5438      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
5439      *
5440      *
5441      * Reference:
5442      * * This implementation is a modification of the TACO format.
5443      *   http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
5444      *
5445      * Inputs:
5446      * * 0: A 1-D tensor representing the compressed sparse tensor data of a conceptual
5447      *      n-dimensional tensor.
5448      * * 1: A 1-D {@link OperandType::TENSOR_INT32} tensor defining the traversal order for reading
5449      *      the non-zero blocks. For an n-dimensional tensor with dimensions [D0, D1, …, Dn-1]: if
5450      *      block sparse with a k-dimensional block (0 < k <= n), the traversal order has n+k
5451      *      elements. The first n elements are still a permutation of [D0, …, Dn-1]. The last k
5452      *      elements are a permutation of [Dn, …, Dn+k-1], defining how to traverse a block
5453      *      internally. If not block sparse, the traversal order is just a permutation of [D0, …,
5454      *      Dn-1].
5455      * * 2: An optional 1-D {@link OperandType::TENSOR_INT32} tensor defining the block map. For a
5456      *      block sparse n-dimensional tensor with a k-dimensional block (0 < k <= n), it stores how
5457      *      a block dimension [Dn, …, Dn+k-1] maps to the original tensor dimension in [D0, …,
5458      *      Dn-1]. For i, j where 0 <= i < j < k, blockMap[i] < blockMap[j]. If not block sparse,
5459      *      this is null.
5460      * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor with n+k elements defining the format of
5461      *      each dimension in the traversal order (listed above). The format is either DENSE (where
5462      *      DENSE = 0) or SPARSE_CSR (where SPARSE_CSR = 1). DENSE means that each coordinate in
5463      *      this dimension is stored implicitly. SPARSE_CSR means only the coordinates with non-zero
5464      *      elements are stored.
5465      * * 4: A 1-D {@link OperandType::TENSOR_INT32} tensor with n+k elements defining the size of
5466      *      each dimension or block. The product of all these sizes totals the number of elements in
5467      *      the dense tensor. First n elements represent the sparse tensor’s shape, and the last k
5468      *      elements represent the block’s shape.
5469      * * 5 ~ (5 + 2 * (n+k)): An optional pair of {@link OperandType::TENSOR_INT32} tensors which
5470      *      together specify the sparse indices along that dimension. The first pair of arguments
5471      *      corresponds to D0, the second to D1, and so on until Dn+k-1. If the dimension is DENSE,
5472      *      both arguments in the pair are null and the dimension is implicitly specified by the
5473      *      corresponding element in Input 4. If the dimension is SPARSE_CSR, then we use the pair
5474      *      of array segments and array indices to encode that dimension:
5475      * * * +0: An optional list of n+k input 1-D {@link OperandType::TENSOR_INT32} tensors, defining
5476      *         the array segments. The array segments represent how to segment the indices array,
5477      *         each segment corresponds to one element in the previous dimension. Array segments are
5478      *         interspersed with array indices (listed below), so this input could be input (5, 5 +
5479      *         2, …, 5 + 2*(n+k-1)). For i, j where 0 =< i < j, arraySegments[i] <=
5480      *         arraySegments[j]. Used if the dimension is SPARSE_CSR, omitted if the dimension is
5481      *         DENSE.
5482      * * * +1: An optional list of n+k input 1-D {@link OperandType::TENSOR_INT32} tensors, defining
5483      *         the array indices. The array indices represent the index of the non-zero elements
5484      *         within this dimension (as those in the CSR matrix format, where the first array is
5485      *         row pointers and the second array is column indices). Array indices are interspersed
5486      *         with array segments (listed above), so this input could be input (6, 6 + 2, …, 6 +
5487      *         2*(n+k-1)). Used if the dimension is SPARSE_CSR, omitted if the dimension is DENSE.
5488      *
5489      * Outputs:
5490      * * 0: An n-D dense tensor. The output tensor has the same {@link OperandType} as input 0.
5491      */
5492     DENSIFY = 20000,
5493 #endif  // NN_EXPERIMENTAL_FEATURE
5494 };
5495 
5496 }  // namespace android::nn
5497 
5498 #endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_TYPES_NNAPI_OPERATION_TYPES_H
5499