/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NeuralNetworks
 * @{
 */

/**
 * @file NeuralNetworks.h
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H

/******************************************************************
 *
 * IMPORTANT NOTICE:
 *
 *   This file is part of Android's set of stable system headers
 *   exposed by the Android NDK (Native Development Kit).
 *
 *   Third-party source AND binary code relies on the definitions
 *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
 *
 *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
 *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
 *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
 *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
 */

#include <android/hardware_buffer.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

/**
 * Operand types.
 *
 * The type of an operand in a model.
 *
 * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors
 * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent
 * scalar values and must have no dimensions.
 *
 * Although we define many types, most operators accept just a few
 * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},
 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
 * and {@link ANEURALNETWORKS_INT32}.
 *
 * Available since API level 27.
 */
typedef enum {
    /** A 32 bit floating point scalar value. */
    ANEURALNETWORKS_FLOAT32 = 0,
    /** A signed 32 bit integer scalar value. */
    ANEURALNETWORKS_INT32 = 1,
    /** An unsigned 32 bit integer scalar value. */
    ANEURALNETWORKS_UINT32 = 2,
    /** A tensor of 32 bit floating point values. */
    ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
    /** A tensor of 32 bit integer values. */
    ANEURALNETWORKS_TENSOR_INT32 = 4,
    /**
     * A tensor of 8 bit unsigned integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that can be used to convert the
     * 8 bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [0, 255].
     *
     * The formula is:
     *   real_value = (integer_value - zeroPoint) * scale.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
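    /*
     * Worked example of the asymmetric quantization formula above (editorial
     * sketch; the helper below is illustrative, not part of this API): with
     * scale = 0.5 and zeroPoint = 128, the stored 8 bit value 130 represents
     * the real value (130 - 128) * 0.5 = 1.0.
     *
     *     static inline float dequantize_quant8_asymm(uint8_t q, float scale,
     *                                                 int32_t zeroPoint) {
     *         return ((int32_t)q - zeroPoint) * scale;
     *     }
     */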
    /**
     * An 8 bit boolean scalar value.
     *
     * Values of this operand type are either true or false. A zero value
     * represents false; any other value represents true.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_BOOL = 6,
    /**
     * A tensor of 16 bit signed integers that represent real numbers.
     *
     * Attached to this tensor is a number representing real value scale that is
     * used to convert the 16 bit number to a real value in the following way:
     *   realValue = integerValue * scale.
     *
     * scale is a 32 bit floating point value greater than zero.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
    /**
     * A tensor of IEEE 754 16 bit floating point values.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
    /**
     * A tensor of 8 bit boolean values.
     *
     * Values of this operand type are either true or false. A zero value
     * represents false; any other value represents true.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_TENSOR_BOOL8 = 9,
    /**
     * An IEEE 754 16 bit floating point scalar value.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_FLOAT16 = 10,
    /**
     * A tensor of 8 bit signed integers that represent real numbers.
     *
     * This tensor is associated with additional fields that can
     * be used to convert the 8 bit signed integer to the real value and vice versa.
     * These fields are:
     * - channelDim: a 32 bit unsigned integer indicating channel dimension.
     * - scales: an array of positive 32 bit floating point values.
     * The size of the scales array must be equal to dimensions[channelDim].
     *
     * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used
     * to set the parameters for an Operand of this type.
     *
     * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
     *
     * The formula is:
     *   realValue[..., C, ...] =
     *       integerValue[..., C, ...] * scales[C]
     * where C is an index in the Channel dimension.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
    /**
     * A tensor of 16 bit unsigned integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that can be used to convert the
     * 16 bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [0, 65535].
     *
     * The formula is:
     *   real_value = (integer_value - zeroPoint) * scale.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12,
    /**
     * A tensor of 8 bit signed integers that represent real numbers.
     *
     * Attached to this tensor is a number representing real value scale that is
     * used to convert the 8 bit number to a real value in the following way:
     *   realValue = integerValue * scale.
     *
     * scale is a 32 bit floating point value greater than zero.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
    /**
     * A tensor of 8 bit signed integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that can be used to convert the
     * 8 bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [-128, 127].
     *
     * The formula is:
     *   real_value = (integer_value - zeroPoint) * scale.
     *
     * Available since API level 30.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
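    /*
     * Illustrative sketch (not part of the stable definitions in this header):
     * filling in ANeuralNetworksSymmPerChannelQuantParams for a
     * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} operand, assuming
     * a model with a two-channel filter operand at index filterIndex whose
     * channel dimension is 0.
     *
     *     const float scales[2] = {0.05f, 0.1f};  // one scale per channel
     *     ANeuralNetworksSymmPerChannelQuantParams channelQuant = {
     *             .channelDim = 0,  // dimensions[0] holds the channels
     *             .scaleCount = 2,  // must equal dimensions[channelDim]
     *             .scales = scales,
     *     };
     *     ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
     *             model, filterIndex, &channelQuant);
     */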

    /**
     * A reference to a model.
     *
     * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set
     * the value for an Operand of this type.
     *
     * Available since API level 30.
     */
    ANEURALNETWORKS_MODEL = 15,
} OperandCode;

/**
 * Operation types.
 *
 * The type of an operation in a model.
 *
 * Available since API level 27.
 */
typedef enum {
    // Operations below are available since API level 27.

    /**
     * Adds two tensors, element-wise.
     *
     * Takes two input tensors of identical {@link OperandCode} and compatible
     * dimensions. The output is the sum of both input tensors, optionally
     * modified by an activation function.
     *
     * Two dimensions are compatible when:
     *     1. they are equal, or
     *     2. one of them is 1
     *
     * The size of the output is the maximum size along each dimension of the
     * input operands. It starts with the trailing dimensions, and works its
     * way forward.
     *
     * Example:
     *
     *     input1.dimension = {4, 1, 2}
     *     input2.dimension = {5, 4, 3, 1}
     *     output.dimension = {5, 4, 3, 2}
     *
     * Since API level 29, generic zero-sized input tensors are supported. A
     * zero dimension is only compatible with 0 or 1. The size of the output
     * dimension is zero if either of the corresponding input dimensions is zero.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: A tensor.
     * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
     *      as input0.
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
     * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
     *      the {@link FuseCode} must be "NONE".
     *
     * Outputs:
     * * 0: The sum, a tensor of the same {@link OperandCode} as input0.
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_ADD = 0,
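    /*
     * Illustrative sketch of wiring up an ADD operation (assumes an already
     * created ANeuralNetworksModel* model; error handling omitted):
     *
     *     uint32_t dims[3] = {4, 1, 2};
     *     ANeuralNetworksOperandType tensorType = {
     *             .type = ANEURALNETWORKS_TENSOR_FLOAT32,
     *             .dimensionCount = 3, .dimensions = dims,
     *             .scale = 0.0f, .zeroPoint = 0};
     *     ANeuralNetworksOperandType activationType = {
     *             .type = ANEURALNETWORKS_INT32,
     *             .dimensionCount = 0, .dimensions = NULL,
     *             .scale = 0.0f, .zeroPoint = 0};
     *     ANeuralNetworksModel_addOperand(model, &tensorType);      // 0: input0
     *     ANeuralNetworksModel_addOperand(model, &tensorType);      // 1: input1
     *     ANeuralNetworksModel_addOperand(model, &activationType);  // 2: fuse code
     *     ANeuralNetworksModel_addOperand(model, &tensorType);      // 3: output
     *     int32_t fuseCode = ANEURALNETWORKS_FUSED_NONE;
     *     ANeuralNetworksModel_setOperandValue(model, 2, &fuseCode, sizeof(fuseCode));
     *     uint32_t inputs[3] = {0, 1, 2};
     *     uint32_t outputs[1] = {3};
     *     ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
     *                                       3, inputs, 1, outputs);
     */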

    /**
     * Performs a 2-D average pooling operation.
     *
     * The output dimensions are functions of the filter dimensions, stride, and
     * padding.
     *
     * The values in the output tensor are computed as:
     *
     *     output[b, i, j, channel] =
     *         sum_{di, dj}(
     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
     *         ) / sum(1)
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
     * With the default data layout NHWC, the data is stored in the order of:
     * [batch, height, width, channels]. Alternatively, the data layout could
     * be NCHW, the data storage order of: [batch, channels, height, width].
     * NCHW is supported since API level 29.
     *
     * Both explicit padding and implicit padding are supported.
     *
     * Inputs (explicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
     *      the input.
     *      Since API level 29, zero batches is supported for this tensor.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the right, in the ‘width’ dimension.
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the top, in the ‘height’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the bottom, in the ‘height’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      width.
     * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      height.
     * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *       Set to true to specify NCHW data layout for input0 and output0.
     *       Available since API level 29.
     *
     * Inputs (implicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
     *      the input.
     *      Since API level 29, zero batches is supported for this tensor.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
     *      padding scheme, has to be one of the
     *      {@link PaddingCode} values.
     * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      width.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      height.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *      Set to true to specify NCHW data layout for input0 and output0.
     *      Available since API level 29.
     *
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth].
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
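    /*
     * Editorial note: with explicit padding, the output spatial size follows
     * the usual pooling arithmetic,
     *
     *     out_width  = (width  + padding_left + padding_right  - filter_width)  / stride_width  + 1
     *     out_height = (height + padding_top  + padding_bottom - filter_height) / stride_height + 1
     *
     * e.g. width = 8, padding = 0, filter_width = 2, stride_width = 2 gives
     * out_width = (8 + 0 + 0 - 2) / 2 + 1 = 4.
     */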

    /**
     * Concatenates the input tensors along the given dimension.
     *
     * The input tensors must have identical {@link OperandCode} and the same
     * dimensions except the dimension along the concatenation axis.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *   (full support since API level 29, see the input section)
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0 ~ n-1: The list of n input tensors, of shape
     *            [D0, D1, ..., Daxis(i), ..., Dm].
     *            Before API level 29, all input tensors of
     *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *            must have the same scale and zeroPoint as the output tensor.
     *            Input tensors of
     *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
     *            are allowed to have different scale and zeroPoint.
     *            Since API level 29, zero-sized tensors are supported.
     * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
     *      concatenation axis.
     *
     * Outputs:
     * * 0: The output, a tensor of the same {@link OperandCode} as the input
     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
     *      Since API level 29, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint values can be different from
     *      input tensors. Before API level 29 they have to be the same as for the input tensors.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_CONCATENATION = 2,

    /**
     * Performs a 2-D convolution operation.
     *
     * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
     * batch of images, applying the filter to each window of each image of the
     * appropriate size.
     *
     * The output dimensions are functions of the filter dimensions, stride, and
     * padding.
     *
     * The values in the output tensor are computed as:
     *
     *     output[b, i, j, channel] =
     *         sum_{di, dj, k} (
     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
     *             filter[channel, di, dj, k]
     *         ) + bias[channel]
     *
     * Supported tensor {@link OperandCode} configurations:
     * * 32 bit floating point:
     * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
     *
     * * Quantized:
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * Available since API level 29:
     * * 16 bit floating point:
     * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
     *
     * * Quantized with symmetric per channel quantization for the filter:
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * Available since API level 30:
     * * Quantized signed (since API level 30):
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized signed with filter symmetric per channel quantization (since API level 30):
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
     * With the default data layout NHWC, the data is stored in the order of:
     * [batch, height, width, channels]. Alternatively, the data layout could
     * be NCHW, the data storage order of: [batch, channels, height, width].
     * NCHW is supported since API level 29.
     *
     * Both explicit padding and implicit padding are supported.
     *
     * Inputs (explicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input.
     *      Since API level 29, zero batches is supported for this tensor.
     * * 1: A 4-D tensor, of shape
     *      [depth_out, filter_height, filter_width, depth_in], specifying the
     *      filter.
     *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
     *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
     *      must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the right, in the ‘width’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the top, in the ‘height’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the bottom, in the ‘height’ dimension.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *       Set to true to specify NCHW data layout for input0 and output0.
     *       Available since API level 29.
     * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *       factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *       cells between each filter element on the width dimension. If this input is set,
     *       input 12 (dilation factor for height) must be specified as well.
     *       Available since API level 29.
     * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *       factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *       cells between each filter element on the height dimension. If this input is set,
     *       input 11 (dilation factor for width) must be specified as well.
     *       Available since API level 29.
     *
     * Inputs (implicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input.
     *      Since API level 29, zero batches is supported for this tensor.
     * * 1: A 4-D tensor, of shape
     *      [depth_out, filter_height, filter_width, depth_in], specifying the
     *      filter.
     *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
     *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
     *      must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same
     *      type.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
     *      padding scheme, has to be one of the
     *      {@link PaddingCode} values.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *      Set to true to specify NCHW data layout for input0 and output0.
     *      Available since API level 29.
     * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *      cells between each filter element on the width dimension. If this input is set,
     *      input 9 (dilation factor for height) must be specified as well.
     *      Available since API level 29.
     * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *      cells between each filter element on the height dimension. If this input is set,
     *      input 8 (dilation factor for width) must be specified as well.
     *      Available since API level 29.
     *
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth_out].
     *      Before API level 29, for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
     *      the following condition must be satisfied: output_scale > input_scale * filter_scale
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_CONV_2D = 3,
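    /*
     * Editorial note: a dilation factor k > 1 enlarges the effective filter
     * size without adding parameters,
     *
     *     effective_filter_width = filter_width + (filter_width - 1) * (dilation_width - 1)
     *
     * e.g. filter_width = 3 with dilation_width = 2 covers a window of
     * 3 + 2 * 1 = 5 input cells.
     */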

    /**
     * Performs a depthwise 2-D convolution operation.
     *
     * Given an input tensor of shape [batches, height, width, depth_in] and a
     * filter tensor of shape [1, filter_height, filter_width, depth_out]
     * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
     * applies a different filter to each input channel (expanding from 1
     * channel to channel_multiplier channels for each), then concatenates the
     * results together.
     *
     * The output has depth_out = depth_in * depth_multiplier channels.
     * The output dimensions are functions of the filter dimensions, stride, and
     * padding.
     *
     * The values in the output tensor are computed as:
     *
     *     output[b, i, j, k * channel_multiplier + q] =
     *         sum_{di, dj} (
     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
     *             filter[1, di, dj, k * channel_multiplier + q]
     *         ) + bias[k * channel_multiplier + q]
     *
     * Supported tensor {@link OperandCode} configurations:
     * * 32 bit floating point:
     * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
     *
     * * Quantized:
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * Available since API level 29:
     * * 16 bit floating point:
     * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
     *
     * * Quantized with symmetric per channel quantization for the filter:
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * Available since API level 30:
     * * Quantized signed (since API level 30):
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized signed with filter symmetric per channel quantization (since API level 30):
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
     * With the default data layout NHWC, the data is stored in the order of:
     * [batch, height, width, channels]. Alternatively, the data layout could
     * be NCHW, the data storage order of: [batch, channels, height, width].
     * NCHW is supported since API level 29.
     *
     * Both explicit padding and implicit padding are supported.
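     *
     * Example (illustrative): with depth_in = 8 and depth_multiplier = 2, the
     * filter supplies depth_out = 16 channels; output channel k * 2 + q is
     * computed from input channel k alone, for q in {0, 1}.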
     *
     * Inputs (explicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input.
     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
     *      specifying the filter.
     *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
     *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
     *      must be set to 3.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the right, in the ‘width’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the top, in the ‘height’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the bottom, in the ‘height’ dimension.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
     *      multiplier.
     * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *       {@link FuseCode} values. Specifies the activation to
     *       invoke on the result.
     * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *       Set to true to specify NCHW data layout for input0 and output0.
     *       Available since API level 29.
     * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *       factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *       cells between each filter element on the width dimension. If this input is set,
     *       input 13 (dilation factor for height) must be specified as well.
     *       Available since API level 29.
     * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *       factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *       cells between each filter element on the height dimension. If this input is set,
     *       input 12 (dilation factor for width) must be specified as well.
     *       Available since API level 29.
     *
     * Inputs (implicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input.
     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
     *      specifying the filter.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
     *      padding scheme, has to be one of the
     *      {@link PaddingCode} values.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
     *      multiplier.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *      Set to true to specify NCHW data layout for input0 and output0.
     *      Available since API level 29.
     * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *      cells between each filter element on the width dimension. If this input is set,
     *      input 10 (dilation factor for height) must be specified as well.
     *      Available since API level 29.
     * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
     *       factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
     *       cells between each filter element on the height dimension. If this input is set,
     *       input 9 (dilation factor for width) must be specified as well.
     *       Available since API level 29.
     *
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth_out]. Before API level 29, for
     *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
     *      the following condition must be satisfied:
     *      output_scale > input_scale * filter_scale
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,

    /**
     * Rearranges data from depth into blocks of spatial data.
     *
     * More specifically, this op outputs a copy of the input tensor where
     * values from the depth dimension are moved in spatial blocks to the height
     * and width dimensions. The value block_size indicates the input block size
     * and how the data is moved.
     *
     * Chunks of data of size block_size * block_size from depth are rearranged
     * into non-overlapping blocks of size block_size x block_size.
     *
     * The width of the output tensor is input_width * block_size, whereas the
     * height is input_height * block_size. The depth of the input tensor must
     * be divisible by block_size * block_size.
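     *
     * Example (illustrative): an input of shape [1, 2, 2, 8] with
     * block_size = 2 yields an output of shape [1, 4, 4, 2].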
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
     * With the default data layout NHWC, the data is stored in the order of:
     * [batch, height, width, channels]. Alternatively, the data layout could
     * be NCHW, the data storage order of: [batch, channels, height, width].
     * NCHW is supported since API level 29.
     *
     * Inputs:
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
     *      block_size must be >=1 and block_size * block_size must be a divisor
     *      of the input depth.
     * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *      Set to true to specify NCHW data layout for input0 and output0.
     *      Available since API level 29.
     *
     * Outputs:
     * * 0: The output 4-D tensor, of shape [batch, height*block_size,
     *      width*block_size, depth/(block_size*block_size)].
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_DEPTH_TO_SPACE = 5,

    /**
     * Dequantizes the input tensor.
     *
     * The formula is:
     *
     *     output = (input - zeroPoint) * scale.
     *
     * Supported input tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported output tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: A tensor.
     *      Since API level 29, this tensor may be zero-sized.
     *
     * Outputs:
     * * 0: A tensor with the same shape as input0.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_DEQUANTIZE = 6,

    /**
     * Looks up sub-tensors in the input tensor.
     *
     * This operator takes for input a tensor of values (Values) and
     * a one-dimensional tensor of selection indices (Lookups).
     * The output tensor is the concatenation of sub-tensors of Values as
     * selected by Lookups.
     *
     * Think of Values as being sliced along its first dimension:
     * The entries in Lookups select which slices are concatenated together
     * to create the output tensor.
     *
     * For example, if Values has shape of [40, 200, 300] and
     * Lookups has shape of [3], all three values found in Lookups are
     * expected to be between 0 and 39. The resulting tensor must
     * have shape of [3, 200, 300].
     *
     * If a value in Lookups is out of bounds, the operation must fail
     * and an error must be reported.
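     *
     * Conceptually, the reference behavior is (editorial sketch; the helper
     * names below are hypothetical, not part of this API):
     *
     *     // output[i, ...] = Values[Lookups[i], ...]
     *     for (uint32_t i = 0; i < lookupCount; ++i) {
     *         memcpy(output + i * sliceSize,
     *                values + lookups[i] * sliceSize,
     *                sliceSize * sizeof(float));
     *     }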
     *
     * Supported value tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 30)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported value tensor rank: from 2
     *
     * Inputs:
     * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
     *      The values are indices into the first dimension of Values.
     * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
     *      extracted.
     *
     * Output:
     * * 0: An n-D tensor with the same rank and shape as the Values
     *      tensor, except for the first dimension which has the same size
     *      as Lookups' only dimension.
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input1.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,

    /**
     * Computes element-wise floor() on the input tensor.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: A tensor.
     *
     * Outputs:
     * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
     *      the input tensor.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_FLOOR = 8,

    /**
     * Denotes a fully (densely) connected layer, which connects all elements
     * in the input tensor with each element in the output tensor.
     *
     * This layer implements the operation:
     *
     *     outputs = activation(inputs * weights’ + bias)
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: up to 4.
     *
     * Inputs:
     * * 0: A tensor of at least rank 2, specifying the input. If rank is
     *      greater than 2, then it gets flattened to a 2-D Tensor. The
     *      (flattened) 2-D Tensor is reshaped (if necessary) to
     *      [batch_size, input_size], where "input_size" corresponds to the
     *      number of inputs to the layer, matching the second dimension of
     *      weights, and "batch_size" is calculated by dividing the number of
     *      elements by "input_size".
     *      Since API level 29, zero batch_size is supported for this tensor.
     * * 1: A 2-D tensor, specifying the weights, of shape
     *      [num_units, input_size], where "num_units" corresponds to the number
     *      of output nodes.
     * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
     *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
     *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     *
     * Outputs:
     * * 0: The output tensor, of shape [batch_size, num_units]. Before API level 29, for
     *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
     *      condition must be satisfied: output_scale > input_scale * filter_scale.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_FULLY_CONNECTED = 9,

    /**
     * Looks up sub-tensors in the input tensor using a key-value map.
     *
     * This operator takes for input a tensor of values (Values),
     * a one-dimensional tensor of selection values (Lookups) and
     * a one-dimensional tensor that maps these values to Values
     * indexes. The output tensor is the concatenation of sub-tensors of
     * Values as selected by Lookups via Keys.
     *
     * Think of Values as being sliced along its outer-most dimension.
     * The output is a concatenation of selected slices, with one slice
     * for each entry of Lookups. The slice selected is the one at the
     * same index as the Maps entry that matches the value in Lookups.
     *
     * For a hit, the corresponding sub-tensor of Values is included
     * in the Output tensor. For a miss, the corresponding sub-tensor in
     * Output must have zero values.
     *
     * For example, if Values has shape of [40, 200, 300],
     * Keys should have a shape of [40]. If Lookups tensor has shape
     * of [3], three slices are being concatenated, so the resulting tensor
     * must have the shape of [3, 200, 300]. If the first entry in Lookups
     * has the value 123456, that value must be located in Keys tensor.
     * If the sixth entry of Keys contains 123456, the sixth slice of Values
     * must be selected. If no entry in Keys has 123456, a slice of zeroes
     * must be concatenated.
     *
     * Supported value tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_INT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *
     * Supported value tensor rank: from 2
     *
     * Inputs:
     * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
     *      shape [ k ].
     * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
     *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
     *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
     *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
     *      ascending order.
     * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
     *      must be n.
     *
     * Outputs:
     * * 0: Output. A tensor with shape [ k, … ].
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input2.
     * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
     *      hits (True) or not (False).
     *      Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
     *      and scale 1.0f.
     *      A non-zero byte represents True, a hit. A zero indicates otherwise.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,

    /**
     * Applies L2 normalization along the axis dimension.
     *
     * The values in the output tensor are computed as:
     *
     *     output[batch, row, col, channel] =
     *         input[batch, row, col, channel] /
     *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
     *
     * By default the axis dimension is the last dimension of the input tensor.
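     *
     * Example (illustrative): normalizing the vector [3.0, 4.0] along its
     * last axis yields [0.6, 0.8], since sqrt(3*3 + 4*4) = 5.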
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: up to 4
     * Tensors with rank less than 4 are only supported since API level 29.
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be normalized.
     * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
     *      specifying the dimension normalization would be performed on.
     *      Negative index is used to specify axis from the end (e.g. -1 for
     *      the last axis). Must be in the range [-n, n).
     *      Available since API level 29.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
     *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
     *      the scale must be 1.f / 128 and the zeroPoint must be 128.
     *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the scale must be 1.f / 128 and the zeroPoint must be 0.
     *
     * NOTE: Before API level 30, if the elements along an axis are all zeros,
     * the result is undefined. Since API level 30, if the elements along an axis
     * are all zeros, the result is logical zero.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_L2_NORMALIZATION = 11,

    /**
     * Performs a 2-D L2 pooling operation.
     *
     * The output dimensions are functions of the filter dimensions, stride, and
     * padding.
     *
     * The values in the output tensor are computed as:
     *
     *     output[b, i, j, c] =
     *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
     *              sum(1))
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *
     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
     * With the default data layout NHWC, the data is stored in the order of:
     * [batch, height, width, channels]. Alternatively, the data layout could
     * be NCHW, the data storage order of: [batch, channels, height, width].
     * NCHW is supported since API level 29.
     *
     * Both explicit padding and implicit padding are supported.
     *
     * Inputs (explicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
     *      the input.
     *      Since API level 29, zero batches is supported for this tensor.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the right, in the ‘width’ dimension.
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the top, in the ‘height’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the bottom, in the ‘height’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      width.
     * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      height.
     * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *       Set to true to specify NCHW data layout for input0 and output0.
     *       Available since API level 29.
     *
     * Inputs (implicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
     *      the input.
     *      Since API level 29, zero batches is supported for this tensor.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
     *      padding scheme, has to be one of the
     *      {@link PaddingCode} values.
     * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      width.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
     *      height.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *      {@link FuseCode} values. Specifies the activation to
     *      invoke on the result.
     * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
     *      Set to true to specify NCHW data layout for input0 and output0.
     *      Available since API level 29.
     *
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth].
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_L2_POOL_2D = 12,

    /**
     * Applies Local Response Normalization along the depth dimension.
     *
     * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
     * last dimension), and each vector is normalized independently. Within a
     * given vector, each component is divided by the weighted, squared sum of
     * inputs within depth_radius.
     *
     * The output is calculated using this formula:
     *
     *     sqr_sum[a, b, c, d] = sum(
     *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
     *     output = input / pow((bias + alpha * sqr_sum), beta)
     *
     * For input tensor with rank less than 4, independently normalizes each
     * 1-D slice along the specified dimension.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *
     * Supported tensor rank: up to 4
     * Tensors with rank less than 4 are only supported since API level 29.
     *
     * Inputs:
     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
     *      the input.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
     *      the normalization window.
     * * 2: A scalar, specifying the bias, must not be zero.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias
     *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
     *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
     * * 3: A scalar, specifying the scale factor, alpha.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
     *      alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
     *      alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
     * * 4: A scalar, specifying the exponent, beta.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
     *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
     *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
     *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
     * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
     *      specifying the dimension normalization would be performed on.
     *      Negative index is used to specify axis from the end (e.g. -1 for
     *      the last axis). Must be in the range [-n, n).
     *      Available since API level 29.
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,

    /**
     * Computes sigmoid activation on the input tensor element-wise.
     *
     * The output is calculated using this formula:
     *
     *     output = 1 / (1 + exp(-input))
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: up to 4.
     *
     * Inputs:
     * * 0: A tensor, specifying the input.
     *      Since API level 29, this tensor may be zero-sized.
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
     *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the scale must be 1.f / 256 and the zeroPoint must be -128.
     *
     * Available since API level 27.
     */
    ANEURALNETWORKS_LOGISTIC = 14,

    /**
     * Projects an input to a bit vector via locality sensitive hashing.
     *
     * Supported input tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_INT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *
     * Supported input tensor rank: from 1
     *
     * Inputs:
     * * 0: Hash functions. Dim.size == 2, DataType: Float.
     *      Tensor[0].Dim[0]: Number of hash functions.
     *      Tensor[0].Dim[1]: Number of projected output bits generated by each
     *      hash function.
     *      If the projection type is Sparse:
     *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
     *
     * * 1: Input. Dim.size >= 1, no restriction on DataType.
     * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
     *      If not set, each input element is considered to have the same weight
     *      of 1.0.
     *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
     * * 3: Type:
     *      Sparse:
     *        Value LSHProjectionType_SPARSE(=3) (since API level 29).
     *        Computed bit vector is considered to be sparse.
     *        Each output element is an int32 made up of multiple bits
     *        computed from hash functions.
     *
     *        NOTE: To avoid collisions across hash functions, an offset value
     *        of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
     *        where k is the index of the hash function.
     *
     *        Value LSHProjectionType_SPARSE_DEPRECATED(=1).
     *        Legacy behavior that does not include the offset value.
     *
     *      Dense:
     *        Value LSHProjectionType_DENSE(=2).
     *        Computed bit vector is considered to be dense. Each output
     *        element represents a bit and can take the value of either
     *        0 or 1.
     *
     * Outputs:
     * * 0: If the projection type is Sparse:
     *      Output.Dim == { Tensor[0].Dim[0] }
     *      A tensor of int32 that represents hash signatures.
     *
     *      If the projection type is Dense:
     *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
     *      A flattened tensor that represents projected bit vectors.
     *
     * Available since API level 27.
     * The offset value for sparse projections was added in API level 29.
     */
    ANEURALNETWORKS_LSH_PROJECTION = 15,

    /**
     * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
     *
     * The LSTM operation is described by the following equations.
     *
     * \f{eqnarray*}{
     * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
     * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
     * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
     *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
     * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
     *      & & \\
     *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
     *      & if\ there\ is\ a\ projection; \\
     * h_t =& & \\
     *      & o_t \odot g(C_t) & otherwise. \\
     * \f}
     * Where:
     * * \f$x_t\f$ is the input,
     * * \f$i_t\f$ is the input gate,
     * * \f$f_t\f$ is the forget gate,
     * * \f$C_t\f$ is the cell state,
     * * \f$o_t\f$ is the output,
     * * \f$h_t\f$ is the output state,
     * * \f$\sigma\f$ is the logistic sigmoid function,
     * * \f$g\f$ is the cell input and cell output activation function, usually
     *   \f$tanh\f$,
     * * \f$W_{xi}\f$ is the input-to-input weight matrix,
     * * \f$W_{hi}\f$ is the recurrent-to-input weight matrix,
     * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
     * * \f$b_i\f$ is the input gate bias,
     * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
     * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
     * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
     * * \f$b_f\f$ is the forget gate bias,
     * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
     * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
     * * \f$b_c\f$ is the cell bias,
     * * \f$W_{xo}\f$ is the input-to-output weight matrix,
     * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
     * * \f$W_{co}\f$ is the cell-to-output weight matrix,
     * * \f$b_o\f$ is the output gate bias,
     * * \f$W_{proj}\f$ is the projection weight matrix,
     * * \f$b_{proj}\f$ is the projection bias,
     * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
     * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
     * * \f$\odot\f$ is the
     *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
     *   Hadamard product</a> that takes two matrices and produces another
     *   matrix, each element of which is the product of the corresponding
     *   elements of the input matrices.
     *
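     * In C-like pseudocode, a single step without CIFG, peephole connections,
     * or projection reduces to (editorial sketch of the equations above, with
     * .* denoting the Hadamard product):
     *
     *     i = sigmoid(W_xi * x + W_hi * h_prev + b_i);
     *     f = sigmoid(W_xf * x + W_hf * h_prev + b_f);
     *     C = clip(f .* C_prev + i .* g(W_xc * x + W_hc * h_prev + b_c), t_cell);
     *     o = sigmoid(W_xo * x + W_ho * h_prev + b_o);
     *     h = o .* g(C);
     *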
1300 *
1301 * Since API level 29, LSTM supports layer normalization.
1302 * In case layer normalization is used, the inputs to internal activation
1303 * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1304 * following the approach in section 3.1 of
1305 * https://arxiv.org/pdf/1607.06450.pdf
1306 *
1307 * The operation has the following independently optional inputs:
1308 * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1309 *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1310 *   have values or none of them have values (i.e., all set to null). If
1311 *   they have values, the peephole optimization is used.
1312 * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1313 *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1314 *   or none of them have values. If they have no values, coupling of input
1315 *   and forget gates (CIFG) is used, in which case the input gate
1316 *   (\f$i_t\f$) is calculated using the following equation instead.
1317 *   \f{eqnarray*}{
1318 *   i_t = 1 - f_t
1319 *   \f}
1320 *   In case peephole optimization is used and CIFG is not used, the
1321 *   cell-to-input weights (\f$W_{ci}\f$) must be present. Otherwise, the
1322 *   cell-to-input weights must have no value.
1323 * * The projection weights (\f$W_{proj}\f$) are required only for the
1324 *   recurrent projection layer, and should otherwise have no value.
1325 * * The projection bias (\f$b_{proj}\f$) may (but is not required to) have
1326 *   a value if the recurrent projection layer exists, and should otherwise
1327 *   have no value.
1328 * * (API level 29 or later) The four layer normalization weights either all
1329 *   have values or none of them have values. Additionally, if CIFG is used,
1330 *   the input layer normalization weights tensor is omitted and the other
1331 *   layer normalization weights either all have values or none of them
1332 *   have values. Layer normalization is used when the values of all the
1333 *   layer normalization weights are present.
1334 *
1335 * References:
1336 *
1337 * The default non-peephole non-CIFG implementation is based on:
1338 * http://www.bioinf.jku.at/publications/older/2604.pdf
1339 * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1340 * Computation, 9(8):1735-1780, 1997.
1341 *
1342 * The peephole implementation and projection layer are based on:
1343 * https://research.google.com/pubs/archive/43905.pdf
1344 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1345 * recurrent neural network architectures for large scale acoustic
1346 * modeling." INTERSPEECH, 2014.
1347 * (However, the concept of peephole optimization was introduced in work
1348 * prior to this paper.)
1349 *
1350 * The coupling of input and forget gate (CIFG) is based on:
1351 * http://arxiv.org/pdf/1503.04069.pdf
1352 * Greff et al. "LSTM: A Search Space Odyssey"
1353 *
1354 * The layer normalization is based on:
1355 * https://arxiv.org/pdf/1607.06450.pdf
1356 * Jimmy Ba et al. "Layer Normalization"
1357 *
1358 * Supported tensor {@link OperandCode}:
1359 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1360 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1361 *
1362 * All input and output tensors must be of the same type.
1363 *
1364 * Inputs:
1365 * * 0: The input (\f$x_t\f$).
1366 *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1367 *      corresponds to the batching dimension, and “input_size” is the size
1368 *      of the input.
1369 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. 1370 * A 2-D tensor of shape [num_units, input_size], where “num_units” 1371 * corresponds to the number of cell units. 1372 * * 2: The input-to-forget weights (\f$W_{xf}\f$). 1373 * A 2-D tensor of shape [num_units, input_size]. 1374 * * 3: The input-to-cell weights (\f$W_{xc}\f$). 1375 * A 2-D tensor of shape [num_units, input_size]. 1376 * * 4: The input-to-output weights (\f$W_{xo}\f$). 1377 * A 2-D tensor of shape [num_units, input_size]. 1378 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 1379 * A 2-D tensor of shape [num_units, output_size], where “output_size” 1380 * corresponds to either the number of cell units (i.e., “num_units”), 1381 * or the second dimension of the “projection_weights”, if defined. 1382 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). 1383 * A 2-D tensor of shape [num_units, output_size]. 1384 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). 1385 * A 2-D tensor of shape [num_units, output_size]. 1386 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). 1387 * A 2-D tensor of shape [num_units, output_size]. 1388 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. 1389 * A 1-D tensor of shape [num_units]. 1390 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. 1391 * A 1-D tensor of shape [num_units]. 1392 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. 1393 * A 1-D tensor of shape [num_units]. 1394 * * 12:The input gate bias (\f$b_i\f$). Optional. 1395 * A 1-D tensor of shape [num_units]. 1396 * * 13:The forget gate bias (\f$b_f\f$). 1397 * A 1-D tensor of shape [num_units]. 1398 * * 14:The cell bias (\f$b_c\f$). 1399 * A 1-D tensor of shape [num_units]. 1400 * * 15:The output gate bias (\f$b_o\f$). 1401 * A 1-D tensor of shape [num_units]. 1402 * * 16:The projection weights (\f$W_{proj}\f$). Optional. 1403 * A 2-D tensor of shape [output_size, num_units]. 1404 * * 17:The projection bias (\f$b_{proj}\f$). Optional. 1405 * A 1-D tensor of shape [output_size]. 1406 * * 18:The output state (in) (\f$h_{t-1}\f$). 1407 * A 2-D tensor of shape [batch_size, output_size]. 1408 * * 19:The cell state (in) (\f$C_{t-1}\f$). 1409 * A 2-D tensor of shape [batch_size, num_units]. 1410 * * 20:The activation function (\f$g\f$). 1411 * A value indicating the activation function: 1412 * <ul> 1413 * <li>0: None; 1414 * <li>1: Relu; 1415 * <li>3: Relu6; 1416 * <li>4: Tanh; 1417 * <li>6: Sigmoid. 1418 * </ul> 1419 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such 1420 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 1421 * then clipping is disabled. 1422 * Until API level 29 this scalar must be of type {@link 1423 * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input 1424 * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this 1425 * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, 1426 * otherwise if all the input tensors have the type {@link 1427 * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link 1428 * ANEURALNETWORKS_FLOAT16}. 1429 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the 1430 * projection layer, such that values are bound within 1431 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. 1432 * Until API level 29 this scalar must be of type {@link 1433 * ANEURALNETWORKS_FLOAT32}. 
Since API level 29, if all the input
1434 *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1435 *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1436 *      otherwise if all the input tensors have the type {@link
1437 *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1438 *      ANEURALNETWORKS_FLOAT16}.
1439 * Since API level 29, there are additional inputs to this op:
1440 * * 23:The input layer normalization weights.
1441 *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1442 *      to activation at input gate.
1443 * * 24:The forget layer normalization weights.
1444 *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1445 *      to activation at forget gate.
1446 * * 25:The cell layer normalization weights.
1447 *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1448 *      to activation at cell gate.
1449 * * 26:The output layer normalization weights.
1450 *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1451 *      to activation at output gate.
1452 *
1453 * Outputs:
1454 * * 0: The scratch buffer.
1455 *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1456 *      [batch_size, num_units * 4] without CIFG.
1457 * * 1: The output state (out) (\f$h_t\f$).
1458 *      A 2-D tensor of shape [batch_size, output_size].
1459 * * 2: The cell state (out) (\f$C_t\f$).
1460 *      A 2-D tensor of shape [batch_size, num_units].
1461 * * 3: The output (\f$o_t\f$).
1462 *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1463 *      the same as the current “output state (out)” value.
1464 *
1465 * Available since API level 27.
1466 */
1467 ANEURALNETWORKS_LSTM = 16,
1468
1469 /**
1470 * Performs a 2-D max pooling operation.
1471 *
1472 * The output dimensions are functions of the filter dimensions, stride, and
1473 * padding.
1474 *
1475 * The values in the output tensor are computed as:
1476 *
1477 *     output[b, i, j, channel] =
1478 *         max_{di, dj} (
1479 *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1480 *         )
1481 *
1482 * Supported tensor {@link OperandCode}:
1483 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1484 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1485 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1486 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1487 *
1488 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1489 * With the default data layout NHWC, the data is stored in the order of:
1490 * [batch, height, width, channels]. Alternatively, the data layout could
1491 * be NCHW, the data storage order of: [batch, channels, height, width].
1492 * NCHW is supported since API level 29.
1493 *
1494 * Both explicit padding and implicit padding are supported.
1495 *
1496 * Inputs (explicit padding):
1497 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1498 *      the input.
1499 *      Since API level 29, zero batches is supported for this tensor.
1500 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1501 *      the left, in the ‘width’ dimension.
1502 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1503 *      the right, in the ‘width’ dimension.
1504 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1505 *      the top, in the ‘height’ dimension.
1506 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1507 *      the bottom, in the ‘height’ dimension.
1508 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1509 * walking through input in the ‘width’ dimension. 1510 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1511 * walking through input in the ‘height’ dimension. 1512 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1513 * width. 1514 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1515 * height. 1516 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1517 * {@link FuseCode} values. Specifies the activation to 1518 * invoke on the result. 1519 * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1520 * Set to true to specify NCHW data layout for input0 and output0. 1521 * Available since API level 29. 1522 * 1523 * Inputs (implicit padding): 1524 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 1525 * the input. 1526 * Since API level 29, zero batches is supported for this tensor. 1527 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 1528 * padding scheme, has to be one of the 1529 * {@link PaddingCode} values. 1530 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1531 * walking through input in the ‘width’ dimension. 1532 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1533 * walking through input in the ‘height’ dimension. 1534 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1535 * width. 1536 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1537 * height. 1538 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1539 * {@link FuseCode} values. Specifies the activation to 1540 * invoke on the result. 1541 * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1542 * Set to true to specify NCHW data layout for input0 and output0. 1543 * Available since API level 29. 1544 * 1545 * Outputs: 1546 * * 0: The output 4-D tensor, of shape 1547 * [batches, out_height, out_width, depth]. 1548 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 1549 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 1550 * the scale and zeroPoint must be the same as input0. 1551 * 1552 * Available since API level 27. 1553 */ 1554 ANEURALNETWORKS_MAX_POOL_2D = 17, 1555 1556 /** 1557 * Multiplies two tensors, element-wise. 1558 * 1559 * Takes two input tensors of identical {@link OperandCode} and compatible 1560 * dimensions. The output is the product of both input tensors, optionally 1561 * modified by an activation function. 1562 * 1563 * Two dimensions are compatible when: 1564 * 1. they are equal, or 1565 * 2. one of them is 1 1566 * 1567 * The size of the resulting output is the maximum size along each dimension 1568 * of the input operands. It starts with the trailing dimensions, and works 1569 * its way forward. 1570 * 1571 * Since API level 29, generic zero-sized input tensor is supported. Zero 1572 * dimension is only compatible with 0 or 1. The size of the output 1573 * dimension is zero if either of corresponding input dimension is zero. 
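 *
 * For example (an illustrative broadcast, not an additional constraint):
 * multiplying a tensor of dimensions {4, 1, 2} by a tensor of dimensions
 * {5, 4, 3, 1} produces an output of dimensions {5, 4, 3, 2}, since each
 * pair of corresponding dimensions, aligned from the trailing end, is
 * either equal or has one side equal to 1.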
1574 * 1575 * Supported tensor {@link OperandCode}: 1576 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1577 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1578 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1579 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 1580 * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) 1581 * 1582 * Supported tensor rank: up to 4 1583 * 1584 * Inputs: 1585 * * 0: A tensor. 1586 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 1587 * as input0. 1588 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1589 * {@link FuseCode} values. Specifies the activation to 1590 * invoke on the result. 1591 * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, 1592 * the {@link FuseCode} must be "NONE". 1593 * 1594 * Outputs: 1595 * * 0: The product, a tensor of the same {@link OperandCode} as input0. 1596 * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1597 * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, 1598 * the following condition must be satisfied: 1599 * output_scale > input1_scale * input2_scale. 1600 * 1601 * Available since API level 27. 1602 */ 1603 ANEURALNETWORKS_MUL = 18, 1604 1605 /** 1606 * Computes rectified linear activation on the input tensor element-wise. 1607 * 1608 * The output is calculated using this formula: 1609 * 1610 * output = max(0, input) 1611 * 1612 * Supported tensor {@link OperandCode}: 1613 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1614 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1615 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1616 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 1617 * 1618 * Supported tensor rank: up to 4. 1619 * 1620 * Inputs: 1621 * * 0: A tensor, specifying the input. 1622 * Since API level 29, this tensor may be zero-sized. 1623 * 1624 * Outputs: 1625 * * 0: The output tensor of same shape as input0. 1626 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 1627 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 1628 * the scale and zeroPoint must be the same as input0. 1629 * 1630 * Available since API level 27. 1631 */ 1632 ANEURALNETWORKS_RELU = 19, 1633 1634 /** 1635 * Computes rectified linear 1 activation on the input tensor element-wise. 1636 * 1637 * The output is calculated using this formula: 1638 * 1639 * output = min(1.f, max(-1.f, input)) 1640 * 1641 * Supported tensor {@link OperandCode}: 1642 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1643 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1644 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1645 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 1646 * 1647 * Supported tensor rank: up to 4. 1648 * 1649 * Inputs: 1650 * * 0: A tensor, specifying the input. 1651 * Since API level 29, this tensor may be zero-sized. 1652 * 1653 * Outputs: 1654 * * 0: The output tensor of the same shape as input0. 1655 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 1656 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 1657 * the scale and zeroPoint must be the same as input0. 1658 * 1659 * Available since API level 27. 1660 */ 1661 ANEURALNETWORKS_RELU1 = 20, 1662 1663 /** 1664 * Computes rectified linear 6 activation on the input tensor element-wise. 
1665 *
1666 * The output is calculated using this formula:
1667 *
1668 *     output = min(6, max(0, input))
1669 *
1670 * Supported tensor {@link OperandCode}:
1671 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1672 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1673 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1674 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1675 *
1676 * Supported tensor rank: up to 4.
1677 *
1678 * Inputs:
1679 * * 0: A tensor, specifying the input.
1680 *      Since API level 29, this tensor may be zero-sized.
1681 *
1682 * Outputs:
1683 * * 0: The output tensor of same shape as input0.
1684 *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1685 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1686 *      the scale and zeroPoint must be the same as input0.
1687 *
1688 * Available since API level 27.
1689 */
1690 ANEURALNETWORKS_RELU6 = 21,
1691
1692 /**
1693 * Reshapes a tensor.
1694 *
1695 * Given a tensor, this operation returns a tensor that has the same values
1696 * as the input tensor, but with a newly specified shape.
1697 *
1698 * Supported tensor {@link OperandCode}:
1699 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1700 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1701 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1702 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1703 *
1704 * Supported tensor rank: up to 4.
1705 *
1706 * Inputs:
1707 * * 0: A tensor, specifying the tensor to be reshaped.
1708 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
1709 *      shape of the output tensor. The number of elements implied by shape
1710 *      must be the same as the number of elements in the input tensor.
1711 *
1712 *      If one component of shape is the special value -1, the size of that
1713 *      dimension is computed so that the total size remains constant. In
1714 *      particular, a shape of [-1] flattens into 1-D. At most one component
1715 *      of shape can be -1.
1716 *
1717 * Outputs:
1718 * * 0: The output tensor, of shape specified by the input shape.
1719 *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1720 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1721 *      the scale and zeroPoint must be the same as input0.
1722 *
1723 * Available since API level 27.
1724 */
1725 ANEURALNETWORKS_RESHAPE = 22,
1726
1727 /**
1728 * Resizes images to the given size using bilinear interpolation.
1729 *
1730 * Resized images will be distorted if their output aspect ratio is not the
1731 * same as the input aspect ratio. The corner pixels of the output may not
1732 * be the same as the corner pixels of the input.
1733 *
1734 * Supported tensor {@link OperandCode}:
1735 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1736 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1737 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
1738 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1739 *
1740 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1741 * With the default data layout NHWC, the data is stored in the order of:
1742 * [batch, height, width, channels]. Alternatively, the data layout could
1743 * be NCHW, the data storage order of: [batch, channels, height, width].
1744 * NCHW is supported since API level 29.
1745 *
1746 * Both resizing by shape and resizing by scale are supported.
1747 *
1748 * Inputs (resizing by shape):
1749 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1750 *      the input.
1751 *      Since API level 29, zero batches is supported for this tensor.
1752 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1753 *      width of the output tensor.
1754 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1755 *      height of the output tensor.
1756 * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1757 *      Set to true to specify NCHW data layout for input0 and output0.
1758 *      Available since API level 29.
1759 * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1760 *      scalar, default to false. If True, the centers of the 4 corner
1761 *      pixels of the input and output tensors are aligned, preserving the
1762 *      values at the corner pixels.
1763 *      Available since API level 30.
1764 * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1765 *      scalar, default to false. If True, the pixel centers are assumed to
1766 *      be at (0.5, 0.5). This is the default behavior of image.resize in
1767 *      TF 2.0. If this parameter is True, then align_corners parameter
1768 *      must be False.
1769 *      Available since API level 30.
1770 *
1771 * Inputs (resizing by scale, since API level 29):
1772 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1773 *      the input. Zero batches is supported for this tensor.
1774 * * 1: A scalar, specifying width_scale, the scaling factor of the width
1775 *      dimension from the input tensor to the output tensor. The output
1776 *      width is calculated as new_width = floor(width * width_scale).
1777 *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1778 *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1779 *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1780 * * 2: A scalar, specifying height_scale, the scaling factor of the height
1781 *      dimension from the input tensor to the output tensor. The output
1782 *      height is calculated as new_height = floor(height * height_scale).
1783 *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1784 *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1785 *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1786 * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1787 *      Set to true to specify NCHW data layout for input0 and output0.
1788 * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1789 *      scalar, default to false. If True, the centers of the 4 corner
1790 *      pixels of the input and output tensors are aligned, preserving the
1791 *      values at the corner pixels.
1792 *      Available since API level 30.
1793 * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1794 *      scalar, default to false. If True, the pixel centers are assumed to
1795 *      be at (0.5, 0.5). This is the default behavior of image.resize in
1796 *      TF 2.0. If this parameter is True, then align_corners parameter
1797 *      must be False.
1798 *      Available since API level 30.
1799 *
1800 * Outputs:
1801 * * 0: The output 4-D tensor, of shape
1802 *      [batches, new_height, new_width, depth].
1803 *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1804 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1805 *      the scale and zeroPoint must be the same as input0.
1808 *
1809 * Available since API level 27.
1810 */
1811 ANEURALNETWORKS_RESIZE_BILINEAR = 23,
1812
1813 /**
1814 * A basic recurrent neural network layer.
1815 *
1816 * This layer implements the operation:
1817 * outputs = state = activation(inputs * input_weights +
1818 *                              state * recurrent_weights + bias)
1819 *
1820 * Where:
1821 * * “input_weights” is a weight matrix that multiplies the inputs;
1822 * * “recurrent_weights” is a weight matrix that multiplies the current
1823 *   “state” which itself is the output from the previous time step
1824 *   computation;
1825 * * “bias” is a bias vector (added to each output vector in the batch);
1826 * * “activation” is the function passed as the “fused_activation_function”
1827 *   argument (if not “NONE”).
1828 *
1829 * Supported tensor {@link OperandCode}:
1830 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1831 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1832 *
1833 * The input tensors must all be the same type.
1834 *
1835 * Inputs:
1836 * * 0: input.
1837 *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1838 *      corresponds to the batching dimension, and “input_size” is the size
1839 *      of the input.
1840 * * 1: weights.
1841 *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1842 *      corresponds to the number of units.
1843 * * 2: recurrent_weights.
1844 *      A 2-D tensor of shape [num_units, num_units], with columns
1845 *      corresponding to the weights from each unit.
1846 * * 3: bias.
1847 *      A 1-D tensor of shape [num_units].
1848 * * 4: hidden state (in).
1849 *      A 2-D tensor of shape [batch_size, num_units].
1850 * * 5: fused_activation_function.
1851 *      An optional {@link FuseCode} value indicating the
1852 *      activation function. If “NONE” is specified then it results in a
1853 *      linear activation.
1854 *
1855 * Outputs:
1856 * * 0: hidden state (out).
1857 *      A 2-D tensor of shape [batch_size, num_units].
1858 *
1859 * * 1: output.
1860 *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1861 *      the same as the current state value.
1862 *
1863 * Available since API level 27.
1864 */
1865 ANEURALNETWORKS_RNN = 24,
1866
1867 /**
1868 * Computes the softmax activation on the input tensor element-wise, per
1869 * batch, by normalizing the input vector so the maximum coefficient is
1870 * zero.
1871 *
1872 * The output is calculated using this formula:
1873 *
1874 *     output[batch, i] =
1875 *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1876 *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1877 *
1878 * For input tensor with rank other than 2, the activation will be applied
1879 * independently on each 1-D slice along the specified dimension.
1880 *
1881 * Supported tensor {@link OperandCode}:
1882 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1883 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1884 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1885 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1886 *
1887 * Supported tensor rank: up to 4.
1888 * Tensors with rank other than 2 or 4 are only supported since API level 29.
1889 *
1890 * Inputs:
1891 * * 0: A 2-D or 4-D tensor, specifying the input tensor.
1892 *      Since API level 29, this tensor may be zero-sized.
1893 * * 1: A scalar, specifying the positive scaling factor for the exponent,
1894 *      beta.
If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
1895 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
1896 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1897 *      must be of {@link ANEURALNETWORKS_FLOAT32}.
1898 *      If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the
1899 *      scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
1900 * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1901 *      specifying the dimension the activation would be performed on.
1902 *      Negative index is used to specify axis from the end (e.g. -1 for
1903 *      the last axis). Must be in the range [-n, n).
1904 *      Available since API level 29.
1905 *
1906 * Outputs:
1907 * * 0: The output tensor of same shape as input0.
1908 *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1909 *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1910 *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1911 *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1912 *
1913 * Available since API level 27.
1914 */
1915 ANEURALNETWORKS_SOFTMAX = 25,
1916
1917 /**
1918 * Rearranges blocks of spatial data into depth.
1919 *
1920 * More specifically, this op outputs a copy of the input tensor where
1921 * values from the height and width dimensions are moved to the depth
1922 * dimension. The value block_size indicates the input block size and how
1923 * the data is moved.
1924 *
1925 * Non-overlapping blocks of size block_size x block_size in the height and
1926 * width dimensions are rearranged into chunks of size
1927 * block_size * block_size along the depth dimension.
1928 *
1929 * The depth of the output tensor is input_depth * block_size * block_size.
1930 * The input tensor's height and width must be divisible by block_size.
1931 *
1932 * Supported tensor {@link OperandCode}:
1933 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1934 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1935 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1936 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1937 *
1938 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1939 * With the default data layout NHWC, the data is stored in the order of:
1940 * [batch, height, width, channels]. Alternatively, the data layout could
1941 * be NCHW, the data storage order of: [batch, channels, height, width].
1942 * NCHW is supported since API level 29.
1943 *
1944 * Inputs:
1945 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1946 *      specifying the input.
1947 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
1948 *      block_size must be >=1 and block_size must be a divisor of both the
1949 *      input height and width.
1950 * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1951 *      Set to true to specify NCHW data layout for input0 and output0.
1952 *      Available since API level 29.
1953 *
1954 * Outputs:
1955 * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1956 *      width/block_size, depth_in*block_size*block_size].
1957 *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1958 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1959 *      the scale and zeroPoint must be the same as input0.
1960 *
1960 * Available since API level 27.
1961 */ 1962 ANEURALNETWORKS_SPACE_TO_DEPTH = 26, 1963 1964 /** 1965 * SVDF op is a kind of stateful layer derived from the notion that a 1966 * densely connected layer that's processing a sequence of input frames can 1967 * be approximated by using a singular value decomposition of each of its 1968 * nodes. The implementation is based on: 1969 * 1970 * https://research.google.com/pubs/archive/43813.pdf 1971 * 1972 * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. 1973 * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. 1974 * INTERSPEECH, 2015. 1975 * 1976 * It processes the incoming input using a 2-stage filtering mechanism: 1977 * * stage 1 performs filtering on the "features" dimension, whose outputs 1978 * get pushed into a memory of fixed-size memory_size. 1979 * * stage 2 performs filtering on the "time" dimension of the memory_size 1980 * memoized outputs of stage 1. 1981 * 1982 * Specifically, for rank 1, this layer implements the operation: 1983 * 1984 * memory = push(conv1d(inputs, weights_feature, feature_dim, 1985 * "ANEURALNETWORKS_PADDING_VALID")); 1986 * outputs = activation(memory * weights_time + bias); 1987 * 1988 * Where: 1989 * * “weights_feature” is a weights matrix that processes the inputs (by 1990 * convolving the input with every “feature filter”), and whose outputs 1991 * get pushed, stacked in order, into the fixed-size “memory” (the oldest 1992 * entry gets dropped); 1993 * * “weights_time” is a weights matrix that processes the “memory” (by a 1994 * batched matrix multiplication on the num_units); 1995 * * “bias” is an optional bias vector (added to each output vector in the 1996 * batch); and 1997 * * “activation” is the function passed as the “fused_activation_function” 1998 * argument (if not “NONE”). 1999 * 2000 * Each rank adds a dimension to the weights matrices by means of stacking 2001 * the filters. 2002 * 2003 * Supported tensor {@link OperandCode}: 2004 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2005 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2006 * 2007 * All input tensors must be the same type. 2008 * 2009 * Inputs: 2010 * * 0: input. 2011 * A 2-D tensor of shape [batch_size, input_size], where “batch_size” 2012 * corresponds to the batching dimension, and “input_size” is the size 2013 * of the input. 2014 * * 1: weights_feature. 2015 * A 2-D tensor of shape [num_units, input_size], where “num_units” 2016 * corresponds to the number of units. 2017 * * 2: weights_time. 2018 * A 2-D tensor of shape [num_units, memory_size], where “memory_size” 2019 * corresponds to the fixed-size of the memory. 2020 * * 3: bias. 2021 * An optional 1-D tensor of shape [num_units]. 2022 * * 4: state (in). 2023 * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank]. 2024 * * 5: rank. 2025 * The rank of the SVD approximation. 2026 * * 6: fused_activation_function. 2027 * An optional {@link FuseCode} value indicating the 2028 * activation function. If “NONE” is specified then it results in a 2029 * linear activation. 2030 * 2031 * Outputs: 2032 * * 0: state (out). 2033 * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape 2034 * [batch_size, (memory_size - 1) * num_units * rank]. 2035 * * 1: output. 2036 * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape 2037 * [batch_size, num_units]. 2038 * 2039 * Available since API level 27. 2040 */ 2041 ANEURALNETWORKS_SVDF = 27, 2042 2043 /** 2044 * Computes hyperbolic tangent of input tensor element-wise. 
2045 * 2046 * The output is calculated using this formula: 2047 * 2048 * output = tanh(input) 2049 * 2050 * Supported tensor {@link OperandCode}: 2051 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2052 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2053 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) 2054 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2055 * 2056 * Supported tensor rank: up to 4. 2057 * 2058 * Inputs: 2059 * * 0: A tensor, specifying the input. 2060 * Since API level 29, this tensor may be zero-sized. 2061 * 2062 * Outputs: 2063 * * 0: The output tensor of same shape as input0. 2064 * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 2065 * the scale must be 1.f / 128 and the zeroPoint must be 128. 2066 * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, 2067 * the scale must be 1.f / 128 and the zeroPoint must be 0. 2068 * 2069 * Available since API level 27. 2070 */ 2071 ANEURALNETWORKS_TANH = 28, 2072 2073 // Operations below are available since API level 28. 2074 2075 /** 2076 * BatchToSpace for N-dimensional tensors. 2077 * 2078 * This operation reshapes the batch dimension (dimension 0) into M + 1 2079 * dimensions of shape block_shape + [batch], interleaves these blocks back 2080 * into the grid defined by the spatial dimensions [1, ..., M], to obtain a 2081 * result with the same rank as the input. 2082 * 2083 * This is the reverse of SpaceToBatch. 2084 * 2085 * Supported tensor {@link OperandCode}: 2086 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2087 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2088 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2089 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2090 * 2091 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 2092 * With the default data layout NHWC, the data is stored in the order of: 2093 * [batch, height, width, channels]. Alternatively, the data layout could 2094 * be NCHW, the data storage order of: [batch, channels, height, width]. 2095 * NCHW is supported since API level 29. 2096 * 2097 * Inputs: 2098 * * 0: An n-D tensor, specifying the tensor to be reshaped 2099 * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block 2100 * sizes for each spatial dimension of the input tensor. All values 2101 * must be >= 1. 2102 * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 2103 * Set to true to specify NCHW data layout for input0 and output0. 2104 * Available since API level 29. 2105 * 2106 * Outputs: 2107 * * 0: A tensor of the same {@link OperandCode} as input0. 2108 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 2109 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 2110 * the scale and zeroPoint must be the same as input0. 2111 * 2112 * Available since API level 28. 2113 */ 2114 ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29, 2115 2116 /** 2117 * Element-wise division of two tensors. 2118 * 2119 * Takes two input tensors of identical {@link OperandCode} and compatible 2120 * dimensions. The output is the result of dividing the first input tensor 2121 * by the second, optionally modified by an activation function. 2122 * 2123 * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs 2124 * "floor division" ("//" in Python). For example, 2125 * 5 // 2 = 2 2126 * -5 // 2 = -3 2127 * 2128 * Two dimensions are compatible when: 2129 * 1. they are equal, or 2130 * 2. 
one of them is 1 2131 * 2132 * The size of the output is the maximum size along each dimension of the 2133 * input operands. It starts with the trailing dimensions, and works its way 2134 * forward. 2135 * 2136 * Example: 2137 * input1.dimension = {4, 1, 2} 2138 * input2.dimension = {5, 4, 3, 1} 2139 * output.dimension = {5, 4, 3, 2} 2140 * 2141 * Since API level 29, generic zero-sized input tensor is supported. Zero 2142 * dimension is only compatible with 0 or 1. The size of the output 2143 * dimension is zero if either of corresponding input dimension is zero. 2144 * 2145 * Supported tensor {@link OperandCode}: 2146 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2147 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2148 * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) 2149 * 2150 * Supported tensor rank: up to 4 2151 * 2152 * Inputs: 2153 * * 0: An n-D tensor, specifying the first input. 2154 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 2155 * as input0. 2156 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 2157 * {@link FuseCode} values. Specifies the activation to 2158 * invoke on the result. 2159 * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, 2160 * the {@link FuseCode} must be "NONE". 2161 * 2162 * Outputs: 2163 * * 0: A tensor of the same {@link OperandCode} as input0. 2164 * 2165 * Available since API level 28. 2166 */ 2167 ANEURALNETWORKS_DIV = 30, 2168 2169 /** 2170 * Computes the mean of elements across dimensions of a tensor. 2171 * 2172 * Reduces the input tensor along the given dimensions to reduce. Unless 2173 * keep_dims is true, the rank of the tensor is reduced by 1 for each entry 2174 * in axis. If keep_dims is true, the reduced dimensions are retained with 2175 * length 1. 2176 * 2177 * Supported tensor {@link OperandCode}: 2178 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2179 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2180 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2181 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2182 * 2183 * Supported tensor rank: up to 4 2184 * 2185 * Inputs: 2186 * * 0: A tensor, specifying the input. 2187 * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 2188 * to reduce. Must be in the range 2189 * [-rank(input_tensor), rank(input_tensor)). 2190 * 2191 * NOTE: When the operation was introduced, the documentation 2192 * incorrectly stated that if dimensions were empty, the operation 2193 * would reduce across all dimensions. This behavior was never 2194 * implemented. 2195 * 2196 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive, 2197 * retains reduced dimensions with length 1. 2198 * 2199 * Outputs: 2200 * * 0: A tensor of the same {@link OperandCode} as input0. 2201 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 2202 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 2203 * the scale and zeroPoint must be the same as input0. 2204 * If all dimensions are reduced and keep_dims is false, the output 2205 * shape is [1]. 2206 * 2207 * Available since API level 28. 2208 */ 2209 ANEURALNETWORKS_MEAN = 31, 2210 2211 /** 2212 * Pads a tensor. 2213 * 2214 * This operation pads a tensor according to the specified paddings. 
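 *
 * A minimal usage sketch (illustrative only; `model` is assumed to be an
 * ANeuralNetworksModel under construction with a [2, 3] float input
 * operand just added at index 0, operand indices follow the order of
 * addition, and error codes are left unchecked). It pads by 1 on both
 * sides of dimension 0 and by 2 on both sides of dimension 1, yielding a
 * [4, 7] output; the operand semantics are given in the Inputs section
 * below:
 *
 *     uint32_t padShape[2] = {2, 2};               // {rank(input0), 2}
 *     ANeuralNetworksOperandType padType = {
 *             .type = ANEURALNETWORKS_TENSOR_INT32,
 *             .dimensionCount = 2, .dimensions = padShape};
 *     ANeuralNetworksModel_addOperand(model, &padType);    // index 1
 *     int32_t paddings[4] = {1, 1, 2, 2};  // row-major [[1, 1], [2, 2]]
 *     ANeuralNetworksModel_setOperandValue(model, 1, paddings,
 *                                          sizeof(paddings));
 *     uint32_t outDims[2] = {4, 7};        // 1 + 2 + 1 and 2 + 3 + 2
 *     ANeuralNetworksOperandType outType = {
 *             .type = ANEURALNETWORKS_TENSOR_FLOAT32,
 *             .dimensionCount = 2, .dimensions = outDims};
 *     ANeuralNetworksModel_addOperand(model, &outType);    // index 2
 *     uint32_t inIdxs[2] = {0, 1};
 *     uint32_t outIdxs[1] = {2};
 *     ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_PAD,
 *                                       2, inIdxs, 1, outIdxs);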
2215 * 2216 * Supported tensor {@link OperandCode}: 2217 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2218 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2219 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2220 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2221 * (full support since API level 29, see the output section) 2222 * 2223 * Supported tensor rank: up to 4 2224 * 2225 * Inputs: 2226 * * 0: An n-D tensor, specifying the tensor to be padded. 2227 * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings 2228 * for each spatial dimension of the input tensor. The shape of the 2229 * tensor must be {rank(input0), 2}. 2230 * padding[i, 0] specifies the number of elements to be padded in the 2231 * front of dimension i. 2232 * padding[i, 1] specifies the number of elements to be padded after the 2233 * end of dimension i. 2234 * 2235 * Outputs: 2236 * * 0: A tensor of the same {@link OperandCode} as input0. The 2237 * output tensor has the same rank as input0, and each 2238 * dimension of the output tensor has the same size as the 2239 * corresponding dimension of the input tensor plus the size 2240 * of the padding: 2241 * output0.dimension[i] = 2242 * padding[i, 0] + input0.dimension[i] + padding[i, 1] 2243 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 2244 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 2245 * the scale and zeroPoint must be the same as input0. 2246 * 2247 * NOTE: Before API level 29, the pad value for 2248 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. 2249 * Since API level 29, the pad value is always the logical zero. 2250 * 2251 * Available since API level 28. 2252 */ 2253 ANEURALNETWORKS_PAD = 32, 2254 2255 /** 2256 * SpaceToBatch for N-Dimensional tensors. 2257 * 2258 * This operation divides "spatial" dimensions [1, ..., M] of the input into 2259 * a grid of blocks of shape block_shape, and interleaves these blocks with 2260 * the "batch" dimension (0) such that in the output, the spatial dimensions 2261 * [1, ..., M] correspond to the position within the grid, and the batch 2262 * dimension combines both the position within a spatial block and the 2263 * original batch position. Prior to division into blocks, the spatial 2264 * dimensions of the input are optionally zero padded according to paddings. 2265 * 2266 * Supported tensor {@link OperandCode}: 2267 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2268 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2269 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2270 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2271 * (full support since API level 29, see the output section) 2272 * 2273 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 2274 * With the default data layout NHWC, the data is stored in the order of: 2275 * [batch, height, width, channels]. Alternatively, the data layout could 2276 * be NCHW, the data storage order of: [batch, channels, height, width]. 2277 * NCHW is supported since API level 29. 2278 * 2279 * Inputs: 2280 * * 0: An n-D tensor, specifying the input. 2281 * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block 2282 * sizes for each spatial dimension of the input tensor. All values 2283 * must be >= 1. 2284 * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings 2285 * for each spatial dimension of the input tensor. All values must be 2286 * >= 0. 
The shape of the tensor must be {M, 2}, where M is the number
2287 *      of spatial dimensions.
2288 *      padding[i, 0] specifies the number of elements to be padded in the
2289 *      front of dimension i.
2290 *      padding[i, 1] specifies the number of elements to be padded after
2291 *      the end of dimension i.
2292 * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2293 *      Set to true to specify NCHW data layout for input0 and output0.
2294 *      Available since API level 29.
2295 *
2296 * Outputs:
2297 * * 0: A tensor of the same {@link OperandCode} as input0.
2298 *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2299 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2300 *      the scale and zeroPoint must be the same as input0.
2301 *
2302 * NOTE: Before API level 29, the pad value for
2303 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2304 * Since API level 29, the pad value is always the logical zero.
2305 *
2306 * Available since API level 28.
2307 */
2308 ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
2309
2310 /**
2311 * Removes dimensions of size 1 from the shape of a tensor.
2312 *
2313 * Given a tensor input, this operation returns a tensor of the same
2314 * {@link OperandCode} with all dimensions of size 1 removed. If you don't
2315 * want to remove all size 1 dimensions, you can remove specific size 1
2316 * dimensions by specifying the axes (input1).
2317 *
2318 * Supported tensor {@link OperandCode}:
2319 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2320 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2321 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2322 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2323 *
2324 * Supported tensor rank: up to 4
2325 *
2326 * Inputs:
2327 * * 0: An n-D tensor, the tensor to be squeezed.
2328 * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2329 *      dimensions to squeeze. If specified, only squeezes the dimensions
2330 *      listed. Otherwise, squeezes all dimensions. The dimension index
2331 *      starts at 0. An error must be reported if squeezing a dimension that
2332 *      is not 1.
2333 *
2334 * Outputs:
2335 * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
2336 *      same data as input, but has one or more dimensions of size 1
2337 *      removed.
2338 *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2339 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2340 *      the scale and zeroPoint must be the same as input0.
2341 *      If all input dimensions are equal to 1 and are to be squeezed, the
2342 *      output shape is [1].
2343 *
2344 * Available since API level 28.
2345 */
2346 ANEURALNETWORKS_SQUEEZE = 34,
2347
2348 /**
2349 * Extracts a strided slice of a tensor.
2350 *
2351 * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2352 * from the given input tensor. Starting at the location specified by begin,
2353 * the slice continues by adding stride to the index until all dimensions
2354 * are not less than end. Note that a stride can be negative, which causes a
2355 * reverse slice.
2356 *
2357 * Supported tensor {@link OperandCode}:
2358 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2359 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2360 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2361 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2362 *
2363 * Supported tensor rank: up to 4
2364 *
2365 * Inputs:
2366 * * 0: An n-D tensor, specifying the tensor to be sliced.
2367 * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2368 * starts of the dimensions of the input tensor to be sliced. The 2369 * length must be of rank(input0). 2370 * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2371 * ends of the dimensions of the input tensor to be sliced. The length 2372 * must be of rank(input0). 2373 * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2374 * strides of the dimensions of the input tensor to be sliced. The 2375 * length must be of rank(input0). The entries must be non-zero. 2376 * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit 2377 * of begin_mask is set, begin[i] is ignored and the fullest possible 2378 * range in that dimension is used instead. 2379 * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of 2380 * end_mask is set, end[i] is ignored and the fullest possible range in 2381 * that dimension is used instead. 2382 * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the 2383 * ith bit of shrink_axis_mask is set, the ith dimension specification 2384 * shrinks the dimensionality by 1, taking on the value at index 2385 * begin[i]. In this case, the ith specification must define a 2386 * slice of size 1, e.g. begin[i] = x, end[i] = x + 1. 2387 * 2388 * Outputs: 2389 * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k), 2390 * where k is the number of bits set in shrink_axis_mask. 2391 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 2392 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 2393 * the scale and zeroPoint must be the same as input0. 2394 * If shrink_axis_mask is true for all input dimensions, the output 2395 * shape is [1]. 2396 * 2397 * Available since API level 28. 2398 */ 2399 ANEURALNETWORKS_STRIDED_SLICE = 35, 2400 2401 /** 2402 * Element-wise subtraction of two tensors. 2403 * 2404 * Takes two input tensors of identical {@link OperandCode} and compatible 2405 * dimensions. The output is the result of subtracting the second input 2406 * tensor from the first one, optionally modified by an activation function. 2407 * 2408 * Two dimensions are compatible when: 2409 * 1. they are equal, or 2410 * 2. one of them is 1 2411 * 2412 * The size of the output is the maximum size along each dimension of the 2413 * input operands. It starts with the trailing dimensions, and works its way 2414 * forward. 2415 * 2416 * Example: 2417 * input1.dimension = {4, 1, 2} 2418 * input2.dimension = {5, 4, 3, 1} 2419 * output.dimension = {5, 4, 3, 2} 2420 * 2421 * Since API level 29, generic zero-sized input tensor is supported. Zero 2422 * dimension is only compatible with 0 or 1. The size of the output 2423 * dimension is zero if either of corresponding input dimension is zero. 2424 * 2425 * Supported tensor {@link OperandCode}: 2426 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2427 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2428 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) 2429 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2430 * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) 2431 * 2432 * Supported tensor rank: up to 4 2433 * 2434 * Inputs: 2435 * * 0: An n-D tensor, specifying the first input. 2436 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 2437 * as input0. 
2438 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 2439 * {@link FuseCode} values. Specifies the activation to 2440 * invoke on the result. 2441 * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, 2442 * the {@link FuseCode} must be "NONE". 2443 * 2444 * Outputs: 2445 * * 0: A tensor of the same {@link OperandCode} as input0. 2446 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 2447 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 2448 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 2449 * 2450 * Available since API level 28. 2451 */ 2452 ANEURALNETWORKS_SUB = 36, 2453 2454 /** 2455 * Transposes the input tensor, permuting the dimensions according to the 2456 * perm tensor. 2457 * 2458 * The returned tensor's dimension i corresponds to the input dimension 2459 * perm[i]. If perm is not given, it is set to (n-1...0), where n is the 2460 * rank of the input tensor. Hence by default, this operation performs a 2461 * regular matrix transpose on 2-D input Tensors. 2462 * 2463 * Supported tensor {@link OperandCode}: 2464 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2465 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2466 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2467 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2468 * 2469 * Supported tensor rank: up to 4 2470 * 2471 * Inputs: 2472 * * 0: An n-D tensor, specifying the tensor to be transposed. 2473 * Since API level 29, this tensor may be zero-sized. 2474 * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, 2475 * the permutation of the dimensions of the input tensor. 2476 * 2477 * Outputs: 2478 * * 0: A tensor of the same {@link OperandCode} as input0. 2479 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 2480 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 2481 * the scale and zeroPoint must be the same as input0. 2482 * 2483 * Available since API level 28. 2484 */ 2485 ANEURALNETWORKS_TRANSPOSE = 37, 2486 2487 // Operations below are available since API level 29. 2488 2489 /** 2490 * Computes the absolute value of a tensor, element-wise. 2491 * 2492 * Supported tensor {@link OperandCode}: 2493 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2494 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2495 * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) 2496 * 2497 * Supported tensor rank: from 1. 2498 * 2499 * Inputs: 2500 * * 0: A tensor. 2501 * 2502 * Outputs: 2503 * * 0: The output tensor of same shape as input0. 2504 * 2505 * Available since API level 29. 2506 */ 2507 ANEURALNETWORKS_ABS = 38, 2508 2509 /** 2510 * Returns the index of the largest element along an axis. 2511 * 2512 * Supported tensor {@link OperandCode}: 2513 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2514 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2515 * * {@link ANEURALNETWORKS_TENSOR_INT32} 2516 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2517 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 2518 * 2519 * Supported tensor rank: from 1 2520 * 2521 * Inputs: 2522 * * 0: An n-D tensor specifying the input. Must be non-empty. 2523 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to 2524 * reduce across. Negative index is used to specify axis from the 2525 * end (e.g. -1 for the last axis). Must be in the range [-n, n). 2526 * 2527 * Outputs: 2528 * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. 
2529 *      If input is 1-dimensional, the output shape is [1].
2530 *
2531 * Available since API level 29.
2532 */
2533 // There is no underscore in ARG_MAX to avoid name conflict with
2534 // the macro defined in libc/kernel/uapi/linux/limits.h.
2535 ANEURALNETWORKS_ARGMAX = 39,
2536
2537 /**
2538 * Returns the index of the smallest element along an axis.
2539 *
2540 * Supported tensor {@link OperandCode}:
2541 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2542 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2543 * * {@link ANEURALNETWORKS_TENSOR_INT32}
2544 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2545 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2546 *
2547 * Supported tensor rank: from 1
2548 *
2549 * Inputs:
2550 * * 0: An n-D tensor specifying the input. Must be non-empty.
2551 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2552 *      reduce across. Negative index is used to specify axis from the
2553 *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2554 *
2555 * Outputs:
2556 * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2557 *      If input is 1-dimensional, the output shape is [1].
2558 *
2559 * Available since API level 29.
2560 */
2561 ANEURALNETWORKS_ARGMIN = 40,  // See ARGMAX for naming discussion.
2562
2563 /**
2564 * Transforms axis-aligned bounding box proposals using bounding box deltas.
2565 *
2566 * Given the positions of bounding box proposals and the corresponding
2567 * bounding box deltas for each class, returns the refined bounding box
2568 * regions. The resulting bounding boxes are clipped against the edges of
2569 * the image.
2570 *
2571 * Supported tensor {@link OperandCode}:
2572 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2573 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2574 * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
2575 *
2576 * Inputs:
2577 * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2578 *      bounding box proposals, each line with format [x1, y1, x2, y2].
2579 *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
2580 *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2581 *      is supported for this tensor.
2582 * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2583 *      bounding box delta for each region of interest and each class. The
2584 *      bounding box deltas are organized in the following order
2585 *      [dx, dy, dw, dh], where dx and dy are the relative correction
2586 *      factors for the center position of the bounding box with respect to
2587 *      the width and height, and dw and dh are the log-scale relative
2588 *      correction factors for the width and height. For input0 of type
2589 *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
2590 *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
2591 *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2592 *      supported for this tensor.
2593 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
2594 *      [num_rois], specifying the batch index of each box. Boxes with
2595 *      the same batch index are grouped together. Zero num_rois is
2596 *      supported for this tensor.
2597 * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2598 *      each image in the batch, each line with format
2599 *      [image_height, image_width].
    /**
     * Transform axis-aligned bounding box proposals using bounding box deltas.
     *
     * Given the positions of bounding box proposals and the corresponding
     * bounding box deltas for each class, return the refined bounding box
     * regions. The resulting bounding boxes are clipped against the edges of
     * the image.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
     *
     * Inputs:
     * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
     *      bounding box proposals, each line with format [x1, y1, x2, y2].
     *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
     *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
     *      is supported for this tensor.
     * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
     *      bounding box delta for each region of interest and each class. The
     *      bounding box deltas are organized in the following order
     *      [dx, dy, dw, dh], where dx and dy are the relative correction factors
     *      for the center position of the bounding box with respect to the width
     *      and height, and dw and dh are the log-scale relative correction
     *      factors for the width and height. For input0 of type
     *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
     *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
     *      supported for this tensor.
     * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
     *      [num_rois], specifying the batch index of each box. Boxes with
     *      the same batch index are grouped together. Zero num_rois is
     *      supported for this tensor.
     * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
     *      each image in the batch, each line with format
     *      [image_height, image_width].
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandCode} as input0, with shape
     *      [num_rois, num_classes * 4], specifying the coordinates of each
     *      output bounding box for each class, with format [x1, y1, x2, y2].
     *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
     *      scale must be 0.125 and the zero point must be 0.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,

    /**
     * A recurrent neural network layer that applies an LSTM cell to a
     * sequence of inputs in forward and backward directions.
     *
     * The op supports cross-linking via an auxiliary input. Regular cell feeds
     * one input into the two RNN cells in the following way:
     *
     *          INPUT  (INPUT_REVERSED)
     *            |         |
     *      ---------------------
     *      | FW_LSTM   BW_LSTM |
     *      ---------------------
     *            |         |
     *         FW_OUT     BW_OUT
     *
     * An op with cross-linking takes two inputs and feeds them into the RNN
     * cells in the following way:
     *
     *       AUX_INPUT   (AUX_INPUT_REVERSED)
     *           |             |
     *     INPUT | (INPUT_R'D.)|
     *       |   |       |     |
     *     -----------------------
     *     |  \  /        \    / |
     *     | FW_LSTM     BW_LSTM |
     *     -----------------------
     *          |           |
     *       FW_OUT      BW_OUT
     *
     * The cross-linking mode is enabled iff auxiliary input and auxiliary
     * weights are present. When stacking this op on top of itself, this
     * makes it possible to connect both the forward and backward outputs from
     * the previous cell to the next cell's input.
     *
     * Since API level 30, parallel linking mode is supported. The mode is
     * enabled if the auxiliary input is present but auxiliary weights are
     * omitted. In this case, the cell feeds inputs into the RNN in the
     * following way:
     *
     *          INPUT (AUX_INPUT_REVERSED)
     *            |         |
     *      ---------------------
     *      | FW_LSTM   BW_LSTM |
     *      ---------------------
     *            |         |
     *         FW_OUT     BW_OUT
     *
     * When stacking this op on top of itself, this makes it possible to
     * connect both the forward and backward outputs from the previous cell
     * to the next cell's corresponding inputs.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *
     * Supported tensor rank: 3, either time-major or batch-major.
     *
     * All input and output tensors must be of the same type.
     *
     * Inputs:
     * * 0: The input.
     *      A 3-D tensor of shape:
     *        If time-major: [max_time, batch_size, input_size]
     *        If batch-major: [batch_size, max_time, input_size]
     *      where "max_time" is the number of timesteps (sequence length),
     *      "batch_size" corresponds to the batching dimension, and
     *      "input_size" is the size of the input.
     * * 1: The forward input-to-input weights. Optional.
     *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
     *      corresponds to the number of forward cell units.
     * * 2: The forward input-to-forget weights.
     *      A 2-D tensor of shape [fw_num_units, input_size].
     * * 3: The forward input-to-cell weights.
     *      A 2-D tensor of shape [fw_num_units, input_size].
     * * 4: The forward input-to-output weights.
     *      A 2-D tensor of shape [fw_num_units, input_size].
     * * 5: The forward recurrent-to-input weights. Optional.
2688 * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” 2689 * corresponds to either the number of cell units (i.e., fw_num_units), 2690 * or the second dimension of the “fw_projection_weights”, if defined. 2691 * * 6: The forward recurrent-to-forget weights. 2692 * A 2-D tensor of shape [fw_num_units, fw_output_size]. 2693 * * 7: The forward recurrent-to-cell weights. 2694 * A 2-D tensor of shape [fw_num_units, fw_output_size]. 2695 * * 8: The forward recurrent-to-output weights. 2696 * A 2-D tensor of shape [fw_num_units, fw_output_size]. 2697 * * 9: The forward cell-to-input weights. Optional. 2698 * A 1-D tensor of shape [fw_num_units]. 2699 * * 10: The forward cell-to-forget weights. Optional. 2700 * A 1-D tensor of shape [fw_num_units]. 2701 * * 11: The forward cell-to-output weights. Optional. 2702 * A 1-D tensor of shape [fw_num_units]. 2703 * * 12: The forward input gate bias. Optional. 2704 * A 1-D tensor of shape [fw_num_units]. 2705 * * 13: The forward forget gate bias. 2706 * A 1-D tensor of shape [fw_num_units]. 2707 * * 14: The forward cell gate bias. 2708 * A 1-D tensor of shape [fw_num_units]. 2709 * * 15: The forward output gate bias. 2710 * A 1-D tensor of shape [fw_num_units]. 2711 * * 16: The forward projection weights. Optional. 2712 * A 2-D tensor of shape [fw_output_size, fw_num_units]. 2713 * * 17: The forward projection bias. Optional. 2714 * A 1-D tensor of shape [fw_output_size]. 2715 * * 18: The backward input-to-input weights. Optional. 2716 * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” 2717 * corresponds to the number of backward cell units. 2718 * * 19: The backward input-to-forget weights. 2719 * A 2-D tensor of shape [bw_num_units, input_size]. 2720 * * 20: The backward input-to-cell weights. 2721 * A 2-D tensor of shape [bw_num_units, input_size]. 2722 * * 21: The backward input-to-output weights. 2723 * A 2-D tensor of shape [bw_num_units, input_size]. 2724 * * 22: The backward recurrent-to-input weights. Optional. 2725 * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” 2726 * corresponds to either the number of cell units (i.e., “bw_num_units”), 2727 * or the second dimension of the “bw_projection_weights”, if defined. 2728 * * 23: The backward recurrent-to-forget weights. 2729 * A 2-D tensor of shape [bw_num_units, bw_output_size]. 2730 * * 24: The backward recurrent-to-cell weights. 2731 * A 2-D tensor of shape [bw_num_units, bw_output_size]. 2732 * * 25: The backward recurrent-to-output weights. 2733 * A 2-D tensor of shape [bw_num_units, bw_output_size]. 2734 * * 26: The backward cell-to-input weights. Optional. 2735 * A 1-D tensor of shape [bw_num_units]. 2736 * * 27: The backward cell-to-forget weights. Optional. 2737 * A 1-D tensor of shape [bw_num_units]. 2738 * * 28: The backward cell-to-output weights. Optional. 2739 * A 1-D tensor of shape [bw_num_units]. 2740 * * 29: The backward input gate bias. Optional. 2741 * A 1-D tensor of shape [bw_num_units]. 2742 * * 30: The backward forget gate bias. 2743 * A 1-D tensor of shape [bw_num_units]. 2744 * * 31: The backward cell gate bias. 2745 * A 1-D tensor of shape [bw_num_units]. 2746 * * 32: The backward output gate bias. 2747 * A 1-D tensor of shape [bw_num_units]. 2748 * * 33: The backward projection weights. Optional. 2749 * A 2-D tensor of shape [bw_output_size, bw_num_units]. 2750 * * 34: The backward projection bias. Optional. 2751 * A 1-D tensor of shape [bw_output_size]. 
     * * 35: The forward input activation state.
     *       A 2-D tensor of shape [batch_size, fw_output_size].
     * * 36: The forward input cell state.
     *       A 2-D tensor of shape [batch_size, fw_num_units].
     * * 37: The backward input activation state.
     *       A 2-D tensor of shape [batch_size, bw_output_size].
     * * 38: The backward input cell state.
     *       A 2-D tensor of shape [batch_size, bw_num_units].
     * * 39: The auxiliary input. Optional.
     *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
     *       where “batch_size” corresponds to the batching dimension, and
     *       “aux_input_size” is the size of the auxiliary input. See the docs
     *       above for the usage modes explanation.
     * * 40: The forward auxiliary input-to-input weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
     * * 41: The forward auxiliary input-to-forget weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
     * * 42: The forward auxiliary input-to-cell weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
     * * 43: The forward auxiliary input-to-output weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [fw_num_units, aux_input_size].
     * * 44: The backward auxiliary input-to-input weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
     * * 45: The backward auxiliary input-to-forget weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
     * * 46: The backward auxiliary input-to-cell weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
     * * 47: The backward auxiliary input-to-output weights.
     *       Optional. See the docs above for the usage modes explanation.
     *       A 2-D tensor of shape [bw_num_units, aux_input_size].
     * * 48: The activation function.
     *       A value indicating the activation function:
     *       <ul>
     *       <li>0: None;
     *       <li>1: Relu;
     *       <li>3: Relu6;
     *       <li>4: Tanh;
     *       <li>6: Sigmoid.
     *       </ul>
     * * 49: The clipping threshold for the cell state, such
     *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
     *       then clipping is disabled.
     *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
     *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
     *       otherwise if all the input tensors have the type
     *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
     *       of type {@link ANEURALNETWORKS_FLOAT16}.
     * * 50: The clipping threshold for the output from the
     *       projection layer, such that values are bound within
     *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
     *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
     *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
     *       otherwise if all the input tensors have the type
     *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
     *       of type {@link ANEURALNETWORKS_FLOAT16}.
2814 * * 51: merge_outputs 2815 * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs 2816 * from forward and backward cells should be merged. 2817 * * 52: time_major 2818 * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format 2819 * of input and output tensors. 2820 * * 53: The forward input layer normalization weights. Optional. 2821 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2822 * to activation at input gate. 2823 * * 54: The forward forget layer normalization weights. Optional. 2824 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2825 * to activation at forget gate. 2826 * * 55: The forward cell layer normalization weights. Optional. 2827 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2828 * to activation at cell gate. 2829 * * 56: The forward output layer normalization weights. Optional. 2830 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2831 * to activation at output gate. 2832 * * 57: The backward input layer normalization weights. Optional. 2833 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2834 * to activation at input gate. 2835 * * 58: The backward forget layer normalization weights. Optional. 2836 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2837 * to activation at forget gate. 2838 * * 59: The backward cell layer normalization weights. Optional. 2839 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2840 * to activation at cell gate. 2841 * * 60: The backward output layer normalization weights. Optional. 2842 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2843 * to activation at output gate. 2844 * 2845 * Outputs: 2846 * * 0: The forward output. 2847 * A 3-D tensor of shape: 2848 * If time-major and not merge_outputs: 2849 * [max_time, batch_size, fw_output_size] 2850 * If time-major and merge_outputs: 2851 * [max_time, batch_size, fw_output_size + bw_output_size] 2852 * If batch-major and not merge_outputs: 2853 * [batch_size, max_time, fw_output_size] 2854 * If batch-major and merge_outputs: 2855 * [batch_size, max_time, fw_output_size + bw_output_size] 2856 * * 1: The backward output. Unused if merge_outputs is true. 2857 * A 3-D tensor of shape: 2858 * If time-major: [max_time, batch_size, bw_output_size] 2859 * If batch-major: [batch_size, max_time, bw_output_size] 2860 * * 2: The forward activation state output. 2861 * A 2-D tensor of shape [batch_size, fw_output_size] containing an 2862 * activation state from the last time step in the sequence. This 2863 * output is optional and can be omitted. If this output is present 2864 * then outputs 3-5 must be present as well. 2865 * Available since API level 30. 2866 * * 3: The forward cell state output. 2867 * A tensor of shape [batch_size, fw_cell_size] containing a cell state 2868 * from the last time step in the sequence. This output is optional 2869 * and can be omitted. If this output is present 2870 * then outputs 2, 4, 5 must be present as well. 2871 * Available since API level 30. 2872 * * 4: The backward activation state output. 2873 * A 2-D tensor of shape [batch_size, bw_output_size] containing an 2874 * activation state from the last time step in the sequence. This 2875 * output is optional and can be omitted. If this output is present 2876 * then outputs 2, 3, 5 must be present as well. 2877 * Available since API level 30. 2878 * * 5: The backward cell state output. 
     *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
     *      from the last time step in the sequence. This output is optional
     *      and can be omitted. If this output is present
     *      then outputs 2-4 must be present as well.
     *      Available since API level 30.
     *
     * Available since API level 29.
     *
     * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
     * does not maintain internal states. This operator does not support the usage pattern in which
     * multiple cells are chained and state tensors are propagated.
     */
    ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,

    /**
     * A recurrent neural network layer that applies a basic RNN cell to a
     * sequence of inputs in forward and backward directions.
     *
     * This Op unrolls the input along the sequence dimension, and implements
     * the following operation for each element in the sequence s =
     * 1...sequence_length:
     *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
     *          fw_state * fw_recurrent_weights’ + fw_bias)
     *
     * And for each element in sequence t = sequence_length : 1
     *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
     *          bw_state * bw_recurrent_weights’ + bw_bias)
     *
     * Where:
     * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
     * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
     *   current “state” which itself is the output from the previous time step
     *   computation;
     * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
     *   batch);
     * * “activation” is the function passed as the “fused_activation_function”
     *   argument (if not “NONE”).
     *
     * The op supports cross-linking via an auxiliary input. Regular cell feeds
     * one input into the two RNN cells in the following way:
     *
     *          INPUT  (INPUT_REVERSED)
     *            |         |
     *      ---------------------
     *      |  FW_RNN    BW_RNN |
     *      ---------------------
     *            |         |
     *         FW_OUT     BW_OUT
     *
     * An op with cross-linking takes two inputs and feeds them into the RNN
     * cells in the following way:
     *
     *       AUX_INPUT   (AUX_INPUT_REVERSED)
     *           |             |
     *     INPUT | (INPUT_R'D.)|
     *       |   |       |     |
     *     -----------------------
     *     |  \  /        \    / |
     *     |  FW_RNN     BW_RNN  |
     *     -----------------------
     *          |           |
     *       FW_OUT      BW_OUT
     *
     * The cross-linking mode is enabled iff auxiliary input and auxiliary
     * weights are present. When stacking this op on top of itself, this
     * makes it possible to connect both the forward and backward outputs from
     * the previous cell to the next cell's input.
     *
     * Since API level 30, parallel linking mode is supported. The mode is
     * enabled if the auxiliary input is present but auxiliary weights are
     * omitted. In this case, the cell feeds inputs into the RNN in the
     * following way:
     *
     *          INPUT (AUX_INPUT_REVERSED)
     *            |         |
     *      ---------------------
     *      |  FW_RNN    BW_RNN |
     *      ---------------------
     *            |         |
     *         FW_OUT     BW_OUT
     *
     * When stacking this op on top of itself, this makes it possible to
     * connect both the forward and backward outputs from the previous cell
     * to the next cell's corresponding inputs.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *
     * The input tensors must all be the same type.
     *
     * Inputs:
     * * 0: input.
     *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
     *      it is set to true, then the input has a shape [maxTime, batchSize,
     *      inputSize], otherwise the input has a shape [batchSize, maxTime,
     *      inputSize].
     * * 1: fwWeights.
     *      A 2-D tensor of shape [fwNumUnits, inputSize].
     * * 2: fwRecurrentWeights.
     *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
     * * 3: fwBias.
     *      A 1-D tensor of shape [fwNumUnits].
     * * 4: fwHiddenState.
     *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
     *      state input for the first time step of the computation.
     * * 5: bwWeights.
     *      A 2-D tensor of shape [bwNumUnits, inputSize].
     * * 6: bwRecurrentWeights.
     *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
     * * 7: bwBias.
     *      A 1-D tensor of shape [bwNumUnits].
     * * 8: bwHiddenState
     *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
     *      state input for the first time step of the computation.
     * * 9: auxInput.
     *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
     *      it is set to true, then the input has a shape [maxTime, batchSize,
     *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
     *      auxInputSize]. Can be omitted. See the docs above for the usage
     *      modes explanation.
     * * 10: fwAuxWeights.
     *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
     *      See the docs above for the usage modes explanation.
     * * 11: bwAuxWeights.
     *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
     *      See the docs above for the usage modes explanation.
     * * 12: fusedActivationFunction.
     *      A {@link FuseCode} value indicating the activation function. If
     *      “NONE” is specified then it results in a linear activation.
     * * 13: timeMajor
     *      An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
     *      of input and output tensors.
     * * 14: mergeOutputs
     *      An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
     *      from forward and backward cells are separate (if set to false) or
     *      concatenated (if set to true).
     * Outputs:
     * * 0: fwOutput.
     *      A 3-D tensor. The first two dimensions of the shape are defined by
     *      the input 13 (timeMajor) and the third dimension is defined by the
     *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
     *      two dimensions are [maxTime, batchSize], otherwise they are set to
     *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
     *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
     *      to fwNumUnits.
     * * 1: bwOutput.
     *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
     *      this tensor is not produced. The shape is defined by the input 13
     *      (timeMajor). If it is set to true, then the shape is set to
     *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
     *      [batchSize, maxTime, bwNumUnits].
     * * 2: The forward hidden state output.
     *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
     *      state from the last time step in the sequence. This output is
     *      optional and can be omitted. If this output is present then output
     *      3 must be present as well.
     *      Available since API level 30.
     * * 3: The backward hidden state output.
     *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
     *      state from the last time step in the sequence. This output is
     *      optional and can be omitted. If this output is present then output
     *      2 must be present as well.
     *      Available since API level 30.
     *
     * Available since API level 29.
     *
     * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
     * does not maintain internal states. This operator does not support the usage pattern in which
     * multiple cells are chained and state tensors are propagated.
     */
    ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
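    /*
     * Example (editorial): assuming operands 0-14 have been added to a model
     * in exactly the input order listed above, and operands 15 and 16 were
     * added for fwOutput and bwOutput, the operation itself can be wired up
     * as sketched below. All indices are illustrative.
     *
     *     uint32_t inIdx[15] = {0, 1, 2, 3, 4, 5, 6, 7,
     *                           8, 9, 10, 11, 12, 13, 14};
     *     uint32_t outIdx[2] = {15, 16};
     *     ANeuralNetworksModel_addOperation(
     *             model, ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN,
     *             15, inIdx, 2, outIdx);
     */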
    /**
     * Greedily selects a subset of bounding boxes in descending order of score.
     *
     * This op applies the NMS algorithm to each class. In each loop of
     * execution, the box with the maximum score gets selected and removed from
     * the pending set. The scores of the remaining boxes are lowered according
     * to their intersection-over-union (IOU) overlap with the previously
     * selected boxes and the specified NMS kernel method. Any boxes with a
     * score less than the threshold are removed from the pending set.
     *
     * Three NMS kernels are supported:
     * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
     * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
     * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
     *
     * Axis-aligned bounding boxes are represented by their upper-left corner
     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
     * bounding box should satisfy x1 <= x2 and y1 <= y2.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Inputs:
     * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
     *      of each bounding box proposal. The boxes are grouped by batches in the
     *      first dimension. Zero num_rois is supported for this tensor.
     * * 1: A 2-D Tensor specifying the bounding boxes of shape
     *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
     *      The boxes are grouped by batches in the first dimension. The sequential
     *      order of the boxes corresponds with input0. For input0 of type
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
     *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
     *      scale of 0.125.
     *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
     *      with zeroPoint of -128 and scale of 0.125.
     *      Zero num_rois is supported for this tensor.
     * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
     *      [num_rois], specifying the batch index of each box. Boxes with
     *      the same batch index are grouped together.
     * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
     *      with scores lower than the threshold are filtered before sending
     *      to the NMS algorithm.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
     *      number of selected bounding boxes for each image. Set to a negative
     *      value for unlimited number of output bounding boxes.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS
     *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
     * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
     *      threshold in hard and linear NMS kernel. This field is ignored if
     *      gaussian kernel is selected.
     * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in
     *      gaussian NMS kernel. This field is ignored if gaussian kernel is
     *      not selected.
     * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.
     *      Boxes with scores lower than the threshold are dropped during the
     *      score updating phase in soft NMS.
     *
     * Outputs:
     * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
     *      [num_output_rois], specifying the score of each output box. The boxes
     *      are grouped by batches, but the sequential order in each batch is not
     *      guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     *      or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
     *      the scale and zero point must be the same as input0.
     * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
     *      [num_output_rois, 4], specifying the coordinates of each
     *      output bounding box with the same format as input1. The sequential
     *      order of the boxes corresponds with output0. For type of
     *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be
     *      0.125 and the zero point must be 0.
     * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
     *      [num_output_rois], specifying the class of each output box. The
     *      sequential order of the boxes corresponds with output0.
     * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
     *      [num_output_rois], specifying the batch index of each box. Boxes
     *      with the same batch index are grouped together.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
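    /*
     * Example (editorial): the three kernels above restated as a plain-C
     * score update. This is an illustrative reference only, not how a driver
     * is required to implement the operation.
     *
     *     #include <math.h>
     *
     *     // kernel: 0 = hard, 1 = linear, 2 = gaussian (input 5 above).
     *     static float nms_updated_score(float score, float iou, int kernel,
     *                                    float iouThreshold, float sigma) {
     *         switch (kernel) {
     *             case 0: return iou < iouThreshold ? score : 0.0f;
     *             case 1: return iou < iouThreshold ? score
     *                                               : score * (1.0f - iou);
     *             default: return score * expf(-iou * iou / sigma);
     *         }
     *     }
     */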
    /**
     * Casts a tensor to a type.
     *
     * This operation ignores the scale and zeroPoint of quantized tensors,
     * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input
     * as a tensor of uint8 values.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_INT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * Since API level 30, casting tensors of the following
     * {@link OperandCode} to the same {@link OperandCode} is supported:
     * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
     * * {@link ANEURALNETWORKS_TENSOR_INT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
     *
     * Supported tensor rank: from 1
     *
     * Inputs:
     * * 0: A tensor.
     *
     * Outputs:
     * * 0: A tensor with the same shape as input0.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_CAST = 45,
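    /*
     * Example (editorial): a minimal sketch of casting a [4] int32 tensor to
     * float32. Operand indices are illustrative; error handling is omitted.
     *
     *     uint32_t dims[1] = {4};
     *     ANeuralNetworksOperandType src = {
     *             .type = ANEURALNETWORKS_TENSOR_INT32,
     *             .dimensionCount = 1, .dimensions = dims};
     *     ANeuralNetworksOperandType dst = {
     *             .type = ANEURALNETWORKS_TENSOR_FLOAT32,
     *             .dimensionCount = 1, .dimensions = dims};
     *     ANeuralNetworksModel_addOperand(model, &src);  // operand 0
     *     ANeuralNetworksModel_addOperand(model, &dst);  // operand 1
     *     uint32_t in = 0, out = 1;
     *     ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST,
     *                                       1, &in, 1, &out);
     */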
    /**
     * Shuffle the channels of the input tensor.
     *
     * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
     * divides the channel dimension into num_groups groups, and reorganizes the
     * channels by grouping channels with the same index in each group.
     *
     * Along the channel dimension, the output is calculated using this formula:
     *
     *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
     *
     * where group_size = num_channels / num_groups
     *
     * The number of channels must be divisible by num_groups.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be shuffled.
     * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
     *      groups.
     * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension
     *      channel shuffle would be performed on. Negative index is used to
     *      specify axis from the end (e.g. -1 for the last axis). Must be in
     *      the range [-n, n).
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
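    /*
     * Example (editorial): the formula above restated as a plain-C loop over
     * one channel vector. With num_channels = 6 and num_groups = 3
     * (group_size = 2), channels [a0 a1 b0 b1 c0 c1] are reordered to
     * [a0 b0 c0 a1 b1 c1]. Illustrative reference only.
     *
     *     static void channel_shuffle_1d(const float* in, float* out,
     *                                    int numChannels, int numGroups) {
     *         // Requires numChannels % numGroups == 0.
     *         int groupSize = numChannels / numGroups;
     *         for (int g = 0; g < numGroups; ++g)
     *             for (int k = 0; k < groupSize; ++k)
     *                 out[k * numGroups + g] = in[g * groupSize + k];
     *     }
     */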
    /**
     * Apply postprocessing steps to bounding box detections.
     *
     * Bounding box detections are generated by applying transformation on a set
     * of predefined anchors with the bounding box deltas from bounding box
     * regression. A final step of hard NMS is applied to limit the number of
     * returned boxes.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     *
     * Inputs:
     * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
     *      the score of each anchor with each class. Class 0 for each
     *      [batches, num_anchors, 0] is background and will be ignored.
     * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
     *      the first four values in length_box_encoding specifying the bounding
     *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
     *      where dy and dx are the linear-scale relative correction factors for
     *      the center position of the bounding box with respect to the width and
     *      height, and dh and dw are the log-scale relative correction factors
     *      for the width and height. All the entries in length_box_encoding
     *      beyond the first four values are ignored in this operation.
     * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
     *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
     *      ctr_x are the center position of the box, and h and w are the height
     *      and the width.
     * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
     *      factor for dy in bounding box deltas.
     * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
     *      factor for dx in bounding box deltas.
     * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
     *      factor for dh in bounding box deltas.
     * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
     *      factor for dw in bounding box deltas.
     * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use the
     *      regular multi-class NMS algorithm that does NMS separately for each
     *      class, set to false for a faster algorithm that only does one single
     *      NMS using the highest class score.
     * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying
     *      the maximum number of boxes for the output. Boxes with the lowest
     *      scores are discarded to meet the limit.
     * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
     *      set to false, specifying the maximum number of classes per detection.
     * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
     *      set to true, specifying the maximum number of detections when
     *      applying NMS algorithm for each single class.
     * * 11: A scalar, score_threshold. Boxes with scores lower than the
     *      threshold are filtered before sending to the NMS algorithm. The
     *      scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
     *      {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
     *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
     *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
     * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
     *      must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
     *      {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
     *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
     *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
     * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include
     *      background class in the list of label map for the output, set
     *      to false to not include the background. When the background
     *      class is included, it has label 0 and the output classes start
     *      at 1 in the label map, otherwise, the output classes start at 0.
     *
     * Outputs:
     * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape
     *      [batches, max_num_detections], specifying the score of each output
     *      detection.
     * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
     *      coordinates of each output bounding box, with format
     *      [y1, x1, y2, x2].
     * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
     *      [batches, max_num_detections], specifying the class label for each
     *      output detection.
     * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],
     *      specifying the number of valid output detections for each batch.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,

    /**
     * For input tensors x and y, computes x == y elementwise.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_INT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Supported tensor rank: from 1
     *
     * This operation supports broadcasting.
     *
     * Inputs:
     * * 0: A tensor.
3311 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3312 * with input0. 3313 * 3314 * Outputs: 3315 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3316 * 3317 * Available since API level 29. 3318 */ 3319 ANEURALNETWORKS_EQUAL = 48, 3320 3321 /** 3322 * Computes exponential of x element-wise. 3323 * 3324 * Supported tensor {@link OperandCode}: 3325 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3326 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3327 * 3328 * Supported tensor rank: from 1. 3329 * 3330 * Inputs: 3331 * * 0: A tensor. 3332 * 3333 * Outputs: 3334 * * 0: The output tensor of same shape as input0. 3335 * 3336 * Available since API level 29. 3337 */ 3338 ANEURALNETWORKS_EXP = 49, 3339 3340 /** 3341 * Inserts a dimension of 1 into a tensor's shape. 3342 * 3343 * Given a tensor input, this operation inserts a dimension of 1 at the 3344 * given dimension index of input's shape. The dimension index starts at 3345 * zero; if you specify a negative dimension index, it is counted backward 3346 * from the end. 3347 * 3348 * Supported tensor {@link OperandCode}: 3349 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3350 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3351 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3352 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3353 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3354 * 3355 * Supported tensor rank: from 1 3356 * 3357 * Inputs: 3358 * * 0: An n-D tensor. 3359 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension 3360 * index to expand. Must be in the range [-(n + 1), (n + 1)). 3361 * 3362 * Outputs: 3363 * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as 3364 * input0. 3365 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 3366 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 3367 * the scale and zeroPoint must be the same as input0. 3368 * 3369 * Available since API level 29. 3370 */ 3371 ANEURALNETWORKS_EXPAND_DIMS = 50, 3372 3373 /** 3374 * Gathers values along an axis. 3375 * 3376 * Produces an output tensor with shape 3377 * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:] 3378 * where: 3379 * # Vector indices (output is rank(input0)). 3380 * output[a_0, ..., a_n, i, b_0, ..., b_n] = 3381 * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n] 3382 * 3383 * # Higher rank indices (output is rank(input0) + rank(indices) - 1). 3384 * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = 3385 * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] 3386 * 3387 * Supported tensor {@link OperandCode}: 3388 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3389 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3390 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3391 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3392 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3393 * 3394 * Supported tensor rank: from 1 3395 * 3396 * Inputs: 3397 * * 0: An n-D tensor from which to gather values. 3398 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis. 3399 * Negative index is used to specify axis from the end 3400 * (e.g. -1 for the last axis). Must be in the range [-n, n). 3401 * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices. 3402 * The values must be in the bounds of the corresponding dimensions 3403 * of input0. 3404 * 3405 * Outputs: 3406 * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0. 
     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *
     * Available since API level 29.
     */
    ANEURALNETWORKS_GATHER = 51,

    /**
     * Generate axis-aligned bounding box proposals.
     *
     * Bounding box proposals are generated by applying transformation on a set
     * of predefined anchors with the bounding box deltas from bounding box
     * regression. A final step of hard NMS is applied to limit the number of
     * returned boxes.
     *
     * Axis-aligned bounding boxes are represented by their upper-left corner
     * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
     * bounding box should satisfy x1 <= x2 and y1 <= y2.
     *
     * Supported tensor {@link OperandCode}:
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
     *
     * Inputs:
     * * 0: A 4-D Tensor specifying the score of each anchor at each
     *      location. With "NHWC" data layout, the tensor shape is
     *      [batches, height, width, num_anchors]. With "NCHW" data layout,
     *      the tensor shape is [batches, num_anchors, height, width].
     * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
     *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
     *      With "NCHW" data layout, the tensor shape is
     *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
     *      in the order of [dx, dy, dw, dh], where dx and dy are the linear-scale
     *      relative correction factors for the center position of the bounding
     *      box with respect to the width and height, and dw and dh are the
     *      log-scale relative correction factors for the width and height. The
     *      last dimension is the channel dimension.
     * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
     *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
     *      {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.
     * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
     *      each image in the batch, with format [image_height, image_width].
     *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this
     *      tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with
     *      scale of 0.125.
     * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
     *      from the height of original image to the height of feature map.
     * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
     *      from the width of original image to the width of feature map.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
     *      number of boxes before going into the hard NMS algorithm. Boxes
     *      with the lowest scores are discarded to meet the limit. Set to
     *      a non-positive value for unlimited number.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
     *      number of boxes returning from the hard NMS algorithm. Boxes
     *      with the lowest scores are discarded to meet the limit.
Set to 3469 * a non-positive value for unlimited number. 3470 * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU 3471 * threshold for hard NMS. 3472 * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with 3473 * height or width lower than the absolute threshold are filtered out. 3474 * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3475 * NCHW data layout for input0 and input1. Set to false for NHWC. 3476 * 3477 * Outputs: 3478 * * 0: A tensor of the same {@link OperandCode} as input0, of shape 3479 * [num_output_rois], specifying the score of each output box. 3480 * The boxes are grouped by batches, but the sequential order in 3481 * each batch is not guaranteed. For type of 3482 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or 3483 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero 3484 * point must be the same as input0. 3485 * * 1: A tensor of the same {@link OperandCode} as input3, of shape 3486 * [num_output_rois, 4], specifying the coordinates of each output 3487 * bounding box for each class, with format [x1, y1, x2, y2]. 3488 * The sequential order of the boxes corresponds with output0. 3489 * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the 3490 * scale must be 0.125 and the zero point must be 0. 3491 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 3492 * [num_output_rois], specifying the batch index of each box. Boxes 3493 * with the same batch index are grouped together. 3494 * 3495 * Available since API level 29. 3496 */ 3497 ANEURALNETWORKS_GENERATE_PROPOSALS = 52, 3498 3499 /** 3500 * For input tensors x and y, computes x > y elementwise. 3501 * 3502 * Supported tensor {@link OperandCode}: 3503 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3504 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3505 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3506 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3507 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3508 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3509 * 3510 * Supported tensor rank: from 1 3511 * 3512 * This operation supports broadcasting. 3513 * 3514 * Inputs: 3515 * * 0: A tensor. 3516 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3517 * with input0. 3518 * 3519 * Outputs: 3520 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3521 * 3522 * Available since API level 29. 3523 */ 3524 ANEURALNETWORKS_GREATER = 53, 3525 /** 3526 * For input tensors x and y, computes x >= y elementwise. 3527 * 3528 * Supported tensor {@link OperandCode}: 3529 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3530 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3531 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3532 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3533 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3534 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3535 * 3536 * Supported tensor rank: from 1 3537 * 3538 * This operation supports broadcasting. 3539 * 3540 * Inputs: 3541 * * 0: A tensor. 3542 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3543 * with input0. 3544 * 3545 * Outputs: 3546 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3547 * 3548 * Available since API level 29. 3549 */ 3550 ANEURALNETWORKS_GREATER_EQUAL = 54, 3551 3552 /** 3553 * Performs a grouped 2-D convolution operation. 
     *
     * Given an input tensor of shape [batches, height, width, depth_in] and a
     * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
     * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
     * applies a group of different filters to each input channel group, then
     * concatenates the results together.
     *
     * Specifically, the input channels are divided into num_groups groups, each with
     * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
     * filters are also divided into num_groups groups, i.e. depth_out is divisible
     * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
     * input channel group, and the results are concatenated together.
     *
     * The output dimensions are functions of the filter dimensions, stride, and
     * padding.
     *
     * The values in the output tensor are computed as:
     *
     *     output[b, i, j, g * channel_multiplier + q] =
     *         sum_{di, dj, dk} (
     *             input[b, strides[1] * i + di, strides[2] * j + dj,
     *                   g * depth_group + dk] *
     *             filter[g * channel_multiplier + q, di, dj, dk]
     *         ) + bias[channel]
     *
     * where channel_multiplier = depth_out / num_groups
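    /*
     * Example (editorial): the summation above restated as a naive plain-C
     * reference loop for a single batch, NHWC layout, stride 1, no padding,
     * and no fused activation. Illustrative only; drivers are free to
     * implement the operation differently.
     *
     *     static void grouped_conv2d_ref(const float* in, const float* filt,
     *                                    const float* bias, float* out,
     *                                    int h, int w, int depthIn, int depthOut,
     *                                    int fh, int fw, int numGroups) {
     *         const int depthGroup = depthIn / numGroups;
     *         const int chanMult = depthOut / numGroups;  // channel_multiplier
     *         const int outW = w - fw + 1;
     *         for (int i = 0; i + fh <= h; ++i)
     *             for (int j = 0; j + fw <= w; ++j)
     *                 for (int oc = 0; oc < depthOut; ++oc) {
     *                     const int g = oc / chanMult;  // group of this filter
     *                     float acc = bias[oc];
     *                     for (int di = 0; di < fh; ++di)
     *                         for (int dj = 0; dj < fw; ++dj)
     *                             for (int dk = 0; dk < depthGroup; ++dk)
     *                                 acc += in[((i + di) * w + (j + dj)) * depthIn +
     *                                           g * depthGroup + dk] *
     *                                        filt[((oc * fh + di) * fw + dj) *
     *                                             depthGroup + dk];
     *                     out[(i * outW + j) * depthOut + oc] = acc;
     *                 }
     *     }
     */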
     *
     * Supported tensor {@link OperandCode} configurations:
     * * 16 bit floating point:
     * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
     *
     * * 32 bit floating point:
     * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
     *
     * * Quantized:
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized signed (since API level 30):
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized with symmetric per channel quantization for the filter:
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * * Quantized signed with filter symmetric per channel quantization (since API level 30):
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
     * With the default data layout NHWC, the data is stored in the order of:
     * [batch, height, width, channels]. Alternatively, the data layout could
     * be NCHW, the data storage order of: [batch, channels, height, width].
     *
     * Both explicit padding and implicit padding are supported.
     *
     * Inputs (explicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input, where depth_in = num_groups * depth_group.
     * * 1: A 4-D tensor, of shape
     *      [depth_out, filter_height, filter_width, depth_group], specifying
     *      the filter, where depth_out must be divisible by num_groups. For
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
     *      the channel dimension (channelDim at
     *      {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
     *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
     *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
     *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the right, in the ‘width’ dimension.
     * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the top, in the ‘height’ dimension.
     * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
     *      the bottom, in the ‘height’ dimension.
     * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘width’ dimension.
     * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
     *      walking through input in the ‘height’ dimension.
     * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
     *      groups.
     * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
     *       {@link FuseCode} values. Specifies the activation to
     *       invoke on the result.
     * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
     *       NCHW data layout for input0 and output0. Set to false for NHWC.
     *
     * Inputs (implicit padding):
     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
     *      specifying the input, where depth_in = num_groups * depth_group.
     * * 1: A 4-D tensor, of shape
     *      [depth_out, filter_height, filter_width, depth_group], specifying
     *      the filter, where depth_out must be divisible by num_groups. For
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
     *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
     *      must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
     *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
     *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
     *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
For filter tensor 3674 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 3675 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 3676 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 3677 * bias_scale[i] = input_scale * filter_scale[i]. 3678 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 3679 * padding scheme, has to be one of the 3680 * {@link PaddingCode} values. 3681 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 3682 * walking through input in the ‘width’ dimension. 3683 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 3684 * walking through input in the ‘height’ dimension. 3685 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 3686 * groups. 3687 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 3688 * {@link FuseCode} values. Specifies the activation to 3689 * invoke on the result. 3690 * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3691 * NCHW data layout for input0 and output0. Set to false for NHWC. 3692 * 3693 * Outputs: 3694 * * 0: The output 4-D tensor, of shape 3695 * [batches, out_height, out_width, depth_out]. 3696 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 3697 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 3698 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 3699 * 3700 * Available since API level 29. 3701 */ 3702 ANEURALNETWORKS_GROUPED_CONV_2D = 55, 3703 3704 /** 3705 * Localize the maximum keypoints from heatmaps. 3706 * 3707 * This operation approximates the accurate maximum keypoint scores and 3708 * indices after bicubic upscaling by using Taylor expansion up to the 3709 * quadratic term. 3710 * 3711 * The bounding box is represented by its upper-left corner coordinate 3712 * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. 3713 * A valid bounding box should satisfy x1 <= x2 and y1 <= y2. 3714 * 3715 * Supported tensor {@link OperandCode}: 3716 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3717 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3718 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3719 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3720 * 3721 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 3722 * With the default data layout NHWC, the data is stored in the order of: 3723 * [batch, height, width, channels]. Alternatively, the data layout could 3724 * be NCHW, the data storage order of: [batch, channels, height, width]. 3725 * 3726 * Inputs: 3727 * * 0: A 4-D Tensor of shape 3728 * [num_boxes, heatmap_size, heatmap_size, num_keypoints], 3729 * specifying the heatmaps, the height and width of heatmaps should 3730 * be the same, and must be greater than or equal to 2. 3731 * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes, 3732 * each with format [x1, y1, x2, y2]. For input0 of type 3733 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should 3734 * be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint 3735 * of 0 and scale of 0.125. 3736 * For input0 of type 3737 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor 3738 * should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with 3739 * zeroPoint of -128 and scale of 0.125. 3740 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3741 * NCHW data layout for input0. Set to false for NHWC. 
3742 * 3743 * Outputs: 3744 * * 0: A tensor of the same {@link OperandCode} as input0, with shape 3745 * [num_boxes, num_keypoints], specifying score of the keypoints. 3746 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or 3747 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 3748 * the scale and zeroPoint can be different from input0 scale and zeroPoint. 3749 * * 1: A tensor of the same {@link OperandCode} as input1, with shape 3750 * [num_boxes, num_keypoints, 2], specifying the location of 3751 * the keypoints, the second dimension is organized as 3752 * [keypoint_x, keypoint_y]. 3753 * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the 3754 * scale must be 0.125 and the zero point must be 0. 3755 * 3756 * Available since API level 29. 3757 */ 3758 ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56, 3759 3760 /** 3761 * Applies instance normalization to the input tensor. 3762 * 3763 * The values in the output tensor are computed as: 3764 * 3765 * output[b, h, w, c] = 3766 * (input[b, h, w, c] - mean[b, c]) * gamma / 3767 * sqrt(var[b, c] + epsilon) + beta 3768 * 3769 * Where the mean and variance are computed across the spatial dimensions: 3770 * 3771 * mean[b, c] = 3772 * sum_{h, w}(input[b, h, w, c]) / sum(1) 3773 * 3774 * var[b, c] = 3775 * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1) 3776 * 3777 * Supported tensor {@link OperandCode}: 3778 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3779 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3780 * 3781 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 3782 * With the default data layout NHWC, the data is stored in the order of: 3783 * [batch, height, width, channels]. Alternatively, the data layout could 3784 * be NCHW, the data storage order of: [batch, channels, height, width]. 3785 * 3786 * Inputs: 3787 * * 0: An n-D tensor, specifying the tensor to be normalized. 3788 * * 1: A scalar, specifying gamma, the scale applied to the normalized 3789 * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if 3790 * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 3791 * {@link ANEURALNETWORKS_FLOAT32} if input0 is of 3792 * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. 3793 * * 2: A scalar, specifying beta, the offset applied to the normalized 3794 * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if 3795 * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 3796 * {@link ANEURALNETWORKS_FLOAT32} if input0 is of 3797 * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. 3798 * * 3: A scalar, specifying epsilon, the small value added to variance to 3799 * avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if 3800 * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 3801 * {@link ANEURALNETWORKS_FLOAT32} if input0 is of 3802 * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. 3803 * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3804 * NCHW data layout for input0 and output0. Set to false for NHWC. 3805 * 3806 * Outputs: 3807 * * 0: A tensor of the same {@link OperandCode} and same shape as input0. 3808 * 3809 * Available since API level 29. 3810 */ 3811 ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57, 3812 3813 /** 3814 * For input tensors x and y, computes x < y elementwise. 
3815 * 3816 * Supported tensor {@link OperandCode}: 3817 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3818 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3819 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3820 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3821 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3822 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3823 * 3824 * Supported tensor rank: from 1 3825 * 3826 * This operation supports broadcasting. 3827 * 3828 * Inputs: 3829 * * 0: A tensor. 3830 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3831 * with input0. 3832 * 3833 * Outputs: 3834 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3835 * 3836 * Available since API level 29. 3837 */ 3838 ANEURALNETWORKS_LESS = 58, 3839 3840 /** 3841 * For input tensors x and y, computes x <= y elementwise. 3842 * 3843 * Supported tensor {@link OperandCode}: 3844 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3845 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3846 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3847 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3848 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3849 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 3850 * 3851 * Supported tensor rank: from 1 3852 * 3853 * This operation supports broadcasting. 3854 * 3855 * Inputs: 3856 * * 0: A tensor. 3857 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3858 * with input0. 3859 * 3860 * Outputs: 3861 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3862 * 3863 * Available since API level 29. 3864 */ 3865 ANEURALNETWORKS_LESS_EQUAL = 59, 3866 3867 /** 3868 * Computes natural logarithm of x element-wise. 3869 * 3870 * Supported tensor {@link OperandCode}: 3871 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3872 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3873 * 3874 * Supported tensor rank: from 1. 3875 * 3876 * Inputs: 3877 * * 0: A tensor. 3878 * 3879 * Outputs: 3880 * * 0: The output tensor of same shape as input0. 3881 * 3882 * Available since API level 29. 3883 */ 3884 ANEURALNETWORKS_LOG = 60, 3885 3886 /** 3887 * Returns the truth value of x AND y element-wise. 3888 * 3889 * Supported tensor {@link OperandCode}: 3890 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3891 * 3892 * Supported tensor rank: from 1 3893 * 3894 * This operation supports broadcasting. 3895 * 3896 * Inputs: 3897 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3898 * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions 3899 * compatible with input0. 3900 * 3901 * Outputs: 3902 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3903 * 3904 * Available since API level 29. 3905 */ 3906 ANEURALNETWORKS_LOGICAL_AND = 61, 3907 3908 /** 3909 * Computes the truth value of NOT x element-wise. 3910 * 3911 * Supported tensor {@link OperandCode}: 3912 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3913 * 3914 * Supported tensor rank: from 1. 3915 * 3916 * Inputs: 3917 * * 0: A tensor. 3918 * 3919 * Outputs: 3920 * * 0: The output tensor of same shape as input0. 3921 * 3922 * Available since API level 29. 3923 */ 3924 ANEURALNETWORKS_LOGICAL_NOT = 62, 3925 3926 /** 3927 * Returns the truth value of x OR y element-wise. 3928 * 3929 * Supported tensor {@link OperandCode}: 3930 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3931 * 3932 * Supported tensor rank: from 1 3933 * 3934 * This operation supports broadcasting. 3935 * 3936 * Inputs: 3937 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 
* * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3939 * compatible with input0.
3940 *
3941 * Outputs:
3942 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3943 *
3944 * Available since API level 29.
3945 */
3946 ANEURALNETWORKS_LOGICAL_OR = 63,
3947
3948 /**
3949 * Computes the log softmax activations given logits.
3950 *
3951 * The output is calculated using this formula:
3952 *
3953 * output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3954 *
3955 * Supported tensor {@link OperandCode}:
3956 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3957 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3958 *
3959 * Supported tensor rank: from 1.
3960 *
3961 * Inputs:
3962 * * 0: A tensor specifying the input logits.
3963 * * 1: A scalar, specifying the positive scaling factor for the exponent,
3964 * beta.
3965 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
3966 * value must be of {@link ANEURALNETWORKS_FLOAT16}.
3967 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
3968 * value must be of {@link ANEURALNETWORKS_FLOAT32}.
3969 * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
3970 * reduce across. Negative index is used to specify axis from the
3971 * end (e.g. -1 for the last axis). Must be in the range [-n, n).
3972 *
3973 * Outputs:
3974 * * 0: The output tensor of the same {@link OperandCode} and shape as
3975 * input0.
3976 *
3977 * Available since API level 29.
3978 */
3979 ANEURALNETWORKS_LOG_SOFTMAX = 64,
3980
3981 /**
3982 * Returns the element-wise maximum of two tensors.
3983 *
3984 * Supported tensor {@link OperandCode}:
3985 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3986 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3987 * * {@link ANEURALNETWORKS_TENSOR_INT32}
3988 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3989 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3990 *
3991 * Supported tensor rank: from 1.
3992 *
3993 * Inputs:
3994 * * 0: A tensor.
3995 * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
3996 * with input0.
3997 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
3998 * the scale and zeroPoint can be different from input0 scale and zeroPoint.
3999 *
4000 * Outputs:
4001 * * 0: A tensor of the same {@link OperandCode} as input0.
4002 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4003 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4004 * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4005 *
4006 * Available since API level 29.
4007 */
4008 ANEURALNETWORKS_MAXIMUM = 65,
4009
4010 /**
4011 * Returns the element-wise minimum of two tensors.
4012 *
4013 * Supported tensor {@link OperandCode}:
4014 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4015 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4016 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4017 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4018 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4019 *
4020 * Supported tensor rank: from 1.
4021 *
4022 * Inputs:
4023 * * 0: A tensor.
4024 * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4025 * with input0.
4026 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4027 * the scale and zeroPoint can be different from input0 scale and zeroPoint.
4028 *
4029 * Outputs:
4030 * * 0: A tensor of the same {@link OperandCode} as input0.
For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4032 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4033 * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4034 *
4035 * Available since API level 29.
4036 */
4037 ANEURALNETWORKS_MINIMUM = 66,
4038
4039 /**
4040 * Computes numerical negative value element-wise.
4041 *
4042 * Supported tensor {@link OperandCode}:
4043 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4044 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4045 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4046 *
4047 * Supported tensor rank: from 1.
4048 *
4049 * Inputs:
4050 * * 0: A tensor.
4051 *
4052 * Outputs:
4053 * * 0: The output tensor of same shape as input0.
4054 *
4055 * Available since API level 29.
4056 */
4057 ANEURALNETWORKS_NEG = 67,
4058
4059 /**
4060 * For input tensors x and y, computes x != y elementwise.
4061 *
4062 * Supported tensor {@link OperandCode}:
4063 * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4064 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4065 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4066 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4067 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4068 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4069 *
4070 * Supported tensor rank: from 1
4071 *
4072 * This operation supports broadcasting.
4073 *
4074 * Inputs:
4075 * * 0: A tensor.
4076 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
4077 * with input0.
4078 *
4079 * Outputs:
4080 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
4081 *
4082 * Available since API level 29.
4083 */
4084 ANEURALNETWORKS_NOT_EQUAL = 68,
4085
4086 /**
4087 * Pads a tensor with the given constant value according to the specified
4088 * paddings.
4089 *
4090 * Supported tensor {@link OperandCode}:
4091 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4092 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4093 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4094 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4095 *
4096 * Supported tensor rank: up to 4
4097 *
4098 * Inputs:
4099 * * 0: An n-D tensor, specifying the tensor to be padded.
4100 * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
4101 * for each spatial dimension of the input tensor. The shape of the
4102 * tensor must be {rank(input0), 2}.
4103 * padding[i, 0] specifies the number of elements to be padded in the
4104 * front of dimension i.
4105 * padding[i, 1] specifies the number of elements to be padded after
4106 * the end of dimension i.
4107 * * 2: A scalar specifying the value to use for padding input0.
4108 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
4109 * pad value must be of {@link ANEURALNETWORKS_FLOAT16}.
4110 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
4111 * pad value must be of {@link ANEURALNETWORKS_FLOAT32}.
4112 * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4113 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4114 * the pad value must be of {@link ANEURALNETWORKS_INT32}. The
4115 * scale and zeroPoint are assumed to be the same as in input0.
4116 *
4117 * Outputs:
4118 * * 0: A tensor of the same {@link OperandCode} as input0.
The
4119 * output tensor has the same rank as input0, and each
4120 * dimension of the output tensor has the same size as the
4121 * corresponding dimension of the input tensor plus the size
4122 * of the padding:
4123 * output0.dimension[i] =
4124 * padding[i, 0] + input0.dimension[i] + padding[i, 1]
4125 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4126 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4127 * the scale and zeroPoint must be the same as input0.
4128 *
4129 * Available since API level 29.
4130 */
4131 ANEURALNETWORKS_PAD_V2 = 69,
4132
4133 /**
4134 * Computes the power of one value to another.
4135 *
4136 * Given a tensor base and a tensor exponent, this operation computes
4137 * base^exponent elementwise.
4138 *
4139 * This operation supports broadcasting. The size of the output is the
4140 * maximum size along each dimension of the input operands. It starts with
4141 * the trailing dimensions, and works its way forward.
4142 *
4143 * For example:
4144 * base.dimension = {4, 1, 2}
4145 * exponent.dimension = {5, 4, 3, 1}
4146 * output.dimension = {5, 4, 3, 2}
4147 *
4148 * Supported tensor {@link OperandCode}:
4149 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4150 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4151 *
4152 * Supported tensor rank: from 1
4153 *
4154 * Inputs:
4155 * * 0: A tensor specifying the base.
4156 * * 1: A tensor specifying the exponent.
4157 *
4158 * Outputs:
4159 * * 0: An output tensor.
4160 *
4161 * Available since API level 29.
4162 */
4163 ANEURALNETWORKS_POW = 70,
4164
4165 /**
4166 * Parametric Rectified Linear Unit.
4167 *
4168 * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
4169 * is a learned array with the same {@link OperandCode} and compatible
4170 * dimensions as input x.
4171 *
4172 * Two dimensions are compatible when:
4173 * 1. they are equal, or
4174 * 2. one of them is 1
4175 *
4176 * The size of the output is the maximum size along each dimension of the
4177 * input operands. It starts with the trailing dimensions, and works its way
4178 * forward.
4179 *
4180 * Example:
4181 * input.dimension = {4, 1, 2}
4182 * alpha.dimension = {5, 4, 3, 1}
4183 * output.dimension = {5, 4, 3, 2}
4184 *
4185 * Supported tensor {@link OperandCode}:
4186 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4187 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4188 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4189 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4190 *
4191 * Supported tensor rank: from 1
4192 *
4193 * Inputs:
4194 * * 0: A tensor, specifying the input.
4195 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
4196 * as input0, specifying the alpha.
4197 *
4198 * Outputs:
4199 * * 0: A tensor of the same {@link OperandCode} as input0.
4200 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4201 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4202 * the scale and zeroPoint can be different from input0 scale and zeroPoint.
4203 *
4204 * Available since API level 29.
4205 */
4206 ANEURALNETWORKS_PRELU = 71,
4207
4208 /**
4209 * Quantizes the input tensor.
4210 *
4211 * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:
4212 *
4213 * output = max(0, min(255, round(input / scale) + zeroPoint))
4214 *
4215 * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output
4216 * tensor is:
4217 *
4218 * output = max(-128, min(127, round(input / scale) + zeroPoint))
4219 *
4220 * Supported input tensor {@link OperandCode}:
4221 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4222 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4223 *
4224 * Supported output tensor {@link OperandCode}:
4225 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4226 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4227 *
4228 * Supported tensor rank: from 1
4229 *
4230 * Inputs:
4231 * * 0: A tensor, may be zero-sized.
4232 *
4233 * Outputs:
4234 * * 0: The output tensor of same shape as input0, but with
4235 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
4236 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.
4237 *
4238 * Available since API level 29.
4239 */
4240 ANEURALNETWORKS_QUANTIZE = 72,
4241
4242 /**
4243 * A version of quantized LSTM, using 16 bit quantization for internal
4244 * state.
4245 *
4246 * There is no projection layer, so cell state size is equal to the output
4247 * size.
4248 *
4249 * Inputs:
4250 * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4251 * and shape [numBatches, inputSize] specifying the input to the LSTM
4252 * cell. Tensor is quantized with a fixed quantization range of
4253 * [-1, 127/128] (scale = 1/128, zeroPoint = 128).
4254 * * 1: The input-to-input weights.
4255 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4256 * and shape [outputSize, inputSize] specifying input-to-input part of
4257 * weights for fully-connected layer inside the LSTM cell.
4258 * Quantization zero point and scale must be the same across all the
4259 * weights.
4260 * * 2: The input-to-forget weights.
4261 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4262 * and shape [outputSize, inputSize] specifying input-to-forget part of
4263 * weights for fully-connected layer inside the LSTM cell.
4264 * Quantization zero point and scale must be the same across all the
4265 * weights.
4266 * * 3: The input-to-cell weights.
4267 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4268 * and shape [outputSize, inputSize] specifying input-to-cell part of
4269 * weights for fully-connected layer inside the LSTM cell.
4270 * Quantization zero point and scale must be the same across all the
4271 * weights.
4272 * * 4: The input-to-output weights.
4273 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4274 * and shape [outputSize, inputSize] specifying input-to-output part of
4275 * weights for fully-connected layer inside the LSTM cell.
4276 * Quantization zero point and scale must be the same across all the
4277 * weights.
4278 * * 5: The recurrent-to-input weights.
4279 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4280 * and shape [outputSize, outputSize] specifying recurrent-to-input part
4281 * of weights for fully-connected layer inside the LSTM cell.
4282 * Quantization zero point and scale must be the same across all the
4283 * weights.
4284 * * 6: The recurrent-to-forget weights.
4285 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4286 * and shape [outputSize, outputSize] specifying recurrent-to-forget
4287 * part of weights for fully-connected layer inside the LSTM cell.
4288 * Quantization zero point and scale must be the same across all the
4289 * weights.
4290 * * 7: The recurrent-to-cell weights.
4291 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4292 * and shape [outputSize, outputSize] specifying recurrent-to-cell part
4293 * of weights for fully-connected layer inside the LSTM cell.
4294 * Quantization zero point and scale must be the same across all the
4295 * weights.
4296 * * 8: The recurrent-to-output weights.
4297 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4298 * and shape [outputSize, outputSize] specifying recurrent-to-output
4299 * part of weights for fully-connected layer inside the LSTM cell.
4300 * Quantization zero point and scale must be the same across all the
4301 * weights.
4302 * * 9: The input gate bias.
4303 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4304 * [outputSize] specifying the bias for the fully-connected layer
4305 * inside the LSTM cell. Bias is quantized with scale being a product
4306 * of input and weights scales and zeroPoint equal to 0.
4307 * * 10:The forget gate bias.
4308 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4309 * [outputSize] specifying the bias for the fully-connected layer
4310 * inside the LSTM cell. Bias is quantized with scale being a product
4311 * of input and weights scales and zeroPoint equal to 0.
4312 * * 11:The cell bias.
4313 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4314 * [outputSize] specifying the bias for the fully-connected layer
4315 * inside the LSTM cell. Bias is quantized with scale being a product
4316 * of input and weights scales and zeroPoint equal to 0.
4317 * * 12:The output gate bias.
4318 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4319 * [outputSize] specifying the bias for the fully-connected layer
4320 * inside the LSTM cell. Bias is quantized with scale being a product
4321 * of input and weights scales and zeroPoint equal to 0.
4322 * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4323 * and shape [numBatches, outputSize] specifying the cell state from the
4324 * previous time step of the LSTM cell. It is quantized using a
4325 * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
4326 * 32768, zeroPoint = 0).
4327 * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4328 * and shape [numBatches, outputSize] specifying the output of the LSTM
4329 * cell from previous time-step. Tensor is quantized with a fixed
4330 * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
4331 * 128).
4332 *
4333 *
4334 * Outputs:
4335 * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4336 * and shape [numBatches, outputSize] which contains a cell state from
4337 * the current time step. Tensor is quantized using a quantization
4338 * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
4339 * 0).
4340 * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4341 * and shape [numBatches, outputSize] which contains the output value.
4342 * Tensor is quantized with a fixed quantization range of [-1, 127/128]
4343 * (scale = 1/128, zeroPoint = 128).
 *
 * Available since API level 29.
4344 */
4345 ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
4346
4347 /**
4348 * Draws samples from a multinomial distribution.
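 *
 * For example, with input logits of shape [1, 3] and a sample count of 5,
 * the output has shape [1, 5], and each of the five entries is a class
 * index in the range [0, 3).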
4349 * 4350 * Supported tensor {@link OperandCode}: 4351 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4352 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4353 * 4354 * Inputs: 4355 * * 0: A 2-D tensor with shape [batches, classes], specifying the 4356 * unnormalized log-probabilities for all classes. 4357 * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of 4358 * independent samples to draw for each row slice. 4359 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2], 4360 * specifying seeds used to initialize the random distribution. If both 4361 * provided seeds are 0, both will be randomly generated. 4362 * Outputs: 4363 * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape 4364 * [batches, samples], containing the drawn samples. 4365 * 4366 * Available since API level 29. 4367 */ 4368 ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74, 4369 4370 /** 4371 * Reduces a tensor by computing the "logical and" of elements along given 4372 * dimensions. 4373 * 4374 * If keep_dims is true, the reduced dimensions are 4375 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4376 * 1 for each entry in dimensions. 4377 * 4378 * Supported tensor {@link OperandCode}: 4379 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 4380 * 4381 * Supported tensor rank: up to 4 4382 * 4383 * Inputs: 4384 * * 0: An n-D tensor. 4385 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4386 * to reduce. Dimension values must be in the range [-n, n). 4387 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4388 * retains reduced dimensions with length 1. 4389 * 4390 * Outputs: 4391 * * 0: A tensor of the same {@link OperandCode} as input0. 4392 * If all dimensions are reduced and keep_dims is false, the output 4393 * shape is [1]. 4394 * 4395 * Available since API level 29. 4396 */ 4397 ANEURALNETWORKS_REDUCE_ALL = 75, 4398 4399 /** 4400 * Reduces a tensor by computing the "logical or" of elements along given 4401 * dimensions. 4402 * 4403 * If keep_dims is true, the reduced dimensions are 4404 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4405 * 1 for each entry in dimensions. 4406 * 4407 * Supported tensor {@link OperandCode}: 4408 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 4409 * 4410 * Supported tensor rank: up to 4 4411 * 4412 * Inputs: 4413 * * 0: An n-D tensor. 4414 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4415 * to reduce. Dimension values must be in the range [-n, n). 4416 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4417 * retains reduced dimensions with length 1. 4418 * 4419 * Outputs: 4420 * * 0: A tensor of the same {@link OperandCode} as input0. 4421 * If all dimensions are reduced and keep_dims is false, the output 4422 * shape is [1]. 4423 * 4424 * Available since API level 29. 4425 */ 4426 ANEURALNETWORKS_REDUCE_ANY = 76, 4427 4428 /** 4429 * Reduces a tensor by computing the maximum of elements along given 4430 * dimensions. 4431 * 4432 * If keep_dims is true, the reduced dimensions are 4433 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4434 * 1 for each entry in dimensions. 
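 *
 * For example, reducing a tensor of shape [2, 3] over dimension 1 yields
 * a result of shape [2, 1] when keep_dims is true, and of shape [2] when
 * keep_dims is false.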
4435 * 4436 * Supported tensor {@link OperandCode}: 4437 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4438 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4439 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4440 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 4441 * 4442 * Supported tensor rank: up to 4 4443 * 4444 * Inputs: 4445 * * 0: An n-D tensor. 4446 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4447 * to reduce. Dimension values must be in the range [-n, n). 4448 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4449 * retains reduced dimensions with length 1. 4450 * 4451 * Outputs: 4452 * * 0: A tensor of the same {@link OperandCode} as input0. 4453 * If all dimensions are reduced and keep_dims is false, the output 4454 * shape is [1]. 4455 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 4456 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 4457 * the scale and zeroPoint must be the same as input0. 4458 * 4459 * Available since API level 29. 4460 */ 4461 ANEURALNETWORKS_REDUCE_MAX = 77, 4462 4463 /** 4464 * Reduces a tensor by computing the minimum of elements along given 4465 * dimensions. 4466 * 4467 * If keep_dims is true, the reduced dimensions are 4468 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4469 * 1 for each entry in dimensions. 4470 * 4471 * Supported tensor {@link OperandCode}: 4472 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4473 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4474 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4475 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) 4476 * 4477 * Supported tensor rank: up to 4 4478 * 4479 * Inputs: 4480 * * 0: An n-D tensor. 4481 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4482 * to reduce. Dimension values must be in the range [-n, n). 4483 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4484 * retains reduced dimensions with length 1. 4485 * 4486 * Outputs: 4487 * * 0: A tensor of the same {@link OperandCode} as input0. 4488 * If all dimensions are reduced and keep_dims is false, the output 4489 * shape is [1]. 4490 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 4491 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 4492 * the scale and zeroPoint must be the same as input0. 4493 * 4494 * Available since API level 29. 4495 */ 4496 ANEURALNETWORKS_REDUCE_MIN = 78, 4497 4498 /** 4499 * Reduces a tensor by multiplying elements along given dimensions. 4500 * 4501 * If keep_dims is true, the reduced dimensions are 4502 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4503 * 1 for each entry in dimensions. 4504 * 4505 * Supported tensor {@link OperandCode}: 4506 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4507 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4508 * 4509 * Supported tensor rank: up to 4 4510 * 4511 * Inputs: 4512 * * 0: An n-D tensor. 4513 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4514 * to reduce. Dimension values must be in the range [-n, n). 4515 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4516 * retains reduced dimensions with length 1. 4517 * 4518 * Outputs: 4519 * * 0: A tensor of the same {@link OperandCode} as input0. 4520 * If all dimensions are reduced and keep_dims is false, the output 4521 * shape is [1]. 4522 * 4523 * Available since API level 29. 
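 *
 * A minimal sketch of wiring a reduction like this one into a model; the
 * operand indices in, dims, keep, and out are hypothetical, assumed to
 * have been created earlier with {@link ANeuralNetworksModel_addOperand}:
 *
 *     // input tensor, dimensions to reduce, and the keep_dims scalar
 *     uint32_t inputs[3] = {in, dims, keep};
 *     uint32_t outputs[1] = {out};
 *     ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_REDUCE_PROD,
 *                                       3, inputs, 1, outputs);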
4524 */
4525 ANEURALNETWORKS_REDUCE_PROD = 79,
4526
4527 /**
4528 * Reduces a tensor by summing elements along given dimensions.
4529 *
4530 * If keep_dims is true, the reduced dimensions are
4531 * retained with length 1. Otherwise, the rank of the tensor is reduced by
4532 * 1 for each entry in dimensions.
4533 *
4534 * Supported tensor {@link OperandCode}:
4535 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4536 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4537 *
4538 * Supported tensor rank: up to 4
4539 *
4540 * Inputs:
4541 * * 0: An n-D tensor.
4542 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4543 * to reduce. Dimension values must be in the range [-n, n).
4544 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4545 * retains reduced dimensions with length 1.
4546 *
4547 * Outputs:
4548 * * 0: A tensor of the same {@link OperandCode} as input0.
4549 * If all dimensions are reduced and keep_dims is false, the output
4550 * shape is [1].
4551 *
4552 * Available since API level 29.
4553 */
4554 ANEURALNETWORKS_REDUCE_SUM = 80,
4555
4556 /**
4557 * Select and scale the feature map of each region of interest to a unified
4558 * output size by average pooling sampling points from bilinear interpolation.
4559 *
4560 * The region of interest is represented by its upper-left corner coordinate
4561 * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4562 * A spatial scaling factor is applied to map into feature map coordinates.
4563 * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4564 *
4565 * No rounding is applied in this operation. The sampling points are uniformly
4566 * distributed in the pooling bin and their values are calculated by bilinear
4567 * interpolation.
4568 *
4569 * Supported tensor {@link OperandCode}:
4570 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4571 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4572 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4573 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4574 *
4575 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4576 * With the default data layout NHWC, the data is stored in the order of:
4577 * [batch, height, width, channels]. Alternatively, the data layout could
4578 * be NCHW, the data storage order of: [batch, channels, height, width].
4579 *
4580 * Inputs:
4581 * * 0: A 4-D tensor, specifying the feature map.
4582 * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4583 * the regions of interest, each line with format [x1, y1, x2, y2].
4584 * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
4585 * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4586 * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
4587 * supported for this tensor.
4588 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4589 * [num_rois], specifying the batch index of each box. Boxes with
4590 * the same batch index are grouped together. Zero num_rois is
4591 * supported for this tensor.
4592 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4593 * height of the output tensor.
4594 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4595 * width of the output tensor.
4596 * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4597 * from the height of original image to the height of feature map.
4598 * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4599 * from the width of original image to the width of feature map.
4600 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4601 * sampling points in height dimension used to compute the output.
4602 * Set to 0 for adaptive value of ceil(roi_height/out_height).
4603 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4604 * sampling points in width dimension used to compute the output.
4605 * Set to 0 for adaptive value of ceil(roi_width/out_width).
4606 * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4607 * NCHW data layout for input0 and output0. Set to false for NHWC.
4608 *
4609 * Outputs:
4610 * * 0: A tensor of the same {@link OperandCode} as input0. The output
4611 * shape is [num_rois, out_height, out_width, depth].
4612 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4613 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4614 * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4615 *
4616 * Available since API level 29.
4617 */
4618 ANEURALNETWORKS_ROI_ALIGN = 81,
4619
4620 /**
4621 * Select and scale the feature map of each region of interest to a unified
4622 * output size by max-pooling.
4623 *
4624 * The region of interest is represented by its upper-left corner coordinate
4625 * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4626 * A spatial scaling factor is applied to map into feature map coordinates.
4627 * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4628 *
4629 * Rounding is applied in this operation to ensure integer boundary for
4630 * regions of interest and pooling bins.
4631 *
4632 * Supported tensor {@link OperandCode}:
4633 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4634 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4635 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4636 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4637 *
4638 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4639 * With the default data layout NHWC, the data is stored in the order of:
4640 * [batch, height, width, channels]. Alternatively, the data layout could
4641 * be NCHW, the data storage order of: [batch, channels, height, width].
4642 *
4643 * Inputs:
4644 * * 0: A 4-D tensor, specifying the feature map.
4645 * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4646 * the regions of interest, each line with format [x1, y1, x2, y2].
4647 * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
4648 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4649 * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4650 * with zeroPoint of 0 and scale of 0.125.
4651 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4652 * [num_rois], specifying the batch index of each box. Boxes with
4653 * the same batch index are grouped together.
4654 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4655 * height of the output tensor.
4656 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4657 * width of the output tensor.
4658 * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4659 * from the height of original image to the height of feature map.
4660 * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4661 * from the width of original image to the width of feature map.
4662 * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4663 * NCHW data layout for input0 and output0. Set to false for NHWC.
4664 *
4665 * Outputs:
4666 * * 0: A tensor of the same {@link OperandCode} as input0. The output
4667 * shape is [num_rois, out_height, out_width, depth].
4668 * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
4669 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4670 * the scale and zeroPoint must be the same as input0.
4671 *
4672 * Available since API level 29.
4673 */
4674 ANEURALNETWORKS_ROI_POOLING = 82,
4675
4676 /**
4677 * Computes reciprocal of square root of x element-wise.
4678 *
4679 * Supported tensor {@link OperandCode}:
4680 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4681 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4682 *
4683 * Supported tensor rank: from 1.
4684 *
4685 * Inputs:
4686 * * 0: A tensor.
4687 *
4688 * Outputs:
4689 * * 0: The output tensor of same shape as input0.
4690 *
4691 * Available since API level 29.
4692 */
4693 ANEURALNETWORKS_RSQRT = 83,
4694
4695 /**
4696 * Using a tensor of booleans c and input tensors x and y, selects values
4697 * elementwise from both input tensors:
4698 *
4699 * O[i] = C[i] ? x[i] : y[i].
4700 *
4701 * Supported tensor {@link OperandCode}:
4702 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4703 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4704 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4705 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4706 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4707 *
4708 * Supported tensor rank: from 1
4709 *
4710 * Inputs:
4711 * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a
4712 * mask that chooses, based on the value at each element, whether the
4713 * corresponding element in the output should be taken from input1 (if
4714 * true) or input2 (if false).
4715 * * 1: An input tensor of the same shape as input0.
4716 * * 2: An input tensor of the same shape and type as input1.
4717 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4718 * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4719 * the scale and zeroPoint can be different from input1 scale and zeroPoint.
4720 *
4721 * Outputs:
4722 * * 0: A tensor of the same type and shape as input1 and input2.
4723 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4724 * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4725 *
4726 * Available since API level 29.
4727 */
4728 ANEURALNETWORKS_SELECT = 84,
4729
4730 /**
4731 * Computes sin of x element-wise.
4732 *
4733 * Supported tensor {@link OperandCode}:
4734 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4735 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4736 *
4737 * Supported tensor rank: from 1.
4738 *
4739 * Inputs:
4740 * * 0: A tensor.
4741 *
4742 * Outputs:
4743 * * 0: The output tensor of same shape as input0.
4744 *
4745 * Available since API level 29.
4746 */
4747 ANEURALNETWORKS_SIN = 85,
4748
4749 /**
4750 * Extracts a slice of specified size from the input tensor starting at a
4751 * specified location.
4752 *
4753 * The starting location is specified as a 1-D tensor containing offsets
4754 * for each dimension. The size is specified as a 1-D tensor containing
4755 * either the size of a slice along the corresponding dimension or -1. In the
4756 * latter case, all the remaining elements in the dimension are included in the slice.
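 *
 * For example, slicing a [3, 4] tensor with begin = [1, 0] and
 * size = [2, -1] produces the [2, 4] slice consisting of rows 1 and 2.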
4757 *
4758 * The sum of the begin offset and the size of a slice along a dimension
4759 * must not exceed the size of that dimension.
4760 *
4761 * Supported tensor {@link OperandCode}:
4762 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4763 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4764 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4765 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4766 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4767 *
4768 * Supported tensor rank: from 1
4769 *
4770 * Inputs:
4771 * * 0: An n-D tensor to take slice from, may be zero-sized.
4772 * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4773 * the beginning indices of the slice in each dimension.
4774 * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4775 * the size of the slice in each dimension.
4776 *
4777 * Outputs:
4778 * * 0: An n-D tensor of the same type as the input containing the slice.
4779 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4780 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4781 * its scale and zeroPoint have to be the same as the input0 scale and zeroPoint.
4782 *
4783 * Available since API level 29.
4784 */
4785 ANEURALNETWORKS_SLICE = 86,
4786
4787 /**
4788 * Splits a tensor along a given axis into num_splits subtensors.
4789 *
4790 * Supported tensor {@link OperandCode}:
4791 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4792 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4793 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4794 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4795 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4796 *
4797 * Supported tensor rank: from 1
4798 *
4799 * Inputs:
4800 * * 0: An n-D tensor to split.
4801 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along
4802 * which to split.
4803 * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of
4804 * splits along given axis. Must evenly divide axis size.
4805 *
4806 * Outputs:
4807 * * 0 ~ (num_splits - 1): Resulting subtensors.
4808 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4809 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4810 * the scale and zeroPoint must be the same as input0.
4811 *
4812 * Available since API level 29.
4813 */
4814 ANEURALNETWORKS_SPLIT = 87,
4815
4816 /**
4817 * Computes square root of x element-wise.
4818 *
4819 * Supported tensor {@link OperandCode}:
4820 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4821 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4822 *
4823 * Supported tensor rank: from 1.
4824 *
4825 * Inputs:
4826 * * 0: A tensor.
4827 *
4828 * Outputs:
4829 * * 0: The output tensor of same shape as input0.
4830 *
4831 * Available since API level 29.
4832 */
4833 ANEURALNETWORKS_SQRT = 88,
4834
4835 /**
4836 * Constructs a tensor by tiling a given tensor.
4837 *
4838 * This operation creates a new tensor by replicating `input` `multiples`
4839 * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4840 * elements, and the values of `input` are replicated `multiples[i]` times
4841 * along the i-th dimension.
4842 * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
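 *
 * A minimal sketch, assuming a model under construction and a hypothetical
 * operand index multiplesIndex for a 1-D {@link ANEURALNETWORKS_TENSOR_INT32}
 * multiples operand of length 2:
 *
 *     // repeat the second dimension twice, keep the first unchanged
 *     int32_t multiples[2] = {1, 2};
 *     ANeuralNetworksModel_setOperandValue(model, multiplesIndex,
 *                                          multiples, sizeof(multiples));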
4843 *
4844 * Supported tensor {@link OperandCode}:
4845 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4846 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4847 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4848 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4849 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4850 *
4851 * Supported tensor rank: from 1
4852 *
4853 * Inputs:
4854 * * 0: input, an n-D tensor specifying the input.
4855 * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
4856 * The length of multiples must be n.
4857 *
4858 * Outputs:
4859 * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.
4860 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4861 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4862 * the scale and zeroPoint must be the same as input0.
4863 *
4864 * Available since API level 29.
4865 */
4866 ANEURALNETWORKS_TILE = 89,
4867
4868 /**
4869 * Finds values and indices of the k largest entries for the last dimension.
4870 *
4871 * Resulting values in each dimension are sorted in descending order. If
4872 * two values are equal, the one with larger index appears first.
4873 *
4874 * Supported tensor {@link OperandCode}:
4875 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4876 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4877 * * {@link ANEURALNETWORKS_TENSOR_INT32}
4878 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4879 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4880 *
4881 * Supported tensor rank: from 1
4882 *
4883 * Inputs:
4884 * * 0: input, an n-D tensor specifying the input.
4885 * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4886 * top elements to look for along the last dimension.
4887 *
4888 * Outputs:
4889 * * 0: An n-D tensor of the same type as the input, containing the k
4890 * largest elements along each last dimensional slice.
4891 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4892 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4893 * the scale and zeroPoint must be the same as input0.
4894 * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}
4895 * containing the indices of values within the last dimension of input.
4896 *
4897 * Available since API level 29.
4898 */
4899 ANEURALNETWORKS_TOPK_V2 = 90,
4900
4901 /**
4902 * Performs the transpose of the 2-D convolution operation.
4903 *
4904 * This operation is sometimes called "deconvolution" after Deconvolutional
4905 * Networks, but is actually the transpose (gradient) of
4906 * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.
4907 *
4908 * The output dimensions are functions of the filter dimensions, stride, and
4909 * padding.
4910 *
4911 * Supported tensor {@link OperandCode} configurations:
4912 * * 16 bit floating point:
4913 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
4914 *
4915 * * 32 bit floating point:
4916 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
4917 *
4918 * * Quantized:
4919 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
4920 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4921 * * * input.scale * filter.scale).
4922 *
4923 * * Quantized with symmetric per channel quantization for the filter:
4924 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
4925 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4926 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, 4927 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 4928 * 4929 * Available since API level 30: 4930 * * Quantized signed (since API level 30): 4931 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. 4932 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to 4933 * * * input.scale * filter.scale). 4934 * 4935 * * Quantized signed with filter symmetric per channel quantization (since API level 30): 4936 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. 4937 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 4938 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, 4939 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 4940 * 4941 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 4942 * With the default data layout NHWC, the data is stored in the order of: 4943 * [batch, height, width, channels]. Alternatively, the data layout could 4944 * be NCHW, the data storage order of: [batch, channels, height, width]. 4945 * 4946 * Both explicit padding and implicit padding are supported. 4947 * 4948 * Inputs (explicit padding): 4949 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 4950 * specifying the input. 4951 * Since API level 29, zero batches is supported for this tensor. 4952 * * 1: A 4-D tensor, of shape 4953 * [depth_out, filter_height, filter_width, depth_in], specifying the 4954 * filter. For tensor of type 4955 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 4956 * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0. 4957 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 4958 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 4959 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the 4960 * same type. 4961 * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4962 * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, 4963 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, 4964 * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. 4965 * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, 4966 * the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 4967 * and bias_scale of 0. The actual scale of each value 'i' is equal to 4968 * bias_scale[i] = input_scale * filter_scale[i]. 4969 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4970 * the left, in the ‘width’ dimension. 4971 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4972 * the right, in the ‘width’ dimension. 4973 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4974 * the top, in the ‘height’ dimension. 4975 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4976 * the bottom, in the ‘height’ dimension. 4977 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 4978 * walking through input in the ‘width’ dimension. 4979 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 4980 * walking through input in the ‘height’ dimension. 4981 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 4982 * {@link FuseCode} values. Specifies the activation to 4983 * invoke on the result. 
4984 * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 4985 * NCHW data layout for input0 and output0. Set to false for NHWC. 4986 * 4987 * Inputs (implicit padding): 4988 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 4989 * specifying the input. 4990 * Since API level 29, zero batches is supported for this tensor. 4991 * * 1: A 4-D tensor, of shape 4992 * [depth_out, filter_height, filter_width, depth_in], specifying the 4993 * filter. For tensor of type 4994 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 4995 * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0. 4996 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 4997 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 4998 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the 4999 * same type. 5000 * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 5001 * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, 5002 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, 5003 * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. 5004 * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, 5005 * the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 5006 * and bias_scale of 0. The actual scale of each value 'i' is equal to 5007 * bias_scale[i] = input_scale * filter_scale[i]. 5008 * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output 5009 * tensor shape. 5010 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 5011 * padding scheme, has to be one of the 5012 * {@link PaddingCode} values. 5013 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 5014 * walking through input in the ‘width’ dimension. 5015 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 5016 * walking through input in the ‘height’ dimension. 5017 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 5018 * {@link FuseCode} values. Specifies the activation to 5019 * invoke on the result. 5020 * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 5021 * NCHW data layout for input0 and output0. Set to false for NHWC. 5022 * 5023 * Outputs: 5024 * * 0: The output 4-D tensor, of shape 5025 * [batches, out_height, out_width, depth_out]. 5026 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 5027 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 5028 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 5029 * 5030 * Available since API level 29. 5031 */ 5032 ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91, 5033 5034 /** 5035 * A recurrent neural network specified by an LSTM cell. 5036 * 5037 * Performs (fully) dynamic unrolling of input. 5038 * 5039 * This Op unrolls the input along the time dimension, and implements the 5040 * following operation for each element in the sequence 5041 * s = 1...sequence_length: 5042 * outputs[s] = projection(state = activation(LSTMOp(inputs[s]))) 5043 * 5044 * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM}, 5045 * the "projection" is an optional projection layer from state and output 5046 * and the “activation” is the function passed as the 5047 * “fused_activation_function” argument (if not “NONE”). 
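 *
 * For example, with a time-major input of shape
 * [max_time, batch_size, input_size], the LSTM cell is applied max_time
 * times, and output 0 has shape [max_time, batch_size, output_size].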
5048 * 5049 * Supported tensor {@link OperandCode}: 5050 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 5051 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 5052 * 5053 * Supported tensor rank: 3, either time-major or batch-major. 5054 * 5055 * All input and output tensors must be of the same type. 5056 * 5057 * Inputs: 5058 * * 0: The input (\f$x_t\f$). 5059 * A 3-D tensor of shape: 5060 * If time-major: [max_time, batch_size, input_size] 5061 * If batch-major: [batch_size, max_time, input_size] 5062 * where “max_time” is the number of timesteps (sequence length), 5063 * “batch_size” corresponds to the batching dimension, and 5064 * “input_size” is the size of the input. 5065 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. 5066 * A 2-D tensor of shape [num_units, input_size], where “num_units” 5067 * corresponds to the number of cell units. 5068 * * 2: The input-to-forget weights (\f$W_{xf}\f$). 5069 * A 2-D tensor of shape [num_units, input_size]. 5070 * * 3: The input-to-cell weights (\f$W_{xc}\f$). 5071 * A 2-D tensor of shape [num_units, input_size]. 5072 * * 4: The input-to-output weights (\f$W_{xo}\f$). 5073 * A 2-D tensor of shape [num_units, input_size]. 5074 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 5075 * A 2-D tensor of shape [num_units, output_size], where “output_size” 5076 * corresponds to either the number of cell units (i.e., “num_units”), 5077 * or the second dimension of the “projection_weights”, if defined. 5078 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). 5079 * A 2-D tensor of shape [num_units, output_size]. 5080 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). 5081 * A 2-D tensor of shape [num_units, output_size]. 5082 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). 5083 * A 2-D tensor of shape [num_units, output_size]. 5084 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. 5085 * A 1-D tensor of shape [num_units]. 5086 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. 5087 * A 1-D tensor of shape [num_units]. 5088 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. 5089 * A 1-D tensor of shape [num_units]. 5090 * * 12:The input gate bias (\f$b_i\f$). Optional. 5091 * A 1-D tensor of shape [num_units]. 5092 * * 13:The forget gate bias (\f$b_f\f$). 5093 * A 1-D tensor of shape [num_units]. 5094 * * 14:The cell bias (\f$b_c\f$). 5095 * A 1-D tensor of shape [num_units]. 5096 * * 15:The output gate bias (\f$b_o\f$). 5097 * A 1-D tensor of shape [num_units]. 5098 * * 16:The projection weights (\f$W_{proj}\f$). Optional. 5099 * A 2-D tensor of shape [output_size, num_units]. 5100 * * 17:The projection bias (\f$b_{proj}\f$). Optional. 5101 * A 1-D tensor of shape [output_size]. 5102 * * 18:The output state (in) (\f$h_{t-1}\f$). 5103 * A 2-D tensor of shape [batch_size, output_size]. 5104 * * 19:The cell state (in) (\f$C_{t-1}\f$). 5105 * A 2-D tensor of shape [batch_size, num_units]. 5106 * * 20:The activation function (\f$g\f$). 5107 * A value indicating the activation function: 5108 * <ul> 5109 * <li>0: None; 5110 * <li>1: Relu; 5111 * <li>3: Relu6; 5112 * <li>4: Tanh; 5113 * <li>6: Sigmoid. 5114 * </ul> 5115 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such 5116 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 5117 * then clipping is disabled. 5118 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the 5119 * projection layer, such that values are bound within 5120 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. 
5121 * * 23:Time-major if true, batch-major if false. 5122 * * 24:The input layer normalization weights. Optional. 5123 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 5124 * to activation at input gate. 5125 * * 25:The forget layer normalization weights. Optional. 5126 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 5127 * to activation at forget gate. 5128 * * 26:The cell layer normalization weights. Optional. 5129 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 5130 * to activation at cell gate. 5131 * * 27:The output layer normalization weights. Optional. 5132 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 5133 * to activation at output gate. 5134 * 5135 * Outputs: 5136 * * 0: The output (\f$o_t\f$). 5137 * A 3-D tensor of shape: 5138 * If time-major: [max_time, batch_size, output_size] 5139 * If batch-major: [batch_size, max_time, output_size] 5140 * * 1: A tensor of shape [batch_size, output_size] containing a hidden 5141 * state from the last time step in the sequence. This output is 5142 * optional and can be omitted. If this output is present then 5143 * output #2 must be present as well. 5144 * Available since API level 30. 5145 * * 2: A tensor of shape [batch_size, cell_size] containing a cell state 5146 * from the last time step in the sequence. This output is optional 5147 * and can be omitted. 5148 * Available since API level 30. 5149 * 5150 * Available since API level 29. 5151 * 5152 * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI 5153 * does not maintain internal states. This operator does not support the usage pattern in which 5154 * multiple cells are chained and state tensors are propagated. 5155 */ 5156 ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92, 5157 5158 /** 5159 * A recurrent neural network layer that applies a basic RNN cell to a 5160 * sequence of inputs. 5161 * 5162 * This layer unrolls the input along the sequence dimension, and implements 5163 * the following operation 5164 * for each element in the sequence s = 1...sequence_length: 5165 * outputs[s] = state = activation(inputs[s] * input_weights’ + state * 5166 * recurrent_weights’ + bias) 5167 * 5168 * Where: 5169 * * “input_weights” is a weight matrix that multiplies the inputs; 5170 * * “recurrent_weights” is a weight matrix that multiplies the current 5171 * “state” which itself is the output from the previous time step 5172 * computation; 5173 * * “bias” is a bias vector (added to each output vector in the batch); 5174 * * “activation” is the function passed as the “fused_activation_function” 5175 * argument (if not “NONE”). 5176 * 5177 * Supported tensor {@link OperandCode}: 5178 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 5179 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 5180 * 5181 * The input tensors must all be the same type. 5182 * 5183 * Inputs: 5184 * * 0: input. 5185 * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If 5186 * it is set to 1, then the input has a shape [maxTime, batchSize, 5187 * inputSize], otherwise the input has a shape [batchSize, maxTime, 5188 * inputSize]. 5189 * * 1: weights. 5190 * A 2-D tensor of shape [numUnits, inputSize]. 5191 * * 2: recurrent_weights. 5192 * A 2-D tensor of shape [numUnits, numUnits]. 5193 * * 3: bias. 5194 * A 1-D tensor of shape [numUnits]. 5195 * * 4: hidden state 5196 * A 2-D tensor of shape [batchSize, numUnits]. 
Specifies a hidden
5197 * state input for the first time step of the computation.
5198 * * 5: fusedActivationFunction.
5199 * A {@link FuseCode} value indicating the activation function. If
5200 * “NONE” is specified then it results in a linear activation.
5201 * * 6: timeMajor
5202 * An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format
5203 * of input and output tensors. Must be set to either 0 or 1.
5204 * Outputs:
5205 * * 0: output.
5206 * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5207 * it is set to 1, then the output has a shape [maxTime, batchSize,
5208 * numUnits], otherwise the output has a shape [batchSize, maxTime,
5209 * numUnits].
5210 * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
5211 * from the last time step in the sequence. This output is optional
5212 * and can be omitted.
5213 * Available since API level 30.
5214 *
5215 * Available since API level 29.
5216 *
5217 * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
5218 * does not maintain internal states. This operator does not support the usage pattern in which
5219 * multiple cells are chained and state tensors are propagated.
5220 */
5221 ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
5222
5223 /**
5224 * Resizes images to the given size using nearest neighbor interpolation.
5225 *
5226 * Resized images will be distorted if their output aspect ratio is not the
5227 * same as the input aspect ratio. The corner pixels of the output may not be
5228 * the same as the corner pixels of the input.
5229 *
5230 * Supported tensor {@link OperandCode}:
5231 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5232 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5233 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5234 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
5235 *
5236 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5237 * With the default data layout NHWC, the data is stored in the order of:
5238 * [batch, height, width, channels]. Alternatively, the data layout could
5239 * be NCHW, the data storage order of: [batch, channels, height, width].
5240 *
5241 * Both resizing by shape and resizing by scale are supported.
5242 *
5243 * Inputs (resizing by shape):
5244 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5245 * the input. Zero batches is supported for this tensor.
5246 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the width
5247 * of the output tensor.
5248 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the height
5249 * of the output tensor.
5250 * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5251 * Set to true to specify NCHW data layout for input0 and output0.
5252 * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5253 * scalar, default to false. If True, the centers of the 4 corner
5254 * pixels of the input and output tensors are aligned, preserving the
5255 * values at the corner pixels.
5256 * Available since API level 30.
5257 * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5258 * scalar, default to false. If True, the pixel centers are assumed to
5259 * be at (0.5, 0.5). This is the default behavior of image.resize in
5260 * TF 2.0. If this parameter is True, then the align_corners parameter
5261 * must be False.
5262 * Available since API level 30.
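*
* As a non-normative editorial sketch of the "resizing by shape" variant
* above: assuming hypothetical operand indices input0, outW, outH, layout,
* and resized have already been added to a model (called model here) with
* {@link ANeuralNetworksModel_addOperand}, and the scalar operands have been
* given values, the operation could be wired up as follows (error handling
* omitted):
*
* <pre>
* uint32_t inputs[4] = {input0, outW, outH, layout};
* uint32_t outputs[1] = {resized};
* ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR,
*                                   4, inputs, 1, outputs);
* </pre>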
5263 * 5264 * Inputs (resizing by scale): 5265 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 5266 * the input. Zero batches is supported for this tensor. 5267 * * 1: A scalar, specifying width_scale, the scaling factor of the width 5268 * dimension from the input tensor to the output tensor. The output 5269 * width is calculated as new_width = floor(width * width_scale). 5270 * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is 5271 * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 5272 * {@link ANEURALNETWORKS_FLOAT32} otherwise. 5273 * * 2: A scalar, specifying height_scale, the scaling factor of the height 5274 * dimension from the input tensor to the output tensor. The output 5275 * height is calculated as new_height = floor(height * height_scale). 5276 * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is 5277 * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 5278 * {@link ANEURALNETWORKS_FLOAT32} otherwise. 5279 * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. 5280 * Set to true to specify NCHW data layout for input0 and output0. 5281 * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} 5282 * scalar, default to false. If True, the centers of the 4 corner 5283 * pixels of the input and output tensors are aligned, preserving the 5284 * values at the corner pixels. 5285 * Available since API level 30. 5286 * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} 5287 * scalar, default to false. If True, the pixel centers are assumed to 5288 * be at (0.5, 0.5). This is the default behavior of image.resize in 5289 * TF 2.0. If this parameter is True, then align_corners parameter 5290 * must be False. 5291 * Available since API level 30. 5292 * 5293 * Outputs: 5294 * * 0: The output 4-D tensor, of shape 5295 * [batches, new_height, new_width, depth]. 5296 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and 5297 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, 5298 * the scale and zeroPoint must be the same as input0. 5299 * 5300 * Available since API level 29. 5301 */ 5302 ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94, 5303 5304 // Operations below are available since API level 30. 5305 5306 /** 5307 * Quantized version of {@link ANEURALNETWORKS_LSTM}. 5308 * 5309 * The input and the output use asymmetric quantized types, while the rest 5310 * use symmetric ones. 5311 * 5312 * Inputs: 5313 * * 0: The input to the LSTM cell. 5314 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} 5315 * Shape: [batchSize, inputSize] 5316 * * 1: The input-to-input weights. Optional. 5317 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5318 * Shape: [numUnits, inputSize] 5319 * * 2: The input-to-forget weights. 5320 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5321 * Shape: [numUnits, inputSize] 5322 * * 3: The input-to-cell weights. 5323 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5324 * Shape: [numUnits, inputSize] 5325 * * 4: The input-to-output weights. 5326 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5327 * Shape: [numUnits, inputSize] 5328 * * 5: The recurrent-to-input weights. Optional. 5329 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5330 * Shape: [numUnits, outputSize] 5331 * * 6: The recurrent-to-forget weights. 5332 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5333 * Shape: [numUnits, outputSize] 5334 * * 7: The recurrent-to-cell weights. 
5335 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5336 * Shape: [numUnits, outputSize] 5337 * * 8: The recurrent-to-output weights. 5338 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5339 * Shape: [numUnits, outputSize] 5340 * * 9: The cell-to-input weights (for peephole). Optional. 5341 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5342 * Shape: [numUnits] 5343 * * 10: The cell-to-forget weights (for peephole). Optional. 5344 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5345 * Shape: [numUnits] 5346 * * 11: The cell-to-output weights (for peephole). Optional. 5347 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5348 * Shape: [numUnits] 5349 * * 12: The input gate bias. Quantized with scale being the 5350 * product of input and weights scales and zeroPoint equal to 0. 5351 * Optional. 5352 * Type: {@link ANEURALNETWORKS_TENSOR_INT32} 5353 * Shape: [numUnits] 5354 * * 13: The forget gate bias. Quantized with scale being the 5355 * product of input and weights scales and zeroPoint equal to 0. 5356 * Type: {@link ANEURALNETWORKS_TENSOR_INT32} 5357 * Shape: [numUnits] 5358 * * 14: The cell bias. Quantized with scale being the 5359 * product of input and weights scales and zeroPoint equal to 0. 5360 * Type: {@link ANEURALNETWORKS_TENSOR_INT32} 5361 * Shape: [numUnits] 5362 * * 15: The output gate bias. Quantized with scale being the 5363 * product of input and weights scales and zeroPoint equal to 0. 5364 * Type: {@link ANEURALNETWORKS_TENSOR_INT32} 5365 * Shape: [numUnits] 5366 * * 16: The projection weights. Optional. 5367 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} 5368 * Shape: [outputSize, numUnits] 5369 * * 17: The projection bias. Quantized with scale being the 5370 * product of input and weights scales and zeroPoint equal to 0. 5371 * Optional. 5372 * Type: {@link ANEURALNETWORKS_TENSOR_INT32} 5373 * Shape: [outputSize] 5374 * * 18: The output from the previous time step. 5375 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} 5376 * Shape: [batchSize, outputSize] 5377 * * 19: The cell state from the previous time step. 5378 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5379 * Shape: [batchSize, numUnits] 5380 * * 20: The input layer normalization weights. Used to rescale 5381 * normalized inputs to activation at input gate. Optional. 5382 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5383 * Shape: [numUnits] 5384 * * 21: The forget layer normalization weights. Used to 5385 * rescale normalized inputs to activation at forget gate. Optional. 5386 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5387 * Shape: [numUnits] 5388 * * 22: The cell layer normalization weights. Used to rescale 5389 * normalized inputs to activation at cell gate. Optional. 5390 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5391 * Shape: [numUnits] 5392 * * 23: The output layer normalization weights. Used to 5393 * rescale normalized inputs to activation at output gate. Optional. 5394 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5395 * Shape: [numUnits] 5396 * * 24: The cell clip. If provided the cell state is clipped 5397 * by this value prior to the cell output activation. Optional. 5398 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5399 * * 25: The projection clip. If provided and projection is enabled, 5400 * this is used for clipping the projected values. Optional. 5401 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5402 * * 26: The scale of the intermediate result of matmul, 5403 * i.e. input to layer normalization, at input gate. 
5404 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5405 * * 27: The scale of the intermediate result of matmul, 5406 * i.e. input to layer normalization, at forget gate. 5407 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5408 * * 28: The scale of the intermediate result of matmul, 5409 * i.e. input to layer normalization, at cell gate. 5410 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5411 * * 29: The scale of the intermediate result of matmul, 5412 * i.e. input to layer normalization, at output gate. 5413 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5414 * * 30: The zero point of the hidden state, i.e. input to 5415 * projection. 5416 * Type: {@link ANEURALNETWORKS_INT32}. 5417 * * 31: The scale of the hidden state, i.e. input to 5418 * projection. 5419 * Type: {@link ANEURALNETWORKS_FLOAT32}. 5420 * 5421 * Outputs: 5422 * * 0: The output state (out). 5423 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} 5424 * Shape: [batchSize, outputSize] 5425 * * 1: The cell state (out). 5426 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 5427 * Shape: [batchSize, numUnits] 5428 * * 2: The output. This is effectively the same as the current 5429 * "output state (out)" value. 5430 * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} 5431 * Shape: [batchSize, outputSize] 5432 * 5433 * Available since API level 30. 5434 */ 5435 ANEURALNETWORKS_QUANTIZED_LSTM = 95, 5436 5437 /** 5438 * Executes one of the two referenced models as determined by a boolean 5439 * value. 5440 * 5441 * The inputs and outputs of the two referenced models must agree with the 5442 * signature of this operation. That is, if the operation has (3 + n) inputs 5443 * and m outputs, both models must have n inputs and m outputs with the same 5444 * types, ranks (if specified), dimensions (if specified), scales, 5445 * zeroPoints, and other operand parameters as the corresponding operation 5446 * inputs and outputs. 5447 * 5448 * Inputs: 5449 * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1] 5450 * that determines which of the two referenced models to execute. 5451 * The operand must have fully specified dimensions. 5452 * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be 5453 * executed if the condition is true. 5454 * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be 5455 * executed if the condition is false. 5456 * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution. 5457 * 5458 * Outputs: 5459 * * 0 ~ (m - 1): Outputs produced by the selected model. 5460 * 5461 * Available since API level 30. 5462 */ 5463 ANEURALNETWORKS_IF = 96, 5464 5465 /** 5466 * Executes the body model until the condition model outputs false. 5467 * 5468 * The inputs to this operation are the condition model, the body model, 5469 * and operand values for the first iteration of the loop. The values are 5470 * implicitly split into three groups of input-output, state-only, and 5471 * input-only values, as described below. 5472 * 5473 * The outputs of this operation are the final values of input-output 5474 * operands. 5475 * 5476 * Both the condition and body model receive (m + k + n) inputs. 5477 * * The first m (m >= 1) inputs are input-output operands. For the first 5478 * iteration, these are initialized from the corresponding inputs of the 5479 * WHILE operation. In subsequent iterations, their values come from the 5480 * corresponding outputs of the body model produced during the previous 5481 * iteration. 5482 * * The next k (k >= 0) inputs are state-only operands. 
They are similar to 5483 * the input-output operands, except that their values are no longer 5484 * available after the loop terminates. 5485 * * The last n (n >= 0) inputs are input-only operands. Their values come 5486 * from the corresponding inputs of the WHILE operation. 5487 * 5488 * The body model produces (m + k) outputs. 5489 * * The first m outputs are input-output operands. They become the outputs 5490 * of the WHILE operation when a termination condition is reached. 5491 * * The last k outputs are state-only operands. Their values are no longer 5492 * available after the loop terminates. 5493 * 5494 * The numbers m, k, and n are inferred by the runtime as follows: 5495 * m = (WHILE operation output count) 5496 * k = (body model output count) - m 5497 * n = (body model input count) - m - k 5498 * 5499 * The pseudo-code below illustrates the flow of a WHILE operation with 5500 * inputs condition, body, initial_input_output, initial_state, input_only 5501 * (m = 1, k = 1, n = 1): 5502 * 5503 * input_output = initial_input_output 5504 * state = initial_state 5505 * while condition(input_output, state, input_only): 5506 * input_output, state = body(input_output, state, input_only) 5507 * return input_output 5508 * 5509 * To prevent infinite loops, there is an implicit execution timeout 5510 * associated with each loop ("loop timeout duration"). See {@link 5511 * ANeuralNetworksExecution_setLoopTimeout}. 5512 * 5513 * Inputs: 5514 * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition 5515 * model. The model must have (m + k + n) inputs with 5516 * the same types, ranks (if specified), dimensions (if specified), 5517 * scales, zeroPoints, and other operand parameters as the 5518 * corresponding inputs of the WHILE operation and exactly one output 5519 * of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]. 5520 * The output operand must have fully specified dimensions. 5521 * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model. 5522 * The model must have (m + k + n) inputs and (m + k) outputs with 5523 * the same types, ranks (if specified), dimensions (if specified), 5524 * scales, zeroPoints, and other operand parameters as the 5525 * corresponding inputs and outputs of the WHILE operation. 5526 * * (m inputs): Initial values for input-output operands. 5527 * * (k inputs): Initial values for state-only operands. 5528 * * (n inputs): Values for input-only operands. 5529 * 5530 * Outputs: 5531 * * 0 ~ (m - 1): Outputs produced by the loop. 5532 * 5533 * Available since API level 30. 5534 */ 5535 ANEURALNETWORKS_WHILE = 97, 5536 5537 /** 5538 * Computes exponential linear activation on the input tensor element-wise. 5539 * 5540 * The output is calculated using the following formula: 5541 * 5542 * ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1)) 5543 * 5544 * Supported tensor {@link OperandCode}: 5545 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 5546 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 5547 * 5548 * Supported tensor rank: from 1. 5549 * 5550 * Inputs: 5551 * * 0: A tensor, specifying the input. May be zero-sized. 5552 * * 1: A scalar, specifying the alpha parameter. 5553 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, 5554 * the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. 5555 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, 5556 * the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. 5557 * 5558 * Outputs: 5559 * * 0: The output tensor of same shape and type as input0. 
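*
* As a non-normative editorial sketch, a scalar reference implementation of
* the formula above (assuming alpha > 0, in which case the max/min form
* reduces to a simple branch) is:
*
* <pre>
* #include <math.h>
* // Reference ELU: max(0, x) + min(0, alpha * (exp(x) - 1)) for alpha > 0.
* static float elu_ref(float x, float alpha) {
*     return x > 0.0f ? x : alpha * (expf(x) - 1.0f);
* }
* </pre>
*
* For example, elu_ref(-1.0f, 1.0f) = expf(-1.0f) - 1 ≈ -0.632.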
5560 *
5561 * Available since API level 30.
5562 */
5563 ANEURALNETWORKS_ELU = 98,
5564
5565 /**
5566 * Computes hard-swish activation on the input tensor element-wise.
5567 *
5568 * Hard swish activation is introduced in
5569 * https://arxiv.org/pdf/1905.02244.pdf
5570 *
5571 * The output is calculated using the following formula:
5572 *
5573 * h-swish(x) = x * max(0, min(6, (x + 3))) / 6
5574 *
5575 * Supported tensor {@link OperandCode}:
5576 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5577 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5578 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5579 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5580 *
5581 * Supported tensor rank: from 1.
5582 *
5583 * Inputs:
5584 * * 0: A tensor, specifying the input. May be zero-sized.
5585 *
5586 * Outputs:
5587 * * 0: The output tensor of same shape and type as input0.
5588 * Scale and zero point of this tensor may be different from the input
5589 * tensor's parameters.
5590 *
5591 * Available since API level 30.
5592 */
5593 ANEURALNETWORKS_HARD_SWISH = 99,
5594
5595 /**
5596 * Creates a tensor filled with a scalar value.
5597 *
5598 * Supported output tensor {@link OperandCode}:
5599 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5600 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5601 * * {@link ANEURALNETWORKS_TENSOR_INT32}
5602 *
5603 * Supported tensor rank: from 1.
5604 *
5605 * Inputs:
5606 * * 0: A 1-D tensor, specifying the desired output tensor shape.
5607 * * 1: A scalar, specifying the value to fill the output tensor with.
5608 * For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5609 * the scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
5610 * For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5611 * the scalar must be of {@link ANEURALNETWORKS_FLOAT32}.
5612 * For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
5613 * the scalar must be of {@link ANEURALNETWORKS_INT32}.
5614 *
5615 * Outputs:
5616 * * 0: The output tensor.
5617 *
5618 * Available since API level 30.
5619 */
5620 ANEURALNETWORKS_FILL = 100,
5621
5622 /**
5623 * Returns the rank of a tensor.
5624 *
5625 * The rank of a tensor is the number of dimensions in it. Also known as
5626 * "order", "degree", "ndims".
5627 *
5628 * Supported tensor {@link OperandCode}:
5629 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5630 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5631 * * {@link ANEURALNETWORKS_TENSOR_INT32}
5632 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5633 * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5634 * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
5635 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
5636 * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
5637 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5638 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5639 *
5640 * Supported tensor rank: from 1.
5641 *
5642 * Inputs:
5643 * * 0: The input tensor.
5644 *
5645 * Outputs:
5646 * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank
5647 * of the input tensor.
5648 *
5649 * Available since API level 30.
5650 */
5651 ANEURALNETWORKS_RANK = 101,
5652 } OperationCode;
5653
5654 /**
5655 * Fused activation function types.
5656 *
5657 *
5658 * Available since API level 27.
5659 */
5660 typedef enum {
5661 /** NO fused activation function. */
5662 ANEURALNETWORKS_FUSED_NONE = 0,
5663 /** Fused ReLU activation function. */
5664 ANEURALNETWORKS_FUSED_RELU = 1,
5665 /** Fused ReLU1 activation function.
*/
5666 ANEURALNETWORKS_FUSED_RELU1 = 2,
5667 /** Fused ReLU6 activation function. */
5668 ANEURALNETWORKS_FUSED_RELU6 = 3,
5669 } FuseCode;
5670
5671 /**
5672 * Implicit padding algorithms.
5673 *
5674 *
5675 * Available since API level 27.
5676 */
5677 typedef enum {
5678 /**
5679 * SAME padding.
5680 * Padding on both ends is the "same":
5681 * padding_to_beginning = total_padding / 2
5682 * padding_to_end = (total_padding + 1) / 2.
5683 * i.e., for an even amount of total padding, the padding on both ends is
5684 * exactly the same; for an odd amount, the padding at the end is bigger
5685 * than the padding at the beginning by 1.
5686 *
5687 * total_padding is a function of input size, stride, dilation and filter size.
5688 * It can be computed as follows:
5689 * out_size = (input_size + stride - 1) / stride
5690 * effective_filter_size = (filter_size - 1) * dilation + 1
5691 * needed_input = (out_size - 1) * stride + effective_filter_size
5692 * total_padding = max(0, needed_input - input_size)
5693 * For example, with input_size = 5, stride = 1, dilation = 1, and filter_size = 2: out_size = 5, needed_input = 6, and total_padding = 1, giving padding_to_beginning = 0 and padding_to_end = 1. The computation is the same for the horizontal and vertical directions.
5694 */
5695 ANEURALNETWORKS_PADDING_SAME = 1,
5696
5697 /**
5698 * VALID padding.
5699 * No padding. When the input size is not evenly divisible by
5700 * the filter size, the input at the end that could not fill
5701 * the whole filter tile will simply be ignored.
5702 */
5703 ANEURALNETWORKS_PADDING_VALID = 2,
5704 } PaddingCode;
5705
5706 /**
5707 * Execution preferences.
5708 *
5709 * Available since API level 27.
5710 */
5711 typedef enum {
5712 /**
5713 * Prefer executing in a way that minimizes battery drain.
5714 * This is desirable for compilations that will be executed often.
5715 */
5716 ANEURALNETWORKS_PREFER_LOW_POWER = 0,
5717 /**
5718 * Prefer returning a single answer as fast as possible, even if this causes
5719 * more power consumption.
5720 */
5721 ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
5722 /**
5723 * Prefer maximizing the throughput of successive frames, for example when
5724 * processing successive frames coming from the camera.
5725 */
5726 ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
5727 } PreferenceCode;
5728
5729 /**
5730 * Device types.
5731 *
5732 * The type of NNAPI device.
5733 */
5734 typedef enum {
5735 /** The device type cannot be provided. */
5736 ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
5737 /** The device does not fall into any category below. */
5738 ANEURALNETWORKS_DEVICE_OTHER = 1,
5739 /** The device runs NNAPI models on single or multi-core CPU. */
5740 ANEURALNETWORKS_DEVICE_CPU = 2,
5741 /** The device can run NNAPI models and also accelerate graphics APIs such
5742 * as OpenGL ES and Vulkan. */
5743 ANEURALNETWORKS_DEVICE_GPU = 3,
5744 /** Dedicated accelerator for Machine Learning workloads. */
5745 ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
5746 } DeviceTypeCode;
5747
5748 /**
5749 * Result codes.
5750 *
5751 * <p>Any NNAPI function can return any result code, including result codes not
5752 * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
5753 * indicates a failure of some kind.</p>
5754 *
5755 * <p>Additional information about the nature of a failure can be obtained from
5756 * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
5757 * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
5758 *
5759 * Available since API level 27.
5760 */
5761 typedef enum {
5762 /**
5763 * Operation was successful.
5764 */
5765 ANEURALNETWORKS_NO_ERROR = 0,
5766
5767 /**
5768 * Failure caused by not enough available memory.
5769 */
5770 ANEURALNETWORKS_OUT_OF_MEMORY = 1,
5771
5772 ANEURALNETWORKS_INCOMPLETE = 2,
5773
5774 /**
5775 * Failure caused by an unexpected null argument.
5776 */
5777 ANEURALNETWORKS_UNEXPECTED_NULL = 3,
5778
5779 /**
5780 * Failure caused by invalid function arguments, an invalid model definition,
5781 * an invalid execution definition, or invalid data at execution time.
5782 */
5783 ANEURALNETWORKS_BAD_DATA = 4,
5784
5785 /**
5786 * Failure caused by failed model execution.
5787 */
5788 ANEURALNETWORKS_OP_FAILED = 5,
5789
5790 /**
5791 * Failure caused by an object being in the wrong state.
5792 */
5793 ANEURALNETWORKS_BAD_STATE = 6,
5794
5795 /**
5796 * Failure caused by not being able to map a file into memory.
5797 * This may be caused by a file descriptor that is not mappable, or by an
5798 * AHardwareBuffer that is not supported by the device.
5799 * Mitigate by reading the content into memory instead.
5800 */
5801 ANEURALNETWORKS_UNMAPPABLE = 7,
5802
5803 /**
5804 * Failure caused by insufficient buffer size provided to a model output.
5805 */
5806 ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
5807
5808 /**
5809 * Failure caused by a device not being available.
5810 */
5811 ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
5812
5813 /**
5814 * Failure because a deadline could not be met for a task, but future
5815 * deadlines may still be met for the same task after a short delay.
5816 *
5817 * Available since API level 30.
5818 */
5819 ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
5820
5821 /**
5822 * Failure because a deadline could not be met for a task, and future
5823 * deadlines will likely also not be met for the same task even after a
5824 * short delay.
5825 *
5826 * Available since API level 30.
5827 */
5828 ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
5829
5830 /**
5831 * Failure because of a resource limitation within the driver, but future
5832 * calls for the same task may still succeed after a short delay.
5833 *
5834 * Available since API level 30.
5835 */
5836 ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
5837
5838 /**
5839 * Failure because of a resource limitation within the driver, and future
5840 * calls for the same task will likely also fail even after a short
5841 * delay.
5842 *
5843 * Available since API level 30.
5844 */
5845 ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
5846
5847 /**
5848 * Failure indicating an object is in a dead state.
5849 *
5850 * Available since API level 30.
5851 */
5852 ANEURALNETWORKS_DEAD_OBJECT = 14,
5853 } ResultCode;
5854
5855 /**
5856 * For {@link ANeuralNetworksModel_setOperandValue}, values with a
5857 * length smaller than or equal to this will be immediately copied into
5858 * the model. The size is in bytes.
5859 *
5860 * Available since API level 27.
5861 */
5862 enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };
5863
5864 /**
5865 * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
5866 * of the cache token required from the application. The size is in bytes.
5867 *
5868 * Available since API level 29.
5869 */
5870 enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };
5871
5872 /**
5873 * Different duration measurements.
5874 *
5875 * Durations are measured in nanoseconds.
5876 *
5877 * Available since API level 29.
5878 */
5879 typedef enum {
5880 // Execution time on hardware (not in the driver, which runs on the host processor).
5881 ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
5882 // Execution time in driver (including time on hardware).
Excludes overhead
5883 // such as that of the runtime itself and the IPC needed for the runtime to
5884 // communicate with the driver.
5885 ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
5886 // Execution time on hardware, after all dependencies have been signaled.
5887 // If no dependencies are specified (for example, if the execution was scheduled other
5888 // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
5889 // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
5890 // Available since API level 30.
5891 ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
5892 // Execution time in driver, after all dependencies have been signaled. Excludes
5893 // overhead such as that of the runtime itself and the IPC needed for the runtime
5894 // to communicate with the driver.
5895 // If no dependencies are specified (for example, if the execution was scheduled other
5896 // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
5897 // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
5898 // Available since API level 30.
5899 ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
5900 } DurationCode;
5901
5902 /**
5903 * Relative execution priority.
5904 *
5905 * Available since API level 30.
5906 */
5907 typedef enum {
5908 ANEURALNETWORKS_PRIORITY_LOW = 90,
5909 ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
5910 ANEURALNETWORKS_PRIORITY_HIGH = 110,
5911 ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
5912 } PriorityCode;
5913
5914 /**
5915 * ANeuralNetworksMemory is an opaque type that represents memory.
5916 *
5917 * This type is used to represent shared memory, memory-mapped files,
5918 * and similar memories.
5919 *
5920 * By using shared memory, a program can efficiently communicate to the
5921 * runtime and drivers the tensors that define a model. See
5922 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
5923 * should typically create one shared memory object that contains every constant tensor
5924 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
5925 * create shared memory from a file handle.
5926 * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
5927 * create shared memory from an AHardwareBuffer handle.
5928 *
5929 * Memory objects can also be used to specify the input and output arguments of
5930 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
5931 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
5932 *
5933 * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory},
5934 * {@link ANeuralNetworksExecution_setInputFromMemory} and
5935 * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared
5936 * memory object must be aligned on a boundary of a byte size that is a multiple
5937 * of the element type byte size, e.g., a tensor with
5938 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on a 4-byte boundary.
5939 *
5940 * It is the application's responsibility to ensure that there are no uses of
5941 * the memory after calling {@link ANeuralNetworksMemory_free}.
This includes 5942 * any model which references this memory because of a call to 5943 * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation 5944 * created using such a model, any execution object or burst object created 5945 * using such a compilation, or any execution which references this memory 5946 * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or 5947 * {@link ANeuralNetworksExecution_setOutputFromMemory}. 5948 * 5949 * Available since API level 27. 5950 * 5951 * Starting at API level 30, the application may request creation of device native memory from 5952 * {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation 5953 * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and 5954 * {@link ANeuralNetworksMemory_createFromDesc}. 5955 */ 5956 typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; 5957 5958 /** 5959 * ANeuralNetworksModel is an opaque type that contains a description of the 5960 * mathematical operations that constitute the model. 5961 * 5962 * <p>Build the model by calling<ul> 5963 * <li>{@link ANeuralNetworksModel_create}</li> 5964 * <li>{@link ANeuralNetworksModel_addOperation}</li> 5965 * <li>{@link ANeuralNetworksModel_addOperand}</li> 5966 * </ul> 5967 * 5968 * This forms a graph in which each operation and operand is a node, a 5969 * directed edge from an operand to an operation indicates that the 5970 * operand is an input to the operation, and a directed edge from an 5971 * operation to an operand indicates that the operand is an output 5972 * from the operation. This graph must be acyclic. 5973 * 5974 * A model is completed by calling {@link ANeuralNetworksModel_finish}. 5975 * A model is destroyed by calling {@link ANeuralNetworksModel_free}. 5976 * 5977 * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} 5978 * has been called on it.</p> 5979 * 5980 * <p>It is the application's responsibility to make sure that only one thread 5981 * modifies a model at a given time. It is however safe for more than one 5982 * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> 5983 * 5984 * <p>It is also the application's responsibility to ensure that there are no 5985 * other uses of the model after calling {@link ANeuralNetworksModel_free}. 5986 * This includes any compilation, execution object or burst object created using 5987 * the model.</p> 5988 * 5989 * Available since API level 27. 5990 */ 5991 typedef struct ANeuralNetworksModel ANeuralNetworksModel; 5992 5993 /** 5994 * ANeuralNetworksCompilation is an opaque type that can be used to compile 5995 * a machine learning model. 
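*
* As a non-normative editorial sketch of the sequence enumerated below
* (assuming a model that has already been finished with
* {@link ANeuralNetworksModel_finish}; error handling omitted):
*
* <pre>
* ANeuralNetworksCompilation* compilation = NULL;
* ANeuralNetworksCompilation_create(model, &compilation);
* ANeuralNetworksCompilation_setPreference(compilation,
*                                          ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER);
* ANeuralNetworksCompilation_finish(compilation);
* // ... create and run executions from the compilation ...
* ANeuralNetworksCompilation_free(compilation);
* </pre>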
5996 * 5997 * <p>To use:<ul> 5998 * <li>Create a new compilation instance by calling the 5999 * {@link ANeuralNetworksCompilation_create} function or 6000 * {@link ANeuralNetworksCompilation_createForDevices}.</li> 6001 * <li>Set any desired properties on the compilation (for example, 6002 * {@link ANeuralNetworksCompilation_setPreference}).</li> 6003 * <li>Optionally, set the caching signature and the cache directory on the 6004 * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> 6005 * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> 6006 * <li>Use the compilation as many times as needed 6007 * with {@link ANeuralNetworksExecution_create} and 6008 * {@link ANeuralNetworksBurst_create}.</li> 6009 * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} 6010 * once all executions using the compilation have completed.</li></ul></p> 6011 * 6012 * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. 6013 * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. 6014 * 6015 * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} 6016 * has been called on it.</p> 6017 * 6018 * <p>It is the application's responsibility to make sure that only 6019 * one thread modifies a compilation at a given time. It is however 6020 * safe for more than one thread to use the compilation once 6021 * {@link ANeuralNetworksCompilation_finish} has returned.</p> 6022 * 6023 * <p>It is also the application's responsibility to ensure that there are no other 6024 * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. 6025 * This includes any execution object or burst object created using the compilation, 6026 * or any memory descriptor with the compilation as part of one of the roles specified by 6027 * {@link ANeuralNetworksMemoryDesc_addInputRole} or 6028 * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p> 6029 * 6030 * Available since API level 27. 6031 */ 6032 typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; 6033 6034 /** 6035 * ANeuralNetworksExecution is an opaque type that can be used to apply a machine 6036 * learning model to a set of inputs. 
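*
* As a non-normative editorial sketch of the sequence enumerated below
* (assuming a finished compilation, a model with one input and one output,
* and appropriately sized buffers inputBuf and outputBuf; error handling
* omitted):
*
* <pre>
* ANeuralNetworksExecution* execution = NULL;
* ANeuralNetworksExecution_create(compilation, &execution);
* ANeuralNetworksExecution_setInput(execution, 0, NULL, inputBuf, inputSize);
* ANeuralNetworksExecution_setOutput(execution, 0, NULL, outputBuf, outputSize);
* ANeuralNetworksExecution_compute(execution);  // the synchronous variant
* ANeuralNetworksExecution_free(execution);
* </pre>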
6037 *
6038 * <p>To use:<ul>
6039 * <li>Create a new execution instance by calling the
6040 * {@link ANeuralNetworksExecution_create} function.</li>
6041 * <li>Associate input buffers or memory regions to the model inputs with
6042 * {@link ANeuralNetworksExecution_setInput} or
6043 * {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
6044 * <li>Associate output buffers or memory regions to the model outputs with
6045 * {@link ANeuralNetworksExecution_setOutput} or
6046 * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
6047 * <li>Apply the model with one of the following:</li><ul>
6048 * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}
6049 * or with {@link ANeuralNetworksExecution_startComputeWithDependencies},
6050 * waiting for the execution to complete with
6051 * {@link ANeuralNetworksEvent_wait}.</li>
6052 * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li>
6053 * <li>Synchronously as part of an execution burst with
6054 * {@link ANeuralNetworksExecution_burstCompute}.</li></ul>
6055 * <li>Destroy the execution with
6056 * {@link ANeuralNetworksExecution_free}.</li></ul></p>
6057 *
6058 * <p>An output buffer or memory region must not overlap with any
6059 * other output buffer or memory region, with an input buffer or
6060 * memory region, or with an operand value in a memory object
6061 * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
6062 *
6063 * <p>An execution cannot be modified once
6064 * {@link ANeuralNetworksExecution_burstCompute},
6065 * {@link ANeuralNetworksExecution_compute},
6066 * {@link ANeuralNetworksExecution_startCompute} or
6067 * {@link ANeuralNetworksExecution_startComputeWithDependencies} has been called on it.</p>
6068 *
6069 * <p>An execution can be applied to a model with
6070 * {@link ANeuralNetworksExecution_burstCompute},
6071 * {@link ANeuralNetworksExecution_compute},
6072 * {@link ANeuralNetworksExecution_startCompute} or
6073 * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new
6074 * executions to do new evaluations of the model.</p>
6075 *
6076 * <p>It is the application's responsibility to make sure that only one thread
6077 * modifies an execution at a given time. It is however safe for more than one
6078 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
6079 *
6080 * <p>It is also the application's responsibility to ensure that the execution
6081 * either has never been scheduled or has completed (i.e., that
6082 * {@link ANeuralNetworksExecution_burstCompute},
6083 * {@link ANeuralNetworksExecution_compute}, or
6084 * {@link ANeuralNetworksEvent_wait} has returned) before calling
6085 * {@link ANeuralNetworksExecution_free}.</p>
6086 *
6087 * <p>It is also the application's responsibility to ensure that there are no other
6088 * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
6089 *
6090 * <p>Multiple executions can be scheduled and evaluated concurrently, either by
6091 * means of {@link ANeuralNetworksExecution_compute} or
6092 * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in
6093 * different threads, or by means of
6094 * {@link ANeuralNetworksExecution_startCompute} or
6095 * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous).
6096 * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
6097 * different burst objects.)
The runtime makes no guarantee on the ordering of
6098 * completion of executions. If it's important to the application, the
6099 * application should enforce the ordering by ensuring that one execution
6100 * completes before the next is scheduled (for example, by scheduling all
6101 * executions synchronously within a single thread, or by scheduling all
6102 * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
6103 * calls to {@link ANeuralNetworksExecution_startCompute}); or by using
6104 * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a
6105 * list of events to be signaled before starting the actual evaluation.</p>
6106 *
6107 * Available since API level 27.
6108 */
6109 typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
6110
6111 #if __ANDROID_API__ >= 29
6112 /**
6113 * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
6114 */
6115 typedef struct ANeuralNetworksSymmPerChannelQuantParams {
6116 /** The index of the channel dimension. */
6117 uint32_t channelDim;
6118 /** The size of the scale array. Should be equal to dimensions[channelDim] of the Operand. */
6119 uint32_t scaleCount;
6120 /** The array of scaling values for each channel. Each value must be greater than zero. */
6121 const float* scales;
6122 } ANeuralNetworksSymmPerChannelQuantParams;
6123
6124 /**
6125 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
6126 * of a rapid sequence of executions. It will likely cause overhead if only used
6127 * for a single execution.
6128 *
6129 * ANeuralNetworksBurst serves as a context object for any number of inferences
6130 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
6131 * object and the {@link ANeuralNetworksExecution} objects used with it must all
6132 * have been created from the same {@link ANeuralNetworksCompilation} object.
6133 *
6134 * This object is also used as a hint to drivers, providing insight into the
6135 * lifetime of a rapid sequence of executions. For example, a driver may choose
6136 * to increase the clock frequency of its accelerator for the lifetime of a
6137 * burst object.
6138 *
6139 * <p>To use:<ul>
6140 * <li>Create a new burst object by calling the
6141 * {@link ANeuralNetworksBurst_create} function.</li>
6142 * <li>For each execution:</li><ul>
6143 * <li>Create {@link ANeuralNetworksExecution} and configure its
6144 * properties (see {@link ANeuralNetworksExecution} for details).</li>
6145 * <li>Apply the model synchronously with
6146 * {@link ANeuralNetworksExecution_burstCompute}, reusing the same
6147 * {@link ANeuralNetworksBurst} with the new
6148 * {@link ANeuralNetworksExecution}.</li>
6149 * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
6150 * <li>Destroy the burst with
6151 * {@link ANeuralNetworksBurst_free}.</li></ul></p>
6152 *
6153 * Available since API level 29.
6154 */
6155 typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
6156 #endif // __ANDROID_API__ >= 29
6157
6158 /**
6159 * ANeuralNetworksOperandType describes the type of an operand.
6160 *
6161 * This structure is used to describe both scalars and tensors.
6162 *
6163 * A tensor operand type with all dimensions specified is "fully
6164 * specified". Whenever possible (i.e., whenever the dimensions are
6165 * known at model construction time), a tensor operand type should be
6166 * (but is not required to be) fully specified, in order to enable the
6167 * best possible performance.
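*
* As a non-normative editorial sketch, a fully specified type for a 2 x 3
* tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} could be described as
* follows (scale and zeroPoint are 0 because quantization parameters do not
* apply to floating point types):
*
* <pre>
* uint32_t dims[2] = {2, 3};
* ANeuralNetworksOperandType tensorType = {
*         .type = ANEURALNETWORKS_TENSOR_FLOAT32,
*         .dimensionCount = 2,
*         .dimensions = dims,
*         .scale = 0.0f,
*         .zeroPoint = 0,
* };
* </pre>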
6168 *
6169 * If a tensor operand's type is not fully specified, the dimensions
6170 * of the operand are deduced from the operand types and values of the
6171 * operation for which that operand is an output, or from the corresponding
6172 * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
6173 * operand type in the case of referenced model input operands.
6174 *
6175 * <p>In the following situations, a tensor operand type must be fully
6176 * specified:<ul>
6177 * <li>The operand has a constant value, set by
6178 * {@link ANeuralNetworksModel_setOperandValue} (with a
6179 * non-nullptr buffer) or
6180 * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
6181 * <li>The operand is a model input (see
6182 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
6183 * model within a compilation. A fully specified tensor operand type
6184 * must either be provided to {@link ANeuralNetworksModel_addOperand};
6185 * or it must be provided to the corresponding
6186 * {@link ANeuralNetworksExecution_setInput}, or
6187 * {@link ANeuralNetworksExecution_setInputFromMemory}.
6188 * EXCEPTION: If the input is optional and omitted
6189 * (by passing nullptr for buffer to
6190 * {@link ANeuralNetworksExecution_setInput}) then it need
6191 * not have a fully specified tensor operand type.</li>
6192 * <li>The operand is a model output (see
6193 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
6194 * model within a compilation and is to be used with {@link
6195 * ANeuralNetworksExecution_startComputeWithDependencies}.
6196 * A fully specified tensor operand type must either be provided
6197 * to {@link ANeuralNetworksModel_addOperand}; or it must be
6198 * provided to the corresponding
6199 * {@link ANeuralNetworksExecution_setOutput}, or
6200 * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>
6201 *
6202 * A tensor operand type with a specified rank but some number of
6203 * unspecified dimensions is represented by setting dimensionCount to
6204 * the rank and each unspecified dimension to 0.
6205 *
6206 * Available since API level 27.
6207 *
6208 * Starting at API level 29, a tensor operand type of unspecified rank is
6209 * represented by setting dimensionCount to 0 and dimensions to NULL (just as if
6210 * it were a scalar operand type).
6211 */
6212 typedef struct ANeuralNetworksOperandType {
6213 /**
6214 * The data type, e.g., ANEURALNETWORKS_FLOAT32.
6215 */
6216 int32_t type;
6217
6218 /**
6219 * The number of dimensions (rank).
6220 *
6221 * Must be 0 for scalars.
6222 */
6223 uint32_t dimensionCount;
6224
6225 /**
6226 * The dimensions of the tensor.
6227 *
6228 * Must be nullptr for scalars.
6229 */
6230 const uint32_t* dimensions;
6231
6232 /**
6233 * The quantization scale.
6234 *
6235 * Must be 0 when not applicable to an operand type.
6236 *
6237 * See {@link OperandCode}.
6238 */
6239 float scale;
6240
6241 /**
6242 * The quantization zero point.
6243 *
6244 * Must be 0 when not applicable to an operand type.
6245 *
6246 * See {@link OperandCode}.
6247 */
6248 int32_t zeroPoint;
6249 } ANeuralNetworksOperandType;
6250
6251 typedef int32_t ANeuralNetworksOperationType;
6252
6253 /**
6254 * ANeuralNetworksEvent is an opaque type that represents an event
6255 * that will be signaled once an execution completes.
6256 *
6257 * Available since API level 27.
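*
* As a non-normative editorial sketch (assuming an execution object that has
* been fully configured; error handling omitted):
*
* <pre>
* ANeuralNetworksEvent* event = NULL;
* ANeuralNetworksExecution_startCompute(execution, &event);
* // ... do other work while the execution runs ...
* ANeuralNetworksEvent_wait(event);  // blocks until the execution completes
* ANeuralNetworksEvent_free(event);
* </pre>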
6258 */ 6259 typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; 6260 6261 #if __ANDROID_API__ >= 29 6262 6263 /** 6264 * ANeuralNetworksDevice is an opaque type that represents a device. 6265 * 6266 * This type is used to query basic properties and supported operations of the corresponding 6267 * device, and control which device(s) a model is to be run on. 6268 * 6269 * Available since API level 29. 6270 */ 6271 typedef struct ANeuralNetworksDevice ANeuralNetworksDevice; 6272 6273 #endif // __ANDROID_API__ >= 29 6274 6275 #if __ANDROID_API__ >= 30 6276 6277 /** 6278 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor. 6279 * 6280 * A memory descriptor describes the properties of a memory object, and is used by 6281 * {@link ANeuralNetworksMemory_createFromDesc}. 6282 * 6283 * To use: 6284 * - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}. 6285 * - Specify all of the intended input and output roles by calling 6286 * {@link ANeuralNetworksMemoryDesc_addInputRole} and 6287 * {@link ANeuralNetworksMemoryDesc_addOutputRole}. 6288 * - Optionally, specify the memory dimensions by calling 6289 * {@link ANeuralNetworksMemoryDesc_setDimensions}. 6290 * - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}. 6291 * - Use the memory descriptor as many times as needed with 6292 * {@link ANeuralNetworksMemory_createFromDesc}. 6293 * - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}. 6294 * 6295 * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}. 6296 * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}. 6297 * 6298 * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish} 6299 * has been called on it. 6300 * 6301 * It is the application's responsibility to make sure that only 6302 * one thread modifies a memory descriptor at a given time. It is however 6303 * safe for more than one thread to use the memory descriptor once 6304 * {@link ANeuralNetworksMemoryDesc_finish} has returned. 6305 * 6306 * It is also the application's responsibility to ensure that there are no other 6307 * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}. 6308 * It is however safe to continue using a {@link ANeuralNetworksMemory} object created 6309 * from the memory descriptor. 6310 * 6311 * Available since API level 30. 6312 */ 6313 typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc; 6314 6315 /** 6316 * Create a {@link ANeuralNetworksMemoryDesc} with no properties. 6317 * 6318 * This only creates the memory descriptor. Its properties should be set with calls to 6319 * {@link ANeuralNetworksMemoryDesc_addInputRole}, 6320 * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and 6321 * {@link ANeuralNetworksMemoryDesc_setDimensions}. 6322 * 6323 * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set. 6324 * 6325 * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor 6326 * is no longer needed. 6327 * 6328 * Available since API level 30. 6329 * 6330 * @param desc The {@link ANeuralNetworksMemoryDesc} to be created. 6331 * Set to NULL if unsuccessful. 6332 * 6333 * @return ANEURALNETWORKS_NO_ERROR if successful. 6334 */ 6335 int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __INTRODUCED_IN(30); 6336 6337 /** 6338 * Destroy a memory descriptor. 
6339 * 6340 * The memory descriptor need not have been finished by a call to 6341 * {@link ANeuralNetworksMemoryDesc_finish}. 6342 * 6343 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. 6344 * 6345 * Available since API level 30. 6346 * 6347 * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and 6348 * results in no operation. 6349 */ 6350 void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30); 6351 6352 /** 6353 * Specify that a memory object will be playing the role of an input to an execution created from a 6354 * particular compilation. 6355 * 6356 * The compilation and the input index fully specify an input operand. This function 6357 * may be invoked multiple times on the same memory descriptor with different input operands, 6358 * and the same input operand may be specified on multiple memory descriptors. However, 6359 * specifying the same input operand on the same memory descriptor more than once will 6360 * return an error. 6361 * 6362 * The dimensions of the corresponding model operands of all the roles specified by 6363 * {@link ANeuralNetworksMemoryDesc_addInputRole} and 6364 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two 6365 * dimensions are incompatible if both ranks are fully specified but have different values, or if 6366 * there is at least one axis that is fully specified in both but has different values. 6367 * 6368 * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and 6369 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor 6370 * before invoking {@link ANeuralNetworksMemoryDesc_finish}. 6371 * 6372 * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been 6373 * called will return an error. 6374 * 6375 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. 6376 * 6377 * Available since API level 30. 6378 * 6379 * @param desc The memory descriptor to be modified. 6380 * @param compilation The compilation object. It must already have been finished by calling 6381 * {@link ANeuralNetworksCompilation_finish}, and must outlive the memory 6382 * descriptor. 6383 * @param index The index of the input argument we are referencing from the compilation. It is 6384 * an index into the inputs list passed to 6385 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6386 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6387 * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the 6388 * memory is to be used in the specified role. This is provided as a hint to 6389 * optimize the case when different roles prefer different memory locations or data 6390 * layouts. 6391 * 6392 * @return ANEURALNETWORKS_NO_ERROR if successful. 6393 */ 6394 int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc, 6395 const ANeuralNetworksCompilation* compilation, 6396 uint32_t index, float frequency) __INTRODUCED_IN(30); 6397 6398 /** 6399 * Specify that a memory object will be playing the role of an output to an execution created from a 6400 * particular compilation. 6401 * 6402 * The compilation and the output index fully specify an output operand. This function 6403 * may be invoked multiple times on the same memory descriptor with different output operands, 6404 * and the same output operand may be specified on multiple memory descriptors. 
However, 6405 * specifying the same output operand on the same memory descriptor object more than once will 6406 * return an error. 6407 * 6408 * The dimensions of the corresponding model operands of all the roles specified by 6409 * {@link ANeuralNetworksMemoryDesc_addInputRole} and 6410 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two 6411 * dimensions are incompatible if both ranks are fully specified but have different values, or if 6412 * there is at least one axis that is fully specified in both but has different values. 6413 * 6414 * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and 6415 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor 6416 * before invoking {@link ANeuralNetworksMemoryDesc_finish}. 6417 * 6418 * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been 6419 * called will return an error. 6420 * 6421 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. 6422 * 6423 * Available since API level 30. 6424 * 6425 * @param desc The memory descriptor to be modified. 6426 * @param compilation The compilation object. It must already have been finished by calling 6427 * {@link ANeuralNetworksCompilation_finish}, and must outlive the memory 6428 * descriptor. 6429 * @param index The index of the output argument we are referencing from the compilation. It is 6430 * an index into the outputs list passed to 6431 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6432 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6433 * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the 6434 * memory is to be used in the specified role. This is provided as a hint to 6435 * optimize the case when multiple roles prefer different memory locations or data 6436 * layouts. 6437 * 6438 * @return ANEURALNETWORKS_NO_ERROR if successful. 6439 */ 6440 int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc, 6441 const ANeuralNetworksCompilation* compilation, 6442 uint32_t index, float frequency) __INTRODUCED_IN(30); 6443 6444 /** 6445 * Set the dimensional information of the memory descriptor. 6446 * 6447 * The specified dimensions must be compatible with the dimensions of the corresponding model 6448 * operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and 6449 * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks 6450 * are fully specified but have different values, or if there is at least one axis that is fully 6451 * specified in both but has different values. 6452 * 6453 * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been 6454 * called will return an error. 6455 * 6456 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. 6457 * 6458 * Available since API level 30. 6459 * 6460 * @param desc The memory descriptor to be modified. 6461 * @param rank The number of dimensions. Must be 0 for scalars. 6462 * @param dimensions An array of dimensions. An entry with the value 0 indicates that the 6463 * corresponding axis has an unknown size. 6464 * 6465 * @return ANEURALNETWORKS_NO_ERROR if successful. 
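*
* For example (a non-normative editorial sketch), to constrain the descriptor
* to rank-2 shapes whose second axis is 4 while leaving the first axis
* unknown:
*
* <pre>
* uint32_t dims[2] = {0, 4};  // 0 marks an axis of unknown size
* ANeuralNetworksMemoryDesc_setDimensions(desc, 2, dims);
* </pre>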
6466 */
6467 int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank,
6468 const uint32_t* dimensions) __INTRODUCED_IN(30);
6469
6470 /**
6471 * Indicate that we have finished modifying a memory descriptor. Required before calling
6472 * {@link ANeuralNetworksMemory_createFromDesc}.
6473 *
6474 * This function must only be called once for a given memory descriptor.
6475 *
6476 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
6477 *
6478 * Available since API level 30.
6479 *
6480 * @param desc The memory descriptor to be finished.
6481 *
6482 * @return ANEURALNETWORKS_NO_ERROR if successful.
6483 */
6484 int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30);
6485
6486 /**
6487 * Creates a memory object from a memory descriptor.
6488 *
6489 * The memory object is created with an uninitialized buffer. A memory object with an uninitialized
6490 * buffer may only be used according to the roles specified by {@link
6491 * ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link
6492 * ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object
6493 * is used as an output in a successful execution, or used as the destination memory in a successful
6494 * {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used
6495 * according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or
6496 * destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will
6497 * return to the uninitialized state if the memory object is used as an output in a failed
6498 * execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}.
6499 *
6500 * The dimensions of the memory descriptor are deduced from the dimensions of the corresponding
6501 * model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
6502 * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to
6503 * {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have
6504 * unspecified dimensions or rank. In such a case, the same memory object may be used with different
6505 * shapes of outputs in different executions. When the memory is used as an input, the input shape
6506 * must be the same as the output shape from the last execution using this memory object as an
6507 * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the destination
6508 * memory. Creating a memory object with unspecified dimensions or rank may fail for certain sets of
6509 * roles.
6510 *
6511 * Using the memory in roles or shapes that are not compatible with the rules specified above will
6512 * return an error.
6513 *
6514 * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
6515 * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
6516 * both offset and length must be set to zero and the entire memory region will be
6517 * associated with the specified input or output operand.
6518 *
6519 * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this
6520 * function will return an error.
6521 *
6522 * {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed.
6523 *
6524 * Attempting to create memory from an unfinished memory descriptor will return an error.
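 *
 * A minimal sketch of the descriptor-to-memory workflow, assuming two finished
 * compilations (compilation1 and compilation2 are hypothetical names) that
 * produce and consume the same intermediate tensor at role index 0; error
 * checking is omitted:
 *
 *   ANeuralNetworksMemoryDesc* desc = NULL;
 *   ANeuralNetworksMemoryDesc_create(&desc);
 *   // Output 0 of the first compilation feeds input 0 of the second.
 *   ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation1, 0, 1.0f);
 *   ANeuralNetworksMemoryDesc_addInputRole(desc, compilation2, 0, 1.0f);
 *   ANeuralNetworksMemoryDesc_finish(desc);
 *   ANeuralNetworksMemory* memory = NULL;
 *   ANeuralNetworksMemory_createFromDesc(desc, &memory);
 *   // The descriptor is no longer needed once the memory object exists.
 *   ANeuralNetworksMemoryDesc_free(desc);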
6525 * 6526 * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory} 6527 * object. 6528 * 6529 * Available since API level 30. 6530 * 6531 * @param desc The memory descriptor. 6532 * @param memory The memory object to be created. 6533 * Set to NULL if unsuccessful. 6534 * 6535 * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is 6536 * created with unspecified dimensions or rank and it is not supported for this set of 6537 * roles. 6538 */ 6539 int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc, 6540 ANeuralNetworksMemory** memory) __INTRODUCED_IN(30); 6541 6542 /** 6543 * Copies data from one memory object to another. 6544 * 6545 * If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc}, 6546 * the src and dst must have the same logical size: 6547 * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created 6548 * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of 6549 * AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory. 6550 * - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a 6551 * format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is 6552 * no padding and the data is tightly packed. This function may fail if the AHardwareBuffer 6553 * cannot be accessed. 6554 * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size 6555 * equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This 6556 * function will fail if the number of elements is unknown. 6557 * 6558 * If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have 6559 * compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but 6560 * have different values, or if there is at least one axis that is fully specified in both but has 6561 * different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions 6562 * of dst will get updated according to the dimensions of the src. 6563 * 6564 * In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must 6565 * have been used as an output in a successful execution, or used as the destination memory in a 6566 * successful {@link ANeuralNetworksMemory_copy}. 6567 * 6568 * The src and dst may have different data layout, in which case the data copying is performed 6569 * logically with data layout transformation. 6570 * 6571 * Available since API level 30. 6572 * 6573 * @param src The source memory object. 6574 * @param dst The destination memory object. 6575 * 6576 * @return ANEURALNETWORKS_NO_ERROR if successful. 6577 */ 6578 int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst) 6579 __INTRODUCED_IN(30); 6580 6581 #endif // __ANDROID_API__ >= 30 6582 6583 #if __ANDROID_API__ >= 29 6584 6585 /** 6586 * Get the number of available devices. 6587 * 6588 * @param numDevices Used to return the number of devices. 6589 * 6590 * @return ANEURALNETWORKS_NO_ERROR if successful. 6591 * 6592 * Available since API level 29. 6593 */ 6594 int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __INTRODUCED_IN(29); 6595 6596 /** 6597 * Get the representation of the specified device. 
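 *
 * A typical enumeration loop, sketched for illustration (error checking
 * omitted):
 *
 *   uint32_t numDevices = 0;
 *   ANeuralNetworks_getDeviceCount(&numDevices);
 *   for (uint32_t i = 0; i < numDevices; ++i) {
 *       ANeuralNetworksDevice* device = NULL;
 *       ANeuralNetworks_getDevice(i, &device);
 *       const char* name = NULL;
 *       ANeuralNetworksDevice_getName(device, &name);
 *       // Inspect name, type, and feature level to pick a device.
 *   }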
6598 *
6599 * @param devIndex The index of the specified device. Must be less than the
6600 * number of available devices.
6601 * @param device The representation of the specified device.
6602 * The same representation will always be returned for the specified
6603 * device.
6604 *
6605 * @return ANEURALNETWORKS_NO_ERROR if successful.
6606 *
6607 * Available since API level 29.
6608 */
6609 int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device)
6610 __INTRODUCED_IN(29);
6611
6612 /**
6613 * Get the name of the specified device.
6614 *
6615 * @param device The representation of the specified device.
6616 * @param name The returned name of the specified device. The name will be in UTF-8
6617 * and will be null-terminated. It will be recognizable as a known device name
6618 * rather than a cryptic string. For devices with feature level reported by
6619 * {@link ANeuralNetworksDevice_getFeatureLevel} that is 29 and above, the
6620 * format of the name is {VENDOR}-{DEVICE}. For devices with feature level 28
6621 * or lower, the format of the name is undefined.
6622 * The name will remain valid for the duration of the application.
6623 *
6624 * @return ANEURALNETWORKS_NO_ERROR if successful.
6625 *
6626 * Available since API level 29.
6627 */
6628 int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name)
6629 __INTRODUCED_IN(29);
6630
6631 /**
6632 * Get the type of a given device.
6633 *
6634 * The device type can be used to help application developers to distribute Machine Learning
6635 * workloads and other workloads such as graphical rendering.
6636 * E.g., for an app which renders AR scenes based on real time object detection results,
6637 * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU
6638 * for graphical rendering.
6639 *
6640 * @param device The representation of the specified device.
6641 * @param type The returned {@link DeviceTypeCode} of the specified device.
6642 *
6643 * @return ANEURALNETWORKS_NO_ERROR if successful.
6644 *
6645 * Available since API level 29.
6646 */
6647 int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type)
6648 __INTRODUCED_IN(29);
6649
6650 /**
6651 * Get the version of the driver implementation of the specified device.
6652 *
6653 * It’s the responsibility of the driver implementor to ensure that this version string
6654 * uniquely distinguishes this implementation from all previous implementations.
6655 *
6656 * This version string must not be confused with the feature level which is solely defined
6657 * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.
6658 * For example, it is not possible to filter all drivers older than a certain version.
6659 *
6660 * Application developers may use this version string to avoid or prefer specific driver
6661 * implementations. For example, an application may want to do so because:
6662 * - A specific version of the driver does not provide the required performance,
6663 * perhaps because of a performance regression.
6664 * - A specific version of the driver has a bug or returns results that don’t match
6665 * the minimum precision requirement for the application.
6666 *
6667 * @param device The representation of the specified device.
6668 * @param version The returned version string of the driver for the specified device. The
6669 * string will be in UTF-8 and will be null-terminated. For devices with feature
6670 * level 28 or lower, "UNKNOWN" will be returned. The version string will remain
6671 * valid for the duration of the application.
6672 *
6673 * @return ANEURALNETWORKS_NO_ERROR if successful.
6674 *
6675 * Available since API level 29.
6676 */
6677 int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version)
6678 __INTRODUCED_IN(29);
6679
6680 /**
6681 * Get the feature level of the specified device.
6682 *
6683 * Each device has a supported feature level, which is the most advanced feature this driver
6684 * implements. For example, if the driver implements the features introduced in Android P,
6685 * but does not implement the features introduced after Android P, the value would be 28.
6686 * Developers could decide whether or not the specified device should be used for a Model that
6687 * has certain feature requirements.
6688 *
6689 * @param device The representation of the specified device.
6690 * @param featureLevel The API level of the most advanced feature this driver implements.
6691 *
6692 * @return ANEURALNETWORKS_NO_ERROR if successful.
6693 *
6694 * Available since API level 29.
6695 */
6696 int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
6697 int64_t* featureLevel) __INTRODUCED_IN(29);
6698
6699 #if __ANDROID_API__ >= 30
6700
6701 /**
6702 * Wait until the device is in a live state.
6703 *
6704 * A device may encounter internal errors and temporarily enter a dead state. A
6705 * call that uses a device in such a state will return with the error
6706 * {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until
6707 * the device is in a live state.
6708 *
6709 * @param device The representation of the specified device.
6710 *
6711 * @return ANEURALNETWORKS_NO_ERROR if successful.
6712 *
6713 * Available since API level 30.
6714 */
6715 int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __INTRODUCED_IN(30);
6716
6717 #endif // __ANDROID_API__ >= 30
6718
6719 /**
6720 * Get the supported operations for a specified set of devices. If multiple devices
6721 * are selected, the supported operation list is a union of supported operations of all
6722 * selected devices.
6723 *
6724 * @param model The model to be queried.
6725 * @param devices The set of devices. Must not contain duplicates.
6726 * @param numDevices The number of devices in the set.
6727 * @param supportedOps The boolean array to be filled. True means supported. The size of the
6728 * boolean array must be at least as large as the number of operations
6729 * in the model. The order of elements in the supportedOps array matches
6730 * the order in which the corresponding operations were added to the model.
6731 *
6732 * @return ANEURALNETWORKS_NO_ERROR if successful.
6733 *
6734 * Available since API level 29.
6735 */
6736 int ANeuralNetworksModel_getSupportedOperationsForDevices(
6737 const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
6738 uint32_t numDevices, bool* supportedOps) __INTRODUCED_IN(29);
6739
6740 /**
6741 * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set
6742 * of devices. If more than one device is specified, the compilation will
6743 * distribute the workload automatically across the devices. The model must be fully
6744 * supported by the specified set of devices.
This means that 6745 * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every 6746 * operation for that model/devices pair. 6747 * 6748 * The user must handle all compilation and execution failures from the 6749 * specified set of devices. This is in contrast to a use of {@link 6750 * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover 6751 * from such failures. 6752 * 6753 * The model passed to this function is termed the "main model" of the 6754 * compilation, to distinguish it from other models referred to by an Operand 6755 * of type {@link ANEURALNETWORKS_MODEL} within this compilation. 6756 * 6757 * @param model The {@link ANeuralNetworksModel} to be compiled. 6758 * @param devices The set of devices. Must not contain duplicates. 6759 * @param numDevices The number of devices in the set. 6760 * @param compilation The newly created object or NULL if unsuccessful. 6761 * 6762 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 6763 * if the model is invalid. 6764 * 6765 * Available since API level 29. 6766 */ 6767 int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model, 6768 const ANeuralNetworksDevice* const* devices, 6769 uint32_t numDevices, 6770 ANeuralNetworksCompilation** compilation) 6771 __INTRODUCED_IN(29); 6772 6773 /** 6774 * Sets the compilation caching signature and the cache directory. 6775 * 6776 * Provides optional caching information to the runtime for faster repeated 6777 * compilation. 6778 * 6779 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 6780 * 6781 * @param compilation The compilation to be modified. 6782 * @param cacheDir The cache directory for the runtime to store and retrieve caching 6783 * data. It is recommended to use the code cache directory provided 6784 * by the Android runtime. If not using the code cache directory, the 6785 * user should choose a directory local to the application, and is 6786 * responsible for managing the cache entries. 6787 * @param token The token provided by the user to specify a model must be of length 6788 * ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that 6789 * the token is unique to a model within the application. The NNAPI 6790 * runtime cannot detect token collisions; a collision will result in a 6791 * failed execution or in a successful execution that produces incorrect 6792 * output values. 6793 * 6794 * @return ANEURALNETWORKS_NO_ERROR if successful. 6795 * 6796 * Available since API level 29. 6797 */ 6798 int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation, 6799 const char* cacheDir, const uint8_t* token) 6800 __INTRODUCED_IN(29); 6801 6802 /** 6803 * Schedule synchronous evaluation of the execution. 6804 * 6805 * <p>Schedules synchronous evaluation of the execution. Returns once the 6806 * execution has completed and the outputs are ready to be consumed. 6807 * </p> 6808 * 6809 * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution, 6810 * and the execution is not able to complete before the timeout duration is 6811 * exceeded, then execution may be aborted, in which case 6812 * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. If the device has 6813 * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} 6814 * that is lower than 30, then the timeout duration hint will be ignored. 
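 *
 * A bare-bones synchronous invocation, as a sketch (execution is a
 * hypothetical name for an execution already set up with its inputs and
 * outputs):
 *
 *   int status = ANeuralNetworksExecution_compute(execution);
 *   if (status == ANEURALNETWORKS_NO_ERROR) {
 *       // Outputs are now ready to be consumed.
 *   }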
6815 * 6816 * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and 6817 * the condition model does not output false within the loop timeout duration, 6818 * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} 6819 * will be returned. 6820 * 6821 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6822 * 6823 * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. 6824 * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. 6825 * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for 6826 * asynchronous execution with dependencies. 6827 * 6828 * Available since API level 29. 6829 * 6830 * @param execution The execution to be scheduled and executed. 6831 * 6832 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. 6833 * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot 6834 * be properly mapped. 6835 */ 6836 int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __INTRODUCED_IN(29); 6837 6838 /** 6839 * Get the dimensional information of the specified output operand of the model of the 6840 * {@link ANeuralNetworksExecution}. 6841 * 6842 * The execution must have completed. On asynchronous execution initiated by 6843 * {@link ANeuralNetworksExecution_startCompute} or 6844 * {@link ANeuralNetworksExecution_startComputeWithDependencies}, 6845 * {@link ANeuralNetworksEvent_wait} must be called prior to this function. 6846 * 6847 * @param execution The execution to be queried. 6848 * @param index The index of the output argument we are querying. It is 6849 * an index into the lists passed to 6850 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6851 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6852 * @param rank The rank of the output operand. 6853 * 6854 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE 6855 * if the target output is provided an insufficient buffer at execution time, 6856 * ANEURALNETWORKS_BAD_DATA if the index is invalid. 6857 * 6858 * Available since API level 29. 6859 */ 6860 int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution, 6861 int32_t index, uint32_t* rank) 6862 __INTRODUCED_IN(29); 6863 6864 /** 6865 * Get the dimensional information of the specified output operand of the model of the 6866 * {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar. 6867 * 6868 * The execution must have completed. On asynchronous execution initiated by 6869 * {@link ANeuralNetworksExecution_startCompute} or 6870 * {@link ANeuralNetworksExecution_startComputeWithDependencies}, 6871 * {@link ANeuralNetworksEvent_wait} must be called prior to this function. 6872 * 6873 * @param execution The execution to be queried. 6874 * @param index The index of the output argument we are querying. It is an index into the lists 6875 * passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6876 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6877 * @param dimensions The dimension array to be filled. The size of the array must be exactly as 6878 * large as the rank of the output operand to be queried in the model. 
6879 * 6880 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE 6881 * if the target output is provided an insufficient buffer at execution time, 6882 * ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar. 6883 * 6884 * Available since API level 29. 6885 */ 6886 int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution, 6887 int32_t index, uint32_t* dimensions) 6888 __INTRODUCED_IN(29); 6889 6890 /** 6891 * Create a {@link ANeuralNetworksBurst} to apply the given compilation. 6892 * This only creates the burst object. Computation is only performed once 6893 * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid 6894 * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}. 6895 * 6896 * <p>The provided compilation must outlive the burst object.</p> 6897 * 6898 * Available since API level 29. 6899 * 6900 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. 6901 * @param burst The newly created object or NULL if unsuccessful. 6902 * 6903 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 6904 * if the compilation is invalid. 6905 */ 6906 int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation, 6907 ANeuralNetworksBurst** burst) __INTRODUCED_IN(29); 6908 6909 /** 6910 * Destroys the burst object. 6911 * 6912 * Available since API level 29. 6913 * 6914 * @param burst The burst object to be destroyed. Passing NULL is acceptable and 6915 * results in no operation. 6916 */ 6917 void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); 6918 6919 /** 6920 * Schedule synchronous evaluation of the execution on a burst object. 6921 * 6922 * <p>Schedules synchronous evaluation of the execution. Returns once the 6923 * execution has completed and the outputs are ready to be consumed.</p> 6924 * 6925 * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution, 6926 * and the execution is not able to complete before the timeout duration is 6927 * exceeded, then execution may be aborted, in which case 6928 * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. 6929 * 6930 * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and 6931 * the condition model does not output false within the loop timeout duration, 6932 * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} 6933 * will be returned. If the device has a feature level reported by 6934 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the 6935 * timeout duration hint will be ignored. 6936 * 6937 * <p>There must be at most one {@link ANeuralNetworksExecution} processing at 6938 * any given time for any given burst object. Any 6939 * {@link ANeuralNetworksExecution} launched before the previous has finished 6940 * will result in ANEURALNETWORKS_BAD_STATE.</p> 6941 * 6942 * See {@link ANeuralNetworksExecution_compute} for synchronous execution. 6943 * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. 6944 * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for 6945 * asynchronous execution with dependencies. 6946 * 6947 * Available since API level 29. 6948 * 6949 * @param burst The burst object to execute on. 6950 * @param execution The execution to be scheduled and executed. The execution 6951 * must be created from the same {@link 6952 * ANeuralNetworksCompilation} as the burst object. 
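 *
 * For illustration, a frame-by-frame inference loop could reuse one burst
 * object across many executions; compilation and numFrames are hypothetical
 * names, and error checking is omitted:
 *
 *   ANeuralNetworksBurst* burst = NULL;
 *   ANeuralNetworksBurst_create(compilation, &burst);
 *   for (int frame = 0; frame < numFrames; ++frame) {
 *       ANeuralNetworksExecution* execution = NULL;
 *       ANeuralNetworksExecution_create(compilation, &execution);
 *       // ... set inputs and outputs for this frame ...
 *       ANeuralNetworksExecution_burstCompute(execution, burst);
 *       ANeuralNetworksExecution_free(execution);
 *   }
 *   ANeuralNetworksBurst_free(burst);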
6953 *
6954 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
6955 */
6956 int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution,
6957 ANeuralNetworksBurst* burst) __INTRODUCED_IN(29);
6958
6959 /**
6960 * Creates a shared memory object from an AHardwareBuffer handle.
6961 *
6962 * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB
6963 * format, it can be used the same way as shared memory created from a file handle. See
6964 * {@link ANeuralNetworksMemory} for a description on how to use this shared memory.
6965 *
6966 * If the shared memory is backed by an AHardwareBuffer of a format other than
6967 * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs.
6968 * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
6969 * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both
6970 * offset and length must be set to zero and the entire memory region will be
6971 * associated with the specified input or output operand. There is no guarantee
6972 * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination
6973 * can be used by arbitrary devices. The execution will fail if the selected set of
6974 * devices cannot consume the buffer.
6975 *
6976 * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory
6977 * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is
6978 * disallowed.
6979 *
6980 * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object.
6981 *
6982 * Available since API level 29.
6983 *
6984 * @param ahwb The AHardwareBuffer handle.
6985 * @param memory The memory object to be created.
6986 * Set to NULL if unsuccessful.
6987 *
6988 * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
6989 *
6990 * @see AHardwareBuffer
6991 */
6992 int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb,
6993 ANeuralNetworksMemory** memory)
6994 __INTRODUCED_IN(29);
6995
6996 /**
6997 *
6998 * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be
6999 * measured. Evaluation of the execution must not have been scheduled.
7000 *
7001 * By default, duration is not measured.
7002 *
7003 * The {@link ANeuralNetworksExecution} must have been created from an
7004 * {@link ANeuralNetworksCompilation} which in turn was created from
7005 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.
7006 * If the device has a feature level reported by
7007 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 29, then the
7008 * duration will not be measured.
7009 *
7010 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7011 *
7012 * Available since API level 29.
7013 *
7014 * @param execution The execution to be modified.
7015 * @param measure 'true' if duration is to be measured, 'false' if not.
7016 *
7017 * @return ANEURALNETWORKS_NO_ERROR if successful.
7018 */
7019 int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure)
7020 __INTRODUCED_IN(29);
7021
7022 /**
7023 * Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds.
7024 *
7025 * The execution must have completed. On asynchronous execution initiated by
7026 * {@link ANeuralNetworksExecution_startCompute} or
7027 * {@link ANeuralNetworksExecution_startComputeWithDependencies},
7028 * {@link ANeuralNetworksEvent_wait} must be called prior to this function.
7029 *
7030 * @param execution The execution to be queried.
7031 * @param durationCode The measurement to be queried, specified by {@link DurationCode}.
7032 * @param duration The returned duration. If no measurement was requested by
7033 * {@link ANeuralNetworksExecution_setMeasureTiming}, if the
7034 * device has a feature level reported by
7035 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower
7036 * than 29, or for some other reason the duration is not
7037 * available, UINT64_MAX will be returned. A particular device
7038 * need not support any given measurement.
7039 *
7040 * @return ANEURALNETWORKS_NO_ERROR if successful.
7041 *
7042 * Available since API level 29.
7043 */
7044 int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution,
7045 int32_t durationCode, uint64_t* duration)
7046 __INTRODUCED_IN(29);
7047
7048 #endif // __ANDROID_API__ >= 29
7049
7050 #if __ANDROID_API__ >= 27
7051
7052 /**
7053 * Creates a shared memory object from a file descriptor.
7054 *
7055 * The shared memory is backed by a file descriptor via mmap.
7056 * See {@link ANeuralNetworksMemory} for a description on how to use
7057 * this shared memory.
7058 *
7059 * Available since API level 27.
7060 *
7061 * @param size The requested size in bytes.
7062 * Must not be larger than the file size.
7063 * @param protect The desired memory protection for the mapping.
7064 * It is either PROT_NONE or the bitwise OR of one or
7065 * more of the following flags: PROT_READ, PROT_WRITE.
7066 * @param fd The requested file descriptor.
7067 * The file descriptor has to be mmap-able. The file
7068 * descriptor will be duplicated.
7069 * @param offset The offset to the beginning of the file of the area to map.
7070 * The offset has to be aligned to a page size.
7071 * @param memory The memory object to be created.
7072 * Set to NULL if unsuccessful.
7073 *
7074 * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
7075 */
7076 int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
7077 ANeuralNetworksMemory** memory) __INTRODUCED_IN(27);
7078
7079 /**
7080 * Delete a memory object.
7081 *
7082 * Destroys the object used by the run time to keep track of the memory.
7083 * This will free the underlying actual memory if no other code has open
7084 * handles to this memory.
7085 *
7086 * Available since API level 27.
7087 *
7088 * @param memory The memory object to be freed. Passing NULL is acceptable and
7089 * results in no operation.
7090 */
7091 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27);
7092
7093 /**
7094 * Create an empty {@link ANeuralNetworksModel}.
7095 *
7096 * <p>This only creates the object. Computation is performed once
7097 * {@link ANeuralNetworksExecution_burstCompute},
7098 * {@link ANeuralNetworksExecution_compute},
7099 * {@link ANeuralNetworksExecution_startCompute} or
7100 * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
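 *
 * As an overview, the lifecycle sketched below is typical (illustrative only;
 * error checking omitted):
 *
 *   ANeuralNetworksModel* model = NULL;
 *   ANeuralNetworksModel_create(&model);
 *   // ... add operands and operations, identify inputs and outputs ...
 *   ANeuralNetworksModel_finish(model);
 *   // ... create compilations and executions from the model ...
 *   ANeuralNetworksModel_free(model);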
7101 *
7102 * The model should be constructed with calls to
7103 * {@link ANeuralNetworksModel_addOperation} and
7104 * {@link ANeuralNetworksModel_addOperand}.
7105 *
7106 * <p>{@link ANeuralNetworksModel_finish} should be called once the model
7107 * has been fully constructed.</p>
7108 *
7109 * <p>{@link ANeuralNetworksModel_free} should be called once the model
7110 * is no longer needed.</p>
7111 *
7112 * Available since API level 27.
7113 *
7114 * @param model The {@link ANeuralNetworksModel} to be created.
7115 * Set to NULL if unsuccessful.
7116 *
7117 * @return ANEURALNETWORKS_NO_ERROR if successful.
7118 */
7119 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __INTRODUCED_IN(27);
7120
7121 /**
7122 * Destroy a model.
7123 *
7124 * The model need not have been finished by a call to
7125 * {@link ANeuralNetworksModel_finish}.
7126 *
7127 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7128 *
7129 * Available since API level 27.
7130 *
7131 * @param model The model to be destroyed. Passing NULL is acceptable and
7132 * results in no operation.
7133 */
7134 void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27);
7135
7136 /**
7137 * Indicate that we have finished modifying a model. Required before
7138 * calling {@link ANeuralNetworksCompilation_create} and
7139 * {@link ANeuralNetworksCompilation_createForDevices}.
7140 *
7141 * An application must ensure that no other thread uses the model at the same
7142 * time.
7143 *
7144 * This function must only be called once for a given model.
7145 *
7146 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7147 *
7148 * Available since API level 27.
7149 *
7150 * @param model The model to be finished.
7151 *
7152 * @return ANEURALNETWORKS_NO_ERROR if successful.
7153 */
7154 int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __INTRODUCED_IN(27);
7155
7156 /**
7157 * Add an operand to a model.
7158 *
7159 * The order in which the operands are added is important. The first one added
7160 * to a model will have the index value 0, the second 1, etc. These indexes are
7161 * used as operand identifiers in
7162 * {@link ANeuralNetworksModel_addOperation},
7163 * {@link ANeuralNetworksModel_identifyInputsAndOutputs},
7164 * {@link ANeuralNetworksModel_setOperandValue},
7165 * {@link ANeuralNetworksModel_setOperandValueFromMemory},
7166 * {@link ANeuralNetworksExecution_setInput},
7167 * {@link ANeuralNetworksExecution_setInputFromMemory},
7168 * {@link ANeuralNetworksExecution_setOutput} and
7169 * {@link ANeuralNetworksExecution_setOutputFromMemory}.
7171 *
7172 * <p>Every operand must be referenced in exactly one of the following
7173 * ways:<ul>
7174 * <li>It is identified as a model input with
7175 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
7176 * <li>It is identified as a constant with
7177 * {@link ANeuralNetworksModel_setOperandValue} or
7178 * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
7179 * <li>It is identified as an output of exactly one operation with
7180 * {@link ANeuralNetworksModel_addOperation}.</li></ul></p>
7181 * <p>An operand that is identified as a model input or as a constant
7182 * must not also be identified as a model output with
7183 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>
7184 *
7185 * To build a model that can accommodate inputs of various sizes, as
7186 * you may want to do for a CNN, leave unspecified the dimensions that
7187 * will vary at run time. If you do so, fully specify dimensions
7188 * when calling {@link ANeuralNetworksExecution_setInput} or
7189 * {@link ANeuralNetworksExecution_setInputFromMemory}.
7190 *
7191 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7192 * called will return an error.
7193 *
7194 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7195 *
7196 * Available since API level 27.
7197 *
7198 * @param model The model to be modified.
7199 * @param type The {@link ANeuralNetworksOperandType} that describes the shape
7200 * of the operand. Neither the {@link ANeuralNetworksOperandType}
7201 * nor the dimensions it points to need to outlive the call to
7202 * {@link ANeuralNetworksModel_addOperand}.
7203 *
7204 * @return ANEURALNETWORKS_NO_ERROR if successful.
7205 */
7206 int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
7207 const ANeuralNetworksOperandType* type) __INTRODUCED_IN(27);
7208
7209 /**
7210 * Sets an operand to a constant value.
7211 *
7212 * Values of length smaller than or equal to
7213 * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}
7214 * are immediately copied into the model.
7215 *
7216 * For values of length greater than
7217 * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to
7218 * the buffer is stored within the model. The application must not change the
7219 * content of this region until all executions using this model have
7220 * completed. As the data may be copied during processing, modifying the data
7221 * after this call yields undefined results. The provided buffer must outlive
7222 * this model.
7223 *
7224 * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
7225 * is likely to be more efficient.
7226 *
7227 * To indicate that an optional operand should be considered missing,
7228 * pass nullptr for buffer and 0 for length.
7229 *
7230 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7231 * called will return an error.
7232 *
7233 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7234 *
7235 * Available since API level 27.
7236 *
7237 * @param model The model to be modified.
7238 * @param index The index of the model operand we're setting.
7239 * @param buffer A pointer to the data to use.
7240 * @param length The size in bytes of the data value.
7241 *
7242 * @return ANEURALNETWORKS_NO_ERROR if successful.
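 *
 * As a small illustration: a scalar fused-activation constant (stored at the
 * hypothetical operand index 2) is only four bytes, well under
 * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, so it is
 * copied immediately and the local variable need not outlive the call:
 *
 *   int32_t fuseCode = ANEURALNETWORKS_FUSED_NONE;
 *   ANeuralNetworksModel_setOperandValue(model, 2, &fuseCode, sizeof(fuseCode));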
7243 */
7244 int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
7245 const void* buffer, size_t length) __INTRODUCED_IN(27);
7246
7247 #if __ANDROID_API__ >= 29
7248
7249 /**
7250 * Sets an operand's per channel quantization parameters.
7251 *
7252 * Sets parameters required by a tensor of type
7253 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.
7254 * This function must be called for every tensor of type
7255 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before
7256 * calling {@link ANeuralNetworksModel_finish}.
7257 *
7258 * Available since API level 29.
7259 *
7260 * @param model The model to be modified.
7261 * @param index The index of the model operand we're setting.
7262 * @param channelQuant The per channel quantization parameters for the operand.
7263 * No memory in this struct needs to outlive the call to
7264 * this function.
7265 *
7266 * @return ANEURALNETWORKS_NO_ERROR if successful.
7267 */
7268 int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
7269 ANeuralNetworksModel* model, int32_t index,
7270 const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __INTRODUCED_IN(29);
7271
7272 #endif // __ANDROID_API__ >= 29
7273
7274 /**
7275 * Sets an operand to a value stored in a memory object.
7276 *
7277 * The content of the memory is not copied. A reference to that memory is stored
7278 * inside the model. The application must not change the content of the memory
7279 * region until all executions using this model have completed. As the data may
7280 * be copied during processing, modifying the data after this call yields
7281 * undefined results.
7282 *
7283 * <p>The provided memory must outlive this model.</p>
7284 *
7285 * To indicate that an optional operand should be considered missing,
7286 * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
7287 *
7288 * It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer
7289 * of a format other than AHARDWAREBUFFER_FORMAT_BLOB.
7290 *
7291 * It is disallowed to set an operand value with memory created from
7292 * {@link ANeuralNetworksMemory_createFromDesc}.
7293 *
7294 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7295 * called will return an error.
7296 *
7297 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7298 * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
7299 * AHardwareBuffer usage.
7300 *
7301 * Available since API level 27.
7302 *
7303 * @param model The model to be modified.
7304 * @param index The index of the model operand we're setting.
7306 * @param memory The memory containing the data.
7307 * @param offset This specifies the location of the data within the memory.
7308 * The offset is in bytes from the start of memory.
7309 * @param length The size in bytes of the data value.
7310 *
7311 * @return ANEURALNETWORKS_NO_ERROR if successful.
7312 */
7313 int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index,
7314 const ANeuralNetworksMemory* memory,
7315 size_t offset, size_t length)
7316 __INTRODUCED_IN(27);
7317
7318 #if __ANDROID_API__ >= 30
7319
7320 /**
7321 * Sets an operand to a value that is a reference to another NNAPI model.
7322 *
7323 * The referenced model must already have been finished by a call to
7324 * {@link ANeuralNetworksModel_finish}.
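 *
 * As a sketch, binding a finished submodel to an operand of type
 * {@link ANEURALNETWORKS_MODEL} (mainModel, submodel, and index 1 are
 * hypothetical names chosen for the example):
 *
 *   // submodel must remain alive for as long as mainModel refers to it.
 *   ANeuralNetworksModel_setOperandValueFromModel(mainModel, 1, submodel);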
7325 * 7326 * The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of 7327 * referenced models is overridden by that setting of the main model of a 7328 * compilation. 7329 * 7330 * The referenced model must outlive the model referring to it. 7331 * 7332 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has 7333 * been called will return an error. 7334 * 7335 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 7336 * 7337 * Available since API level 30. 7338 * 7339 * @param model The model to be modified. 7340 * @param index The index of the model operand we're setting. 7341 * @param value The model to be referenced. 7342 * 7343 * @return ANEURALNETWORKS_NO_ERROR if successful. 7344 */ 7345 int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index, 7346 const ANeuralNetworksModel* value) 7347 __INTRODUCED_IN(30); 7348 7349 #endif // __ANDROID_API__ >= 30 7350 7351 /** 7352 * Add an operation to a model. 7353 * 7354 * @param model The model to be modified. 7355 * @param type The {@link ANeuralNetworksOperationType} of the operation. 7356 * @param inputCount The number of entries in the inputs array. 7357 * @param inputs An array of indexes identifying each operand. 7358 * @param outputCount The number of entries in the outputs array. 7359 * @param outputs An array of indexes identifying each operand. 7360 * 7361 * The operands specified by inputs and outputs must have been 7362 * previously added by calls to {@link ANeuralNetworksModel_addOperand}. 7363 * 7364 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 7365 * called will return an error. 7366 * 7367 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 7368 * 7369 * Available since API level 27. 7370 * 7371 * @return ANEURALNETWORKS_NO_ERROR if successful. 7372 */ 7373 int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, 7374 ANeuralNetworksOperationType type, uint32_t inputCount, 7375 const uint32_t* inputs, uint32_t outputCount, 7376 const uint32_t* outputs) __INTRODUCED_IN(27); 7377 7378 /** 7379 * Specifies which operands will be the model's inputs and 7380 * outputs. Every model must have at least one input and one output. 7381 * 7382 * An operand cannot be used for both input and output. Doing so will 7383 * return an error. 7384 * 7385 * @param model The model to be modified. 7386 * @param inputCount The number of entries in the inputs array. 7387 * @param inputs An array of indexes identifying the input operands. 7388 * @param outputCount The number of entries in the outputs array. 7389 * @param outputs An array of indexes identifying the output operands. 7390 * 7391 * The operands specified by inputs and outputs must have been 7392 * previously added by calls to {@link ANeuralNetworksModel_addOperand}. 7393 * 7394 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 7395 * called will return an error. 7396 * 7397 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 7398 * 7399 * Available since API level 27. 
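 *
 * For illustration, a model consisting of a single ADD operation might expose
 * its two tensor operands (hypothetical indexes 0 and 1) as inputs and the sum
 * (index 3) as output, while the activation scalar at index 2 stays a constant:
 *
 *   uint32_t modelInputs[2] = {0, 1};
 *   uint32_t modelOutputs[1] = {3};
 *   ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs,
 *                                                 1, modelOutputs);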
7400 * @return ANEURALNETWORKS_NO_ERROR if successful.
7401 */
7402 int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
7403 const uint32_t* inputs, uint32_t outputCount,
7404 const uint32_t* outputs) __INTRODUCED_IN(27);
7405
7406 #if __ANDROID_API__ >= 28
7407
7408 /**
7409 * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
7410 * calculated with range and/or precision as low as that of the IEEE 754 16-bit
7411 * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
7412 * must be calculated using at least the range and precision of the IEEE 754
7413 * 32-bit floating-point format.
7414 *
7415 * The relaxComputationFloat32toFloat16 setting of the main model of
7416 * a compilation overrides the values of the referenced models.
7417 *
7418 * @param model The model to be modified.
7419 * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
7420 * calculated with range and/or precision as low as that of the
7421 * IEEE 754 16-bit floating point format. 'false' indicates
7422 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
7423 * at least the range and precision of the IEEE 754 32-bit floating
7424 * point format.
7425 *
7426 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7427 * called will return an error.
7428 *
7429 * Available since API level 28.
7430 *
7431 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7432 */
7433 int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow)
7434 __INTRODUCED_IN(28);
7435
7436 #endif // __ANDROID_API__ >= 28
7437
7438 /**
7439 * Create a {@link ANeuralNetworksCompilation} to compile the given model.
7440 *
7441 * The model passed to this function is termed the "main model" of the
7442 * compilation, to distinguish it from other models referred to by an Operand
7443 * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
7444 *
7445 * <p>This function only creates the object. Compilation is only performed once
7446 * {@link ANeuralNetworksCompilation_finish} is invoked.</p>
7447 *
7448 * <p>{@link ANeuralNetworksCompilation_finish} should be called once
7449 * all desired properties have been set on the compilation.</p>
7450 *
7451 * <p>{@link ANeuralNetworksCompilation_free} should be called once the compilation
7452 * is no longer needed.</p>
7453 *
7454 * <p>The provided model must outlive the compilation.</p>
7455 *
7456 * The model must already have been finished by a call to
7457 * {@link ANeuralNetworksModel_finish}.
7458 *
7459 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7460 *
7461 * Available since API level 27.
7462 *
7463 * @param model The {@link ANeuralNetworksModel} to be compiled.
7464 * @param compilation The newly created object or NULL if unsuccessful.
7465 *
7466 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
7467 * if the model is invalid.
7468 */
7469 int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model,
7470 ANeuralNetworksCompilation** compilation) __INTRODUCED_IN(27);
7471
7472 /**
7473 * Destroy a compilation.
7474 *
7475 * The compilation need not have been finished by a call to
7476 * {@link ANeuralNetworksCompilation_finish}.
7477 *
7478 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7479 *
7480 * Available since API level 27.
7481 *
7482 * @param compilation The compilation to be destroyed. Passing NULL is acceptable and
7483 * results in no operation.
7484 */
7485 void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27);
7486
7487 /**
7488 * Sets the execution preference.
7489 *
7490 * <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime
7491 * uses {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}.</p>
7492 *
7493 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7494 *
7495 * Available since API level 27.
7496 *
7497 * @param compilation The compilation to be modified.
7498 * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER},
7499 * {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or
7500 * {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}.
7501 *
7502 * @return ANEURALNETWORKS_NO_ERROR if successful.
7503 */
7504 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation,
7505 int32_t preference) __INTRODUCED_IN(27);
7506
7507 /**
7508 * Indicate that we have finished modifying a compilation. Required before
7509 * calling {@link ANeuralNetworksBurst_create} or
7510 * {@link ANeuralNetworksExecution_create}.
7511 *
7512 * An application must ensure that no other thread uses the compilation at the
7513 * same time.
7514 *
7515 * This function must only be called once for a given compilation.
7516 *
7517 * If {@link ANeuralNetworksCompilation_setTimeout} was called on this
7518 * compilation, and the compilation is not able to be finished before the
7519 * timeout duration is exceeded, then compilation may be aborted, in which case
7520 * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned.
7521 *
7522 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7523 *
7524 * Available since API level 27.
7525 *
7526 * @param compilation The compilation to be finished.
7527 *
7528 * @return ANEURALNETWORKS_NO_ERROR if successful.
7529 */
7530 int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27);
7531
7532 #if __ANDROID_API__ >= 30
7533
7534 /**
7535 * Set the execution priority.
7536 *
7537 * Execution priorities are relative to other executions created by the same
7538 * application (specifically same uid) for the same device. In particular,
7539 * priorities of executions from one application will not affect executions from
7540 * another application. Similarly, priorities of executions on one device will
7541 * not affect executions on another device.
7542 *
7543 * Higher priority executions may use more compute resources than lower priority
7544 * executions, and may preempt or starve lower priority executions.
7545 *
7546 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7547 *
7548 * Available since API level 30.
7549 *
7550 * @param compilation The compilation to be modified.
7551 * @param priority The relative priority of the execution compared to other
7552 * executions created by the application. Must be one of
7553 * ANEURALNETWORKS_PRIORITY_*.
7554 *
7555 * @return ANEURALNETWORKS_NO_ERROR if successful.
7556 */
7557 int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* compilation, int priority)
7558 __INTRODUCED_IN(30);
7559
7560 /**
7561 * Set the maximum expected duration for compiling the model.
7562 *
7563 * If the device is not able to complete the compilation within the specified
7564 * duration, the compilation may be aborted. The timeout duration begins at the
7565 * call to {@link ANeuralNetworksCompilation_finish}.
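 *
 * As a sketch, a caller willing to wait at most one second (an arbitrary
 * budget chosen for the example) would set the timeout before finishing the
 * compilation:
 *
 *   const uint64_t kOneSecondNs = 1000000000ull;
 *   ANeuralNetworksCompilation_setTimeout(compilation, kOneSecondNs);
 *   ANeuralNetworksCompilation_finish(compilation);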
7566 * 7567 * This timeout duration acts as a hint to drivers, and can be used to both free 7568 * up compute resources within the driver and return control back to the 7569 * application quicker than is possible without the hint. It enables drivers 7570 * that are able to estimate how long a compilation will take to abort the 7571 * compilation before it has even started if the driver believes the compilation 7572 * cannot be completed within the timeout duration. Similarly, it enables 7573 * drivers to abort an ongoing compilation if it is taking too long. However, 7574 * this call does not guarantee that the compilation will complete or abort 7575 * within the timeout duration. 7576 * 7577 * By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called), 7578 * the timeout duration for compiling the model is considered infinite. 7579 * 7580 * The {@link ANeuralNetworksCompilation} must have been created with 7581 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1, 7582 * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the 7583 * device has a feature level reported by 7584 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the 7585 * timeout duration hint will be ignored. 7586 * 7587 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 7588 * 7589 * @param compilation The compilation to be modified. 7590 * @param duration The maximum amount of time in nanoseconds that is expected to 7591 * be spent finishing a compilation. If this duration is exceeded, the 7592 * compilation may be aborted. If set to 0, the timeout duration is 7593 * considered infinite. 7594 * 7595 * @return ANEURALNETWORKS_NO_ERROR if successful. 7596 * 7597 * Available since API level 30. 7598 */ 7599 int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* compilation, 7600 uint64_t duration) __INTRODUCED_IN(30); 7601 7602 #endif // __ANDROID_API__ >= 30 7603 7604 /** 7605 * Create a {@link ANeuralNetworksExecution} to apply the given compilation. 7606 * This only creates the object. Computation is only performed once 7607 * {@link ANeuralNetworksExecution_burstCompute}, 7608 * {@link ANeuralNetworksExecution_compute}, 7609 * {@link ANeuralNetworksExecution_startCompute} or 7610 * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked. 7611 * 7612 * <p>The provided compilation must outlive the execution.</p> 7613 * 7614 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 7615 * 7616 * Available since API level 27. 7617 * 7618 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. 7619 * @param execution The newly created object or NULL if unsuccessful. 7620 * 7621 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 7622 * if the compilation is invalid. 7623 */ 7624 int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, 7625 ANeuralNetworksExecution** execution) __INTRODUCED_IN(27); 7626 7627 /** 7628 * Destroy an execution. 
7629 * 7630 * <p>The execution need not have been scheduled by a call to 7631 * {@link ANeuralNetworksExecution_burstCompute}, 7632 * {@link ANeuralNetworksExecution_compute}, 7633 * {@link ANeuralNetworksExecution_startCompute} or 7634 * {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled, 7635 * then the application must not call {@link ANeuralNetworksExecution_free} 7636 * until the execution has completed (i.e., 7637 * {@link ANeuralNetworksExecution_burstCompute}, 7638 * {@link ANeuralNetworksExecution_compute}, or 7639 * {@link ANeuralNetworksEvent_wait} has returned). 7640 * 7641 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 7642 * 7643 * Available since API level 27. 7644 * 7645 * @param execution The execution to be destroyed. Passing NULL is acceptable and 7646 * results in no operation. 7647 */ 7648 void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTRODUCED_IN(27); 7649 7650 /** 7651 * Associate a user buffer with an input of the model of the 7652 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have 7653 * been scheduled. Once evaluation of the execution has been scheduled, the 7654 * application must not change the content of the buffer until the execution has 7655 * completed. Evaluation of the execution will not change the content of the 7656 * buffer. 7657 * 7658 * <p>The provided buffer must outlive the execution.</p> 7659 * 7660 * If the input is optional, you can indicate that it is omitted by 7661 * passing nullptr for buffer and 0 for length. 7662 * 7663 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 7664 * 7665 * Available since API level 27. 7666 * 7667 * @param execution The execution to be modified. 7668 * @param index The index of the input argument we are setting. It is 7669 * an index into the lists passed to 7670 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 7671 * the index associated with 7672 * {@link ANeuralNetworksModel_addOperand}. 7673 * @param type The {@link ANeuralNetworksOperandType} of the 7674 * operand. Unless the input is omitted, this should be 7675 * used to specify the dimensions that were left 7676 * unspecified when the operand was added to the 7677 * model. All other properties of the type must be the 7678 * same as specified in the model. If the type is the same 7679 * as specified when the model was built, NULL can be 7680 * passed. Neither the {@link ANeuralNetworksOperandType} 7681 * nor the dimensions it points to need to outlive the call 7682 * to {@link ANeuralNetworksExecution_setInput}. 7683 * @param buffer The buffer containing the data. 7684 * @param length The length in bytes of the buffer. 7685 * 7686 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the 7687 * name is not recognized or the buffer is too small for the input. 7688 */ 7689 int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, 7690 const ANeuralNetworksOperandType* type, const void* buffer, 7691 size_t length) __INTRODUCED_IN(27); 7692 7693 /** 7694 * Associate a region of a memory object with an input of the model of the 7695 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have 7696 * been scheduled. Once evaluation of the execution has been scheduled, the 7697 * application must not change the content of the region until the execution has 7698 * completed. 
/**
 * Associate a region of a memory object with an input of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the region until the execution has
 * completed. Evaluation of the execution will not change the content of the
 * region.
 *
 * <p>The provided memory must outlive the execution.</p>
 *
 * If the input is optional, you can indicate that it is omitted by
 * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for
 * buffer and 0 for length.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
 * AHardwareBuffer usage.
 * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
 * created from memory descriptors.
 *
 * Available since API level 27.
 *
 * @param execution The execution to be modified.
 * @param index The index of the input argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the
 *             operand. This should be used to specify the dimensions
 *             that were left unspecified when the operand was added
 *             to the model. All other properties of the type must be
 *             the same as specified in the model. If the type is the
 *             same as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setInputFromMemory}.
 * @param memory The memory containing the data.
 * @param offset This specifies the location of the data within the memory.
 *               The offset is in bytes from the start of memory.
 * @param length The size in bytes of the data value.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
 */
int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
                                                const ANeuralNetworksOperandType* type,
                                                const ANeuralNetworksMemory* memory, size_t offset,
                                                size_t length) __INTRODUCED_IN(27);
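/*
 * Example (illustrative sketch): binding a region of a shared memory object to
 * input 0. "fd" is an assumed file descriptor of a shared memory region whose
 * first kInput0Bytes bytes hold the input data; kInput0Bytes is likewise an
 * assumed size, and PROT_READ comes from <sys/mman.h>.
 *
 *     ANeuralNetworksMemory* memory = NULL;
 *     int status = ANeuralNetworksMemory_createFromFd(kInput0Bytes, PROT_READ,
 *                                                     fd, 0, &memory);
 *     if (status == ANEURALNETWORKS_NO_ERROR) {
 *         status = ANeuralNetworksExecution_setInputFromMemory(
 *                 execution, 0, NULL, memory, 0, kInput0Bytes);
 *     }
 */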
/**
 * Associate a user buffer with an output of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the buffer until the execution has
 * completed.
 *
 * If the output is optional, you can indicate that it is omitted by
 * passing nullptr for buffer and 0 for length.
 *
 * <p>The provided buffer must outlive the execution.</p>
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * Available since API level 27.
 *
 * @param execution The execution to be modified.
 * @param index The index of the output argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the
 *             operand. Unless the output is omitted, this should be
 *             used to specify the dimensions that were left
 *             unspecified when the operand was added to the
 *             model. All other properties of the type must be the
 *             same as specified in the model. If the type is the same
 *             as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setOutput}.
 *             Since API level 29, the output operand can have unspecified
 *             dimensions or rank, to be deduced dynamically during the execution.
 *             However, the user must provide a large enough buffer. The user
 *             can retrieve the output dimensional information after the execution
 *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
 *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
 * @param buffer The buffer where the data is to be written.
 * @param length The length in bytes of the buffer.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
 */
int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
                                       const ANeuralNetworksOperandType* type, void* buffer,
                                       size_t length) __INTRODUCED_IN(27);
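/*
 * Example (illustrative sketch): providing an output buffer and, after the
 * execution has completed, reading back dimensions that were deduced
 * dynamically (API level 29+). The buffer size of 1024 floats is an assumed
 * upper bound on the output size.
 *
 *     float output0[1024];
 *     int status = ANeuralNetworksExecution_setOutput(execution, 0, NULL,
 *                                                     output0, sizeof(output0));
 *
 *     // ... schedule the execution and wait for it to complete ...
 *
 *     uint32_t rank = 0;
 *     ANeuralNetworksExecution_getOutputOperandRank(execution, 0, &rank);
 *     uint32_t dimensions[8];  // Assumes the deduced rank is at most 8.
 *     ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, dimensions);
 */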
/**
 * Associate a region of a memory object with an output of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the region until the execution has
 * completed.
 *
 * If the output is optional, you can indicate that it is omitted by
 * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for
 * buffer and 0 for length.
 *
 * <p>The provided memory must outlive the execution.</p>
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
 * AHardwareBuffer usage.
 * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
 * created from memory descriptors.
 *
 * Available since API level 27.
 *
 * @param execution The execution to be modified.
 * @param index The index of the output argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be
 *             used to specify the dimensions that were left
 *             unspecified when the operand was added to the
 *             model. All other properties of the type must be the
 *             same as specified in the model. If the type is the same
 *             as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setOutputFromMemory}.
 *             Since API level 29, the output operand can have unspecified
 *             dimensions or rank, to be deduced dynamically during the execution.
 *             However, the user must provide a large enough memory. The user
 *             can retrieve the output dimensional information after the execution
 *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
 *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
 * @param memory The memory where the data is to be stored.
 * @param offset This specifies the location of the data within the memory.
 *               The offset is in bytes from the start of memory.
 * @param length The length in bytes of the data value.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
 */
int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
                                                 const ANeuralNetworksOperandType* type,
                                                 const ANeuralNetworksMemory* memory, size_t offset,
                                                 size_t length) __INTRODUCED_IN(27);

/**
 * Schedule asynchronous evaluation of the execution.
 *
 * <p>Schedules asynchronous evaluation of the execution. Once the execution
 * has completed and the outputs are ready to be consumed, the returned event
 * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that
 * event.
 * </p>
 *
 * ANeuralNetworksEvent_wait must be called to reclaim the resources used
 * by the execution.
 *
 * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
 * and the execution is not able to complete before the timeout duration is
 * exceeded, then execution may be aborted, in which case
 * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned through
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
 * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
 * is lower than 30, then the timeout duration hint will be ignored.
 *
 * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
 * the condition model does not output false within the loop timeout duration,
 * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
 * will be returned through {@link ANeuralNetworksEvent_wait} on the event
 * object.
 *
 * If the device can detect before the execution has started that the execution
 * will not complete within the timeout duration, the device may choose to skip
 * the execution and instead return {@link ANEURALNETWORKS_MISSED_DEADLINE_*}.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
 * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
 * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
 * asynchronous execution with dependencies.
 *
 * Available since API level 27.
 *
 * @param execution The execution to be scheduled and executed.
 * @param event The event that will be signaled on completion. event is set to
 *              NULL if there's an error.
 *
 * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
 */
int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution,
                                          ANeuralNetworksEvent** event) __INTRODUCED_IN(27);
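/*
 * Example (illustrative sketch): scheduling asynchronous evaluation and
 * waiting for the result. Assumes all inputs and outputs of "execution" have
 * already been bound.
 *
 *     ANeuralNetworksEvent* event = NULL;
 *     int status = ANeuralNetworksExecution_startCompute(execution, &event);
 *     if (status == ANEURALNETWORKS_NO_ERROR) {
 *         // Blocks until the outputs are ready. This call also reclaims the
 *         // resources used by the execution, so it must always be made.
 *         status = ANeuralNetworksEvent_wait(event);
 *     }
 *     ANeuralNetworksEvent_free(event);
 *     ANeuralNetworksExecution_free(execution);
 */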
#if __ANDROID_API__ >= 30

/**
 * Set the maximum expected duration of the specified execution.
 *
 * If the device is not able to complete the execution within the specified
 * duration, the execution may be aborted. The timeout duration begins at a
 * call to one of:
 * - {@link ANeuralNetworksExecution_burstCompute}
 * - {@link ANeuralNetworksExecution_compute}
 * - {@link ANeuralNetworksExecution_startCompute}
 * - {@link ANeuralNetworksExecution_startComputeWithDependencies}
 *
 * This timeout duration acts as a hint to drivers, and can be used to both free
 * up compute resources within the driver and return control to the application
 * more quickly than would otherwise be possible. It enables drivers that are
 * able to estimate how long an execution will take to abort the execution
 * before it has even started if the driver believes the execution cannot be
 * completed within the timeout duration. Similarly, it enables drivers to abort
 * an ongoing execution if it is taking too long. However, this call does not
 * guarantee that the execution will complete or abort within the timeout
 * duration.
 *
 * By default (i.e., unless ANeuralNetworksExecution_setTimeout is called),
 * the timeout duration for execution is considered infinite.
 *
 * The {@link ANeuralNetworksExecution} must have been created from an
 * {@link ANeuralNetworksCompilation} which in turn was created from
 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
 * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
 * device has a feature level reported by
 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the
 * timeout duration hint will be ignored.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * @param execution The execution to be modified.
 * @param duration The maximum amount of time in nanoseconds that is expected to
 *     be spent executing a model. If this duration is exceeded, the execution
 *     may be aborted. If set to 0, the timeout duration is considered infinite.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *
 * Available since API level 30.
 */
int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
        __INTRODUCED_IN(30);
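/*
 * Example (illustrative sketch): hinting that an execution on a single-device
 * compilation should finish within 100 milliseconds. The hint only takes
 * effect on devices reporting feature level 30 or higher.
 *
 *     const uint64_t kHundredMsInNs = 100ull * 1000 * 1000;
 *     int status = ANeuralNetworksExecution_setTimeout(execution, kHundredMsInNs);
 */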
/**
 * Set the maximum duration of WHILE loops in the specified execution.
 *
 * This is a fuzzy per-loop timeout intended to prevent infinite loops.
 *
 * If a WHILE loop condition model does not output false within the specified
 * duration, the execution will be aborted.
 *
 * See {@link ANeuralNetworks_getDefaultLoopTimeout} and
 * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default
 * and maximum timeout values.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * @param execution The execution to be modified.
 * @param duration The maximum amount of time in nanoseconds that can be spent
 *     executing a WHILE loop. If the specified duration value exceeds the value
 *     produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be
 *     overridden by that value.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *         ANEURALNETWORKS_BAD_STATE if execution has started.
 *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
 *
 * Available since API level 30.
 */
int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
        __INTRODUCED_IN(30);

/**
 * Get the default timeout value for WHILE loops.
 *
 * @return The default timeout value in nanoseconds.
 *
 * Available since API level 30.
 */
uint64_t ANeuralNetworks_getDefaultLoopTimeout() __INTRODUCED_IN(30);

/**
 * Get the maximum timeout value for WHILE loops.
 *
 * @return The maximum timeout value in nanoseconds.
 *
 * Available since API level 30.
 */
uint64_t ANeuralNetworks_getMaximumLoopTimeout() __INTRODUCED_IN(30);

#endif  // __ANDROID_API__ >= 30

/**
 * Waits until the execution completes.
 *
 * More than one thread can wait on an event. When the execution completes,
 * all threads will be released.
 *
 * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution
 * corresponding to this event, and the execution is not able to complete
 * before the duration is exceeded, the execution may be aborted, in which case
 * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned here.
 *
 * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
 * the condition model does not output false within the loop timeout duration,
 * the execution will be aborted, and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
 * will be returned here.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * Available since API level 27.
 *
 * @param event The event that will be signaled on completion.
 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
 *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
 *         be properly mapped.
 */
int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27);

/**
 * Destroys the event.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * Available since API level 27.
 *
 * @param event The event object to be destroyed. Passing NULL is acceptable and
 *              results in no operation.
 */
void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27);

#endif  // __ANDROID_API__ >= 27

#if __ANDROID_API__ >= 30

/**
 * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor.
 *
 * The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd;
 * it will instead dup the provided sync_fence_fd and own the duplicate.
 *
 * @param sync_fence_fd The sync_fence file descriptor.
 * @param event The newly created object or NULL if unsuccessful.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *
 * Available since API level 30.
 */
int ANeuralNetworksEvent_createFromSyncFenceFd(int sync_fence_fd, ANeuralNetworksEvent** event)
        __INTRODUCED_IN(30);
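/*
 * Example (illustrative sketch): wrapping an existing sync_fence file
 * descriptor in an event that can later be passed as a dependency.
 * "fence_fd" is an assumed, valid sync_fence fd produced elsewhere.
 *
 *     ANeuralNetworksEvent* dependency = NULL;
 *     int status = ANeuralNetworksEvent_createFromSyncFenceFd(fence_fd, &dependency);
 *     // The event owns a duplicate, so the caller still owns (and must
 *     // eventually close) fence_fd itself.
 */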
/**
 * Get the sync_fence file descriptor from the event.
 *
 * If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd
 * will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned.
 *
 * See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create
 * an event backed by a sync fence.
 *
 * The user takes ownership of the returned fd, and must close the returned file descriptor when
 * it is no longer needed.
 *
 * @param event An event that is backed by a sync fence.
 * @param sync_fence_fd The sync_fence file descriptor. The file descriptor will
 *                      be set to -1 if there is an error.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *
 * Available since API level 30.
 */
int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* sync_fence_fd)
        __INTRODUCED_IN(30);

/**
 * Schedule asynchronous evaluation of the execution with dependencies.
 *
 * The execution will wait for all the depending events to be signaled before
 * starting the evaluation. Once the execution has completed and the outputs
 * are ready to be consumed, the returned event will be signaled. Depending on which
 * devices are handling the execution, the event could be backed by a sync fence.
 * Use {@link ANeuralNetworksEvent_wait} to wait for that event.
 *
 * ANeuralNetworksEvent_wait must be called to reclaim the resources used
 * by the execution.
 *
 * If parts of the execution are scheduled on devices that do not support fenced execution,
 * the function call may wait for such parts to finish before returning.
 *
 * The function will return an error if any of the events in dependencies is already in a bad
 * state. After the execution is scheduled, if any of the events in dependencies does not complete
 * normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned
 * event will return an error.
 *
 * The function will return an error if any of the execution outputs has a tensor operand type
 * that is not fully specified.
 *
 * The function can be passed a timeout duration in nanoseconds. This timeout
 * duration acts as a hint to drivers in the same way that the timeout durations
 * in {@link ANeuralNetworksCompilation_setTimeout} and {@link
 * ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration
 * begins when all waitFor sync fences have been signaled, and can be used
 * together with {@link ANeuralNetworksExecution_setTimeout} which specifies the
 * maximum timeout duration beginning at the call to
 * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
 * If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created
 * from an {@link ANeuralNetworksCompilation} which in turn was created from
 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
 * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either
 * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the
 * timeout duration passed to this call is exceeded, the execution may be
 * aborted, in which case {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be
 * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies}
 * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
 * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
 * is lower than 30, then the timeout duration hints will be ignored.
 *
 * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
 * the condition model does not output false within the loop timeout duration,
 * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
 * will be returned through {@link ANeuralNetworksEvent_wait} on the event
 * object.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
 * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
 * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
 *
 * @param execution The execution to be scheduled and executed.
 * @param dependencies A set of depending events. The actual evaluation will not start
 *                     until all the events are signaled.
 * @param num_dependencies The number of events in the dependencies set.
 * @param duration The maximum amount of time in nanoseconds that is expected to
 *                 be spent executing the model after all dependencies are
 *                 signaled. If set to 0, the timeout duration is considered
 *                 infinite.
 * @param event The event that will be signaled on completion. event is set to
 *              NULL if there's an error.
 *
 * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
 *
 * Available since API level 30.
 */
int ANeuralNetworksExecution_startComputeWithDependencies(
        ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies,
        uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event)
        __INTRODUCED_IN(30);
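/*
 * Example (illustrative sketch): scheduling a fenced execution that starts
 * only after "dependency" (an assumed ANeuralNetworksEvent*) is signaled,
 * then retrieving a sync_fence fd from the returned event if one backs it.
 *
 *     const ANeuralNetworksEvent* dependencies[] = {dependency};
 *     ANeuralNetworksEvent* event = NULL;
 *     int status = ANeuralNetworksExecution_startComputeWithDependencies(
 *             execution, dependencies, 1, 0, &event);  // 0: infinite timeout hint
 *     if (status == ANEURALNETWORKS_NO_ERROR) {
 *         int fence_fd = -1;
 *         if (ANeuralNetworksEvent_getSyncFenceFd(event, &fence_fd) ==
 *                 ANEURALNETWORKS_NO_ERROR) {
 *             // The caller owns fence_fd and must close() it when done.
 *         }
 *         ANeuralNetworksEvent_wait(event);  // Also reclaims execution resources.
 *     }
 *     ANeuralNetworksEvent_free(event);
 */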
#endif  // __ANDROID_API__ >= 30

__END_DECLS

#endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H

/** @} */