/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "TestHarness.h"

#include <android/hardware/neuralnetworks/1.0/types.h>

#include <vector>

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace vts {
namespace functional {

using MixedTypedExample = test_helper::MixedTypedExampleType;

// X-macro listing every generated test model; FN is applied once to each model name.
#define FOR_EACH_TEST_MODEL(FN) \
    FN(add_broadcast_quant8) \
    FN(add) \
    FN(add_quant8) \
    FN(avg_pool_float_1) \
    FN(avg_pool_float_2) \
    FN(avg_pool_float_3) \
    FN(avg_pool_float_4) \
    FN(avg_pool_float_5) \
    FN(avg_pool_quant8_1) \
    FN(avg_pool_quant8_2) \
    FN(avg_pool_quant8_3) \
    FN(avg_pool_quant8_4) \
    FN(avg_pool_quant8_5) \
    FN(concat_float_1) \
    FN(concat_float_2) \
    FN(concat_float_3) \
    FN(concat_quant8_1) \
    FN(concat_quant8_2) \
    FN(concat_quant8_3) \
    FN(conv_1_h3_w2_SAME) \
    FN(conv_1_h3_w2_VALID) \
    FN(conv_3_h3_w2_SAME) \
    FN(conv_3_h3_w2_VALID) \
    FN(conv_float_2) \
    FN(conv_float_channels) \
    FN(conv_float_channels_weights_as_inputs) \
    FN(conv_float_large) \
    FN(conv_float_large_weights_as_inputs) \
    FN(conv_float) \
    FN(conv_float_weights_as_inputs) \
    FN(conv_quant8_2) \
    FN(conv_quant8_channels) \
    FN(conv_quant8_channels_weights_as_inputs) \
    FN(conv_quant8_large) \
    FN(conv_quant8_large_weights_as_inputs) \
    FN(conv_quant8) \
    FN(conv_quant8_overflow) \
    FN(conv_quant8_overflow_weights_as_inputs) \
    FN(conv_quant8_weights_as_inputs) \
    FN(depth_to_space_float_1) \
    FN(depth_to_space_float_2) \
    FN(depth_to_space_float_3) \
    FN(depth_to_space_quant8_1) \
    FN(depth_to_space_quant8_2) \
    FN(depthwise_conv2d_float_2) \
    FN(depthwise_conv2d_float_large_2) \
    FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
    FN(depthwise_conv2d_float_large) \
    FN(depthwise_conv2d_float_large_weights_as_inputs) \
    FN(depthwise_conv2d_float) \
    FN(depthwise_conv2d_float_weights_as_inputs) \
    FN(depthwise_conv2d_quant8_2) \
    FN(depthwise_conv2d_quant8_large) \
    FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
    FN(depthwise_conv2d_quant8) \
    FN(depthwise_conv2d_quant8_weights_as_inputs) \
    FN(depthwise_conv) \
    FN(dequantize) \
    FN(embedding_lookup) \
    FN(floor) \
    FN(fully_connected_float_2) \
    FN(fully_connected_float_large) \
    FN(fully_connected_float_large_weights_as_inputs) \
    FN(fully_connected_float) \
    FN(fully_connected_float_weights_as_inputs) \
    FN(fully_connected_quant8_2) \
    FN(fully_connected_quant8_large) \
    FN(fully_connected_quant8_large_weights_as_inputs) \
    FN(fully_connected_quant8) \
    FN(fully_connected_quant8_weights_as_inputs) \
    FN(hashtable_lookup_float) \
    FN(hashtable_lookup_quant8) \
    FN(l2_normalization_2) \
    FN(l2_normalization_large) \
    FN(l2_normalization) \
    FN(l2_pool_float_2) \
    FN(l2_pool_float_large) \
    FN(l2_pool_float) \
    FN(local_response_norm_float_1) \
    FN(local_response_norm_float_2) \
    FN(local_response_norm_float_3) \
    FN(local_response_norm_float_4) \
    FN(logistic_float_1) \
    FN(logistic_float_2) \
    FN(logistic_quant8_1) \
    FN(logistic_quant8_2) \
    FN(lsh_projection_2) \
    FN(lsh_projection) \
    FN(lsh_projection_weights_as_inputs) \
    FN(lstm2) \
    FN(lstm2_state2) \
    FN(lstm2_state) \
    FN(lstm3) \
    FN(lstm3_state2) \
    FN(lstm3_state3) \
    FN(lstm3_state) \
    FN(lstm) \
    FN(lstm_state2) \
    FN(lstm_state) \
    FN(max_pool_float_1) \
    FN(max_pool_float_2) \
    FN(max_pool_float_3) \
    FN(max_pool_float_4) \
    FN(max_pool_quant8_1) \
    FN(max_pool_quant8_2) \
    FN(max_pool_quant8_3) \
    FN(max_pool_quant8_4) \
    FN(mobilenet_224_gender_basic_fixed) \
    FN(mobilenet_quantized) \
    FN(mul_broadcast_quant8) \
    FN(mul) \
    FN(mul_quant8) \
    FN(mul_relu) \
    FN(relu1_float_1) \
    FN(relu1_float_2) \
    FN(relu1_quant8_1) \
    FN(relu1_quant8_2) \
    FN(relu6_float_1) \
    FN(relu6_float_2) \
    FN(relu6_quant8_1) \
    FN(relu6_quant8_2) \
    FN(relu_float_1) \
    FN(relu_float_2) \
    FN(relu_quant8_1) \
    FN(relu_quant8_2) \
    FN(reshape) \
    FN(reshape_quant8) \
    FN(reshape_quant8_weights_as_inputs) \
    FN(reshape_weights_as_inputs) \
    FN(resize_bilinear_2) \
    FN(resize_bilinear) \
    FN(rnn) \
    FN(rnn_state) \
    FN(softmax_float_1) \
    FN(softmax_float_2) \
    FN(softmax_quant8_1) \
    FN(softmax_quant8_2) \
    FN(space_to_depth_float_1) \
    FN(space_to_depth_float_2) \
    FN(space_to_depth_float_3) \
    FN(space_to_depth_quant8_1) \
    FN(space_to_depth_quant8_2) \
    FN(svdf2) \
    FN(svdf) \
    FN(svdf_state) \
    FN(tanh)

// Forward-declares the generated artifacts for one model: a namespace named after
// the model, containing its example inputs/outputs and a factory for its Model.
#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
    namespace function { \
    extern std::vector<MixedTypedExample> examples; \
    Model createTestModel(); \
    }

FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)

#undef FORWARD_DECLARE_GENERATED_OBJECTS

}  // namespace functional
}  // namespace vts
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

#endif  // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
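
/*
 * Illustrative sketch (assumption, not part of the original header): a test source
 * that includes this file is expected to expand FOR_EACH_TEST_MODEL with a macro
 * defining one GTest case per generated model, roughly as below. The
 * NeuralnetworksHidlTest fixture, the device member, and the Execute() helper are
 * assumed names for pieces living in the accompanying VTS test sources; they are
 * not declared in this header.
 *
 *   #define CREATE_GENERATED_TEST(model)                              \
 *       TEST_F(NeuralnetworksHidlTest, model) {                       \
 *           Execute(device, model::createTestModel, model::examples); \
 *       }
 *   FOR_EACH_TEST_MODEL(CREATE_GENERATED_TEST)
 *   #undef CREATE_GENERATED_TEST
 */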