// clang-format off
// Generated file (from: split_quant8_1.mod.py). Do not edit
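// Model: ANEURALNETWORKS_SPLIT of a 6-element TENSOR_QUANT8_ASYMM input
// (scale 1.0, zero point 0) along axis 0 into three 2-element outputs.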
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {6}, 1.0f, 0);
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  auto output1 = model->addOperand(&type2);
  auto output2 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {3};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1, output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1, output2});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

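// The _relaxed variant builds the same graph but additionally calls
// relaxComputationFloat32toFloat16(true), allowing FP32 calculations to be
// carried out with FP16 range and precision.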
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {6}, 1.0f, 0);
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  auto output1 = model->addOperand(&type2);
  auto output2 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {3};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1, output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1, output2});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

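// The _dynamic_output_shape variants declare the outputs with dimension {0},
// i.e. with unspecified size, so output shapes must be determined at
// execution time (exercising dynamic output shape support).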
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {6}, 1.0f, 0);
  OperandType type1(Type::INT32, {});
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {0}, 1.0f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type3);
  auto output1 = model->addOperand(&type3);
  auto output2 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {3};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1, output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1, output2});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {6}, 1.0f, 0);
  OperandType type1(Type::INT32, {});
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {0}, 1.0f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type3);
  auto output1 = model->addOperand(&type3);
  auto output2 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {3};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1, output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1, output2});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}