1#
2# Copyright (C) 2018 The Android Open Source Project
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#      http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15#
16
# Data-layout flag shared by every test in this file: False = NHWC.
# Passed to each TRANSPOSE_CONV_2D, and to AddNchw(...) below to derive
# the NCHW variants of each example.
layout = BoolScalar("layout", False) # NHWC
18
# TEST 1: TRANSPOSE_CONV2D, pad = valid, stride = 2
# Implicit-padding form: (input, weights, bias, output shape,
# padding scheme [2 = VALID], stride_w, stride_h, activation, layout).
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
w1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18]) # weight
b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
s1 = Int32Vector("shape", [1, 5, 5, 2]) # output shape
act = Int32Scalar("act", 0) # act = none
o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output
Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)

# Additional data type
quant8 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
    w1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
    b1: ("TENSOR_INT32", 0.25, 0),   # bias scale = input scale * weight scale
    o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
})

# Output scale (0.1) is smaller than input_scale * weight_scale (0.25), so the
# requantization multiplier is > 1 — exercises that code path.
# NOTE(review): this converter appears to rely on a default-derived variation
# name; confirm it does not collide with the plain "quant8" variation above.
quant8_mult_gt_1 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
    w1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    b1: ("TENSOR_INT32", 0.25, 0),
    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
})

# Per-channel quantization
# Weights use one scale per output channel (channelDim=0); the hidden bias
# scales are input_scale * per-channel weight_scale (0.25*0.25, 0.25*0.5).
channelQuant8 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
    w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
    b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
    o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
})

# Per-channel variant with requantization multiplier > 1 (output scale 0.1).
channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
    w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
    b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
})

# Reference output is NHWC: the two output channels are interleaved,
# 10 values per spatial row of the 5x5 output.
Example({
    i1: [1, 2, 3, 4],
    o1: [-0.5,  0,  1.5,  2,   5.5,   8,  4.5,  6,  8.5, 10,
          5.5,  6,  7.5,  8,  23.5,  26, 16.5, 18, 20.5, 22,
         14.5, 18, 22.5, 26,  60.5,  70, 40.5, 46, 52.5, 58,
         19.5, 22, 25.5, 28,  59.5,  66, 34.5, 38, 42.5, 46,
         37.5, 40, 43.5, 46, 101.5, 108, 58.5, 62, 66.5, 70]
}).AddNchw(i1, o1, s1, layout).AddAllActivations(o1, act).AddVariations("relaxed", quant8, quant8_mult_gt_1, channelQuant8, channelQuant8_mult_gt_1, "float16")
66
67
# TEST 2: TRANSPOSE_CONV2D_LARGE, pad = same, stride = 3, act = relu
# Implicit-padding form: padding scheme 1 = SAME, stride 3x3, activation 1 = RELU.
# The large negative bias (-1000) pushes some accumulations below zero so the
# RELU clamp is visible as the 0. entries in the reference output.
i2 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 2, 1}") # input 0
w2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [9, 5, 6, 9, 8, 5, 3, 1, 4]) # weight
b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-1000]) # bias
s2 = Int32Vector("shape", [1, 3, 4, 1]) # output shape
o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 4, 1}") # output
Model().Operation("TRANSPOSE_CONV_2D", i2, w2, b2, s2, 1, 3, 3, 1, layout).To(o2)

# Additional data type
quant8 = DataTypeConverter().Identify({
    i2: ("TENSOR_QUANT8_ASYMM", 2.0, 0),
    w2: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    b2: ("TENSOR_INT32", 0.5, 0),   # bias scale = input scale * weight scale
    o2: ("TENSOR_QUANT8_ASYMM", 20.0, 50)
})

# Per-channel quantization (single output channel, channelDim=0).
channelQuant8 = DataTypeConverter().Identify({
    i2: ("TENSOR_QUANT8_ASYMM", 2.0, 0),
    w2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25])),
    b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5], hide=True)),
    o2: ("TENSOR_QUANT8_ASYMM", 20.0, 50)
})

# Reference output, one value per cell of the 3x4 single-channel map.
Example({
    i2: [300, 500],
    o2: [500.,  800.,  3500., 1500.,
         1400., 500.,  3500., 3000.,
         0.,    200.,  500.,  0.]
}).AddNchw(i2, o2, s2, layout).AddVariations("relaxed", quant8, channelQuant8, "float16")
98
99
# TEST 3: TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 1, act = none
# Implicit-padding form: padding scheme 1 = SAME, stride 1x1, activation 0 = NONE.
# Two input channels are combined into a single output channel.
i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
w3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
b3 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
s3 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
o3 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
Model().Operation("TRANSPOSE_CONV_2D", i3, w3, b3, s3, 1, 1, 1, 0, layout).To(o3)

# Additional data type
quant8 = DataTypeConverter().Identify({
    i3: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
    w3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    b3: ("TENSOR_INT32", 0.25, 0),   # bias scale = input scale * weight scale
    o3: ("TENSOR_QUANT8_ASYMM", 16.0, 0)
})

# Input values 1..32 interleaved across the two channels (NHWC);
# output laid out one row of the 4x4 map per source line.
Example({
    i3: [1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
         17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
    o3: [184,  412,  568,  528,
         678,  1347, 1689, 1434,
         1494, 2715, 3057, 2442,
         1968, 3352, 3652, 2760]
}).AddNchw(i3, o3, s3, layout).AddVariations("relaxed", quant8, "float16")
124
125
# TEST 4: TRANSPOSE_CONV2D_VALID, outputShape = [1, 6, 6, 1], pad = valid, stride = 1, act = none
# Same input/weights/bias as TEST 3 but with padding scheme 2 = VALID, which
# grows the output to 6x6; TEST 3's SAME-padding 4x4 output is exactly the
# central 4x4 region of this output.
i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
w4 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
b4 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
s4 = Int32Vector("shape", [1, 6, 6, 1]) # output shape
o4 = Output("op4", "TENSOR_FLOAT32", "{1, 6, 6, 1}") # output
Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)

# Additional data type
quant8 = DataTypeConverter().Identify({
    i4: ("TENSOR_QUANT8_ASYMM", 0.25, 10),
    w4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    b4: ("TENSOR_INT32", 0.125, 0),   # bias scale = input scale * weight scale
    o4: ("TENSOR_QUANT8_ASYMM", 32.0, 80)
})

# Output laid out one row of the 6x6 map per source line.
Example({
    i4: [1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
         17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
    o4: [5,    22,   59,   101,  114,  83,
         52,   184,  412,  568,  528,  344,
         237,  678,  1347, 1689, 1434, 879,
         597,  1494, 2715, 3057, 2442, 1431,
         856,  1968, 3352, 3652, 2760, 1548,
         689,  1534, 2543, 2729, 2010, 1103]
}).AddNchw(i4, o4, s4, layout).AddVariations("relaxed", quant8, "float16")
152
153
# TEST 5: TRANSPOSE_CONV2D_EXPLICIT, pad = [1, 2, 2, 1], stride = 1, act = none
# Explicit-padding form: (input, weights, bias, pad_left, pad_right, pad_top,
# pad_bottom, stride_w, stride_h, activation, layout) — no output-shape
# operand in this form, so AddNchw below takes no shape argument either.
# The resulting 3x3 output equals the central 3x3 of TEST 3's output.
i5 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
w5 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
b5 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
o5 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output
Model().Operation("TRANSPOSE_CONV_2D", i5, w5, b5, 1, 2, 2, 1, 1, 1, 0, layout).To(o5)

# Additional data type
quant8 = DataTypeConverter().Identify({
    i5: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
    w5: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    b5: ("TENSOR_INT32", 0.125, 0),   # bias scale = input scale * weight scale
    o5: ("TENSOR_QUANT8_ASYMM", 20.0, 50)
})

Example({
    i5: [1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
         17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
    o5: [678,  1347, 1689,
         1494, 2715, 3057,
         1968, 3352, 3652]
}).AddNchw(i5, o5, layout).AddVariations("relaxed", quant8, "float16")
176
177
# TEST 6: zero-sized input, implicit padding
# Verifies TRANSPOSE_CONV_2D handles a zero-sized (numBatches = 0) input.
# The graph below produces a batch-0 tensor chain:
# BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> TRANSPOSE_CONV_2D.

# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
# Its outputs are declared with zero-sized shapes, so every downstream op
# receives batch-0 tensors.
p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)

# Use ROI_ALIGN op to convert into zero-sized feature map.
i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)

# TRANSPOSE_CONV_2D op with numBatches = 0.
# Implicit-padding form as in TEST 1: padding scheme 2 = VALID, stride 2x2, act = none.
w = Parameter("weights", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5, 2, 4, 6, 8, 10, 12, 10, 8, 6]) # weight
b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
s = Int32Vector("shape", [0, 5, 5, 2]) # output shape
o3 = Output("out", "TENSOR_FLOAT32", "{0, 5, 5, 2}") # out
model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, s, 2, 2, 2, 0, layout).To(o3)

quant8 = DataTypeConverter().Identify({
    p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    b: ("TENSOR_INT32", 0.01, 0),
    o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
})

# All outputs are empty — the test only checks the graph executes cleanly
# with zero-sized tensors.
Example({
    i1: [1],
    o1: [],
    o2: [],
    o3: [],
}).AddNchw(i1, zero_sized, o3, s, layout).AddVariations("relaxed", quant8, "float16")
219
220
# TEST 7: zero-sized input, explicit padding
# Same zero-sized (numBatches = 0) pipeline as TEST 6, but exercising the
# explicit-padding form of TRANSPOSE_CONV_2D.

# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
# Its outputs are declared with zero-sized shapes, so every downstream op
# receives batch-0 tensors.
p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)

# Use ROI_ALIGN op to convert into zero-sized feature map.
i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 4, 4, 1}")
model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 4, 4, 2.0, 2.0, 4, 4, layout).To(zero_sized)

# TRANSPOSE_CONV_2D op with numBatches = 0.
# Explicit-padding form as in TEST 5: pad (left, right, top, bottom) =
# (1, 2, 2, 1), stride 1x1, act = none; no output-shape operand.
w = Parameter("weights", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5]) # weight
b = Parameter("bias", "TENSOR_FLOAT32", "{1}", [-1.5]) # bias
o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, 1, 2, 2, 1, 1, 1, 0, layout).To(o3)

quant8 = DataTypeConverter().Identify({
    p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
    b: ("TENSOR_INT32", 0.01, 0),
    o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
})

# All outputs are empty — the test only checks the graph executes cleanly
# with zero-sized tensors. No shape operand here, so AddNchw omits it.
Example({
    i1: [1],
    o1: [],
    o2: [],
    o3: [],
}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
261
262
# TEST 8: TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 2, act = none
# A 1x1 kernel with stride 2 scales each input element by 2 and scatters it to
# every other output position; all remaining positions hold the bias value (0).
input8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
weight8 = Parameter("op2", "TENSOR_FLOAT32", "{1, 1, 1, 1}", [2]) # weight
bias8 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
outShape8 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
output8 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
Model().Operation("TRANSPOSE_CONV_2D", input8, weight8, bias8, outShape8,
                  1, 2, 2, 0, layout).To(output8)

# Additional data type
quant8 = DataTypeConverter().Identify({
    input8: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
    weight8: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    bias8: ("TENSOR_INT32", 0.25, 0),
    output8: ("TENSOR_QUANT8_ASYMM", 16.0, 0)
})

# Reference output, one row of the 4x4 map per source line.
Example({
    input8: [1, 2, 3, 4],
    output8: [2, 0, 4, 0,
              0, 0, 0, 0,
              6, 0, 8, 0,
              0, 0, 0, 0]
}).AddNchw(input8, output8, outShape8, layout).AddVariations("relaxed", quant8, "float16")