#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.

model = Model()

n_batch = 2
n_input = 2
n_cell = 4
n_output = n_cell

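# The input is 8-bit asymmetrically quantized with scale 1 / 128 and zero
# point 128, the fixed input quantization that QUANTIZED_16BIT_LSTM expects.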
InputType = ("TENSOR_QUANT8_ASYMM", [n_batch, n_input], 1 / 128, 128)
input_ = Input("input", InputType)

weights_scale = 0.00408021
weights_zero_point = 100

InputWeightsType = ("TENSOR_QUANT8_ASYMM",
                    [n_output, n_input], weights_scale, weights_zero_point)
input_to_input_weights = Input("inputToInputWeights", InputWeightsType)
input_to_forget_weights = Input("inputToForgetWeights", InputWeightsType)
input_to_cell_weights = Input("inputToCellWeights", InputWeightsType)
input_to_output_weights = Input("inputToOutputWeights", InputWeightsType)

RecurrentWeightsType = ("TENSOR_QUANT8_ASYMM",
                        [n_output, n_output], weights_scale, weights_zero_point)
recurrent_to_input_weights = Input("recurrentToInputWeights", RecurrentWeightsType)
recurrent_to_forget_weights = Input("recurrentToForgetWeights", RecurrentWeightsType)
recurrent_to_cell_weights = Input("recurrentToCellWeights", RecurrentWeightsType)
recurrent_to_output_weights = Input("recurrentToOutputWeights", RecurrentWeightsType)

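# The gate biases are 32-bit integers quantized with zero point 0 and a scale
# equal to the product of the input scale (1 / 128) and the weights scale.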
BiasType = ("TENSOR_INT32", [n_output], weights_scale / 128., 0)
input_gate_bias = Input("inputGateBias", BiasType)
forget_gate_bias = Input("forgetGateBias", BiasType)
cell_gate_bias = Input("cellGateBias", BiasType)
output_gate_bias = Input("outputGateBias", BiasType)

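# The cell state is 16-bit symmetrically quantized with scale 1 / 2048; the
# output uses the same 8-bit quantization as the input (scale 1 / 128, zero
# point 128).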
StateType = ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0)
OutputType = ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128)
prev_cell_state = Input("prevCellState", StateType)
prev_output = Input("prevOutput", OutputType)

cell_state_out = Output("cellStateOut", StateType)
output = Output("output", OutputType)

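# The operation consumes the input, the four input-to-gate and four
# recurrent-to-gate weight matrices, the four gate biases, and the previous
# cell state and output, and produces the updated cell state and the new
# output.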
model = model.Operation("QUANTIZED_16BIT_LSTM",
                        input_,
                        input_to_input_weights,
                        input_to_forget_weights,
                        input_to_cell_weights,
                        input_to_output_weights,
                        recurrent_to_input_weights,
                        recurrent_to_forget_weights,
                        recurrent_to_cell_weights,
                        recurrent_to_output_weights,
                        input_gate_bias,
                        forget_gate_bias,
                        cell_gate_bias,
                        output_gate_bias,
                        prev_cell_state,
                        prev_output).To([cell_state_out, output])

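# All values below are raw quantized integers; e.g. the first input value 166
# dequantizes to (166 - 128) * 1 / 128 = 0.296875.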
input_dict = {
    input_: [166, 179, 50, 150],
    input_to_input_weights: [146, 250, 235, 171, 10, 218, 171, 108],
    input_to_forget_weights: [24, 50, 132, 179, 158, 110, 3, 169],
    input_to_cell_weights: [133, 34, 29, 49, 206, 109, 54, 183],
    input_to_output_weights: [195, 187, 11, 99, 109, 10, 218, 48],
    recurrent_to_input_weights: [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26],
    recurrent_to_forget_weights: [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253],
    recurrent_to_cell_weights: [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216],
    recurrent_to_output_weights: [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98],
    input_gate_bias: [-7876, 13488, -726, 32839],
    forget_gate_bias: [9206, -46884, -11693, -38724],
    cell_gate_bias: [39481, 48624, 48976, -21419],
    output_gate_bias: [-58999, -17050, -41852, -40538],
    prev_cell_state: [876, 1034, 955, -909, 761, 1029, 796, -1036],
    prev_output: [136, 150, 140, 115, 135, 152, 138, 112],
}

output_dict = {
    cell_state_out: [1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235],
    output: [140, 151, 146, 112, 136, 156, 142, 112]
}
Example((input_dict, output_dict), model=model).AddVariations("relaxed")


# TEST 2: the same as the first test, but only the first batch is tested and
# the weights are compile-time constants.
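# Declaring the weights and biases with Parameter (instead of Input) makes
# them constant operands of the model, so drivers can treat them as
# compile-time constants.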
model = Model()

n_batch = 1
n_input = 2
n_cell = 4
n_output = n_cell

InputType = ("TENSOR_QUANT8_ASYMM", [n_batch, n_input], 1 / 128, 128)
input_ = Input("input", InputType)

weights_scale = 0.00408021
weights_zero_point = 100

InputWeightsType = ("TENSOR_QUANT8_ASYMM",
                    [n_output, n_input], weights_scale, weights_zero_point)
input_to_input_weights = Parameter("inputToInputWeights", InputWeightsType,
                                   [146, 250, 235, 171, 10, 218, 171, 108])
input_to_forget_weights = Parameter("inputToForgetWeights", InputWeightsType,
                                    [24, 50, 132, 179, 158, 110, 3, 169])
input_to_cell_weights = Parameter("inputToCellWeights", InputWeightsType,
                                  [133, 34, 29, 49, 206, 109, 54, 183])
input_to_output_weights = Parameter("inputToOutputWeights", InputWeightsType,
                                    [195, 187, 11, 99, 109, 10, 218, 48])

RecurrentWeightsType = ("TENSOR_QUANT8_ASYMM",
                        [n_output, n_output], weights_scale, weights_zero_point)
recurrent_to_input_weights = Parameter("recurrentToInputWeights", RecurrentWeightsType,
                                       [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26])
recurrent_to_forget_weights = Parameter("recurrentToForgetWeights", RecurrentWeightsType,
                                        [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253])
recurrent_to_cell_weights = Parameter("recurrentToCellWeights", RecurrentWeightsType,
                                      [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216])
recurrent_to_output_weights = Parameter("recurrentToOutputWeights", RecurrentWeightsType,
                                        [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98])

BiasType = ("TENSOR_INT32", [n_output], weights_scale / 128., 0)
input_gate_bias = Parameter("inputGateBias", BiasType,
                            [-7876, 13488, -726, 32839])
forget_gate_bias = Parameter("forgetGateBias", BiasType,
                             [9206, -46884, -11693, -38724])
cell_gate_bias = Parameter("cellGateBias", BiasType,
                           [39481, 48624, 48976, -21419])
output_gate_bias = Parameter("outputGateBias", BiasType,
                             [-58999, -17050, -41852, -40538])

StateType = ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0)
OutputType = ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128)
prev_cell_state = Input("prevCellState", StateType)
prev_output = Input("prevOutput", OutputType)

cell_state_out = Output("cellStateOut", StateType)
output = Output("output", OutputType)

model = model.Operation("QUANTIZED_16BIT_LSTM", input_, input_to_input_weights,
                        input_to_forget_weights, input_to_cell_weights,
                        input_to_output_weights, recurrent_to_input_weights,
                        recurrent_to_forget_weights, recurrent_to_cell_weights,
                        recurrent_to_output_weights, input_gate_bias,
                        forget_gate_bias, cell_gate_bias, output_gate_bias,
                        prev_cell_state,
                        prev_output).To([cell_state_out, output])

input_dict = {
    input_: [166, 179],
    prev_cell_state: [876, 1034, 955, -909],
    prev_output: [136, 150, 140, 115],
}

output_dict = {
    cell_state_out: [1485, 1177, 1373, -1023],
    output: [140, 151, 146, 112]
}
Example((input_dict, output_dict), model=model,
        name="constant_weights").AddVariations("relaxed")