#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.

model = Model()

n_batch = 2
n_input = 2
n_cell = 4
n_output = n_cell

input_ = Input("input", ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))
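
# For reference: a TENSOR_QUANT8_ASYMM operand stores unsigned 8-bit values, and the
# real value is recovered as scale * (quantized - zero_point). With scale 1/128 and
# zero point 128, the quantized input value 166 used below corresponds to
# (166 - 128) / 128 = 0.296875. A minimal illustrative sketch (this helper is
# hypothetical and is not used by the generated tests):
def dequantize_quant8_asymm(quantized, scale, zero_point):
    # Map a quantized uint8 value back to its real value.
    return scale * (quantized - zero_point)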

weights_scale = 0.00408021
weights_zero_point = 100

input_to_input_weights = Input("inputToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
input_to_forget_weights = Input("inputToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
input_to_cell_weights = Input("inputToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
input_to_output_weights = Input("inputToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))

recurrent_to_input_weights = Input("recurrentToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
recurrent_to_forget_weights = Input("recurrentToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
recurrent_to_cell_weights = Input("recurrentToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
recurrent_to_output_weights = Input("recurrentToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))

input_gate_bias = Input("inputGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
forget_gate_bias = Input("forgetGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
cell_gate_bias = Input("cellGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
output_gate_bias = Input("outputGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
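
# Note: the gate biases are TENSOR_INT32 operands with zero point 0 whose scale is
# expected to be the product of the input scale and the weights scale; with an input
# scale of 1/128 that product is weights_scale / 128, which is what the expressions
# above encode.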

prev_cell_state = Input("prevCellState", ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
prev_output = Input("prevOutput", ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))

cell_state_out = Output("cellStateOut", ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
output = Output("output", ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
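
# Note: TENSOR_QUANT16_SYMM values are signed 16-bit with zero point 0, so the real
# cell state is simply scale * quantized. With scale 1/2048 the representable
# cell-state range is roughly [-16, 16); e.g. the quantized value 1034 used below
# corresponds to 1034 / 2048 ≈ 0.505.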


model = model.Operation("QUANTIZED_16BIT_LSTM",
                        input_,
                        input_to_input_weights,
                        input_to_forget_weights,
                        input_to_cell_weights,
                        input_to_output_weights,
                        recurrent_to_input_weights,
                        recurrent_to_forget_weights,
                        recurrent_to_cell_weights,
                        recurrent_to_output_weights,
                        input_gate_bias,
                        forget_gate_bias,
                        cell_gate_bias,
                        output_gate_bias,
                        prev_cell_state,
                        prev_output
).To([cell_state_out, output])
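
# Note: the operand order above follows the QUANTIZED_16BIT_LSTM signature: 15 inputs
# (the data input, four input-to-gate weight matrices, four recurrent-to-gate weight
# matrices, four gate biases, the previous cell state, and the previous output),
# followed by 2 outputs (the new cell state and the new output).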

input_dict = {
    input_: [166, 179, 50, 150],
    input_to_input_weights: [146, 250, 235, 171, 10, 218, 171, 108],
    input_to_forget_weights: [24, 50, 132, 179, 158, 110, 3, 169],
    input_to_cell_weights: [133, 34, 29, 49, 206, 109, 54, 183],
    input_to_output_weights: [195, 187, 11, 99, 109, 10, 218, 48],
    recurrent_to_input_weights: [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26],
    recurrent_to_forget_weights: [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253],
    recurrent_to_cell_weights: [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216],
    recurrent_to_output_weights: [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98],
    input_gate_bias: [-7876, 13488, -726, 32839],
    forget_gate_bias: [9206, -46884, -11693, -38724],
    cell_gate_bias: [39481, 48624, 48976, -21419],
    output_gate_bias: [-58999, -17050, -41852, -40538],
    prev_cell_state: [876, 1034, 955, -909, 761, 1029, 796, -1036],
    prev_output: [136, 150, 140, 115, 135, 152, 138, 112],
}

output_dict = {
    cell_state_out: [1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235],
    output: [140, 151, 146, 112, 136, 156, 142, 112]
}
Example((input_dict, output_dict), model=model).AddVariations("relaxed")
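
# Note: AddVariations("relaxed") additionally generates a variant of this example with
# relaxed computation enabled, i.e. float calculations may be carried out with reduced
# (FP16-level) precision.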


# TEST 2: Same as TEST 1, but only the first batch is tested and the weights are
# compile-time constants.
model = Model()

n_batch = 1
n_input = 2
n_cell = 4
n_output = n_cell

input_ = Input("input",
               ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))

weights_scale = 0.00408021
weights_zero_point = 100

input_to_input_weights = Parameter(
    "inputToInputWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_input), weights_scale, weights_zero_point),
    [146, 250, 235, 171, 10, 218, 171, 108])
input_to_forget_weights = Parameter(
    "inputToForgetWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_input), weights_scale, weights_zero_point),
    [24, 50, 132, 179, 158, 110, 3, 169])
input_to_cell_weights = Parameter(
    "inputToCellWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_input), weights_scale, weights_zero_point),
    [133, 34, 29, 49, 206, 109, 54, 183])
input_to_output_weights = Parameter(
    "inputToOutputWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_input), weights_scale, weights_zero_point),
    [195, 187, 11, 99, 109, 10, 218, 48])

recurrent_to_input_weights = Parameter(
    "recurrentToInputWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_output), weights_scale, weights_zero_point),
    [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26])
recurrent_to_forget_weights = Parameter(
    "recurrentToForgetWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_output), weights_scale, weights_zero_point),
    [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253])
recurrent_to_cell_weights = Parameter(
    "recurrentToCellWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_output), weights_scale, weights_zero_point),
    [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216])
recurrent_to_output_weights = Parameter(
    "recurrentToOutputWeights",
    ("TENSOR_QUANT8_ASYMM",
     (n_output, n_output), weights_scale, weights_zero_point),
    [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98])

input_gate_bias = Parameter("inputGateBias",
                            ("TENSOR_INT32",
                             (n_output,), weights_scale / 128., 0),
                            [-7876, 13488, -726, 32839])
forget_gate_bias = Parameter("forgetGateBias",
                             ("TENSOR_INT32",
                              (n_output,), weights_scale / 128., 0),
                             [9206, -46884, -11693, -38724])
cell_gate_bias = Parameter("cellGateBias",
                           ("TENSOR_INT32",
                            (n_output,), weights_scale / 128., 0),
                           [39481, 48624, 48976, -21419])
output_gate_bias = Parameter("outputGateBias",
                             ("TENSOR_INT32",
                              (n_output,), weights_scale / 128., 0),
                             [-58999, -17050, -41852, -40538])
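
# Note: unlike TEST 1, the weights and biases above are created with Parameter rather
# than Input, so their values are baked into the model as compile-time constants and
# do not appear in this test's input_dict; only the data input and the previous state
# remain run-time inputs.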

prev_cell_state = Input("prevCellState",
                        ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
prev_output = Input("prevOutput",
                    ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))

cell_state_out = Output("cellStateOut",
                        ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
output = Output("output",
                ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))

model = model.Operation("QUANTIZED_16BIT_LSTM", input_, input_to_input_weights,
                        input_to_forget_weights, input_to_cell_weights,
                        input_to_output_weights, recurrent_to_input_weights,
                        recurrent_to_forget_weights, recurrent_to_cell_weights,
                        recurrent_to_output_weights, input_gate_bias,
                        forget_gate_bias, cell_gate_bias, output_gate_bias,
                        prev_cell_state,
                        prev_output).To([cell_state_out, output])

input_dict = {
    input_: [166, 179],
    prev_cell_state: [876, 1034, 955, -909],
    prev_output: [136, 150, 140, 115],
}

output_dict = {
    cell_state_out: [1485, 1177, 1373, -1023],
    output: [140, 151, 146, 112]
}
Example((input_dict, output_dict), model=model,
        name="constant_weights").AddVariations("relaxed")