/external/tensorflow/tensorflow/contrib/lite/kernels/ |
unidirectional_sequence_lstm.cc | 36 // Input Tensors of size {max_time, n_batch, n_input} 234 const int n_batch = input->dims->data[1]; local 264 output_size->data[1] = n_batch; 270 output_state_size->data[0] = n_batch; 277 cell_size->data[0] = n_batch; 291 scratch_buffer_size->data[0] = n_batch; 298 scratch_buffer_size->data[0] = n_batch; 355 const int n_batch = input->dims->data[1]; local 374 forget_gate_scratch = scratch_buffer->data.f + n_cell * n_batch; 375 output_gate_scratch = scratch_buffer->data.f + 2 * n_cell * n_batch; [all...] |
lstm.cc | 36 // Input Tensors of size {n_batch, n_input} 233 const int n_batch = input->dims->data[0]; local 262 output_size->data[0] = n_batch; 268 output_state_size->data[0] = n_batch; 275 cell_size->data[0] = n_batch; 289 scratch_buffer_size->data[0] = n_batch; 296 scratch_buffer_size->data[0] = n_batch; 352 const int n_batch = input->dims->data[0]; local 371 forget_gate_scratch = scratch_buffer->data.f + n_cell * n_batch; 372 output_gate_scratch = scratch_buffer->data.f + 2 * n_cell * n_batch; [all...] |
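The lstm.cc hits at lines 289-375 (and the matching unidirectional_sequence_lstm.cc lines) show a flat scratch_buffer of n_batch * n_cell * 4 floats (3 with CIFG) being carved into per-gate working areas at offsets that are multiples of n_cell * n_batch. A minimal sketch of that partitioning, reconstructed from the offsets in the lstm.cc hits above and the LSTM.cpp hits below; the struct, function name, and the exact non-CIFG ordering are illustrative, not verbatim from the kernel:

    // Partition a flat scratch buffer of n_batch * n_cell * (use_cifg ? 3 : 4)
    // floats into per-gate areas, one n_cell * n_batch section per gate.
    struct GateScratch {
      float* input_gate;   // left null when CIFG couples it to the forget gate
      float* cell;
      float* forget_gate;
      float* output_gate;
    };

    GateScratch PartitionLstmScratch(float* scratch, int n_cell, int n_batch,
                                     bool use_cifg) {
      const int section = n_cell * n_batch;  // one gate's worth of scratch
      GateScratch s = {};
      if (use_cifg) {
        // Three sections: no separate input-gate area.
        s.cell = scratch;
        s.forget_gate = scratch + section;      // "+ n_cell * n_batch" above
        s.output_gate = scratch + 2 * section;  // "+ 2 * n_cell * n_batch"
      } else {
        // Four sections; ordering inferred from the LSTM.cpp offsets below.
        s.input_gate = scratch;
        s.cell = scratch + section;
        s.forget_gate = scratch + 2 * section;
        s.output_gate = scratch + 3 * section;
      }
      return s;
    }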
optional_tensor_test.cc | 33 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, 37 : n_batch_(n_batch), 247 const int n_batch = 1; local 253 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, 259 {n_batch, n_input}, // input tensor

/frameworks/ml/nn/common/operations/ |
LSTM.cpp | 246 const uint32_t n_batch = SizeOfDimension(input, 0); local 271 outputShape->dimensions = { n_batch, n_output }; 276 outputStateShape->dimensions = { n_batch, n_output }; 281 cellStateShape->dimensions = { n_batch, n_cell }; 290 scratchShape->dimensions = { n_batch, n_cell * 3 }; 293 scratchShape->dimensions = { n_batch, n_cell * 4 }; 303 const uint32_t n_batch = input_->shape().dimensions[0]; local 321 forget_gate_scratch = cell_scratch + n_cell * n_batch; 322 output_gate_scratch = cell_scratch + 2 * n_cell * n_batch; 325 cell_scratch = input_gate_scratch + n_cell * n_batch; 333 n_cell, n_batch, input_gate_scratch); local 336 n_cell, n_batch, forget_gate_scratch); local 338 n_cell, n_batch, cell_scratch); local 340 n_cell, n_batch, output_gate_scratch); local 346 GetBuffer<float>(input_), n_batch, input_gate_scratch, /*result_stride*/1); local 350 GetBuffer<float>(input_), n_batch, forget_gate_scratch, /*result_stride*/1); local 353 GetBuffer<float>(input_), n_batch, cell_scratch, /*result_stride*/1); local 356 GetBuffer<float>(input_), n_batch, output_gate_scratch, /*result_stride*/1); local 362 GetBuffer<float>(output_state_in_), n_batch, input_gate_scratch, /*result_stride*/1); local 366 GetBuffer<float>(output_state_in_), n_batch, forget_gate_scratch, /*result_stride*/1); local 369 GetBuffer<float>(output_state_in_), n_batch, cell_scratch, /*result_stride*/1); local 372 GetBuffer<float>(output_state_in_), n_batch, output_gate_scratch, /*result_stride*/1); local 379 GetBuffer<float>(cell_state_in_), n_batch, input_gate_scratch); local 390 GetBuffer<float>(cell_state_in_), n_batch, forget_gate_scratch); local 423 GetBuffer<float>(cell_state_out_), n_batch, output_gate_scratch); local 442 n_batch, GetBuffer<float>(output_)); local 448 output_gate_scratch, n_batch, GetBuffer<float>(output_), local [all...] |
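The LSTM.cpp hits at lines 333-372 trace the per-gate accumulation: each gate scratch area is first seeded with its bias for every batch entry, then the input and recurrent contributions are accumulated with batched matrix-vector products. A sketch of that sequence, assuming the tensor_utils signatures shown further down in this listing; the wrapper function, parameter names, and namespace qualification are illustrative:

    // gate_scratch holds n_batch rows of n_cell values.
    void AccumulateGate(const float* gate_bias,          // [n_cell]
                        const float* input_weights,      // [n_cell, n_input]
                        const float* recurrent_weights,  // [n_cell, n_output]
                        const float* input,              // [n_batch, n_input]
                        const float* output_state,       // [n_batch, n_output]
                        int n_cell, int n_input, int n_output, int n_batch,
                        float* gate_scratch) {
      // 1. Broadcast the bias into every batch row of the gate scratch area.
      tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch,
                                            gate_scratch);
      // 2. Accumulate the input contribution: gate += W_x * x_t per batch.
      tensor_utils::MatrixBatchVectorMultiplyAccumulate(
          input_weights, n_cell, n_input, input, n_batch, gate_scratch,
          /*result_stride=*/1);
      // 3. Accumulate the recurrent contribution: gate += W_h * h_{t-1}.
      tensor_utils::MatrixBatchVectorMultiplyAccumulate(
          recurrent_weights, n_cell, n_output, output_state, n_batch,
          gate_scratch, /*result_stride=*/1);
    }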
/frameworks/ml/nn/tools/test_generator/tests/P_lstm/ |
lstm.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 153 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ], 154 cell_state_out: [ 0 for x in range(n_batch * n_cell) ] [all...] |
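lstm.mod.py pins down the per-step operand shapes: input {n_batch, n_input}, state inputs {n_batch, n_output} and {n_batch, n_cell}, scratch_buffer {n_batch, n_cell * 4}, and output {n_batch, n_output}; the CIFG specs further down shrink the scratch width to n_cell * 3. A small helper reproducing those shape relationships; the struct and function names are illustrative only:

    #include <array>
    #include <cstdint>

    // Each shape is a {rows, cols} pair, matching the spec strings above.
    struct LstmStepShapes {
      std::array<uint32_t, 2> input;
      std::array<uint32_t, 2> output_state_in;
      std::array<uint32_t, 2> cell_state_in;
      std::array<uint32_t, 2> scratch_buffer;
      std::array<uint32_t, 2> output;
    };

    LstmStepShapes MakeLstmStepShapes(uint32_t n_batch, uint32_t n_input,
                                      uint32_t n_cell, uint32_t n_output,
                                      bool use_cifg) {
      LstmStepShapes s;
      s.input = {n_batch, n_input};
      s.output_state_in = {n_batch, n_output};
      s.cell_state_in = {n_batch, n_cell};
      s.scratch_buffer = {n_batch, n_cell * (use_cifg ? 3u : 4u)};
      s.output = {n_batch, n_output};
      return s;
    }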
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/optimized/ |
neon_tensor_utils.h | 29 int n_batch, float* result, 32 vector, n_batch, result, result_stride); 49 int n_batch, float* result) { 51 batch_vector, n_batch, result); 61 int n_batch, float* result, 64 n_batch, result, result_stride); 67 void VectorBatchVectorAssign(const float* vector, int v_size, int n_batch, 69 PortableVectorBatchVectorAssign(vector, v_size, n_batch, batch_vector);
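neon_tensor_utils.h shows thin wrappers: utilities with a vectorized kernel forward to it, while simple ones such as VectorBatchVectorAssign (lines 67-69) just call the portable version. A sketch of that wrapper shape; the Neon* name follows the Portable/Neon naming in tensor_utils_impl.h, and the real header may route through a NEON-vs-portable dispatch macro rather than calling the kernels directly:

    // Heavy batched matrix * vector products go to the NEON kernel.
    inline void MatrixBatchVectorMultiplyAccumulate(const float* matrix,
                                                    int m_rows, int m_cols,
                                                    const float* vector,
                                                    int n_batch, float* result,
                                                    int result_stride) {
      NeonMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector,
                                              n_batch, result, result_stride);
    }

    // Simple broadcasts stay on the portable path (lines 67-69 above).
    inline void VectorBatchVectorAssign(const float* vector, int v_size,
                                        int n_batch, float* batch_vector) {
      PortableVectorBatchVectorAssign(vector, v_size, n_batch, batch_vector);
    }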
tensor_utils_impl.h | 36 int n_batch, float* result, 40 int n_batch, float* result, 68 int n_batch, float* result, 72 int n_batch, float* result, 81 int n_batch, 86 int n_batch, float* result); 100 int n_batch, float* batch_vector);
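tensor_utils_impl.h declares both the Portable* and Neon* variants of these batched utilities. The portable matrix * batch-vector multiply-accumulate is essentially three nested loops over batches, rows, and columns; a sketch consistent with the signature shown above, assuming a row-major m_rows x m_cols matrix, n_batch input vectors of m_cols each, and n_batch result vectors of m_rows each written with result_stride:

    void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
                                                     int m_rows, int m_cols,
                                                     const float* vector,
                                                     int n_batch, float* result,
                                                     int result_stride) {
      float* result_in_batch = result;
      for (int b = 0; b < n_batch; ++b) {
        const float* matrix_ptr = matrix;
        for (int r = 0; r < m_rows; ++r) {
          const float* vector_in_batch = vector + b * m_cols;
          for (int c = 0; c < m_cols; ++c) {
            // Accumulate the dot product of matrix row r with batch vector b.
            *result_in_batch += *matrix_ptr++ * *vector_in_batch++;
          }
          result_in_batch += result_stride;
        }
      }
    }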
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/reference/ |
portable_tensor_utils.h | 33 int n_batch, float* result, 54 int n_batch, float* result, 63 int n_batch, 68 int n_batch, float* batch_vector); 108 int n_batch, float* result, 111 n_batch, result, result_stride); 127 int n_batch, float* result) { 129 n_batch, result); 139 int n_batch, float* result, 141 PortableBatchVectorBatchVectorDotProduct(vector1, vector2, v_size, n_batch, [all...] |
portable_tensor_utils.cc | 33 int n_batch, float* result, 36 for (int b = 0; b < n_batch; b++) { 67 int n_batch, float* result, 72 for (int b = 0; b < n_batch; b++) { 92 int n_batch, 94 for (int b = 0; b < n_batch; b++) { 102 int n_batch, float* batch_vector) { 103 for (int b = 0; b < n_batch; b++) {
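The portable_tensor_utils.cc hits all share the same pattern: an outer `for (int b = 0; b < n_batch; b++)` loop around a per-vector operation. For the assign at lines 102-103, that plausibly amounts to replicating one vector into every batch row; a minimal sketch:

    #include <cstring>

    void PortableVectorBatchVectorAssign(const float* vector, int v_size,
                                         int n_batch, float* batch_vector) {
      for (int b = 0; b < n_batch; ++b) {
        // Copy the same v_size-element vector into batch row b.
        std::memcpy(batch_vector + b * v_size, vector, v_size * sizeof(float));
      }
    }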
/frameworks/ml/nn/runtime/test/specs/V1_0/ |
lstm2.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3)) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 132 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ], 138 input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) [all...] |
lstm2_state2.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3)) 59 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 132 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ], 133 cell_state_out: [ 0 for x in range(n_batch * n_cell) ] [all...] |
lstm_state2.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 140 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ], 141 cell_state_out: [ 0 for x in range(n_batch * n_cell) ] [all...] |
lstm.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 140 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
lstm2_state.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3)) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 132 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ],
lstm_state.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 140 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
lstm3.mod.py | 21 n_batch = 2 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) [all...] |
lstm3_state3.mod.py | 21 n_batch = 2 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) [all...] |
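Across these V1_0 specs the only shape that changes with the cell variant is the scratch_buffer width: the lstm2* models (CIFG) declare {n_batch, n_cell * 3}, while the lstm, lstm3, and *_state variants declare {n_batch, n_cell * 4}. A one-liner capturing that relationship (name illustrative):

    // Scratch floats for one LSTM step: one n_cell-wide section per batch
    // entry and per gate; CIFG drops the separate input-gate section.
    inline int LstmScratchSize(int n_batch, int n_cell, bool use_cifg) {
      return n_batch * n_cell * (use_cifg ? 3 : 4);
    }

For lstm2.mod.py above (n_batch = 1, CIFG) this gives the n_cell * 3 zeros listed for the ignored scratch_buffer output at line 132.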
/frameworks/ml/nn/runtime/test/specs/V1_1/ |
lstm2_relaxed.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3)) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 133 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ], 139 input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) [all...] |
lstm2_state2_relaxed.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3)) 59 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 133 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ], 134 cell_state_out: [ 0 for x in range(n_batch * n_cell) ] [all...] |
lstm_state2_relaxed.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 141 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ], 142 cell_state_out: [ 0 for x in range(n_batch * n_cell) ] [all...] |
lstm2_state_relaxed.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell * 3)) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 133 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ],
lstm_relaxed.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 141 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
lstm_state_relaxed.mod.py | 21 n_batch = 1 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 141 scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
lstm3_relaxed.mod.py | 21 n_batch = 2 variable 27 input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input)) 51 output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 52 cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 58 scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4))) 59 output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) 60 cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell)) 61 output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output)) [all...] |
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/ |
tensor_utils.h | 34 int n_batch, float* result, 51 // Dot product of two batch vectors of size n_batch * v_size: 60 // Then result will be a vector of n_batch size which will be saved with a 68 int n_batch, float* result, 76 int n_batch, float* result); 79 void VectorBatchVectorAssign(const float* vector, int v_size, int n_batch,
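The tensor_utils.h comment at lines 51-60 describes the batched dot product: two buffers each holding n_batch vectors of v_size elements produce n_batch scalars. A portable-style sketch under that reading; the result_stride parameter is assumed from the neighbouring signatures:

    void BatchVectorBatchVectorDotProduct(const float* vector1,
                                          const float* vector2, int v_size,
                                          int n_batch, float* result,
                                          int result_stride) {
      for (int b = 0; b < n_batch; ++b) {
        const float* v1 = vector1 + b * v_size;
        const float* v2 = vector2 + b * v_size;
        float sum = 0.0f;
        for (int i = 0; i < v_size; ++i) {
          sum += v1[i] * v2[i];
        }
        // One scalar per batch entry, written with the requested stride.
        result[b * result_stride] = sum;
      }
    }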