/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
neon_tensor_utils.h |
  30    int result_stride) {
  32    vector, n_batch, result, result_stride);
  38    int n_batch, float* __restrict__ result, int result_stride) {
  40    vectors, scaling_factors, n_batch, result, result_stride);
  46    int result_stride) {
  48    matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride);
  55    int result_stride) {
  58    n_batch, result, result_stride);
  95    int result_stride) {
  97    n_batch, result, result_stride);
  [all...]
tensor_utils_impl.h |
  41    int result_stride);
  45    int result_stride);
  51    int n_batch, float* __restrict__ result, int result_stride);
  55    int n_batch, float* __restrict__ result, int result_stride);
  59    const float* vector, int n_batch, float* result, int result_stride);
  62    const float* vector, int n_batch, float* result, int result_stride);
  69    int result_stride);
  74    int result_stride);
  102   int result_stride);
  106   int result_stride);
  [all...]
neon_tensor_utils.cc |
  100   int result_stride) {
  108   float* result_in_batch = result + b * m_rows * result_stride;
  131   result_in_batch += result_stride;
  317   int result_stride) {
  371   result[(batch * m_rows + row) * result_stride] +=
  382   int n_batch, float* __restrict__ result, int result_stride) {
  386   if (n_batch % 4 == 0 && result_stride == 1) {
  432   for (row = 0; row < m_rows; ++row, result += result_stride) {
  512   const float* vector, int n_batch, float* result, int result_stride) {
  548   result_in_batch += result_stride;
  [all...]
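The fragments above suggest how the hybrid (int8 weights, float scaling factors) kernel chooses its path: line 386 gates the vectorized code on a contiguous output and a batch count divisible by four, and line 432 shows the generic loop stepping the output pointer by result_stride. A scalar sketch of that shape, assuming the usual TFLite hybrid convention of one scaling factor per batch; the function and helper names are placeholders, not TFLite symbols.

#include <cstdint>

// Scalar stand-in for one output value: dot product of an int8 weight row
// with an int8 input vector, scaled by the batch's scaling factor.
static float ScaledRowDot(const std::int8_t* row, const std::int8_t* vec,
                          int n, float scale) {
  std::int32_t acc = 0;
  for (int i = 0; i < n; ++i) {
    acc += static_cast<std::int32_t>(row[i]) *
           static_cast<std::int32_t>(vec[i]);
  }
  return scale * static_cast<float>(acc);
}

void HybridMatrixBatchVectorMultiplyAccumulateSketch(
    const std::int8_t* matrix, int m_rows, int m_cols,
    const std::int8_t* vectors, const float* scaling_factors, int n_batch,
    float* result, int result_stride) {
  const bool can_use_fast_path = (n_batch % 4 == 0 && result_stride == 1);
  // In the real file this flag selects a NEON kernel that assumes a dense
  // (stride-1) output and consumes four batches at a time; the scalar loop
  // below covers both cases, so the flag is only illustrative here.
  (void)can_use_fast_path;
  for (int batch = 0; batch < n_batch; ++batch) {
    const std::int8_t* vec = vectors + batch * m_cols;
    for (int row = 0; row < m_rows; ++row, result += result_stride) {
      *result += ScaledRowDot(matrix + row * m_cols, vec, m_cols,
                              scaling_factors[batch]);
    }
  }
}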
/external/tensorflow/tensorflow/lite/kernels/internal/ |
tensor_utils.h |
  47    // provided in result_stride (the number of elements between consecutive result
  48    // values). For example result_stride = 1, will cause the output to look like
  51    // but result_stride = 3, will cause it to be arranged like this in memory:
  56    int result_stride);
  71    const float* vector, int n_batch, float* result, int result_stride);
  83    int n_batch, float* __restrict__ result, int result_stride);
  100   int result_stride);
  126   // stride of result_stride in memory starting from 'result':
  134   int result_stride);
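The comment fragments at lines 47-51 and 126 define result_stride as the number of elements between consecutive output values. A toy, self-contained illustration of that layout (not TFLite code): the same four row results written at result_stride = 1 and at result_stride = 3.

#include <cstdio>

int main() {
  const float row_results[4] = {1.f, 2.f, 3.f, 4.f};

  float dense[4] = {};     // result_stride = 1: outputs are contiguous
  float strided[10] = {};  // result_stride = 3: needs (4 - 1) * 3 + 1 slots

  for (int r = 0; r < 4; ++r) {
    dense[r * 1] = row_results[r];    // -> 1 2 3 4
    strided[r * 3] = row_results[r];  // -> 1 . . 2 . . 3 . . 4
  }

  for (float v : dense) std::printf("%g ", v);
  std::printf("\n");
  for (float v : strided) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}

Slots that are skipped keep whatever the buffer already held, which is the property that lets separate calls interleave their outputs into one buffer.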
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
portable_tensor_utils.h |
  44    int result_stride);
  49    int n_batch, float* __restrict__ result, int result_stride);
  53    const float* vector, int n_batch, float* result, int result_stride);
  59    int result_stride);
  80    int result_stride);
  164   int result_stride) {
  166   n_batch, result, result_stride);
  172   int n_batch, float* __restrict__ result, int result_stride) {
  175   result_stride);
  180   const float* vector, int n_batch, float* result, int result_stride) {
  [all...]
portable_tensor_utils.cc |
  72    int result_stride) {
  83    result_in_batch += result_stride;
  91    int n_batch, float* __restrict__ result, int result_stride) {
  97    for (row = 0; row < m_rows; ++row, result += result_stride) {
  115   const float* vector, int n_batch, float* result, int result_stride) {
  138   result_in_batch += result_stride;
  147   int result_stride) {
  157   for (row = 0; row < m_rows; ++row, result += result_stride) {
  200   int result_stride) {
  209   result_ptr += result_stride;
  [all...]
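The portable_tensor_utils.cc fragments outline the reference float kernel: the output cursor for batch b starts at result + b * m_rows * result_stride and advances by result_stride per row (lines 72-83 and 115-138). A plain scalar sketch of that loop; the name and exact signature are approximations rather than copies of the portable implementation.

// Sketch of the strided reference loop; accumulation is into whatever the
// caller already placed in result (hence "Accumulate" in the real names).
void PortableStyleMatrixBatchVectorMultiplyAccumulate(
    const float* matrix, int m_rows, int m_cols, const float* vectors,
    int n_batch, float* result, int result_stride) {
  for (int b = 0; b < n_batch; ++b) {
    const float* vector = vectors + b * m_cols;
    // Output cursor for this batch (cf. result_in_batch in the fragments).
    float* result_in_batch = result + b * m_rows * result_stride;
    for (int r = 0; r < m_rows; ++r) {
      const float* row = matrix + r * m_cols;
      float acc = 0.f;
      for (int c = 0; c < m_cols; ++c) acc += row[c] * vector[c];
      *result_in_batch += acc;
      result_in_batch += result_stride;
    }
  }
}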
/external/gemmlowp/meta/ |
legacy_multi_thread_common.h |
  49    std::int32_t result_stride;  // member in struct:gemmlowp::meta::internal::MetaTask
  54    std::int32_t result_stride, const F& operation)
  61    result_stride(result_stride),
  68    result + task_rect.m_offset * result_stride + task_rect.n_offset;
  70    task_rect.n, k, task_result, result_stride);
  120   OUT_TYPE* result, std::int32_t result_stride,
  129   result_stride);
  138   [&tasks, &task_scratch, lhs, rhs, k, result, result_stride, operation,
  141   task_scratch, lhs, rhs, rect, k, result, result_stride, operation))
  [all...]
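The MetaTask fragments show that result_stride is carried per task and that each task's output pointer is offset to the top-left corner of its rectangle (line 68). A small sketch of that pointer arithmetic; TaskRect here is a stand-in, not gemmlowp's real type.

#include <cstdint>

struct TaskRect {
  std::int32_t m_offset;  // first row this task owns
  std::int32_t n_offset;  // first column this task owns
  std::int32_t m;         // rows in the task's rectangle
  std::int32_t n;         // columns in the task's rectangle
};

template <typename OutType>
OutType* TaskResultPointer(OutType* result, std::int32_t result_stride,
                           const TaskRect& rect) {
  // Row-major addressing: skip m_offset full rows (result_stride elements
  // each), then n_offset elements into the first owned row. Every worker
  // shares the same result_stride.
  return result + rect.m_offset * result_stride + rect.n_offset;
}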
legacy_multi_thread_gemm.h |
  37    std::int32_t result_stride, const F& operation) {
  47    result + i * optimal_n, result_stride);
  52    result + chunks_count_less_one * optimal_n, result_stride);
  55    result, result_stride);
  70    std::int32_t result_stride) const {
  71    CacheFriendlyMatrixMatrix(scratch, lhs, rhs, m, n, k, result, result_stride,
  80    std::int32_t result_stride) const {
  82    sum_offset, multiplier, shift, result, result_stride);
  100   std::int32_t result_stride) const {
  101   CacheFriendlyMatrixMatrix(scratch, lhs, rhs, m, n, k, result, result_stride,
  [all...]
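The offsets at lines 47-55 imply that CacheFriendlyMatrixMatrix walks the result in column slices of width optimal_n, each slice starting at result + i * optimal_n while result_stride stays the full row pitch. A simplified sketch of that slicing; the operation callable and the handling of the final chunk are condensed compared with the real code.

#include <cstdint>

template <typename OutType, typename Operation>
void ChunkColumnsSketch(std::int32_t n, std::int32_t optimal_n,
                        OutType* result, std::int32_t result_stride,
                        const Operation& operation) {
  const std::int32_t full_chunks = n / optimal_n;
  for (std::int32_t i = 0; i < full_chunks; ++i) {
    // Full-width slice covering columns [i * optimal_n, (i + 1) * optimal_n).
    operation(optimal_n, result + i * optimal_n, result_stride);
  }
  const std::int32_t leftover = n - full_chunks * optimal_n;
  if (leftover > 0) {
    // Remainder slice handles the last n % optimal_n columns.
    operation(leftover, result + full_chunks * optimal_n, result_stride);
  }
}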
legacy_single_thread_gemm.h |
  34    std::uint8_t* result, std::int32_t result_stride) {
  69    params.fused_kernel.output_stream.stride = result_stride;
  126   std::int32_t result_stride) {
  158   params.fused_kernel.output_stream.stride = result_stride * 4;
  211   std::int32_t result_stride) {
  243   params.fused_kernel.output_stream.stride = result_stride * 4;
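Line 69 passes result_stride straight through for the std::uint8_t overload, while lines 158 and 243 multiply it by 4, which suggests output_stream.stride is measured in bytes and that those two overloads produce 4-byte (32-bit) outputs. A tiny helper expressing that reading; the name is ours, not gemmlowp's.

#include <cstdint>

template <typename OutType>
std::int32_t ByteStrideSketch(std::int32_t result_stride_elements) {
  // std::uint8_t -> result_stride * 1
  // std::int32_t -> result_stride * 4
  // float        -> result_stride * 4
  return result_stride_elements *
         static_cast<std::int32_t>(sizeof(OutType));
}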
legacy_multi_thread_gemv.h |
  42    std::int32_t result_stride) const {
  62    std::int32_t result_stride) const {
  81    std::int32_t result_stride) const {
/external/gemmlowp/eight_bit_int_gemm/ |
eight_bit_int_gemm.cc |
  258   std::int32_t result_stride, std::uint8_t* result) {
  261   if (IsRowMajorOrVector(result_transpose, result_stride, m, n)) {
  282   std::int32_t result_stride, float* result) {
  285   if (IsRowMajorOrVector(result_transpose, result_stride, m, n)) {
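Both call sites guard a code path with IsRowMajorOrVector(result_transpose, result_stride, m, n). A hypothetical sketch of the kind of predicate that name and argument list suggest; this is our reading, not gemmlowp's actual implementation.

#include <cstdint>

// Hypothetical: a result can be treated as row-major when it is not
// transposed and result_stride is a usable row pitch, or when it degenerates
// to a vector, where the layout distinction does not matter.
bool IsRowMajorOrVectorSketch(bool transpose, std::int32_t stride,
                              std::int32_t rows, std::int32_t cols) {
  const bool is_vector = (rows == 1 || cols == 1);
  const bool is_row_major = !transpose && stride >= cols;
  return is_vector || is_row_major;
}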
/external/gemmlowp/meta/generators/ |
mul_Nx8_Mx8_neon.py |
  259   result_stride):
  263   result_stride)
  267   result_stride)
  273   0), emitter.Dereference(result_address, None), result_stride)
  278   emitter.Dereference(result_address, None), result_stride)
  374   ['std::int32_t', 'result_stride']]
  431   registers.MapParameter('result_stride'))
/external/libaom/libaom/aom_dsp/ |
noise_model.c |
  1340  const int result_stride = (num_blocks_w + 2) * block_size;  // local
  [all...]
/external/mesa3d/src/gallium/drivers/r600/ |
r600_query.c |
  1608  uint32_t result_stride;  // member in struct:__anon33504
  [all...]
/external/mesa3d/src/gallium/drivers/radeon/ |
r600_query.c |
  1649  uint32_t result_stride;  // member in struct:__anon33528
  [all...]
/external/deqp/external/openglcts/modules/gl/ |
gl4cGPUShaderFP64Tests.cpp |
  15144  const glw::GLuint result_stride = function_object.getResultStride();  // local
  [all...]