    Searched full:lhs_offset (Results 1 - 20 of 20)

  /external/gemmlowp/meta/
legacy_operations_common.h 20 Quantized8BitOperation(std::int32_t lhs_offset, std::int32_t rhs_offset,
23 : lhs_offset(lhs_offset),
30 std::int32_t lhs_offset; member in class:Quantized8BitOperation
39 FloatOperation(std::int32_t lhs_offset, std::int32_t rhs_offset,
41 : lhs_offset(lhs_offset),
46 std::int32_t lhs_offset; member in class:FloatOperation
53 Int32Operation(std::int32_t lhs_offset, std::int32_t rhs_offset)
54 : lhs_offset(lhs_offset), rhs_offset(rhs_offset) {
57 std::int32_t lhs_offset; member in class:Int32Operation
    [all...]
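
Note: the legacy_operations_common.h hits above are small base classes whose only job is to carry the quantization offsets for the legacy meta kernels. A minimal sketch of that pattern (the class name here is illustrative, not the exact gemmlowp declaration):

    #include <cstdint>

    // Illustrative base class that only stores the two quantization offsets,
    // in the spirit of Quantized8BitOperation / Int32Operation above.
    class OffsetCarryingOperation {
     public:
      OffsetCarryingOperation(std::int32_t lhs_offset, std::int32_t rhs_offset)
          : lhs_offset(lhs_offset), rhs_offset(rhs_offset) {}

     protected:
      std::int32_t lhs_offset;
      std::int32_t rhs_offset;
    };
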
legacy_multi_thread_gemv.h 33 GemvQuantized8BitOperation(std::int32_t lhs_offset, std::int32_t rhs_offset,
36 : Quantized8BitOperation(lhs_offset, rhs_offset, sum_offset, multiplier,
43 gemv_q8(scratch, lhs, rhs, n, k, lhs_offset, rhs_offset, sum_offset,
55 GemvFloatOperation(std::int32_t lhs_offset, std::int32_t rhs_offset,
57 : FloatOperation(lhs_offset, rhs_offset, result_offset) {}
63 gemv_f(scratch, lhs, rhs, n, k, lhs_offset, rhs_offset, result_offset,
75 GemvInt32Operation(std::int32_t lhs_offset, std::int32_t rhs_offset)
76 : Int32Operation(lhs_offset, rhs_offset) {}
82 gemv_i32(scratch, lhs, rhs, n, k, lhs_offset, rhs_offset, result);
102 std::int32_t k, std::int32_t lhs_offset,
    [all...]
legacy_multi_thread_gemm.h 61 GemmQuantized8BitOperation(std::int32_t lhs_offset, std::int32_t rhs_offset,
64 : Quantized8BitOperation(lhs_offset, rhs_offset, sum_offset, multiplier,
81 gemm_q8_strided(scratch, lhs, rhs, m, n, k, lhs_offset, rhs_offset,
93 GemmFloatOperation(std::int32_t lhs_offset, std::int32_t rhs_offset,
95 : FloatOperation(lhs_offset, rhs_offset, result_offset) {}
111 gemm_f_strided(scratch, lhs, rhs, m, n, k, lhs_offset, rhs_offset,
123 GemmInt32Operation(std::int32_t lhs_offset, std::int32_t rhs_offset)
124 : Int32Operation(lhs_offset, rhs_offset) {}
140 gemm_i32_strided(scratch, lhs, rhs, m, n, k, lhs_offset, rhs_offset, result,
162 std::int32_t lhs_offset, std::int32_t rhs_offset
    [all...]
legacy_single_thread_gemm.h 31 std::int32_t k, std::int32_t lhs_offset,
58 result_offset + k * lhs_offset * rhs_offset;
62 params.right_stream.multiplicative_sum_offset = lhs_offset;
76 std::int32_t lhs_offset, std::int32_t rhs_offset,
102 result_offset + k * lhs_offset * rhs_offset;
106 params.right_stream.multiplicative_sum_offset = lhs_offset;
124 std::int32_t k, std::int32_t lhs_offset,
150 params.left_stream.additive_sum_offset = k * lhs_offset * rhs_offset;
154 params.right_stream.multiplicative_sum_offset = lhs_offset;
165 std::int32_t lhs_offset, std::int32_t rhs_offset
    [all...]
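
Note: the legacy_single_thread_gemm.h hits set the stream sum offsets from the constant term of the offset expansion: an additive term of result_offset + k * lhs_offset * rhs_offset and a multiplicative term of lhs_offset applied to the right stream. A small self-contained sketch of that arithmetic (the struct and function names are hypothetical; only the formula mirrors the hits):

    #include <cstdint>
    #include <iostream>

    // Hypothetical holder for the two sum offsets computed in the hits above.
    struct StreamSumOffsets {
      std::int32_t additive_sum_offset;
      std::int32_t multiplicative_sum_offset;
    };

    // The constant part of the offset expansion goes into the additive term,
    // while lhs_offset multiplies the per-column sums of the rhs stream.
    StreamSumOffsets ComputeRightStreamOffsets(std::int32_t k,
                                               std::int32_t lhs_offset,
                                               std::int32_t rhs_offset,
                                               std::int32_t result_offset) {
      return {result_offset + k * lhs_offset * rhs_offset, lhs_offset};
    }

    int main() {
      const StreamSumOffsets offsets =
          ComputeRightStreamOffsets(8, -127, -100, 127);
      std::cout << offsets.additive_sum_offset << " "
                << offsets.multiplicative_sum_offset << "\n";
    }
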
test_gemm_correctness.cc 34 #define LHS_OFFSET (-127)
104 expected += (static_cast<int>(lhs[depth * i + k]) + LHS_OFFSET) *
139 expected += (static_cast<int>(lhs[depth * i + k]) + LHS_OFFSET) *
168 expected += (static_cast<int>(lhs[depth * i + k]) + LHS_OFFSET) *
188 expected += (static_cast<int>(lhs[depth * i + k]) + LHS_OFFSET) *
208 expected += (static_cast<int>(lhs[depth * i + k]) + LHS_OFFSET) *
227 expected += (static_cast<int>(lhs[depth * i + k]) + LHS_OFFSET) *
250 params->right_stream.multiplicative_sum_offset = LHS_OFFSET;
261 SUM_OFFSET * k + k * LHS_OFFSET * RHS_OFFSET;
278 SUM_OFFSET * k + k * LHS_OFFSET * RHS_OFFSET
    [all...]
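
Note: test_gemm_correctness.cc computes its expected values by applying LHS_OFFSET and RHS_OFFSET to each uint8 entry before multiplying. A standalone sketch of that reference loop; the indexing assumes row-major lhs (m x depth) and row-major rhs (n x depth), which may differ from the exact layouts in the test:

    #include <cstdint>
    #include <vector>

    // Reference uint8 GEMM with per-matrix offsets, in the spirit of the
    // correctness checks above.
    std::vector<std::int32_t> ReferenceGemm(
        const std::vector<std::uint8_t>& lhs,
        const std::vector<std::uint8_t>& rhs, int m, int n, int depth,
        std::int32_t lhs_offset, std::int32_t rhs_offset) {
      std::vector<std::int32_t> result(m * n, 0);
      for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
          std::int32_t expected = 0;
          for (int k = 0; k < depth; ++k) {
            expected +=
                (static_cast<std::int32_t>(lhs[depth * i + k]) + lhs_offset) *
                (static_cast<std::int32_t>(rhs[depth * j + k]) + rhs_offset);
          }
          result[n * i + j] = expected;
        }
      }
      return result;
    }
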
  /external/gemmlowp/doc/
low-precision.md 40 - A int32 lhs_offset, that will be added to each entry of the lhs matrix.
46 1. Cast lhs entries from uint8 to int32 and add lhs_offset to each of them.
64 be interpreted during the matrix computation: - lhs_offset - rhs_offset -
70 1. Cast lhs entries from uint8 to int32 and add lhs_offset to each of them.
97 scheme requires adding the lhs_offset and rhs_offset to each of the lhs and rhs
102 lhs_offset and rhs_offset in registers, which would eat into the register space
105 One may then consider adding the lhs_offset and rhs_offset once and for all to
118 Adding lhs_offset to each entry of `lhs`, means adding `lhs_offset * P` to
124 Thus, as far as handling `lhs_offset` and `rhs_offset` goes, the matrix produc
    [all...]
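
Note: the low-precision.md hits describe the quantized scheme (cast each uint8 entry to int32, add lhs_offset/rhs_offset, accumulate) and the trick of folding the offsets into row sums of lhs, column sums of rhs, and one constant term instead of touching every entry. A hedged, self-checking sketch of that equivalence; the matrix layouts and values are illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      const int m = 2, n = 2, depth = 3;
      const std::vector<std::uint8_t> lhs = {1, 2, 3, 4, 5, 6};     // m x depth, row-major
      const std::vector<std::uint8_t> rhs = {7, 8, 9, 10, 11, 12};  // depth x n, row-major
      const std::int32_t lhs_offset = -128, rhs_offset = -128;

      for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
          std::int32_t direct = 0, raw = 0, lhs_row_sum = 0, rhs_col_sum = 0;
          for (int k = 0; k < depth; ++k) {
            const std::int32_t l = lhs[depth * i + k];
            const std::int32_t r = rhs[n * k + j];
            direct += (l + lhs_offset) * (r + rhs_offset);  // naive scheme
            raw += l * r;                                   // offset-free product
            lhs_row_sum += l;
            rhs_col_sum += r;
          }
          // Offsets folded in after the raw product, as the doc describes.
          const std::int32_t folded = raw + rhs_offset * lhs_row_sum +
                                      lhs_offset * rhs_col_sum +
                                      depth * lhs_offset * rhs_offset;
          assert(direct == folded);
        }
      }
      return 0;
    }
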
public.md 21 `lhs_offset`, `rhs_offset` to them, is explained in
39 int lhs_offset, int rhs_offset,
49 &uint8_result_matrix, lhs_offset, rhs_offset, output_pipeline);
94 * `lhs_offset`, `rhs_offset` are constants added to each matrix entry in the
142 This is a variant where `lhs_offset` and `rhs_offset` may be vectors instead of
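
Note: the public.md hits show the main entry point taking lhs_offset and rhs_offset alongside the matrix maps. A hedged usage sketch assuming the headers and template parameters shown in those hits; an empty output pipeline leaves raw int32 accumulators, and the exact types should be checked against public/gemmlowp.h:

    #include <cstdint>
    #include <tuple>

    #include "public/gemmlowp.h"  // include path is an assumption

    // Illustrative call into the public GEMM entry point with matrix offsets;
    // treat this as a sketch, not the canonical invocation.
    void RunGemm(const std::uint8_t* lhs_data, const std::uint8_t* rhs_data,
                 std::int32_t* result_data, int rows, int depth, int cols,
                 int lhs_offset, int rhs_offset) {
      gemmlowp::GemmContext context;
      gemmlowp::MatrixMap<const std::uint8_t, gemmlowp::MapOrder::RowMajor> lhs(
          lhs_data, rows, depth);
      gemmlowp::MatrixMap<const std::uint8_t, gemmlowp::MapOrder::ColMajor> rhs(
          rhs_data, depth, cols);
      gemmlowp::MatrixMap<std::int32_t, gemmlowp::MapOrder::ColMajor> result(
          result_data, rows, cols);
      // Empty pipeline: the int32 accumulators are returned untouched.
      const auto output_pipeline = std::make_tuple();
      gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t,
                                       gemmlowp::DefaultL8R8BitDepthParams>(
          &context, lhs, rhs, &result, lhs_offset, rhs_offset, output_pipeline);
    }
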
quantization.md 283 mathematical trick to handle `lhs_offset`, `rhs_offset` that we alluded to
287 `lhs_offset`, `rhs_offset` were 0.
quantization_example.cc 332 const int lhs_offset = -lhs_qparams.zero_point;
364 &actual_uint8_result_map, lhs_offset, rhs_offset, output_pipeline);
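
Note: quantization_example.cc derives the offset from the quantization zero point (lhs_offset = -zero_point), so that adding lhs_offset to a uint8 value recovers its integer distance from the zero point: real ~= scale * (q - zero_point) = scale * (q + lhs_offset). A tiny sketch of that relation; QuantizationParams here is a stand-in for the struct used in the example:

    #include <cstdint>

    // Stand-in for the quantization parameters used in quantization_example.cc.
    struct QuantizationParams {
      float scale;
      std::uint8_t zero_point;
    };

    // real_value ~= scale * (quantized - zero_point)
    //            =  scale * (quantized + lhs_offset), with lhs_offset = -zero_point.
    float Dequantize(std::uint8_t quantized, const QuantizationParams& qparams) {
      const std::int32_t lhs_offset =
          -static_cast<std::int32_t>(qparams.zero_point);
      return qparams.scale *
             static_cast<float>(static_cast<std::int32_t>(quantized) + lhs_offset);
    }
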
  /external/gemmlowp/public/
gemmlowp.h 38 const LhsOffset& lhs_offset,
42 context, lhs, rhs, result, lhs_offset, rhs_offset, output_pipeline);
57 int lhs_offset, int rhs_offset,
61 const OffsetColDup lhs_offset_vector(lhs_offset, lhs.rows());
77 MatrixMap<Scalar, ResultOrder>* result, int lhs_offset,
81 context, lhs, rhs, result, lhs_offset, rhs_offset,
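
Note: the gemmlowp.h hits show the scalar-offset overload wrapping lhs_offset into an OffsetColDup vector so that the per-channel (vector-offset) code path can be reused. An illustrative stand-in for that idea; ConstantColumnVector is hypothetical and only mimics the role of the real OffsetColDup type:

    #include <cstdint>

    // A constant column vector that repeats one scalar offset across all rows,
    // letting a scalar-offset API delegate to the per-channel variant.
    class ConstantColumnVector {
     public:
      ConstantColumnVector(std::int32_t value, int size)
          : value_(value), size_(size) {}
      std::int32_t operator()(int /*row*/) const { return value_; }
      int size() const { return size_; }

     private:
      std::int32_t value_;
      int size_;
    };

    // Usage mirroring the hit above, with the stand-in in place of OffsetColDup:
    //   ConstantColumnVector lhs_offset_vector(lhs_offset, lhs_rows);
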
  /external/gemmlowp/internal/
unpack.h 98 const LhsOffset& lhs_offset, const RhsOffset& rhs_offset,
113 LoadForBroadcasting<RegisterBlockType>(lhs_offset, src_row);
134 const LhsOffset& lhs_offset, const RhsOffset& rhs_offset,
187 lhs_sums_of_each_slice, rhs_sums_of_each_slice, lhs_offset,
200 lhs_sums_of_each_slice, rhs_sums_of_each_slice, lhs_offset,
212 lhs_sums_of_each_slice, rhs_sums_of_each_slice, lhs_offset,
229 rhs_sums_of_each_slice, lhs_offset, rhs_offset, depth, r, c,
236 rhs_sums_of_each_slice, lhs_offset, rhs_offset, depth, r, c,
243 rhs_sums_of_each_slice, lhs_offset, rhs_offset, depth, r, c,
256 rhs_sums_of_each_slice, lhs_offset, rhs_offset, depth, r, c
    [all...]
dispatch_gemm_shape.h 159 const LhsOffset& lhs_offset, const RhsOffset& rhs_offset,
177 Transpose(rhs_offset), Transpose(lhs_offset),
184 lhs_offset, rhs_offset, output_pipeline);
single_thread_gemm.h 71 const LhsOffset& lhs_offset, const RhsOffset& rhs_offset,
148 lhs_offset.block(r, rs), rhs_offset.block(c, cs), output_pipeline);
multi_thread_gemm.h 456 lhs_offset(_lhs_offset),
490 lhs_offset.block(curr_result_block.start_row, rs),
504 const LhsOffset& lhs_offset; member in struct:gemmlowp::GemmWithPackedRhsTask
612 const LhsOffset& lhs_offset, const RhsOffset& rhs_offset,
635 lhs_offset, rhs_offset,
682 lhs_offset, rhs_offset, block_params, output_pipeline));
  /external/gemmlowp/meta/generators/
gemv_1xMxK_neon.py 43 'lhs_offset * rhs_offset * k + result_offset')
57 'lhs_offset * rhs_offset * k')
89 ['rhs_chunk', 'k', 'k', 'zipped_rhs_1', 'lhs_offset', 'const_offset'])
94 ['rhs_chunk', 'k', 'k', 'zipped_rhs_2', 'lhs_offset', 'const_offset'])
107 ['rhs_chunk', 'k', 'k', 'zipped_rhs_1', 'lhs_offset', 'const_offset'])
112 ['rhs_chunk', 'k', 'k', 'zipped_rhs_2', 'lhs_offset', 'const_offset'])
120 ['rhs_chunk', 'k', 'k', 'zipped_rhs_1', 'lhs_offset', 'const_offset'])
150 ['std::int32_t', 'k'], ['std::int32_t', 'lhs_offset'],
quantized_mul_kernels_common.py 58 lhs_offset = _ReadParams(emitter, registers, lhs, kernel_m, 4)
71 self.lhs_offsets = _Duplicate(emitter, registers, kernel_m, lhs_offset)
79 for (row, lhs_offset) in zip(data, self.lhs_offsets):
81 emitter.EmitVAdd('s32', row_register, row_register, lhs_offset)
134 lhs_offset = _ReadParams(emitter, registers, lhs, kernel_m, 4)
136 self.lhs_offsets = _Duplicate(emitter, registers, kernel_m, lhs_offset)
144 for (row, lhs_offset) in zip(data, self.lhs_offsets):
146 emitter.EmitVAdd('s32', row_register, row_register, lhs_offset)
166 lhs_offset = _ReadParams(emitter, registers, lhs, kernel_m, 4)
171 self.lhs_offsets = _Duplicate(emitter, registers, kernel_m, lhs_offset)
    [all...]
gemm_NxMxK_neon.py 51 'lhs_offset * rhs_offset * k + result_offset')
68 'lhs_offset * rhs_offset * k')
85 ['rhs_chunk', 'k', 'k', 'zipped_rhs_chunk', 'lhs_offset', 0])
93 ['rhs_chunk', 'k', 'k', 'zipped_rhs_chunk', 'lhs_offset', 0])
184 ['std::int32_t', 'lhs_offset'], ['std::int32_t', 'rhs_offset']]
  /external/gemmlowp/eight_bit_int_gemm/
eight_bit_int_gemm.cc 78 const int lhs_offset = a_offset; local
99 context, lhs, rhs, &result, lhs_offset, rhs_offset, result_offset, \
116 const int lhs_offset = a_offset; local
136 context, lhs, rhs, &result, lhs_offset, rhs_offset, empty_pipeline); \
254 std::int32_t lhs_offset, std::int32_t rhs_offset,
264 scratch->buffer(), lhs, rhs, m, n, k, lhs_offset,
271 lhs_offset, sum_offset, multiplicative_offset,
280 std::int32_t lhs_offset, std::int32_t rhs_offset,
288 scratch->buffer(), lhs, rhs, m, n, k, lhs_offset,
294 lhs_offset, result_offset, result)
    [all...]
  /external/gemmlowp/test/
test.cc 133 MatrixMap<Scalar, ResultOrder>* result, int lhs_offset,
144 const OffsetColDup lhs_offset_vector(lhs_offset, rows);
173 MatrixMap<Scalar, ResultOrder>* result, int lhs_offset,
185 const OffsetColDup lhs_offset_vector(lhs_offset, rows);
210 MatrixMap<Scalar, ResultOrder>* result, int lhs_offset,
215 ResultOrder>(context, lhs, rhs, result, lhs_offset,
244 MatrixMap<Scalar, ResultOrder>* result, int lhs_offset,
253 lhs.cols(), lhs.data(), lhs_offset, lhs.stride(), rhs.data(),
270 MatrixMap<Scalar, ResultOrder>* result, int lhs_offset,
275 rhs.cols(), lhs.cols(), lhs.data(), lhs_offset,
1208 const int lhs_offset = 12; local
    [all...]
correctness_meta_gemm.cc 64 std::int32_t lhs_offset, std::int32_t rhs_offset,
74 (static_cast<std::int32_t>(left[depth * i + k]) + lhs_offset) *
107 std::int32_t lhs_offset, std::int32_t rhs_offset,
115 (static_cast<std::int32_t>(left[depth * i + k]) + lhs_offset) *
141 std::int32_t lhs_offset, std::int32_t rhs_offset) {
148 (static_cast<std::int32_t>(left[depth * i + k]) + lhs_offset) *
