    Searched defs:offset_const (Results 1 - 11 of 11)

  /external/mesa3d/src/compiler/nir/
nir_lower_atomics.c 105 nir_load_const_instr *offset_const = local
107 offset_const->value.u32[0] = instr->variables[0]->var->data.offset;
109 nir_instr_insert_before(&instr->instr, &offset_const->instr);
111 nir_ssa_def *offset_def = &offset_const->def;
121 offset_const->value.u32[0] += deref_array->base_offset *
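The nir_lower_atomics.c hit shows the usual NIR lowering pattern: a load_const instruction is created to hold the atomic counter variable's byte offset, it is inserted before the intrinsic, and constant deref_array indices are folded straight into that constant. The sketch below is a self-contained, hypothetical mock of the folding arithmetic only; the struct and field names are invented for illustration, and the real pass operates on the NIR IR types visible in the snippet.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the NIR objects named in the hit above. */
    struct mock_var         { uint32_t data_offset; };  /* var->data.offset           */
    struct mock_deref_array { uint32_t base_offset; int has_const_index; };
    struct mock_load_const  { uint32_t value_u32; };    /* offset_const->value.u32[0] */

    /* Fold the variable's byte offset plus any constant array indices into a
     * single constant, mirroring how the pass accumulates into offset_const
     * before inserting it ahead of the atomic intrinsic. */
    static uint32_t fold_counter_offset(const struct mock_var *var,
                                        const struct mock_deref_array *derefs,
                                        int n, uint32_t array_stride)
    {
       struct mock_load_const offset_const = { .value_u32 = var->data_offset };
       for (int i = 0; i < n; i++) {
          if (derefs[i].has_const_index)
             offset_const.value_u32 += derefs[i].base_offset * array_stride;
       }
       return offset_const.value_u32;
    }

    int main(void)
    {
       struct mock_var counter = { .data_offset = 16 };  /* counter at byte 16 */
       struct mock_deref_array chain[] = { { 3, 1 } };   /* counters[3]        */
       printf("folded offset = %u\n", fold_counter_offset(&counter, chain, 1, 4));
       return 0;
    }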
  /external/mesa3d/src/intel/compiler/
brw_nir_analyze_ubo_ranges.c 134 nir_const_value *offset_const = nir_src_as_const_value(intrin->src[1]); local
136 if (block_const && offset_const) {
138 const int offset = offset_const->u32[0] / 32;
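The brw_nir_analyze_ubo_ranges.c hit only considers a UBO load when both the block index and the byte offset resolve to compile-time constants (the block_const && offset_const guard), and it then turns the byte offset into a 32-byte chunk index via offset_const->u32[0] / 32. A hypothetical scalar sketch of that guard and conversion, with invented names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CHUNK_BYTES 32   /* matches the / 32 in the hit above */

    /* Returns true and writes the chunk index only when both operands are
     * known constants; an indirect block or offset cannot be analyzed. */
    static bool ubo_chunk_index(const uint32_t *block_const,
                                const uint32_t *offset_const,
                                int *out_chunk)
    {
       if (block_const == NULL || offset_const == NULL)
          return false;
       *out_chunk = (int)(*offset_const / CHUNK_BYTES);
       return true;
    }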
  /external/libaom/libaom/av1/common/x86/
jnt_convolve_sse2.c 44 const __m128i offset_const = _mm_set1_epi16(offset); local
73 const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);
83 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
129 const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);
140 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
174 const __m128i offset_const = _mm_set1_epi16(offset); local
221 __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);
231 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
250 res_unsigned = _mm_add_epi16(res_16b, offset_const);
260 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
419 const __m128i offset_const = _mm_set1_epi16(offset); local
    [all...]
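Across the libaom jnt_convolve (distance-weighted compound prediction) kernels listed here, offset_const serves one purpose: it biases the signed 16-bit intermediate convolution result into unsigned range before the result is averaged with the second prediction, and the rounding helper subtracts the same bias back out. Below is a simplified, self-contained SSE2 sketch of that bias/average/round sequence; the rounding shift and the unweighted average are assumptions chosen for illustration, not libaom's exact parameters.

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    #define ROUND_BITS 4  /* assumed rounding shift for the sketch */

    /* Bias eight signed 16-bit convolution results into unsigned range,
     * average them with the other reference, then remove the bias and round,
     * mirroring the res_unsigned / comp_avg / rounding flow in the hits. */
    static __m128i jnt_round_sketch(__m128i res_16b, __m128i ref_16b, int offset)
    {
       const __m128i offset_const   = _mm_set1_epi16((int16_t)offset);
       const __m128i rounding_const = _mm_set1_epi16((1 << ROUND_BITS) >> 1);

       /* res_unsigned = res + offset */
       const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);

       /* Plain (unweighted) compound average of the two predictions. */
       const __m128i comp_avg_res = _mm_avg_epu16(res_unsigned, ref_16b);

       /* Remove the bias, add the rounding constant, shift back down. */
       const __m128i unbias = _mm_sub_epi16(comp_avg_res, offset_const);
       return _mm_srai_epi16(_mm_add_epi16(unbias, rounding_const), ROUND_BITS);
    }

The point of the bias is that the intermediate prediction can be stored in an unsigned 16-bit compound buffer between the two passes even though the filtered value itself may be negative.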
jnt_convolve_ssse3.c 50 const __m128i offset_const = _mm_set1_epi16(offset); local
205 const __m128i res_unsigned = _mm_add_epi16(res_16b, offset_const);
216 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
convolve_2d_sse2.c 387 const __m128i offset_const = _mm_set1_epi16(offset); local
403 const __m128i res_unsigned_lo = _mm_add_epi16(res_lo, offset_const);
406 const __m128i res_unsigned_hi = _mm_add_epi16(res_hi, offset_const);
417 &comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
423 &comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);
445 const __m128i res_unsigned = _mm_add_epi16(res, offset_const);
454 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
highbd_convolve_2d_sse4.c 51 const __m128i offset_const = _mm_set1_epi32(offset); local
76 _mm_add_epi32(res_32b_lo, offset_const);
84 _mm_add_epi32(res_32b_hi, offset_const);
91 &comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
93 &comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);
130 const __m128i res_unsigned_lo = _mm_add_epi32(res_32b, offset_const);
134 _mm_add_epi32(res_32b_hi, offset_const);
142 &comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
144 &comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);
197 const __m128i offset_const = _mm_set1_epi32(offset); local
    [all...]
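The high bit-depth kernels keep their intermediates at 32 bits, so offset_const becomes an epi32 splat and the same bias/average/round chain runs on 32-bit lanes, which is what the _mm_add_epi32 / _mm256_add_epi32 hits show. A hypothetical 32-bit sketch of the same steps, with the rounding constants again assumed:

    #include <emmintrin.h>  /* SSE2 is enough for the 32-bit adds and shifts */

    #define HBD_ROUND_BITS 4  /* assumed rounding shift for the sketch */

    /* 32-bit flavor of the bias/average/round pattern used by the high
     * bit-depth kernels; offset_const is an epi32 splat here. */
    static __m128i highbd_jnt_round_sketch(__m128i res_32b, __m128i ref_32b,
                                           int offset)
    {
       const __m128i offset_const   = _mm_set1_epi32(offset);
       const __m128i rounding_const = _mm_set1_epi32((1 << HBD_ROUND_BITS) >> 1);

       const __m128i res_unsigned = _mm_add_epi32(res_32b, offset_const);

       /* Simple 1:1 compound average; the real kernels also support distance
        * weights, which this sketch leaves out. */
       const __m128i comp_avg =
           _mm_srai_epi32(_mm_add_epi32(res_unsigned, ref_32b), 1);

       const __m128i unbias = _mm_sub_epi32(comp_avg, offset_const);
       return _mm_srai_epi32(_mm_add_epi32(unbias, rounding_const), HBD_ROUND_BITS);
    }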
highbd_jnt_convolve_avx2.c 52 const __m256i offset_const = _mm256_set1_epi32(offset); local
79 _mm256_add_epi32(res_32b_lo, offset_const);
87 _mm256_add_epi32(res_32b_hi, offset_const);
94 &comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
96 &comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);
138 _mm256_add_epi32(res_32b, offset_const);
145 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
183 _mm256_add_epi32(res_32b_lo, offset_const);
191 _mm256_add_epi32(res_32b_hi, offset_const);
198 highbd_convolve_rounding(&comp_avg_res_lo, &offset_const,
272 const __m256i offset_const = _mm256_set1_epi32(offset); local
499 const __m256i offset_const = _mm256_set1_epi32(offset); local
667 const __m256i offset_const = _mm256_set1_epi32(offset); local
    [all...]
highbd_jnt_convolve_sse4.c 50 const __m128i offset_const = _mm_set1_epi32(offset); local
111 __m128i res_unsigned_lo_0 = _mm_add_epi32(res_a_round0, offset_const);
112 __m128i res_unsigned_lo_1 = _mm_add_epi32(res_a_round1, offset_const);
132 highbd_convolve_rounding_sse2(&comp_avg_res_0, &offset_const,
135 highbd_convolve_rounding_sse2(&comp_avg_res_1, &offset_const,
175 __m128i res_unsigned_hi_0 = _mm_add_epi32(res_b_round0, offset_const);
176 __m128i res_unsigned_hi_1 = _mm_add_epi32(res_b_round1, offset_const);
203 highbd_convolve_rounding_sse2(&comp_avg_res_lo_0, &offset_const,
206 highbd_convolve_rounding_sse2(&comp_avg_res_lo_1, &offset_const,
209 highbd_convolve_rounding_sse2(&comp_avg_res_hi_0, &offset_const,
294 const __m128i offset_const = _mm_set1_epi32(offset); local
    [all...]
jnt_convolve_avx2.c 55 const __m256i offset_const = _mm256_set1_epi16(offset); local
96 const __m256i res_unsigned = _mm256_add_epi16(res, offset_const);
106 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
151 const __m256i res_unsigned = _mm256_add_epi16(res, offset_const);
161 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
211 const __m256i offset_const = _mm256_set1_epi16(offset); local
303 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
368 convolve_rounding(&comp_avg_res_lo, &offset_const,
372 convolve_rounding(&comp_avg_res_hi, &offset_const,
482 &comp_avg_res, &offset_const, &rounding_const, rounding_shift);
615 const __m256i offset_const = _mm256_set1_epi16(offset); local
829 const __m256i offset_const = _mm256_set1_epi16(offset); local
    [all...]
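The AVX2 kernels in jnt_convolve_avx2.c run the identical recipe at twice the width: offset_const is a 256-bit splat and the bias, averaging, and rounding all operate on __m256i lanes. A minimal sketch of just the biasing step, since the rest matches the SSE2 sketch earlier:

    #include <immintrin.h>  /* AVX2 */
    #include <stdint.h>

    /* Bias sixteen 16-bit results at once; the remaining average/round steps
     * are the same as in the SSE2 sketch, just on 256-bit registers. */
    static __m256i bias_res_avx2(__m256i res, int offset)
    {
       const __m256i offset_const = _mm256_set1_epi16((int16_t)offset);
       return _mm256_add_epi16(res, offset_const);
    }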
  /external/libaom/libaom/av1/common/arm/
convolve_neon.c 125 const int32x4_t round_shift_vec, const int32x4_t offset_const,
140 sum0 = vaddq_s32(sum0, offset_const);
154 const int32x4_t round_shift_vec, const int32x4_t offset_const,
178 sum0 = vaddq_s32(sum0, offset_const);
179 sum1 = vaddq_s32(sum1, offset_const);
1155 const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits); local
    [all...]
jnt_convolve_neon.c 581 const int32x4_t offset_const = vdupq_n_s32(1 << offset); local
638 round_shift_vec, offset_const);
640 round_shift_vec, offset_const);
642 round_shift_vec, offset_const);
644 round_shift_vec, offset_const);
683 round_shift_vec, offset_const);
    [all...]
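On Arm, the same constant appears as an int32x4_t built with vdupq_n_s32(1 << offset_bits) and added to the vertical-filter accumulators with vaddq_s32. A hypothetical NEON sketch of that single step:

    #include <arm_neon.h>

    /* Add the compound-prediction bias to four 32-bit accumulator lanes, as
     * the NEON filter helpers do before narrowing and storing the result. */
    static int32x4_t add_offset_neon(int32x4_t sum, int offset_bits)
    {
       const int32x4_t offset_const = vdupq_n_s32(1 << offset_bits);
       return vaddq_s32(sum, offset_const);
    }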
