/external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/

  vp8_mse16x16_neon.c
     68  q7s32 = vaddq_s32(q7s32, q8s32);
     69  q9s32 = vaddq_s32(q9s32, q10s32);
     70  q10s32 = vaddq_s32(q7s32, q9s32);
    123  q7s32 = vaddq_s32(q7s32, q8s32);
    124  q9s32 = vaddq_s32(q9s32, q10s32);
    125  q9s32 = vaddq_s32(q7s32, q9s32);

  vp8_shortwalsh4x4_neon.c
     94  q0s32 = vaddq_s32(q8s32, q9s32);
     95  q1s32 = vaddq_s32(q11s32, q10s32);
    114  q8s32 = vaddq_s32(q0s32, q15s32);
    115  q9s32 = vaddq_s32(q1s32, q15s32);
    116  q10s32 = vaddq_s32(q2s32, q15s32);
    117  q11s32 = vaddq_s32(q3s32, q15s32);
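The vp8 encoder matches above share one accumulate-then-combine shape: per-lane 32-bit sums are kept in several int32x4_t registers and merged with vaddq_s32 before a final horizontal reduction. A minimal sketch of that pattern, with illustrative names and a 4x4 block of 16-bit differences (not the libvpx routine itself):

    #include <arm_neon.h>

    /* Sketch only: sum of squared 16-bit differences for a 4x4 block.
     * The four lanes of the returned vector still have to be folded to a
     * single scalar by the caller. */
    static int32x4_t sse_4x4_sketch(const int16_t diff[16]) {
        int32x4_t acc0 = vdupq_n_s32(0);
        int32x4_t acc1 = vdupq_n_s32(0);
        for (int i = 0; i < 4; i += 2) {
            const int16x4_t d0 = vld1_s16(diff + (i + 0) * 4);
            const int16x4_t d1 = vld1_s16(diff + (i + 1) * 4);
            acc0 = vaddq_s32(acc0, vmull_s16(d0, d0));  /* widen and square */
            acc1 = vaddq_s32(acc1, vmull_s16(d1, d1));
        }
        return vaddq_s32(acc0, acc1);  /* the vaddq_s32 merge seen at lines 68-70 above */
    }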
/external/libhevc/common/arm/

  ihevc_weighted_pred_neon_intr.c
    155  i4_tmp1_t = vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t);
    159  i4_tmp2_t = vaddq_s32(i4_tmp2_t, tmp_lvl_shift_t);
    302  i4_tmp1_t = vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t.val[0]);
    306  i4_tmp2_t = vaddq_s32(i4_tmp2_t, tmp_lvl_shift_t.val[0]);
    461  i4_tmp1_t1 = vaddq_s32(i4_tmp1_t1, i4_tmp1_t2);
    464  i4_tmp1_t1 = vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t);
    469  i4_tmp2_t1 = vaddq_s32(i4_tmp2_t1, i4_tmp2_t2);
    472  i4_tmp2_t1 = vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t);
    647  i4_tmp1_t1 = vaddq_s32(i4_tmp1_t1, i4_tmp1_t2);
    650  i4_tmp1_t1 = vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t.val[0]) [all...]

  ihevc_weighted_pred_bi_default.s
    185  vqadd.s16 d18,d18,d0 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
    187  vqadd.s16 d20,d8,d9 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
    188  vqadd.s16 d19,d20,d0 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
    193  vqadd.s16 d30,d30,d0 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t) iii iteration
    196  vqadd.s16 d18,d24,d25 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
    201  vst1.32 {d30[0]},[r14],r5 @store pu1_dst iii iteration @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration
    235  vqadd.s16 d18,d18,d0 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
    237  vqadd.s16 d20,d8,d9 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
    238  vqadd.s16 d19,d20,d0 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
    275  vqadd.s16 q12,q12,q0 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t [all...]

  ihevc_weighted_pred_bi.s
    195  vadd.s32 q2,q2,q4 @vaddq_s32(i4_tmp1_t1, i4_tmp1_t2)
    201  vadd.s32 q2,q2,q15 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
    209  vadd.s32 q5,q5,q6 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) ii iteration
    214  vadd.s32 q5,q5,q15 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) ii iteration
    216  vadd.s32 q7,q7,q8 @vaddq_s32(i4_tmp1_t1, i4_tmp1_t2) iii iteration
    221  vadd.s32 q7,q7,q15 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t) iii iteration
    229  vadd.s32 q9,q9,q10 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
    232  vadd.s32 q9,q9,q15 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration

  ihevc_weighted_pred_uni.s
    161  vadd.i32 q2,q2,q15 @vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t)
    168  vadd.i32 q3,q3,q15 @vaddq_s32(i4_tmp2_t, tmp_lvl_shift_t) ii iteration
    173  vadd.i32 q5,q5,q15 @vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t) iii iteration
    184  vadd.i32 q6,q6,q15 @vaddq_s32(i4_tmp2_t, tmp_lvl_shift_t) iv iteration
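The libhevc matches all implement the HEVC weighted-prediction step: the widened prediction is multiplied by a weight, vaddq_s32 adds the combined offset/rounding term (tmp_lvl_shift), and the result is shifted down and narrowed. A hedged intrinsics sketch of the uni-directional flavor; the names, parameterization, and final saturating narrow are illustrative rather than the libhevc code:

    #include <arm_neon.h>

    /* Sketch only: one uni-directional weighted-prediction step.
     * 'wgt' and 'shift' stand in for the weight and down-shift parameters;
     * 'tmp_lvl_shift' is the precomputed offset-plus-rounding term. */
    static int16x4_t weighted_pred_sketch(int16x4_t pred, int16_t wgt,
                                          int32x4_t tmp_lvl_shift, int shift) {
        int32x4_t t = vmull_n_s16(pred, wgt);        /* pred * weight, widened to 32 bits */
        t = vaddq_s32(t, tmp_lvl_shift);             /* the vaddq_s32 step in the matches above */
        t = vshlq_s32(t, vdupq_n_s32(-shift));       /* arithmetic right shift by 'shift' */
        return vqmovn_s32(t);                        /* narrow with saturation */
    }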
/external/libhevc/common/arm64/

  ihevc_weighted_pred_bi_default.s
    200  sqadd v18.4h,v18.4h,v0.4h //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
    202  sqadd v20.4h,v1.4h,v3.4h //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
    203  sqadd v19.4h,v20.4h,v0.4h //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
    209  sqadd v30.4h,v30.4h,v0.4h //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t) iii iteration
    212  sqadd v18.4h,v24.4h,v25.4h //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
    218  st1 {v30.s}[0],[x14],x5 //store pu1_dst iii iteration //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration
    255  sqadd v18.4h,v18.4h,v0.4h //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
    257  sqadd v20.4h,v1.4h,v3.4h //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
    258  sqadd v19.4h,v20.4h,v0.4h //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
    297  sqadd v24.8h,v24.8h,v0.8h //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t [all...]

  ihevc_weighted_pred_bi.s
    226  add v4.4s, v4.4s , v5.4s //vaddq_s32(i4_tmp1_t1, i4_tmp1_t2)
    232  add v4.4s, v4.4s , v30.4s //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
    240  add v6.4s, v6.4s , v17.4s //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) ii iteration
    245  add v6.4s, v6.4s , v30.4s //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) ii iteration
    247  add v19.4s, v19.4s , v16.4s //vaddq_s32(i4_tmp1_t1, i4_tmp1_t2) iii iteration
    254  add v19.4s, v19.4s , v30.4s //vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t) iii iteration
    263  add v18.4s, v18.4s , v20.4s //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
    266  add v18.4s, v18.4s , v30.4s //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration

  ihevc_weighted_pred_uni.s
    177  add v4.4s, v4.4s , v30.4s //vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t)
    185  add v6.4s, v6.4s , v30.4s //vaddq_s32(i4_tmp2_t, tmp_lvl_shift_t) ii iteration
    190  add v7.4s, v7.4s , v30.4s //vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t) iii iteration
    203  add v16.4s, v16.4s , v30.4s //vaddq_s32(i4_tmp2_t, tmp_lvl_shift_t) iv iteration
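Note that both ihevc_weighted_pred_bi_default.s files above perform the addition on 16-bit halves with saturating adds (vqadd.s16 / sqadd) even though their comments name vaddq_s32. An intrinsics equivalent of that step might look like this sketch; the names are illustrative, not the libhevc identifiers:

    #include <arm_neon.h>

    /* Sketch only: the default bi-prediction add done as 16-bit saturating adds. */
    static int16x4_t bi_default_add_sketch(int16x4_t pred0, int16x4_t pred1,
                                           int16x4_t lvl_shift) {
        const int16x4_t t = vqadd_s16(pred0, pred1);  /* i4_tmp1_t1 + i4_tmp1_t2 */
        return vqadd_s16(t, lvl_shift);               /* + tmp_lvl_shift */
    }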
/external/chromium_org/third_party/skia/src/opts/

  SkBitmapProcState_matrix_neon.h
     80  hbase = vaddq_s32(lbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
     92  lbase = vaddq_s32 (lbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
     93  hbase = vaddq_s32 (hbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
    150  x2base = vaddq_s32(xbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
    151  y2base = vaddq_s32(ybase, vdupq_n_s32(SkFractionalIntToFixed(dy4)));
    163  xbase = vaddq_s32(xbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
    164  ybase = vaddq_s32(ybase, vdupq_n_s32(SkFractionalIntToFixed(dy8)));
    165  x2base = vaddq_s32(x2base, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
    166  y2base = vaddq_s32(y2base, vdupq_n_s32(SkFractionalIntToFixed(dy8)));

  SkBitmapProcState_matrixProcs_neon.cpp
    196  wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));
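The Skia matches use vaddq_s32 to step fixed-point sample coordinates: one vector holds four consecutive positions, a second holds the next four, and each loop iteration advances both by eight steps. A rough sketch of that pattern with illustrative names (fx and dx stand for fixed-point start and step values; this is not the Skia code itself):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch only: emit 'count' (a multiple of 8) fixed-point x positions. */
    static void step_coords_sketch(int32_t fx, int32_t dx, int32_t *out, int count) {
        const int32_t first4[4] = { 0, dx, 2 * dx, 3 * dx };
        int32x4_t lbase = vaddq_s32(vdupq_n_s32(fx), vld1q_s32(first4));
        int32x4_t hbase = vaddq_s32(lbase, vdupq_n_s32(4 * dx));   /* like line 80 above */
        for (int i = 0; i + 8 <= count; i += 8) {
            vst1q_s32(out + i, lbase);
            vst1q_s32(out + i + 4, hbase);
            lbase = vaddq_s32(lbase, vdupq_n_s32(8 * dx));         /* like lines 92-93 */
            hbase = vaddq_s32(hbase, vdupq_n_s32(8 * dx));
        }
    }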
/external/skia/src/opts/

  SkBitmapProcState_matrix_neon.h
     80  hbase = vaddq_s32(lbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
     92  lbase = vaddq_s32 (lbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
     93  hbase = vaddq_s32 (hbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
    150  x2base = vaddq_s32(xbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
    151  y2base = vaddq_s32(ybase, vdupq_n_s32(SkFractionalIntToFixed(dy4)));
    163  xbase = vaddq_s32(xbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
    164  ybase = vaddq_s32(ybase, vdupq_n_s32(SkFractionalIntToFixed(dy8)));
    165  x2base = vaddq_s32(x2base, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
    166  y2base = vaddq_s32(y2base, vdupq_n_s32(SkFractionalIntToFixed(dy8)));

  SkBitmapProcState_matrixProcs_neon.cpp
    196  wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/neon/

  variance_neon.c
     76  q10s32 = vaddq_s32(q10s32, q9s32);
    155  q10s32 = vaddq_s32(q10s32, q9s32);
    220  q10s32 = vaddq_s32(q10s32, q9s32);
    305  q10s32 = vaddq_s32(q10s32, q9s32);

  vp8_subpixelvariance_neon.c
    474  q10s32 = vaddq_s32(q10s32, q9s32);
    610  q10s32 = vaddq_s32(q10s32, q9s32);
    740  q10s32 = vaddq_s32(q10s32, q9s32);
    [all...]
/external/eigen/Eigen/src/Core/arch/NEON/

  PacketMath.h
    106  return vaddq_s32(pset1<Packet4i>(a), countdown);
    110  template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
    295  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
    296  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
    297  sum = vaddq_s32(sum1, sum2);
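In the Eigen matches, padd<Packet4i> is simply vaddq_s32, and line 106 is the tail of plset, which produces {a, a+1, a+2, a+3}. A small sketch of that plset-style construction, assuming the 'countdown' packet holds {0, 1, 2, 3}:

    #include <arm_neon.h>

    /* Sketch only: broadcast 'a' and add a 0..3 ramp, as plset does. */
    static int32x4_t plset_sketch(int32_t a) {
        const int32_t ramp[4] = { 0, 1, 2, 3 };
        return vaddq_s32(vdupq_n_s32(a), vld1q_s32(ramp));
    }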
/external/chromium_org/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/

  pitch_estimator_c.c
     83  int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
/external/chromium_org/third_party/libwebp/dsp/

  enc_neon.c
    319  const int16x4_t tmp1 = vshrn_n_s32(vaddq_s32(a2_p_a3, kCst1812), 9);
    320  const int16x4_t tmp3 = vshrn_n_s32(vaddq_s32(a3_m_a2, kCst937), 9);
    513  tmp0.val[0] = vaddq_s32(a0, a1);
    514  tmp0.val[1] = vaddq_s32(a3, a2);
    524  const int32x4_t a0 = vaddq_s32(tmp1.val[0], tmp1.val[2]);
    525  const int32x4_t a1 = vaddq_s32(tmp1.val[1], tmp1.val[3]);
    601  const int32x4_t a0 = vaddq_s32(rows.val[0], rows.val[1]);
    603  const int32x4_t a1 = vaddq_s32(rows.val[2], rows.val[3]);
    608  const int32x4_t b0 = vqabsq_s32(vaddq_s32(a0, a1)); // abs(a0 + a1)
    609  const int32x4_t b1 = vqabsq_s32(vaddq_s32(a3, a2)); // abs(a3 + a2 [all...]

  dec_neon.c
    [all...]
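The libwebp matches at lines 319-320 show a common fixed-point idiom: vaddq_s32 adds a rounding (or bias) constant before vshrn_n_s32 shifts right and narrows to 16 bits. A minimal sketch of the idiom with a plain rounding constant; the actual kCst1812 / kCst937 values are transform-specific terms, not this simple constant:

    #include <arm_neon.h>

    /* Sketch only: round and narrow a 32-bit intermediate by 9 bits. */
    static int16x4_t round_shift_narrow_sketch(int32x4_t v) {
        const int32x4_t kRound = vdupq_n_s32(1 << 8);   /* +0.5 before the >> 9 */
        return vshrn_n_s32(vaddq_s32(v, kRound), 9);
    }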
/external/webp/src/dsp/

  enc_neon.c
    319  const int16x4_t tmp1 = vshrn_n_s32(vaddq_s32(a2_p_a3, kCst1812), 9);
    320  const int16x4_t tmp3 = vshrn_n_s32(vaddq_s32(a3_m_a2, kCst937), 9);
    513  tmp0.val[0] = vaddq_s32(a0, a1);
    514  tmp0.val[1] = vaddq_s32(a3, a2);
    524  const int32x4_t a0 = vaddq_s32(tmp1.val[0], tmp1.val[2]);
    525  const int32x4_t a1 = vaddq_s32(tmp1.val[1], tmp1.val[3]);
    601  const int32x4_t a0 = vaddq_s32(rows.val[0], rows.val[1]);
    603  const int32x4_t a1 = vaddq_s32(rows.val[2], rows.val[3]);
    608  const int32x4_t b0 = vqabsq_s32(vaddq_s32(a0, a1)); // abs(a0 + a1)
    609  const int32x4_t b1 = vqabsq_s32(vaddq_s32(a3, a2)); // abs(a3 + a2 [all...]

  dec_neon.c
    [all...]
/frameworks/av/services/audioflinger/

  AudioResamplerSinc.cpp
    [all...]
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/

  vp9_variance_neon.c
     76  *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
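In the vp9 match, vaddq_s32 merges the low and high partial sum-of-squares accumulators before a helper folds the four lanes to a scalar. A sketch of what a horizontal_add_s32x4-style helper can do; this is an assumption about the helper, not its libvpx definition:

    #include <arm_neon.h>

    /* Sketch only: fold the four 32-bit lanes of 'v' into one scalar. */
    static int32_t horizontal_add_s32x4_sketch(int32x4_t v) {
        int32x2_t d = vadd_s32(vget_low_s32(v), vget_high_s32(v));
        d = vpadd_s32(d, d);   /* pairwise add the remaining two lanes */
        return vget_lane_s32(d, 0);
    }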
/external/webrtc/src/modules/audio_coding/codecs/isac/fix/source/

  pitch_estimator.c
    222  int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
/external/clang/test/CodeGen/

  arm64-arguments.c
    229  int32x4_t v = vaddq_s32(*(int32x4_t *)&s1,
    246  int32x4_t v = vaddq_s32(*(int32x4_t *)&s1,