/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>
#include <stdlib.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"
#include "vp9/common/vp9_blockd.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/arm/vpx_convolve8_neon.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_scale/yv12config.h"

// Note: The scaling functions could write extra rows and columns in dst, which
// exceed the right and bottom boundaries of the destination frame. We rely on
// the following frame extension function to fix these rows and columns.

static INLINE void scale_plane_2_to_1_phase_0(const uint8_t *src,
                                              const int src_stride,
                                              uint8_t *dst,
                                              const int dst_stride, const int w,
                                              const int h) {
  const int max_width = (w + 15) & ~15;
  int y = h;

  assert(w && h);

  do {
    int x = max_width;
    do {
      const uint8x16x2_t s = vld2q_u8(src);
      vst1q_u8(dst, s.val[0]);
      src += 32;
      dst += 16;
      x -= 16;
    } while (x);
    src += 2 * (src_stride - max_width);
    dst += dst_stride - max_width;
  } while (--y);
}

static INLINE void scale_plane_4_to_1_phase_0(const uint8_t *src,
                                              const int src_stride,
                                              uint8_t *dst,
                                              const int dst_stride, const int w,
                                              const int h) {
  const int max_width = (w + 15) & ~15;
  int y = h;

  assert(w && h);

  do {
    int x = max_width;
    do {
      const uint8x16x4_t s = vld4q_u8(src);
      vst1q_u8(dst, s.val[0]);
      src += 64;
      dst += 16;
      x -= 16;
    } while (x);
    src += 4 * (src_stride - max_width);
    dst += dst_stride - max_width;
  } while (--y);
}

static INLINE void scale_plane_bilinear_kernel(
    const uint8x16_t in0, const uint8x16_t in1, const uint8x16_t in2,
    const uint8x16_t in3, const uint8x8_t coef0, const uint8x8_t coef1,
    uint8_t *const dst) {
  const uint16x8_t h0 = vmull_u8(vget_low_u8(in0), coef0);
  const uint16x8_t h1 = vmull_u8(vget_high_u8(in0), coef0);
  const uint16x8_t h2 = vmull_u8(vget_low_u8(in2), coef0);
  const uint16x8_t h3 = vmull_u8(vget_high_u8(in2), coef0);
  const uint16x8_t h4 = vmlal_u8(h0, vget_low_u8(in1), coef1);
  const uint16x8_t h5 = vmlal_u8(h1, vget_high_u8(in1), coef1);
  const uint16x8_t h6 = vmlal_u8(h2, vget_low_u8(in3), coef1);
  const uint16x8_t h7 = vmlal_u8(h3, vget_high_u8(in3), coef1);

  const uint8x8_t hor0 = vrshrn_n_u16(h4, 7);  // temp: 00 01 02 03 04 05 06 07
  const uint8x8_t hor1 = vrshrn_n_u16(h5, 7);  // temp: 08 09 0A 0B 0C 0D 0E 0F
  const uint8x8_t hor2 = vrshrn_n_u16(h6, 7);  // temp: 10 11 12 13 14 15 16 17
  const uint8x8_t hor3 = vrshrn_n_u16(h7, 7);  // temp: 18 19 1A 1B 1C 1D 1E 1F
  const uint16x8_t v0 = vmull_u8(hor0, coef0);
  const uint16x8_t v1 = vmull_u8(hor1, coef0);
  const uint16x8_t v2 = vmlal_u8(v0, hor2, coef1);
  const uint16x8_t v3 = vmlal_u8(v1, hor3, coef1);
  // dst: 0 1 2 3 4 5 6 7 8 9 A B C D E F
  const uint8x16_t d = vcombine_u8(vrshrn_n_u16(v2, 7), vrshrn_n_u16(v3, 7));
  vst1q_u8(dst, d);
}
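
// The kernel above is plain fixed-point bilinear interpolation: in0/in1 are
// the left/right source pixels of the upper row and in2/in3 those of the
// lower row (the de-interleaving loads in the callers decide which columns
// these are), while coef0/coef1 are the two bilinear taps, which sum to 128.
// vrshrn_n_u16(x, 7) is a rounding narrowing shift, (x + 64) >> 7, so each
// pass computes (coef0 * a + coef1 * b + 64) / 128. For the half-pel phase
// (phase_scaler == 8 in the callers) both taps are 64 and the output is a
// simple average of the four input pixels.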

static INLINE void scale_plane_2_to_1_bilinear(
    const uint8_t *const src, const int src_stride, uint8_t *dst,
    const int dst_stride, const int w, const int h, const int16_t c0,
    const int16_t c1) {
  const int max_width = (w + 15) & ~15;
  const uint8_t *src0 = src;
  const uint8_t *src1 = src + src_stride;
  const uint8x8_t coef0 = vdup_n_u8(c0);
  const uint8x8_t coef1 = vdup_n_u8(c1);
  int y = h;

  assert(w && h);

  do {
    int x = max_width;
    do {
      // 000 002 004 006 008 00A 00C 00E 010 012 014 016 018 01A 01C 01E
      // 001 003 005 007 009 00B 00D 00F 011 013 015 017 019 01B 01D 01F
      const uint8x16x2_t s0 = vld2q_u8(src0);
      // 100 102 104 106 108 10A 10C 10E 110 112 114 116 118 11A 11C 11E
      // 101 103 105 107 109 10B 10D 10F 111 113 115 117 119 11B 11D 11F
      const uint8x16x2_t s1 = vld2q_u8(src1);
      scale_plane_bilinear_kernel(s0.val[0], s0.val[1], s1.val[0], s1.val[1],
                                  coef0, coef1, dst);
      src0 += 32;
      src1 += 32;
      dst += 16;
      x -= 16;
    } while (x);
    src0 += 2 * (src_stride - max_width);
    src1 += 2 * (src_stride - max_width);
    dst += dst_stride - max_width;
  } while (--y);
}

static INLINE void scale_plane_4_to_1_bilinear(
    const uint8_t *const src, const int src_stride, uint8_t *dst,
    const int dst_stride, const int w, const int h, const int16_t c0,
    const int16_t c1) {
  const int max_width = (w + 15) & ~15;
  const uint8_t *src0 = src;
  const uint8_t *src1 = src + src_stride;
  const uint8x8_t coef0 = vdup_n_u8(c0);
  const uint8x8_t coef1 = vdup_n_u8(c1);
  int y = h;

  assert(w && h);

  do {
    int x = max_width;
    do {
      // (*) -- useless
      // 000 004 008 00C 010 014 018 01C 020 024 028 02C 030 034 038 03C
      // 001 005 009 00D 011 015 019 01D 021 025 029 02D 031 035 039 03D
      // 002 006 00A 00E 012 016 01A 01E 022 026 02A 02E 032 036 03A 03E (*)
      // 003 007 00B 00F 013 017 01B 01F 023 027 02B 02F 033 037 03B 03F (*)
      const uint8x16x4_t s0 = vld4q_u8(src0);
      // 100 104 108 10C 110 114 118 11C 120 124 128 12C 130 134 138 13C
      // 101 105 109 10D 111 115 119 11D 121 125 129 12D 131 135 139 13D
      // 102 106 10A 10E 112 116 11A 11E 122 126 12A 12E 132 136 13A 13E (*)
      // 103 107 10B 10F 113 117 11B 11F 123 127 12B 12F 133 137 13B 13F (*)
      const uint8x16x4_t s1 = vld4q_u8(src1);
      scale_plane_bilinear_kernel(s0.val[0], s0.val[1], s1.val[0], s1.val[1],
                                  coef0, coef1, dst);
      src0 += 64;
      src1 += 64;
      dst += 16;
      x -= 16;
    } while (x);
    src0 += 4 * (src_stride - max_width);
    src1 += 4 * (src_stride - max_width);
    dst += dst_stride - max_width;
  } while (--y);
}

static INLINE uint8x8_t scale_filter_bilinear(const uint8x8_t *const s,
                                              const uint8x8_t *const coef) {
  const uint16x8_t h0 = vmull_u8(s[0], coef[0]);
  const uint16x8_t h1 = vmlal_u8(h0, s[1], coef[1]);

  return vrshrn_n_u16(h1, 7);
}

static void scale_plane_2_to_1_general(const uint8_t *src, const int src_stride,
                                       uint8_t *dst, const int dst_stride,
                                       const int w, const int h,
                                       const int16_t *const coef,
                                       uint8_t *const temp_buffer) {
  const int width_hor = (w + 3) & ~3;
  const int width_ver = (w + 7) & ~7;
  const int height_hor = (2 * h + SUBPEL_TAPS - 2 + 7) & ~7;
  const int height_ver = (h + 3) & ~3;
  const int16x8_t filters = vld1q_s16(coef);
  int x, y = height_hor;
  uint8_t *t = temp_buffer;
  uint8x8_t s[14], d[4];

  assert(w && h);

  src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2 + 1;

  // horizontal 4x8
  // Note: processing 4x8 is about 20% faster than processing row by row using
  // vld4_u8().
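  // The horizontal pass works on transposed data: 8 source rows are loaded
  // and transposed so that each s[i] holds one source column across those 8
  // rows. For the 2:1 ratio an output column starts every 2 source columns
  // and the 8-tap filter reads 8 consecutive columns, so the 4 outputs of one
  // iteration need columns s[0]..s[13]; s[6]..s[13] are reloaded each
  // iteration and the last 6 columns are carried over. The 8x4 result is
  // transposed back and stored to temp_buffer, which the vertical pass below
  // then filters down to dst.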
  do {
    load_u8_8x8(src + 2, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
                &s[6], &s[7]);
    transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]);
    x = width_hor;

    do {
      src += 8;
      load_u8_8x8(src, src_stride, &s[6], &s[7], &s[8], &s[9], &s[10], &s[11],
                  &s[12], &s[13]);
      transpose_u8_8x8(&s[6], &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
                       &s[13]);

      d[0] = scale_filter_8(&s[0], filters);  // 00 10 20 30 40 50 60 70
      d[1] = scale_filter_8(&s[2], filters);  // 01 11 21 31 41 51 61 71
      d[2] = scale_filter_8(&s[4], filters);  // 02 12 22 32 42 52 62 72
      d[3] = scale_filter_8(&s[6], filters);  // 03 13 23 33 43 53 63 73
      // 00 01 02 03 40 41 42 43
      // 10 11 12 13 50 51 52 53
      // 20 21 22 23 60 61 62 63
      // 30 31 32 33 70 71 72 73
      transpose_u8_8x4(&d[0], &d[1], &d[2], &d[3]);
      vst1_lane_u32((uint32_t *)(t + 0 * width_hor), vreinterpret_u32_u8(d[0]),
                    0);
      vst1_lane_u32((uint32_t *)(t + 1 * width_hor), vreinterpret_u32_u8(d[1]),
                    0);
      vst1_lane_u32((uint32_t *)(t + 2 * width_hor), vreinterpret_u32_u8(d[2]),
                    0);
      vst1_lane_u32((uint32_t *)(t + 3 * width_hor), vreinterpret_u32_u8(d[3]),
                    0);
      vst1_lane_u32((uint32_t *)(t + 4 * width_hor), vreinterpret_u32_u8(d[0]),
                    1);
      vst1_lane_u32((uint32_t *)(t + 5 * width_hor), vreinterpret_u32_u8(d[1]),
                    1);
      vst1_lane_u32((uint32_t *)(t + 6 * width_hor), vreinterpret_u32_u8(d[2]),
                    1);
      vst1_lane_u32((uint32_t *)(t + 7 * width_hor), vreinterpret_u32_u8(d[3]),
                    1);

      s[0] = s[8];
      s[1] = s[9];
      s[2] = s[10];
      s[3] = s[11];
      s[4] = s[12];
      s[5] = s[13];

      t += 4;
      x -= 4;
    } while (x);
    src += 8 * src_stride - 2 * width_hor;
    t += 7 * width_hor;
    y -= 8;
  } while (y);

  // vertical 8x4
  x = width_ver;
  t = temp_buffer;
  do {
    load_u8_8x8(t, width_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
                &s[7]);
    t += 6 * width_hor;
    y = height_ver;

    do {
      load_u8_8x8(t, width_hor, &s[6], &s[7], &s[8], &s[9], &s[10], &s[11],
                  &s[12], &s[13]);
      t += 8 * width_hor;

      d[0] = scale_filter_8(&s[0], filters);  // 00 01 02 03 04 05 06 07
      d[1] = scale_filter_8(&s[2], filters);  // 10 11 12 13 14 15 16 17
      d[2] = scale_filter_8(&s[4], filters);  // 20 21 22 23 24 25 26 27
      d[3] = scale_filter_8(&s[6], filters);  // 30 31 32 33 34 35 36 37
      vst1_u8(dst + 0 * dst_stride, d[0]);
      vst1_u8(dst + 1 * dst_stride, d[1]);
      vst1_u8(dst + 2 * dst_stride, d[2]);
      vst1_u8(dst + 3 * dst_stride, d[3]);

      s[0] = s[8];
      s[1] = s[9];
      s[2] = s[10];
      s[3] = s[11];
      s[4] = s[12];
      s[5] = s[13];

      dst += 4 * dst_stride;
      y -= 4;
    } while (y);
    t -= width_hor * (2 * height_ver + 6);
    t += 8;
    dst -= height_ver * dst_stride;
    dst += 8;
    x -= 8;
  } while (x);
}

static void scale_plane_4_to_1_general(const uint8_t *src, const int src_stride,
                                       uint8_t *dst, const int dst_stride,
                                       const int w, const int h,
                                       const int16_t *const coef,
                                       uint8_t *const temp_buffer) {
  const int width_hor = (w + 1) & ~1;
  const int width_ver = (w + 7) & ~7;
  const int height_hor = (4 * h + SUBPEL_TAPS - 2 + 7) & ~7;
  const int height_ver = (h + 1) & ~1;
  const int16x8_t filters = vld1q_s16(coef);
  int x, y = height_hor;
  uint8_t *t = temp_buffer;
  uint8x8_t s[12], d[2];

  assert(w && h);

  src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2 + 3;

  // horizontal 2x8
  // Note: processing 2x8 is about 20% faster than processing row by row using
  // vld4_u8().
  do {
    load_u8_8x8(src + 4, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
                &s[6], &s[7]);
    transpose_u8_4x8(&s[0], &s[1], &s[2], &s[3], s[4], s[5], s[6], s[7]);
    x = width_hor;

    do {
      uint8x8x2_t dd;
      src += 8;
      load_u8_8x8(src, src_stride, &s[4], &s[5], &s[6], &s[7], &s[8], &s[9],
                  &s[10], &s[11]);
      transpose_u8_8x8(&s[4], &s[5], &s[6], &s[7], &s[8], &s[9], &s[10],
                       &s[11]);

      d[0] = scale_filter_8(&s[0], filters);  // 00 10 20 30 40 50 60 70
      d[1] = scale_filter_8(&s[4], filters);  // 01 11 21 31 41 51 61 71
      // dd.val[0]: 00 01 20 21 40 41 60 61
      // dd.val[1]: 10 11 30 31 50 51 70 71
      dd = vtrn_u8(d[0], d[1]);
      vst1_lane_u16((uint16_t *)(t + 0 * width_hor),
                    vreinterpret_u16_u8(dd.val[0]), 0);
      vst1_lane_u16((uint16_t *)(t + 1 * width_hor),
                    vreinterpret_u16_u8(dd.val[1]), 0);
      vst1_lane_u16((uint16_t *)(t + 2 * width_hor),
                    vreinterpret_u16_u8(dd.val[0]), 1);
      vst1_lane_u16((uint16_t *)(t + 3 * width_hor),
                    vreinterpret_u16_u8(dd.val[1]), 1);
      vst1_lane_u16((uint16_t *)(t + 4 * width_hor),
                    vreinterpret_u16_u8(dd.val[0]), 2);
      vst1_lane_u16((uint16_t *)(t + 5 * width_hor),
                    vreinterpret_u16_u8(dd.val[1]), 2);
      vst1_lane_u16((uint16_t *)(t + 6 * width_hor),
                    vreinterpret_u16_u8(dd.val[0]), 3);
      vst1_lane_u16((uint16_t *)(t + 7 * width_hor),
                    vreinterpret_u16_u8(dd.val[1]), 3);

      s[0] = s[8];
      s[1] = s[9];
      s[2] = s[10];
      s[3] = s[11];

      t += 2;
      x -= 2;
    } while (x);
    src += 8 * src_stride - 4 * width_hor;
    t += 7 * width_hor;
    y -= 8;
  } while (y);

  // vertical 8x2
  x = width_ver;
  t = temp_buffer;
  do {
    load_u8_8x4(t, width_hor, &s[0], &s[1], &s[2], &s[3]);
    t += 4 * width_hor;
    y = height_ver;

    do {
      load_u8_8x8(t, width_hor, &s[4], &s[5], &s[6], &s[7], &s[8], &s[9],
                  &s[10], &s[11]);
      t += 8 * width_hor;

      d[0] = scale_filter_8(&s[0], filters);  // 00 01 02 03 04 05 06 07
      d[1] = scale_filter_8(&s[4], filters);  // 10 11 12 13 14 15 16 17
      vst1_u8(dst + 0 * dst_stride, d[0]);
      vst1_u8(dst + 1 * dst_stride, d[1]);

      s[0] = s[8];
      s[1] = s[9];
      s[2] = s[10];
      s[3] = s[11];

      dst += 2 * dst_stride;
      y -= 2;
    } while (y);
    t -= width_hor * (4 * height_ver + 4);
    t += 8;
    dst -= height_ver * dst_stride;
    dst += 8;
    x -= 8;
  } while (x);
}

// Notes for 4 to 3 scaling:
//
// 1. 6 rows are calculated in each horizontal inner loop, so width_hor must be
// multiple of 6, and no less than w.
//
// 2. 8 rows are calculated in each vertical inner loop, so width_ver must be
// multiple of 8, and no less than w.
//
// 3. 8 columns are calculated in each horizontal inner loop for further
// vertical scaling, so height_hor must be multiple of 8, and no less than
// 4 * h / 3.
//
// 4. 6 columns are calculated in each vertical inner loop, so height_ver must
// be multiple of 6, and no less than h.
//
// 5. The physical location of the last row of the 4 to 3 scaled frame is
// decided by phase_scaler, and is always less than 1 pixel below the last row
// of the original image.
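//
// Phase arithmetic used below: the source step per output pixel is
// step_q4 = 16 * 4 / 3 = 21 in 1/16-pel units, so the three outputs produced
// from each group of 4 source pixels lie at phases phase_scaler + 0 * 21,
// phase_scaler + 1 * 21 and phase_scaler + 2 * 21. The integer part (>> 4)
// selects the first source pixel of the tap window and the fractional part
// (& SUBPEL_MASK, i.e. & 15) selects the sub-pel kernel. For example,
// phase_scaler == 8 gives phases 8, 29 and 50: source offsets 0, 1 and 3 with
// sub-pel phases 8, 13 and 2.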

static void scale_plane_4_to_3_bilinear(const uint8_t *src,
                                        const int src_stride, uint8_t *dst,
                                        const int dst_stride, const int w,
                                        const int h, const int phase_scaler,
                                        uint8_t *const temp_buffer) {
  static const int step_q4 = 16 * 4 / 3;
  const int width_hor = (w + 5) - ((w + 5) % 6);
  const int stride_hor = width_hor + 2;  // store 2 extra pixels
  const int width_ver = (w + 7) & ~7;
  // We only need 1 extra row below because there are only 2 bilinear
  // coefficients.
  const int height_hor = (4 * h / 3 + 1 + 7) & ~7;
  const int height_ver = (h + 5) - ((h + 5) % 6);
  int x, y = height_hor;
  uint8_t *t = temp_buffer;
  uint8x8_t s[9], d[8], c[6];

  assert(w && h);

  c[0] = vdup_n_u8((uint8_t)vp9_filter_kernels[BILINEAR][phase_scaler][3]);
  c[1] = vdup_n_u8((uint8_t)vp9_filter_kernels[BILINEAR][phase_scaler][4]);
  c[2] = vdup_n_u8(
      (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 1 * step_q4) &
                                            SUBPEL_MASK][3]);
  c[3] = vdup_n_u8(
      (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 1 * step_q4) &
                                            SUBPEL_MASK][4]);
  c[4] = vdup_n_u8(
      (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 2 * step_q4) &
                                            SUBPEL_MASK][3]);
  c[5] = vdup_n_u8(
      (uint8_t)vp9_filter_kernels[BILINEAR][(phase_scaler + 2 * step_q4) &
                                            SUBPEL_MASK][4]);

  d[6] = vdup_n_u8(0);
  d[7] = vdup_n_u8(0);

  // horizontal 6x8
  do {
    load_u8_8x8(src, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
                &s[6], &s[7]);
    src += 1;
    transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]);
    x = width_hor;

    do {
      load_u8_8x8(src, src_stride, &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
                  &s[7], &s[8]);
      src += 8;
      transpose_u8_8x8(&s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7], &s[8]);

      // 00 10 20 30 40 50 60 70
      // 01 11 21 31 41 51 61 71
      // 02 12 22 32 42 52 62 72
      // 03 13 23 33 43 53 63 73
      // 04 14 24 34 44 54 64 74
      // 05 15 25 35 45 55 65 75
      d[0] = scale_filter_bilinear(&s[0], &c[0]);
      d[1] =
          scale_filter_bilinear(&s[(phase_scaler + 1 * step_q4) >> 4], &c[2]);
      d[2] =
          scale_filter_bilinear(&s[(phase_scaler + 2 * step_q4) >> 4], &c[4]);
      d[3] = scale_filter_bilinear(&s[4], &c[0]);
      d[4] = scale_filter_bilinear(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)],
                                   &c[2]);
      d[5] = scale_filter_bilinear(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)],
                                   &c[4]);

      // 00 01 02 03 04 05 xx xx
      // 10 11 12 13 14 15 xx xx
      // 20 21 22 23 24 25 xx xx
      // 30 31 32 33 34 35 xx xx
      // 40 41 42 43 44 45 xx xx
      // 50 51 52 53 54 55 xx xx
      // 60 61 62 63 64 65 xx xx
      // 70 71 72 73 74 75 xx xx
      transpose_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
      // store 2 extra pixels
      vst1_u8(t + 0 * stride_hor, d[0]);
      vst1_u8(t + 1 * stride_hor, d[1]);
      vst1_u8(t + 2 * stride_hor, d[2]);
      vst1_u8(t + 3 * stride_hor, d[3]);
      vst1_u8(t + 4 * stride_hor, d[4]);
      vst1_u8(t + 5 * stride_hor, d[5]);
      vst1_u8(t + 6 * stride_hor, d[6]);
      vst1_u8(t + 7 * stride_hor, d[7]);

      s[0] = s[8];

      t += 6;
      x -= 6;
    } while (x);
    src += 8 * src_stride - 4 * width_hor / 3 - 1;
    t += 7 * stride_hor + 2;
    y -= 8;
  } while (y);

  // vertical 8x6
  x = width_ver;
  t = temp_buffer;
  do {
    load_u8_8x8(t, stride_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
                &s[7]);
    t += stride_hor;
    y = height_ver;

    do {
      load_u8_8x8(t, stride_hor, &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
                  &s[7], &s[8]);
      t += 8 * stride_hor;

      d[0] = scale_filter_bilinear(&s[0], &c[0]);
      d[1] =
          scale_filter_bilinear(&s[(phase_scaler + 1 * step_q4) >> 4], &c[2]);
      d[2] =
          scale_filter_bilinear(&s[(phase_scaler + 2 * step_q4) >> 4], &c[4]);
      d[3] = scale_filter_bilinear(&s[4], &c[0]);
      d[4] = scale_filter_bilinear(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)],
                                   &c[2]);
      d[5] = scale_filter_bilinear(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)],
                                   &c[4]);
      vst1_u8(dst + 0 * dst_stride, d[0]);
      vst1_u8(dst + 1 * dst_stride, d[1]);
      vst1_u8(dst + 2 * dst_stride, d[2]);
      vst1_u8(dst + 3 * dst_stride, d[3]);
      vst1_u8(dst + 4 * dst_stride, d[4]);
      vst1_u8(dst + 5 * dst_stride, d[5]);

      s[0] = s[8];

      dst += 6 * dst_stride;
      y -= 6;
    } while (y);
    t -= stride_hor * (4 * height_ver / 3 + 1);
    t += 8;
    dst -= height_ver * dst_stride;
    dst += 8;
    x -= 8;
  } while (x);
}

static void scale_plane_4_to_3_general(const uint8_t *src, const int src_stride,
                                       uint8_t *dst, const int dst_stride,
                                       const int w, const int h,
                                       const InterpKernel *const coef,
                                       const int phase_scaler,
                                       uint8_t *const temp_buffer) {
  static const int step_q4 = 16 * 4 / 3;
  const int width_hor = (w + 5) - ((w + 5) % 6);
  const int stride_hor = width_hor + 2;  // store 2 extra pixels
  const int width_ver = (w + 7) & ~7;
  // We need (SUBPEL_TAPS - 1) extra rows: (SUBPEL_TAPS / 2 - 1) extra rows
  // above and (SUBPEL_TAPS / 2) extra rows below.
  const int height_hor = (4 * h / 3 + SUBPEL_TAPS - 1 + 7) & ~7;
  const int height_ver = (h + 5) - ((h + 5) % 6);
  const int16x8_t filters0 =
      vld1q_s16(coef[(phase_scaler + 0 * step_q4) & SUBPEL_MASK]);
  const int16x8_t filters1 =
      vld1q_s16(coef[(phase_scaler + 1 * step_q4) & SUBPEL_MASK]);
  const int16x8_t filters2 =
      vld1q_s16(coef[(phase_scaler + 2 * step_q4) & SUBPEL_MASK]);
  int x, y = height_hor;
  uint8_t *t = temp_buffer;
  uint8x8_t s[15], d[8];

  assert(w && h);

  src -= (SUBPEL_TAPS / 2 - 1) * src_stride + SUBPEL_TAPS / 2;
  d[6] = vdup_n_u8(0);
  d[7] = vdup_n_u8(0);

  // horizontal 6x8
  do {
    load_u8_8x8(src + 1, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
                &s[6], &s[7]);
    transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]);
    x = width_hor;

    do {
      src += 8;
      load_u8_8x8(src, src_stride, &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
                  &s[13], &s[14]);
      transpose_u8_8x8(&s[7], &s[8], &s[9], &s[10], &s[11], &s[12], &s[13],
                       &s[14]);

      // 00 10 20 30 40 50 60 70
      // 01 11 21 31 41 51 61 71
      // 02 12 22 32 42 52 62 72
      // 03 13 23 33 43 53 63 73
      // 04 14 24 34 44 54 64 74
      // 05 15 25 35 45 55 65 75
      d[0] = scale_filter_8(&s[0], filters0);
      d[1] = scale_filter_8(&s[(phase_scaler + 1 * step_q4) >> 4], filters1);
      d[2] = scale_filter_8(&s[(phase_scaler + 2 * step_q4) >> 4], filters2);
      d[3] = scale_filter_8(&s[4], filters0);
      d[4] =
          scale_filter_8(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], filters1);
      d[5] =
          scale_filter_8(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], filters2);

      // 00 01 02 03 04 05 xx xx
      // 10 11 12 13 14 15 xx xx
      // 20 21 22 23 24 25 xx xx
      // 30 31 32 33 34 35 xx xx
      // 40 41 42 43 44 45 xx xx
      // 50 51 52 53 54 55 xx xx
      // 60 61 62 63 64 65 xx xx
      // 70 71 72 73 74 75 xx xx
      transpose_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
      // store 2 extra pixels
      vst1_u8(t + 0 * stride_hor, d[0]);
      vst1_u8(t + 1 * stride_hor, d[1]);
      vst1_u8(t + 2 * stride_hor, d[2]);
      vst1_u8(t + 3 * stride_hor, d[3]);
      vst1_u8(t + 4 * stride_hor, d[4]);
      vst1_u8(t + 5 * stride_hor, d[5]);
      vst1_u8(t + 6 * stride_hor, d[6]);
      vst1_u8(t + 7 * stride_hor, d[7]);

      s[0] = s[8];
      s[1] = s[9];
      s[2] = s[10];
      s[3] = s[11];
      s[4] = s[12];
      s[5] = s[13];
      s[6] = s[14];

      t += 6;
      x -= 6;
    } while (x);
    src += 8 * src_stride - 4 * width_hor / 3;
    t += 7 * stride_hor + 2;
    y -= 8;
  } while (y);

  // vertical 8x6
  x = width_ver;
  t = temp_buffer;
  do {
    load_u8_8x8(t, stride_hor, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
                &s[7]);
    t += 7 * stride_hor;
    y = height_ver;

    do {
      load_u8_8x8(t, stride_hor, &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
                  &s[13], &s[14]);
      t += 8 * stride_hor;

      d[0] = scale_filter_8(&s[0], filters0);
      d[1] = scale_filter_8(&s[(phase_scaler + 1 * step_q4) >> 4], filters1);
      d[2] = scale_filter_8(&s[(phase_scaler + 2 * step_q4) >> 4], filters2);
      d[3] = scale_filter_8(&s[4], filters0);
      d[4] =
          scale_filter_8(&s[4 + ((phase_scaler + 1 * step_q4) >> 4)], filters1);
      d[5] =
          scale_filter_8(&s[4 + ((phase_scaler + 2 * step_q4) >> 4)], filters2);
      vst1_u8(dst + 0 * dst_stride, d[0]);
      vst1_u8(dst + 1 * dst_stride, d[1]);
      vst1_u8(dst + 2 * dst_stride, d[2]);
      vst1_u8(dst + 3 * dst_stride, d[3]);
      vst1_u8(dst + 4 * dst_stride, d[4]);
      vst1_u8(dst + 5 * dst_stride, d[5]);

      s[0] = s[8];
      s[1] = s[9];
      s[2] = s[10];
      s[3] = s[11];
      s[4] = s[12];
      s[5] = s[13];
      s[6] = s[14];

      dst += 6 * dst_stride;
      y -= 6;
    } while (y);
    t -= stride_hor * (4 * height_ver / 3 + 7);
    t += 8;
    dst -= height_ver * dst_stride;
    dst += 8;
    x -= 8;
  } while (x);
}

void vp9_scale_and_extend_frame_neon(const YV12_BUFFER_CONFIG *src,
                                     YV12_BUFFER_CONFIG *dst,
                                     INTERP_FILTER filter_type,
                                     int phase_scaler) {
  const int src_w = src->y_crop_width;
  const int src_h = src->y_crop_height;
  const int dst_w = dst->y_crop_width;
  const int dst_h = dst->y_crop_height;
  const int dst_uv_w = dst_w / 2;
  const int dst_uv_h = dst_h / 2;
  int scaled = 0;

  // phase_scaler is usually 0 or 8.
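  // It is the sub-pel offset of the output sampling grid in 1/16-pel units:
  // 0 samples co-located with the source pixels, 8 samples half a source
  // pixel to the right and below.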
  assert(phase_scaler >= 0 && phase_scaler < 16);

  if (2 * dst_w == src_w && 2 * dst_h == src_h) {
    // 2 to 1
    scaled = 1;
    if (phase_scaler == 0) {
      scale_plane_2_to_1_phase_0(src->y_buffer, src->y_stride, dst->y_buffer,
                                 dst->y_stride, dst_w, dst_h);
      scale_plane_2_to_1_phase_0(src->u_buffer, src->uv_stride, dst->u_buffer,
                                 dst->uv_stride, dst_uv_w, dst_uv_h);
      scale_plane_2_to_1_phase_0(src->v_buffer, src->uv_stride, dst->v_buffer,
                                 dst->uv_stride, dst_uv_w, dst_uv_h);
    } else if (filter_type == BILINEAR) {
      const int16_t c0 = vp9_filter_kernels[BILINEAR][phase_scaler][3];
      const int16_t c1 = vp9_filter_kernels[BILINEAR][phase_scaler][4];
      scale_plane_2_to_1_bilinear(src->y_buffer, src->y_stride, dst->y_buffer,
                                  dst->y_stride, dst_w, dst_h, c0, c1);
      scale_plane_2_to_1_bilinear(src->u_buffer, src->uv_stride, dst->u_buffer,
                                  dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
      scale_plane_2_to_1_bilinear(src->v_buffer, src->uv_stride, dst->v_buffer,
                                  dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
    } else {
      const int buffer_stride = (dst_w + 3) & ~3;
      const int buffer_height = (2 * dst_h + SUBPEL_TAPS - 2 + 7) & ~7;
      uint8_t *const temp_buffer =
          (uint8_t *)malloc(buffer_stride * buffer_height);
      if (temp_buffer) {
        scale_plane_2_to_1_general(
            src->y_buffer, src->y_stride, dst->y_buffer, dst->y_stride, dst_w,
            dst_h, vp9_filter_kernels[filter_type][phase_scaler], temp_buffer);
        scale_plane_2_to_1_general(
            src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
            dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
            temp_buffer);
        scale_plane_2_to_1_general(
            src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
            dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
            temp_buffer);
        free(temp_buffer);
      } else {
        scaled = 0;
      }
    }
  } else if (4 * dst_w == src_w && 4 * dst_h == src_h) {
    // 4 to 1
    scaled = 1;
    if (phase_scaler == 0) {
      scale_plane_4_to_1_phase_0(src->y_buffer, src->y_stride, dst->y_buffer,
                                 dst->y_stride, dst_w, dst_h);
      scale_plane_4_to_1_phase_0(src->u_buffer, src->uv_stride, dst->u_buffer,
                                 dst->uv_stride, dst_uv_w, dst_uv_h);
      scale_plane_4_to_1_phase_0(src->v_buffer, src->uv_stride, dst->v_buffer,
                                 dst->uv_stride, dst_uv_w, dst_uv_h);
    } else if (filter_type == BILINEAR) {
      const int16_t c0 = vp9_filter_kernels[BILINEAR][phase_scaler][3];
      const int16_t c1 = vp9_filter_kernels[BILINEAR][phase_scaler][4];
      scale_plane_4_to_1_bilinear(src->y_buffer, src->y_stride, dst->y_buffer,
                                  dst->y_stride, dst_w, dst_h, c0, c1);
      scale_plane_4_to_1_bilinear(src->u_buffer, src->uv_stride, dst->u_buffer,
                                  dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
      scale_plane_4_to_1_bilinear(src->v_buffer, src->uv_stride, dst->v_buffer,
                                  dst->uv_stride, dst_uv_w, dst_uv_h, c0, c1);
    } else {
      const int buffer_stride = (dst_w + 1) & ~1;
      const int buffer_height = (4 * dst_h + SUBPEL_TAPS - 2 + 7) & ~7;
      uint8_t *const temp_buffer =
          (uint8_t *)malloc(buffer_stride * buffer_height);
      if (temp_buffer) {
        scale_plane_4_to_1_general(
            src->y_buffer, src->y_stride, dst->y_buffer, dst->y_stride, dst_w,
            dst_h, vp9_filter_kernels[filter_type][phase_scaler], temp_buffer);
        scale_plane_4_to_1_general(
            src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
            dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
            temp_buffer);
        scale_plane_4_to_1_general(
            src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
            dst_uv_w, dst_uv_h, vp9_filter_kernels[filter_type][phase_scaler],
            temp_buffer);
        free(temp_buffer);
      } else {
        scaled = 0;
      }
    }
  } else if (4 * dst_w == 3 * src_w && 4 * dst_h == 3 * src_h) {
    // 4 to 3
    const int buffer_stride = (dst_w + 5) - ((dst_w + 5) % 6) + 2;
    const int buffer_height = (4 * dst_h / 3 + SUBPEL_TAPS - 1 + 7) & ~7;
    uint8_t *const temp_buffer =
        (uint8_t *)malloc(buffer_stride * buffer_height);
    if (temp_buffer) {
      scaled = 1;
      if (filter_type == BILINEAR) {
        scale_plane_4_to_3_bilinear(src->y_buffer, src->y_stride, dst->y_buffer,
                                    dst->y_stride, dst_w, dst_h, phase_scaler,
                                    temp_buffer);
        scale_plane_4_to_3_bilinear(src->u_buffer, src->uv_stride,
                                    dst->u_buffer, dst->uv_stride, dst_uv_w,
                                    dst_uv_h, phase_scaler, temp_buffer);
        scale_plane_4_to_3_bilinear(src->v_buffer, src->uv_stride,
                                    dst->v_buffer, dst->uv_stride, dst_uv_w,
                                    dst_uv_h, phase_scaler, temp_buffer);
      } else {
        scale_plane_4_to_3_general(
            src->y_buffer, src->y_stride, dst->y_buffer, dst->y_stride, dst_w,
            dst_h, vp9_filter_kernels[filter_type], phase_scaler, temp_buffer);
        scale_plane_4_to_3_general(src->u_buffer, src->uv_stride, dst->u_buffer,
                                   dst->uv_stride, dst_uv_w, dst_uv_h,
                                   vp9_filter_kernels[filter_type],
                                   phase_scaler, temp_buffer);
        scale_plane_4_to_3_general(src->v_buffer, src->uv_stride, dst->v_buffer,
                                   dst->uv_stride, dst_uv_w, dst_uv_h,
                                   vp9_filter_kernels[filter_type],
                                   phase_scaler, temp_buffer);
      }
      free(temp_buffer);
    }
  }

  if (scaled) {
    vpx_extend_frame_borders(dst);
  } else {
    // Call c version for all other scaling ratios.
    vp9_scale_and_extend_frame_c(src, dst, filter_type, phase_scaler);
  }
}