// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of YUV to RGB upsampling functions.
//
// Author: somnath (at) google.com (Somnath Banerjee)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2)

#include <assert.h>
#include <emmintrin.h>
#include <string.h>
#include "src/dsp/yuv.h"

#ifdef FANCY_UPSAMPLING

// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
//    u = (9*a + 3*b + 3*c + d + 8) / 16
//      = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
//      = (a + m + 1) / 2
//    where m = (a + 3*b + 3*c + d) / 8
//            = ((a + b + c + d) / 2 + b + c) / 4
//
// Let's say  k = (a + b + c + d) / 4.
// We can compute k as
//    k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1
//    where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
//
// Then m can be written as
//    m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1

// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1
#define GET_M(ij, in, out) do {                                                \
  const __m128i tmp0 = _mm_avg_epu8(k, (in));     /* (k + in + 1) / 2 */       \
  const __m128i tmp1 = _mm_and_si128((ij), st);   /* (ij) & (s^t) */           \
  const __m128i tmp2 = _mm_xor_si128(k, (in));    /* (k^in) */                 \
  const __m128i tmp3 = _mm_or_si128(tmp1, tmp2);  /* ((ij) & (s^t)) | (k^in) */\
  const __m128i tmp4 = _mm_and_si128(tmp3, one);  /* & 1 -> lsb_correction */  \
  (out) = _mm_sub_epi8(tmp0, tmp4);    /* (k + in + 1) / 2 - lsb_correction */ \
} while (0)

// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do {                                 \
  const __m128i t_a = _mm_avg_epu8(a, da);  /* (9a + 3b + 3c + d + 8) / 16 */  \
  const __m128i t_b = _mm_avg_epu8(b, db);  /* (3a + 9b + c + 3d + 8) / 16 */  \
  const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b);                             \
  const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b);                             \
  _mm_store_si128(((__m128i*)(out)) + 0, t_1);                                 \
  _mm_store_si128(((__m128i*)(out)) + 1, t_2);                                 \
} while (0)

// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
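// Worked example for a single byte lane (illustrative values only, not taken
// from the source; a, b, c, d name the four neighboring chroma samples as in
// the derivation above): with a=120, b=130, c=110, d=100 the exact filter
// gives (9*120 + 3*130 + 3*110 + 100 + 8) / 16 = 1908 / 16 = 119. The avg/xor
// sequence below reproduces it: s = avg(a,d) = 110, t = avg(b,c) = 120,
// k = 115 (= (a+b+c+d)/4), diag1 = 117 (~ (a+3b+3c+d)/8 = 117.5), and finally
// avg(a, diag1) = 119.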
#define UPSAMPLE_32PIXELS(r1, r2, out) {                                       \
  const __m128i one = _mm_set1_epi8(1);                                        \
  const __m128i a = _mm_loadu_si128((const __m128i*)&(r1)[0]);                 \
  const __m128i b = _mm_loadu_si128((const __m128i*)&(r1)[1]);                 \
  const __m128i c = _mm_loadu_si128((const __m128i*)&(r2)[0]);                 \
  const __m128i d = _mm_loadu_si128((const __m128i*)&(r2)[1]);                 \
                                                                               \
  const __m128i s = _mm_avg_epu8(a, d);        /* s = (a + d + 1) / 2 */       \
  const __m128i t = _mm_avg_epu8(b, c);        /* t = (b + c + 1) / 2 */       \
  const __m128i st = _mm_xor_si128(s, t);      /* st = s^t */                  \
                                                                               \
  const __m128i ad = _mm_xor_si128(a, d);      /* ad = a^d */                  \
  const __m128i bc = _mm_xor_si128(b, c);      /* bc = b^c */                  \
                                                                               \
  const __m128i t1 = _mm_or_si128(ad, bc);     /* (a^d) | (b^c) */             \
  const __m128i t2 = _mm_or_si128(t1, st);     /* (a^d) | (b^c) | (s^t) */     \
  const __m128i t3 = _mm_and_si128(t2, one);   /* (a^d) | (b^c) | (s^t) & 1 */ \
  const __m128i t4 = _mm_avg_epu8(s, t);                                       \
  const __m128i k = _mm_sub_epi8(t4, t3);      /* k = (a + b + c + d) / 4 */   \
  __m128i diag1, diag2;                                                        \
                                                                               \
  GET_M(bc, t, diag1);                  /* diag1 = (a + 3b + 3c + d) / 8 */    \
  GET_M(ad, s, diag2);                  /* diag2 = (3a + b + c + 3d) / 8 */    \
                                                                               \
  /* pack the alternate pixels */                                              \
  PACK_AND_STORE(a, b, diag1, diag2, (out) + 0);       /* store top */         \
  PACK_AND_STORE(c, d, diag2, diag1, (out) + 2 * 32);  /* store bottom */      \
}

// Turn the macro into a function for reducing code-size when non-critical
static void Upsample32Pixels_SSE2(const uint8_t r1[], const uint8_t r2[],
                                  uint8_t* const out) {
  UPSAMPLE_32PIXELS(r1, r2, out);
}

#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) {                         \
  uint8_t r1[17], r2[17];                                                      \
  memcpy(r1, (tb), (num_pixels));                                              \
  memcpy(r2, (bb), (num_pixels));                                              \
  /* replicate last byte */                                                    \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels));          \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels));          \
  /* using the shared function instead of the macro saves ~3k code size */     \
  Upsample32Pixels_SSE2(r1, r2, out);                                          \
}

#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y,                           \
                       top_dst, bottom_dst, cur_x) do {                        \
  FUNC##32_SSE2((top_y) + (cur_x), r_u, r_v, (top_dst) + (cur_x) * (XSTEP));   \
  if ((bottom_y) != NULL) {                                                    \
    FUNC##32_SSE2((bottom_y) + (cur_x), r_u + 64, r_v + 64,                    \
                  (bottom_dst) + (cur_x) * (XSTEP));                           \
  }                                                                            \
} while (0)

#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP)                             \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y,          \
                      const uint8_t* top_u, const uint8_t* top_v,             \
                      const uint8_t* cur_u, const uint8_t* cur_v,             \
                      uint8_t* top_dst, uint8_t* bottom_dst, int len) {       \
  int uv_pos, pos;                                                             \
  /* 16byte-aligned array to cache reconstructed u and v */                    \
  uint8_t uv_buf[14 * 32 + 15] = { 0 };                                        \
  uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15);             \
  uint8_t* const r_v = r_u + 32;                                               \
                                                                               \
  assert(top_y != NULL);                                                       \
  {   /* Treat the first pixel in regular way */                               \
    const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1;                       \
    const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1;                       \
    const int u0_t = (top_u[0] + u_diag) >> 1;                                 \
    const int v0_t = (top_v[0] + v_diag) >> 1;                                 \
    FUNC(top_y[0], u0_t, v0_t, top_dst);                                       \
    if (bottom_y != NULL) {                                                    \
      const int u0_b = (cur_u[0] + u_diag) >> 1;                               \
      const int v0_b = (cur_v[0] + v_diag) >> 1;                               \
      FUNC(bottom_y[0], u0_b, v0_b, bottom_dst);                               \
    }                                                                          \
  }                                                                            \
  /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */  \
  for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) {    \
    UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u);                    \
    UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v);                    \
    CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos);    \
  }                                                                            \
  if (len > 1) {                                                               \
    const int left_over = ((len + 1) >> 1) - (pos >> 1);                       \
    uint8_t* const tmp_top_dst = r_u + 4 * 32;                                 \
    uint8_t* const tmp_bottom_dst = tmp_top_dst + 4 * 32;                      \
    uint8_t* const tmp_top = tmp_bottom_dst + 4 * 32;                          \
    uint8_t* const tmp_bottom = (bottom_y == NULL) ? NULL : tmp_top + 32;      \
    assert(left_over > 0);                                                     \
    UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u);       \
    UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v);       \
    memcpy(tmp_top, top_y + pos, len - pos);                                   \
    if (bottom_y != NULL) memcpy(tmp_bottom, bottom_y + pos, len - pos);       \
    CONVERT2RGB_32(FUNC, XSTEP, tmp_top, tmp_bottom, tmp_top_dst,              \
                   tmp_bottom_dst, 0);                                         \
    memcpy(top_dst + pos * (XSTEP), tmp_top_dst, (len - pos) * (XSTEP));       \
    if (bottom_y != NULL) {                                                    \
      memcpy(bottom_dst + pos * (XSTEP), tmp_bottom_dst,                       \
             (len - pos) * (XSTEP));                                           \
    }                                                                          \
  }                                                                            \
}

// SSE2 variants of the fancy upsampler.
SSE2_UPSAMPLE_FUNC(UpsampleRgbaLinePair_SSE2, VP8YuvToRgba, 4)
SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePair_SSE2, VP8YuvToBgra, 4)

#if !defined(WEBP_REDUCE_CSP)
SSE2_UPSAMPLE_FUNC(UpsampleRgbLinePair_SSE2, VP8YuvToRgb, 3)
SSE2_UPSAMPLE_FUNC(UpsampleBgrLinePair_SSE2, VP8YuvToBgr, 3)
SSE2_UPSAMPLE_FUNC(UpsampleArgbLinePair_SSE2, VP8YuvToArgb, 4)
SSE2_UPSAMPLE_FUNC(UpsampleRgba4444LinePair_SSE2, VP8YuvToRgba4444, 2)
SSE2_UPSAMPLE_FUNC(UpsampleRgb565LinePair_SSE2, VP8YuvToRgb565, 2)
#endif  // WEBP_REDUCE_CSP

#undef GET_M
#undef PACK_AND_STORE
#undef UPSAMPLE_32PIXELS
#undef UPSAMPLE_LAST_BLOCK
#undef CONVERT2RGB
#undef CONVERT2RGB_32
#undef SSE2_UPSAMPLE_FUNC

//------------------------------------------------------------------------------
// Entry point

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

extern void WebPInitUpsamplersSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersSSE2(void) {
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair_SSE2;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair_SSE2;
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair_SSE2;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair_SSE2;
#if !defined(WEBP_REDUCE_CSP)
  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_SSE2;
  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_SSE2;
  WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair_SSE2;
  WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair_SSE2;
  WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair_SSE2;
  WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair_SSE2;
  WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair_SSE2;
#endif  // WEBP_REDUCE_CSP
}

#endif  // FANCY_UPSAMPLING

//------------------------------------------------------------------------------

extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */];
extern void WebPInitYUV444ConvertersSSE2(void);

#define YUV444_FUNC(FUNC_NAME, CALL, CALL_C, XSTEP)                            \
extern void CALL_C(const uint8_t* y, const uint8_t* u, const uint8_t* v,      \
                   uint8_t* dst, int len);                                     \
static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v,   \
                      uint8_t* dst, int len) {                                 \
  int i;                                                                       \
  const int max_len = len & ~31;                                               \
  for (i = 0; i < max_len; i += 32) {                                          \
    CALL(y + i, u + i, v + i, dst + i * (XSTEP));                              \
  }                                                                            \
  if (i < len) {  /* C-fallback */                                             \
    CALL_C(y + i, u + i, v + i, dst + i * (XSTEP), len - i);                   \
  }                                                                            \
}

YUV444_FUNC(Yuv444ToRgba_SSE2, VP8YuvToRgba32_SSE2, WebPYuv444ToRgba_C, 4);
YUV444_FUNC(Yuv444ToBgra_SSE2, VP8YuvToBgra32_SSE2, WebPYuv444ToBgra_C, 4);
#if !defined(WEBP_REDUCE_CSP)
YUV444_FUNC(Yuv444ToRgb_SSE2, VP8YuvToRgb32_SSE2, WebPYuv444ToRgb_C, 3);
YUV444_FUNC(Yuv444ToBgr_SSE2, VP8YuvToBgr32_SSE2, WebPYuv444ToBgr_C, 3);
YUV444_FUNC(Yuv444ToArgb_SSE2, VP8YuvToArgb32_SSE2, WebPYuv444ToArgb_C, 4)
YUV444_FUNC(Yuv444ToRgba4444_SSE2, VP8YuvToRgba444432_SSE2,                    \
            WebPYuv444ToRgba4444_C, 2)
YUV444_FUNC(Yuv444ToRgb565_SSE2, VP8YuvToRgb56532_SSE2, WebPYuv444ToRgb565_C, 2)
#endif  // WEBP_REDUCE_CSP

WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersSSE2(void) {
  WebPYUV444Converters[MODE_RGBA] = Yuv444ToRgba_SSE2;
  WebPYUV444Converters[MODE_BGRA] = Yuv444ToBgra_SSE2;
  WebPYUV444Converters[MODE_rgbA] = Yuv444ToRgba_SSE2;
  WebPYUV444Converters[MODE_bgrA] = Yuv444ToBgra_SSE2;
#if !defined(WEBP_REDUCE_CSP)
  WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb_SSE2;
  WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr_SSE2;
  WebPYUV444Converters[MODE_ARGB] = Yuv444ToArgb_SSE2;
  WebPYUV444Converters[MODE_RGBA_4444] = Yuv444ToRgba4444_SSE2;
  WebPYUV444Converters[MODE_RGB_565] = Yuv444ToRgb565_SSE2;
  WebPYUV444Converters[MODE_Argb] = Yuv444ToArgb_SSE2;
  WebPYUV444Converters[MODE_rgbA_4444] = Yuv444ToRgba4444_SSE2;
#endif  // WEBP_REDUCE_CSP
}

#else

WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersSSE2)

#endif  // WEBP_USE_SSE2

#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_SSE2))
WEBP_DSP_INIT_STUB(WebPInitUpsamplersSSE2)
#endif
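// For context, a hedged sketch (an assumption, not part of this file) of how
// the generic dsp initializer typically selects these SSE2 entry points at
// runtime via the CPU-detection hook from src/dsp/cpu.h; exact guard macros
// and call sites vary across libwebp versions:
//
//   if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSSE2)) {
//     WebPInitUpsamplersSSE2();   // installs the WebPUpsamplers[] entries above
//   }
//
// WebPInitYUV444ConvertersSSE2() is expected to be installed the same way by
// the generic YUV444 converter initializer.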