/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <smmintrin.h>  // SSE4.1

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse4.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_ssse3.h"
#include "vpx_dsp/x86/transpose_sse2.h"

void vpx_highbd_idct8x8_half1d_sse4_1(__m128i *const io) {
  __m128i step1[8], step2[8];

  transpose_32bit_4x4x2(io, io);

  // stage 1
  step1[0] = io[0];
  step1[2] = io[4];
  step1[1] = io[2];
  step1[3] = io[6];
  highbd_butterfly_sse4_1(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],
                          &step1[7]);
  highbd_butterfly_sse4_1(io[5], io[3], cospi_12_64, cospi_20_64, &step1[5],
                          &step1[6]);

  // stage 2
  highbd_butterfly_cospi16_sse4_1(step1[0], step1[2], &step2[0], &step2[1]);
  highbd_butterfly_sse4_1(step1[1], step1[3], cospi_24_64, cospi_8_64,
                          &step2[2], &step2[3]);
  step2[4] = _mm_add_epi32(step1[4], step1[5]);
  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
  step2[7] = _mm_add_epi32(step1[7], step1[6]);

  // stage 3
  step1[0] = _mm_add_epi32(step2[0], step2[3]);
  step1[1] = _mm_add_epi32(step2[1], step2[2]);
  step1[2] = _mm_sub_epi32(step2[1], step2[2]);
  step1[3] = _mm_sub_epi32(step2[0], step2[3]);
  step1[4] = step2[4];
  highbd_butterfly_cospi16_sse4_1(step2[6], step2[5], &step1[6], &step1[5]);
  step1[7] = step2[7];

  // stage 4
  highbd_idct8_stage4(step1, io);
}

static void highbd_idct8x8_12_half1d(__m128i *const io) {
  __m128i temp1[2], step1[8], step2[8];

  transpose_32bit_4x4(io, io);

  // stage 1
  step1[0] = io[0];
  step1[1] = io[2];
  extend_64bit(io[1], temp1);
  step1[4] = multiplication_round_shift_sse4_1(temp1, cospi_28_64);
  step1[7] = multiplication_round_shift_sse4_1(temp1, cospi_4_64);
  extend_64bit(io[3], temp1);
  step1[5] = multiplication_round_shift_sse4_1(temp1, -cospi_20_64);
  step1[6] = multiplication_round_shift_sse4_1(temp1, cospi_12_64);

  // stage 2
  extend_64bit(step1[0], temp1);
  step2[0] = multiplication_round_shift_sse4_1(temp1, cospi_16_64);
  extend_64bit(step1[1], temp1);
  step2[2] = multiplication_round_shift_sse4_1(temp1, cospi_24_64);
  step2[3] = multiplication_round_shift_sse4_1(temp1, cospi_8_64);
  step2[4] = _mm_add_epi32(step1[4], step1[5]);
  step2[5] = _mm_sub_epi32(step1[4], step1[5]);
  step2[6] = _mm_sub_epi32(step1[7], step1[6]);
  step2[7] = _mm_add_epi32(step1[7], step1[6]);

  // stage 3
  step1[0] = _mm_add_epi32(step2[0], step2[3]);
  step1[1] = _mm_add_epi32(step2[0], step2[2]);
  step1[2] = _mm_sub_epi32(step2[0], step2[2]);
  step1[3] = _mm_sub_epi32(step2[0], step2[3]);
  step1[4] = step2[4];
  highbd_butterfly_cospi16_sse4_1(step2[6], step2[5], &step1[6], &step1[5]);
  step1[7] = step2[7];

  // stage 4
  highbd_idct8_stage4(step1, io);
}

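// Full 8x8 inverse transform and reconstruction (up to all 64 coefficients
// non-zero). When bd == 8 the coefficients and intermediates fit in 16 bits,
// so the input is packed and handled by the 16-bit SSE2 kernel; otherwise
// vpx_highbd_idct8x8_half1d_sse4_1 is run on each 8x4 half for the first
// pass, the halves are rearranged, and it is run again for the second pass
// before the final rounding and reconstruction.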
void vpx_highbd_idct8x8_64_add_sse4_1(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
  __m128i io[16];

  io[0] = _mm_load_si128((const __m128i *)(input + 0 * 8 + 0));
  io[4] = _mm_load_si128((const __m128i *)(input + 0 * 8 + 4));
  io[1] = _mm_load_si128((const __m128i *)(input + 1 * 8 + 0));
  io[5] = _mm_load_si128((const __m128i *)(input + 1 * 8 + 4));
  io[2] = _mm_load_si128((const __m128i *)(input + 2 * 8 + 0));
  io[6] = _mm_load_si128((const __m128i *)(input + 2 * 8 + 4));
  io[3] = _mm_load_si128((const __m128i *)(input + 3 * 8 + 0));
  io[7] = _mm_load_si128((const __m128i *)(input + 3 * 8 + 4));

  if (bd == 8) {
    __m128i io_short[8];

    io_short[0] = _mm_packs_epi32(io[0], io[4]);
    io_short[1] = _mm_packs_epi32(io[1], io[5]);
    io_short[2] = _mm_packs_epi32(io[2], io[6]);
    io_short[3] = _mm_packs_epi32(io[3], io[7]);
    io[8] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 0));
    io[12] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 4));
    io[9] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 0));
    io[13] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 4));
    io[10] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 0));
    io[14] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 4));
    io[11] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 0));
    io[15] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 4));
    io_short[4] = _mm_packs_epi32(io[8], io[12]);
    io_short[5] = _mm_packs_epi32(io[9], io[13]);
    io_short[6] = _mm_packs_epi32(io[10], io[14]);
    io_short[7] = _mm_packs_epi32(io[11], io[15]);

    vpx_idct8_sse2(io_short);
    vpx_idct8_sse2(io_short);
    round_shift_8x8(io_short, io);
  } else {
    __m128i temp[4];

    vpx_highbd_idct8x8_half1d_sse4_1(io);

    io[8] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 0));
    io[12] = _mm_load_si128((const __m128i *)(input + 4 * 8 + 4));
    io[9] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 0));
    io[13] = _mm_load_si128((const __m128i *)(input + 5 * 8 + 4));
    io[10] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 0));
    io[14] = _mm_load_si128((const __m128i *)(input + 6 * 8 + 4));
    io[11] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 0));
    io[15] = _mm_load_si128((const __m128i *)(input + 7 * 8 + 4));
    vpx_highbd_idct8x8_half1d_sse4_1(&io[8]);

    temp[0] = io[4];
    temp[1] = io[5];
    temp[2] = io[6];
    temp[3] = io[7];
    io[4] = io[8];
    io[5] = io[9];
    io[6] = io[10];
    io[7] = io[11];
    vpx_highbd_idct8x8_half1d_sse4_1(io);

    io[8] = temp[0];
    io[9] = temp[1];
    io[10] = temp[2];
    io[11] = temp[3];
    vpx_highbd_idct8x8_half1d_sse4_1(&io[8]);

    highbd_idct8x8_final_round(io);
  }

  recon_and_store_8x8(io, dest, stride, bd);
}

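// Reduced 8x8 inverse transform for blocks whose non-zero coefficients are
// confined to the top-left 4x4 corner, so only the first four rows of input
// are loaded and the 1-D passes use the simplified highbd_idct8x8_12_half1d.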
void vpx_highbd_idct8x8_12_add_sse4_1(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
  const __m128i zero = _mm_setzero_si128();
  __m128i io[16];

  io[0] = _mm_load_si128((const __m128i *)(input + 0 * 8 + 0));
  io[1] = _mm_load_si128((const __m128i *)(input + 1 * 8 + 0));
  io[2] = _mm_load_si128((const __m128i *)(input + 2 * 8 + 0));
  io[3] = _mm_load_si128((const __m128i *)(input + 3 * 8 + 0));

  if (bd == 8) {
    __m128i io_short[8];

    io_short[0] = _mm_packs_epi32(io[0], zero);
    io_short[1] = _mm_packs_epi32(io[1], zero);
    io_short[2] = _mm_packs_epi32(io[2], zero);
    io_short[3] = _mm_packs_epi32(io[3], zero);

    idct8x8_12_add_kernel_ssse3(io_short);
    round_shift_8x8(io_short, io);
  } else {
    __m128i temp[4];

    highbd_idct8x8_12_half1d(io);

    temp[0] = io[4];
    temp[1] = io[5];
    temp[2] = io[6];
    temp[3] = io[7];
    highbd_idct8x8_12_half1d(io);

    io[8] = temp[0];
    io[9] = temp[1];
    io[10] = temp[2];
    io[11] = temp[3];
    highbd_idct8x8_12_half1d(&io[8]);

    highbd_idct8x8_final_round(io);
  }

  recon_and_store_8x8(io, dest, stride, bd);
}