;
; jdmrgext.asm - merged upsampling/color conversion (64-bit AVX2)
;
; Copyright 2009, 2012 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2012, 2016, D. R. Commander.
; Copyright (C) 2015, Intel Corporation.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jcolsamp.inc"

; --------------------------------------------------------------------------
;
; Upsample and color convert for the case of 2:1 horizontal and 1:1 vertical.
;
; GLOBAL(void)
; jsimd_h2v1_merged_upsample_avx2(JDIMENSION output_width,
;                                 JSAMPIMAGE input_buf,
;                                 JDIMENSION in_row_group_ctr,
;                                 JSAMPARRAY output_buf);
;

; r10d = JDIMENSION output_width
; r11 = JSAMPIMAGE input_buf
; r12d = JDIMENSION in_row_group_ctr
; r13 = JSAMPARRAY output_buf

%define wk(i)   rbp - (WK_NUM - (i)) * SIZEOF_YMMWORD  ; ymmword wk[WK_NUM]
%define WK_NUM  3

    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_merged_upsample_avx2)

EXTN(jsimd_h2v1_merged_upsample_avx2):
    push        rbp
    mov         rax, rsp                     ; rax = original rbp
    sub         rsp, byte 4
    and         rsp, byte (-SIZEOF_YMMWORD)  ; align to 256 bits
    mov         [rsp], rax
    mov         rbp, rsp                     ; rbp = aligned rbp
    lea         rsp, [wk(0)]
    collect_args 4
    push        rbx

    mov         ecx, r10d                    ; col
    test        rcx, rcx
    jz          near .return

    push        rcx

    mov         rdi, r11
    mov         ecx, r12d
    mov         rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY]
    mov         rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY]
    mov         rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY]
    mov         rdi, r13
    mov         rsi, JSAMPROW [rsi+rcx*SIZEOF_JSAMPROW]  ; inptr0
    mov         rbx, JSAMPROW [rbx+rcx*SIZEOF_JSAMPROW]  ; inptr1
    mov         rdx, JSAMPROW [rdx+rcx*SIZEOF_JSAMPROW]  ; inptr2
    mov         rdi, JSAMPROW [rdi]                      ; outptr

    pop         rcx                          ; col

.columnloop:

    vmovdqu     ymm6, YMMWORD [rbx]          ; ymm6=Cb(0123456789ABCDEFGHIJKLMNOPQRSTUV)
    vmovdqu     ymm7, YMMWORD [rdx]          ; ymm7=Cr(0123456789ABCDEFGHIJKLMNOPQRSTUV)

    vpxor       ymm1, ymm1, ymm1             ; ymm1=(all 0's)
    vpcmpeqw    ymm3, ymm3, ymm3
    vpsllw      ymm3, ymm3, 7                ; ymm3={0xFF80 0xFF80 0xFF80 0xFF80 ..}

    vpermq      ymm6, ymm6, 0xd8             ; ymm6=Cb(01234567GHIJKLMN89ABCDEFOPQRSTUV)
    vpermq      ymm7, ymm7, 0xd8             ; ymm7=Cr(01234567GHIJKLMN89ABCDEFOPQRSTUV)
    vpunpcklbw  ymm4, ymm6, ymm1             ; ymm4=Cb(0123456789ABCDEF)=CbL
    vpunpckhbw  ymm6, ymm6, ymm1             ; ymm6=Cb(GHIJKLMNOPQRSTUV)=CbH
    vpunpcklbw  ymm0, ymm7, ymm1             ; ymm0=Cr(0123456789ABCDEF)=CrL
    vpunpckhbw  ymm7, ymm7, ymm1             ; ymm7=Cr(GHIJKLMNOPQRSTUV)=CrH

    vpaddw      ymm5, ymm6, ymm3
    vpaddw      ymm2, ymm4, ymm3
    vpaddw      ymm1, ymm7, ymm3
    vpaddw      ymm3, ymm0, ymm3

    ; (Original)
    ; R = Y                + 1.40200 * Cr
    ; G = Y - 0.34414 * Cb - 0.71414 * Cr
    ; B = Y + 1.77200 * Cb
    ;
    ; (This implementation)
    ; R = Y                + 0.40200 * Cr + Cr
    ; G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
    ; B = Y - 0.22800 * Cb + Cb + Cb
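    ;
    ; A sketch of the fixed-point scheme used below (the PW_*/PD_* constants
    ; are defined by the file that includes this one; the description assumes
    ; FIX(x) means x scaled by 2^16, i.e. SCALEBITS=16): vpmulhw keeps only
    ; the upper 16 bits of each 16x16-bit product, so
    ;
    ;   vpmulhw(2*Cb, -FIX(0.22800)) ~= 2 * Cb * -0.22800
    ;
    ; The operands are doubled beforehand to preserve one extra bit of
    ; precision through the multiply; adding PW_ONE and shifting right by 1
    ; afterwards undoes the doubling with rounding, leaving approximately
    ; Cb * -0.22800 in each word.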

    vpaddw      ymm6, ymm5, ymm5             ; ymm6=2*CbH
    vpaddw      ymm4, ymm2, ymm2             ; ymm4=2*CbL
    vpaddw      ymm7, ymm1, ymm1             ; ymm7=2*CrH
    vpaddw      ymm0, ymm3, ymm3             ; ymm0=2*CrL

    vpmulhw     ymm6, ymm6, [rel PW_MF0228]  ; ymm6=(2*CbH * -FIX(0.22800))
    vpmulhw     ymm4, ymm4, [rel PW_MF0228]  ; ymm4=(2*CbL * -FIX(0.22800))
    vpmulhw     ymm7, ymm7, [rel PW_F0402]   ; ymm7=(2*CrH * FIX(0.40200))
    vpmulhw     ymm0, ymm0, [rel PW_F0402]   ; ymm0=(2*CrL * FIX(0.40200))

    vpaddw      ymm6, ymm6, [rel PW_ONE]
    vpaddw      ymm4, ymm4, [rel PW_ONE]
    vpsraw      ymm6, ymm6, 1                ; ymm6=(CbH * -FIX(0.22800))
    vpsraw      ymm4, ymm4, 1                ; ymm4=(CbL * -FIX(0.22800))
    vpaddw      ymm7, ymm7, [rel PW_ONE]
    vpaddw      ymm0, ymm0, [rel PW_ONE]
    vpsraw      ymm7, ymm7, 1                ; ymm7=(CrH * FIX(0.40200))
    vpsraw      ymm0, ymm0, 1                ; ymm0=(CrL * FIX(0.40200))

    vpaddw      ymm6, ymm6, ymm5
    vpaddw      ymm4, ymm4, ymm2
    vpaddw      ymm6, ymm6, ymm5             ; ymm6=(CbH * FIX(1.77200))=(B-Y)H
    vpaddw      ymm4, ymm4, ymm2             ; ymm4=(CbL * FIX(1.77200))=(B-Y)L
    vpaddw      ymm7, ymm7, ymm1             ; ymm7=(CrH * FIX(1.40200))=(R-Y)H
    vpaddw      ymm0, ymm0, ymm3             ; ymm0=(CrL * FIX(1.40200))=(R-Y)L

    vmovdqa     YMMWORD [wk(0)], ymm6        ; wk(0)=(B-Y)H
    vmovdqa     YMMWORD [wk(1)], ymm7        ; wk(1)=(R-Y)H

    vpunpckhwd  ymm6, ymm5, ymm1
    vpunpcklwd  ymm5, ymm5, ymm1
    vpmaddwd    ymm5, ymm5, [rel PW_MF0344_F0285]
    vpmaddwd    ymm6, ymm6, [rel PW_MF0344_F0285]
    vpunpckhwd  ymm7, ymm2, ymm3
    vpunpcklwd  ymm2, ymm2, ymm3
    vpmaddwd    ymm2, ymm2, [rel PW_MF0344_F0285]
    vpmaddwd    ymm7, ymm7, [rel PW_MF0344_F0285]

    vpaddd      ymm5, ymm5, [rel PD_ONEHALF]
    vpaddd      ymm6, ymm6, [rel PD_ONEHALF]
    vpsrad      ymm5, ymm5, SCALEBITS
    vpsrad      ymm6, ymm6, SCALEBITS
    vpaddd      ymm2, ymm2, [rel PD_ONEHALF]
    vpaddd      ymm7, ymm7, [rel PD_ONEHALF]
    vpsrad      ymm2, ymm2, SCALEBITS
    vpsrad      ymm7, ymm7, SCALEBITS

    vpackssdw   ymm5, ymm5, ymm6             ; ymm5=CbH*-FIX(0.344)+CrH*FIX(0.285)
    vpackssdw   ymm2, ymm2, ymm7             ; ymm2=CbL*-FIX(0.344)+CrL*FIX(0.285)
    vpsubw      ymm5, ymm5, ymm1             ; ymm5=CbH*-FIX(0.344)+CrH*-FIX(0.714)=(G-Y)H
    vpsubw      ymm2, ymm2, ymm3             ; ymm2=CbL*-FIX(0.344)+CrL*-FIX(0.714)=(G-Y)L

    vmovdqa     YMMWORD [wk(2)], ymm5        ; wk(2)=(G-Y)H

    mov         al, 2                        ; Yctr
    jmp         short .Yloop_1st

.Yloop_2nd:
    vmovdqa     ymm0, YMMWORD [wk(1)]        ; ymm0=(R-Y)H
    vmovdqa     ymm2, YMMWORD [wk(2)]        ; ymm2=(G-Y)H
    vmovdqa     ymm4, YMMWORD [wk(0)]        ; ymm4=(B-Y)H

.Yloop_1st:
    vmovdqu     ymm7, YMMWORD [rsi]          ; ymm7=Y(0123456789ABCDEFGHIJKLMNOPQRSTUV)

    vpcmpeqw    ymm6, ymm6, ymm6
    vpsrlw      ymm6, ymm6, BYTE_BIT         ; ymm6={0xFF 0x00 0xFF 0x00 ..}
    vpand       ymm6, ymm6, ymm7             ; ymm6=Y(02468ACEGIKMOQSU)=YE
    vpsrlw      ymm7, ymm7, BYTE_BIT         ; ymm7=Y(13579BDFHJLNPRTV)=YO

    vmovdqa     ymm1, ymm0                   ; ymm1=ymm0=(R-Y)(L/H)
    vmovdqa     ymm3, ymm2                   ; ymm3=ymm2=(G-Y)(L/H)
    vmovdqa     ymm5, ymm4                   ; ymm5=ymm4=(B-Y)(L/H)

    vpaddw      ymm0, ymm0, ymm6             ; ymm0=((R-Y)+YE)=RE=R(02468ACEGIKMOQSU)
    vpaddw      ymm1, ymm1, ymm7             ; ymm1=((R-Y)+YO)=RO=R(13579BDFHJLNPRTV)
    vpackuswb   ymm0, ymm0, ymm0             ; ymm0=R(02468ACE********GIKMOQSU********)
    vpackuswb   ymm1, ymm1, ymm1             ; ymm1=R(13579BDF********HJLNPRTV********)

    vpaddw      ymm2, ymm2, ymm6             ; ymm2=((G-Y)+YE)=GE=G(02468ACEGIKMOQSU)
    vpaddw      ymm3, ymm3, ymm7             ; ymm3=((G-Y)+YO)=GO=G(13579BDFHJLNPRTV)
    vpackuswb   ymm2, ymm2, ymm2             ; ymm2=G(02468ACE********GIKMOQSU********)
    vpackuswb   ymm3, ymm3, ymm3             ; ymm3=G(13579BDF********HJLNPRTV********)

    vpaddw      ymm4, ymm4, ymm6             ; ymm4=((B-Y)+YE)=BE=B(02468ACEGIKMOQSU)
    vpaddw      ymm5, ymm5, ymm7             ; ymm5=((B-Y)+YO)=BO=B(13579BDFHJLNPRTV)
    vpackuswb   ymm4, ymm4, ymm4             ; ymm4=B(02468ACE********GIKMOQSU********)
    vpackuswb   ymm5, ymm5, ymm5             ; ymm5=B(13579BDF********HJLNPRTV********)
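
    ; At this point the register pairs above hold the even- and odd-numbered
    ; R, G, and B samples of 32 output pixels.  The blocks below shuffle them
    ; into memory order: packed 3-byte RGB pixels for RGB_PIXELSIZE == 3, or
    ; 4-byte pixels with a filler channel otherwise.  The names ymmA..ymmH
    ; used from here on are presumably register aliases defined elsewhere
    ; (e.g. by jcolsamp.inc) according to the active RGB component order.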

%if RGB_PIXELSIZE == 3  ; ---------------

    ; ymmA=(00 02 04 06 08 0A 0C 0E ** 0G 0I 0K 0M 0O 0Q 0S 0U **)
    ; ymmB=(01 03 05 07 09 0B 0D 0F ** 0H 0J 0L 0N 0P 0R 0T 0V **)
    ; ymmC=(10 12 14 16 18 1A 1C 1E ** 1G 1I 1K 1M 1O 1Q 1S 1U **)
    ; ymmD=(11 13 15 17 19 1B 1D 1F ** 1H 1J 1L 1N 1P 1R 1T 1V **)
    ; ymmE=(20 22 24 26 28 2A 2C 2E ** 2G 2I 2K 2M 2O 2Q 2S 2U **)
    ; ymmF=(21 23 25 27 29 2B 2D 2F ** 2H 2J 2L 2N 2P 2R 2T 2V **)
    ; ymmG=(** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **)
    ; ymmH=(** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **)

    vpunpcklbw  ymmA, ymmA, ymmC     ; ymmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E
                                     ;       0G 1G 0I 1I 0K 1K 0M 1M 0O 1O 0Q 1Q 0S 1S 0U 1U)
    vpunpcklbw  ymmE, ymmE, ymmB     ; ymmE=(20 01 22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F
                                     ;       2G 0H 2I 0J 2K 0L 2M 0N 2O 0P 2Q 0R 2S 0T 2U 0V)
    vpunpcklbw  ymmD, ymmD, ymmF     ; ymmD=(11 21 13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F
                                     ;       1H 2H 1J 2J 1L 2L 1N 2N 1P 2P 1R 2R 1T 2T 1V 2V)

    vpsrldq     ymmH, ymmA, 2        ; ymmH=(02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E 0G 1G
                                     ;       0I 1I 0K 1K 0M 1M 0O 1O 0Q 1Q 0S 1S 0U 1U -- --)
    vpunpckhwd  ymmG, ymmA, ymmE     ; ymmG=(08 18 28 09 0A 1A 2A 0B 0C 1C 2C 0D 0E 1E 2E 0F
                                     ;       0O 1O 2O 0P 0Q 1Q 2Q 0R 0S 1S 2S 0T 0U 1U 2U 0V)
    vpunpcklwd  ymmA, ymmA, ymmE     ; ymmA=(00 10 20 01 02 12 22 03 04 14 24 05 06 16 26 07
                                     ;       0G 1G 2G 0H 0I 1I 2I 0J 0K 1K 2K 0L 0M 1M 2M 0N)

    vpsrldq     ymmE, ymmE, 2        ; ymmE=(22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F 2G 0H
                                     ;       2I 0J 2K 0L 2M 0N 2O 0P 2Q 0R 2S 0T 2U 0V -- --)

    vpsrldq     ymmB, ymmD, 2        ; ymmB=(13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F 1H 2H
                                     ;       1J 2J 1L 2L 1N 2N 1P 2P 1R 2R 1T 2T 1V 2V -- --)
    vpunpckhwd  ymmC, ymmD, ymmH     ; ymmC=(19 29 0A 1A 1B 2B 0C 1C 1D 2D 0E 1E 1F 2F 0G 1G
                                     ;       1P 2P 0Q 1Q 1R 2R 0S 1S 1T 2T 0U 1U 1V 2V -- --)
    vpunpcklwd  ymmD, ymmD, ymmH     ; ymmD=(11 21 02 12 13 23 04 14 15 25 06 16 17 27 08 18
                                     ;       1H 2H 0I 1I 1J 2J 0K 1K 1L 2L 0M 1M 1N 2N 0O 1O)

    vpunpckhwd  ymmF, ymmE, ymmB     ; ymmF=(2A 0B 1B 2B 2C 0D 1D 2D 2E 0F 1F 2F 2G 0H 1H 2H
                                     ;       2Q 0R 1R 2R 2S 0T 1T 2T 2U 0V 1V 2V -- -- -- --)
    vpunpcklwd  ymmE, ymmE, ymmB     ; ymmE=(22 03 13 23 24 05 15 25 26 07 17 27 28 09 19 29
                                     ;       2I 0J 1J 2J 2K 0L 1L 2L 2M 0N 1N 2N 2O 0P 1P 2P)

    vpshufd     ymmH, ymmA, 0x4E     ; ymmH=(04 14 24 05 06 16 26 07 00 10 20 01 02 12 22 03
                                     ;       0K 1K 2K 0L 0M 1M 2M 0N 0G 1G 2G 0H 0I 1I 2I 0J)
    vpunpckldq  ymmA, ymmA, ymmD     ; ymmA=(00 10 20 01 11 21 02 12 02 12 22 03 13 23 04 14
                                     ;       0G 1G 2G 0H 1H 2H 0I 1I 0I 1I 2I 0J 1J 2J 0K 1K)
    vpunpckhdq  ymmD, ymmD, ymmE     ; ymmD=(15 25 06 16 26 07 17 27 17 27 08 18 28 09 19 29
                                     ;       1L 2L 0M 1M 2M 0N 1N 2N 1N 2N 0O 1O 2O 0P 1P 2P)
    vpunpckldq  ymmE, ymmE, ymmH     ; ymmE=(22 03 13 23 04 14 24 05 24 05 15 25 06 16 26 07
                                     ;       2I 0J 1J 2J 0K 1K 2K 0L 2K 0L 1L 2L 0M 1M 2M 0N)

    vpshufd     ymmH, ymmG, 0x4E     ; ymmH=(0C 1C 2C 0D 0E 1E 2E 0F 08 18 28 09 0A 1A 2A 0B
                                     ;       0S 1S 2S 0T 0U 1U 2U 0V 0O 1O 2O 0P 0Q 1Q 2Q 0R)
    vpunpckldq  ymmG, ymmG, ymmC     ; ymmG=(08 18 28 09 19 29 0A 1A 0A 1A 2A 0B 1B 2B 0C 1C
                                     ;       0O 1O 2O 0P 1P 2P 0Q 1Q 0Q 1Q 2Q 0R 1R 2R 0S 1S)
    vpunpckhdq  ymmC, ymmC, ymmF     ; ymmC=(1D 2D 0E 1E 2E 0F 1F 2F 1F 2F 0G 1G 2G 0H 1H 2H
                                     ;       1T 2T 0U 1U 2U 0V 1V 2V 1V 2V -- -- -- -- -- --)
    vpunpckldq  ymmF, ymmF, ymmH     ; ymmF=(2A 0B 1B 2B 0C 1C 2C 0D 2C 0D 1D 2D 0E 1E 2E 0F
                                     ;       2Q 0R 1R 2R 0S 1S 2S 0T 2S 0T 1T 2T 0U 1U 2U 0V)

    vpunpcklqdq ymmH, ymmA, ymmE     ; ymmH=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05
                                     ;       0G 1G 2G 0H 1H 2H 0I 1I 2I 0J 1J 2J 0K 1K 2K 0L)
    vpunpcklqdq ymmG, ymmD, ymmG     ; ymmG=(15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A
                                     ;       1L 2L 0M 1M 2M 0N 1N 2N 0O 1O 2O 0P 1P 2P 0Q 1Q)
    vpunpcklqdq ymmC, ymmF, ymmC     ; ymmC=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F
                                     ;       2Q 0R 1R 2R 0S 1S 2S 0T 1T 2T 0U 1U 2U 0V 1V 2V)

    vperm2i128  ymmA, ymmH, ymmG, 0x20  ; ymmA=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05
                                        ;       15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A)
    vperm2i128  ymmD, ymmC, ymmH, 0x30  ; ymmD=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F
                                        ;       0G 1G 2G 0H 1H 2H 0I 1I 2I 0J 1J 2J 0K 1K 2K 0L)
    vperm2i128  ymmF, ymmG, ymmC, 0x31  ; ymmF=(1L 2L 0M 1M 2M 0N 1N 2N 0O 1O 2O 0P 1P 2P 0Q 1Q
                                        ;       2Q 0R 1R 2R 0S 1S 2S 0T 1T 2T 0U 1U 2U 0V 1V 2V)

    cmp         rcx, byte SIZEOF_YMMWORD
    jb          short .column_st64

    test        rdi, SIZEOF_YMMWORD-1
    jnz         short .out1
    ; --(aligned)-------------------
    vmovntdq    YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovntdq    YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovntdq    YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmF
    jmp         short .out0
.out1:  ; --(unaligned)-----------------
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovdqu     YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmF
.out0:
    add         rdi, byte RGB_PIXELSIZE*SIZEOF_YMMWORD  ; outptr
    sub         rcx, byte SIZEOF_YMMWORD
    jz          near .endcolumn

    add         rsi, byte SIZEOF_YMMWORD     ; inptr0
    dec         al                           ; Yctr
    jnz         near .Yloop_2nd

    add         rbx, byte SIZEOF_YMMWORD     ; inptr1
    add         rdx, byte SIZEOF_YMMWORD     ; inptr2
    jmp         near .columnloop
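
    ; Tail handling for the 3-byte-per-pixel case: fewer than 32 pixels
    ; remain, so rcx is first converted from a pixel count to a byte count
    ; (rcx*3) and the packed data is then flushed in progressively smaller
    ; chunks -- 64, 32, and 16 bytes, then 8, 4, 2, and finally 1 byte --
    ; so that no store ever writes past the end of the output row.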

.column_st64:
    lea         rcx, [rcx+rcx*2]             ; imul ecx, RGB_PIXELSIZE
    cmp         rcx, byte 2*SIZEOF_YMMWORD
    jb          short .column_st32
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    add         rdi, byte 2*SIZEOF_YMMWORD   ; outptr
    vmovdqa     ymmA, ymmF
    sub         rcx, byte 2*SIZEOF_YMMWORD
    jmp         short .column_st31
.column_st32:
    cmp         rcx, byte SIZEOF_YMMWORD
    jb          short .column_st31
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    add         rdi, byte SIZEOF_YMMWORD     ; outptr
    vmovdqa     ymmA, ymmD
    sub         rcx, byte SIZEOF_YMMWORD
    jmp         short .column_st31
.column_st31:
    cmp         rcx, byte SIZEOF_XMMWORD
    jb          short .column_st15
    vmovdqu     XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
    add         rdi, byte SIZEOF_XMMWORD     ; outptr
    vperm2i128  ymmA, ymmA, ymmA, 1
    sub         rcx, byte SIZEOF_XMMWORD
.column_st15:
    ; Store the lower 8 bytes of xmmA to the output when it has enough
    ; space.
    cmp         rcx, byte SIZEOF_MMWORD
    jb          short .column_st7
    vmovq       XMM_MMWORD [rdi], xmmA
    add         rdi, byte SIZEOF_MMWORD
    sub         rcx, byte SIZEOF_MMWORD
    vpsrldq     xmmA, xmmA, SIZEOF_MMWORD
.column_st7:
    ; Store the lower 4 bytes of xmmA to the output when it has enough
    ; space.
    cmp         rcx, byte SIZEOF_DWORD
    jb          short .column_st3
    vmovd       XMM_DWORD [rdi], xmmA
    add         rdi, byte SIZEOF_DWORD
    sub         rcx, byte SIZEOF_DWORD
    vpsrldq     xmmA, xmmA, SIZEOF_DWORD
.column_st3:
    ; Store the lower 2 bytes of rax to the output when it has enough
    ; space.
    vmovd       eax, xmmA
    cmp         rcx, byte SIZEOF_WORD
    jb          short .column_st1
    mov         WORD [rdi], ax
    add         rdi, byte SIZEOF_WORD
    sub         rcx, byte SIZEOF_WORD
    shr         rax, 16
.column_st1:
    ; Store the lower 1 byte of rax to the output when it has enough
    ; space.
    test        rcx, rcx
    jz          short .endcolumn
    mov         BYTE [rdi], al
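
    ; The 4-byte-per-pixel path below follows the same pattern as the 3-byte
    ; path above, but interleaves a fourth, filler channel (X) alongside R,
    ; G, and B; the filler byte is 0xFF when RGBX_FILLER_0XFF is defined and
    ; 0x00 otherwise.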

%else  ; RGB_PIXELSIZE == 4 ; -----------

%ifdef RGBX_FILLER_0XFF
    vpcmpeqb    ymm6, ymm6, ymm6             ; ymm6=XE=X(02468ACE********GIKMOQSU********)
    vpcmpeqb    ymm7, ymm7, ymm7             ; ymm7=XO=X(13579BDF********HJLNPRTV********)
%else
    vpxor       ymm6, ymm6, ymm6             ; ymm6=XE=X(02468ACE********GIKMOQSU********)
    vpxor       ymm7, ymm7, ymm7             ; ymm7=XO=X(13579BDF********HJLNPRTV********)
%endif
    ; ymmA=(00 02 04 06 08 0A 0C 0E ** 0G 0I 0K 0M 0O 0Q 0S 0U **)
    ; ymmB=(01 03 05 07 09 0B 0D 0F ** 0H 0J 0L 0N 0P 0R 0T 0V **)
    ; ymmC=(10 12 14 16 18 1A 1C 1E ** 1G 1I 1K 1M 1O 1Q 1S 1U **)
    ; ymmD=(11 13 15 17 19 1B 1D 1F ** 1H 1J 1L 1N 1P 1R 1T 1V **)
    ; ymmE=(20 22 24 26 28 2A 2C 2E ** 2G 2I 2K 2M 2O 2Q 2S 2U **)
    ; ymmF=(21 23 25 27 29 2B 2D 2F ** 2H 2J 2L 2N 2P 2R 2T 2V **)
    ; ymmG=(30 32 34 36 38 3A 3C 3E ** 3G 3I 3K 3M 3O 3Q 3S 3U **)
    ; ymmH=(31 33 35 37 39 3B 3D 3F ** 3H 3J 3L 3N 3P 3R 3T 3V **)

    vpunpcklbw  ymmA, ymmA, ymmC     ; ymmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E
                                     ;       0G 1G 0I 1I 0K 1K 0M 1M 0O 1O 0Q 1Q 0S 1S 0U 1U)
    vpunpcklbw  ymmE, ymmE, ymmG     ; ymmE=(20 30 22 32 24 34 26 36 28 38 2A 3A 2C 3C 2E 3E
                                     ;       2G 3G 2I 3I 2K 3K 2M 3M 2O 3O 2Q 3Q 2S 3S 2U 3U)
    vpunpcklbw  ymmB, ymmB, ymmD     ; ymmB=(01 11 03 13 05 15 07 17 09 19 0B 1B 0D 1D 0F 1F
                                     ;       0H 1H 0J 1J 0L 1L 0N 1N 0P 1P 0R 1R 0T 1T 0V 1V)
    vpunpcklbw  ymmF, ymmF, ymmH     ; ymmF=(21 31 23 33 25 35 27 37 29 39 2B 3B 2D 3D 2F 3F
                                     ;       2H 3H 2J 3J 2L 3L 2N 3N 2P 3P 2R 3R 2T 3T 2V 3V)

    vpunpckhwd  ymmC, ymmA, ymmE     ; ymmC=(08 18 28 38 0A 1A 2A 3A 0C 1C 2C 3C 0E 1E 2E 3E
                                     ;       0O 1O 2O 3O 0Q 1Q 2Q 3Q 0S 1S 2S 3S 0U 1U 2U 3U)
    vpunpcklwd  ymmA, ymmA, ymmE     ; ymmA=(00 10 20 30 02 12 22 32 04 14 24 34 06 16 26 36
                                     ;       0G 1G 2G 3G 0I 1I 2I 3I 0K 1K 2K 3K 0M 1M 2M 3M)
    vpunpckhwd  ymmG, ymmB, ymmF     ; ymmG=(09 19 29 39 0B 1B 2B 3B 0D 1D 2D 3D 0F 1F 2F 3F
                                     ;       0P 1P 2P 3P 0R 1R 2R 3R 0T 1T 2T 3T 0V 1V 2V 3V)
    vpunpcklwd  ymmB, ymmB, ymmF     ; ymmB=(01 11 21 31 03 13 23 33 05 15 25 35 07 17 27 37
                                     ;       0H 1H 2H 3H 0J 1J 2J 3J 0L 1L 2L 3L 0N 1N 2N 3N)

    vpunpckhdq  ymmE, ymmA, ymmB     ; ymmE=(04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
                                     ;       0K 1K 2K 3K 0L 1L 2L 3L 0M 1M 2M 3M 0N 1N 2N 3N)
    vpunpckldq  ymmB, ymmA, ymmB     ; ymmB=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
                                     ;       0G 1G 2G 3G 0H 1H 2H 3H 0I 1I 2I 3I 0J 1J 2J 3J)
    vpunpckhdq  ymmF, ymmC, ymmG     ; ymmF=(0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F
                                     ;       0S 1S 2S 3S 0T 1T 2T 3T 0U 1U 2U 3U 0V 1V 2V 3V)
    vpunpckldq  ymmG, ymmC, ymmG     ; ymmG=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B
                                     ;       0O 1O 2O 3O 0P 1P 2P 3P 0Q 1Q 2Q 3Q 0R 1R 2R 3R)

    vperm2i128  ymmA, ymmB, ymmE, 0x20  ; ymmA=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
                                        ;       04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37)
    vperm2i128  ymmD, ymmG, ymmF, 0x20  ; ymmD=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B
                                        ;       0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F)
    vperm2i128  ymmC, ymmB, ymmE, 0x31  ; ymmC=(0G 1G 2G 3G 0H 1H 2H 3H 0I 1I 2I 3I 0J 1J 2J 3J
                                        ;       0K 1K 2K 3K 0L 1L 2L 3L 0M 1M 2M 3M 0N 1N 2N 3N)
    vperm2i128  ymmH, ymmG, ymmF, 0x31  ; ymmH=(0O 1O 2O 3O 0P 1P 2P 3P 0Q 1Q 2Q 3Q 0R 1R 2R 3R
                                        ;       0S 1S 2S 3S 0T 1T 2T 3T 0U 1U 2U 3U 0V 1V 2V 3V)

    cmp         rcx, byte SIZEOF_YMMWORD
    jb          short .column_st64

    test        rdi, SIZEOF_YMMWORD-1
    jnz         short .out1
    ; --(aligned)-------------------
    vmovntdq    YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovntdq    YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovntdq    YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmC
    vmovntdq    YMMWORD [rdi+3*SIZEOF_YMMWORD], ymmH
    jmp         short .out0
.out1:  ; --(unaligned)-----------------
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    vmovdqu     YMMWORD [rdi+2*SIZEOF_YMMWORD], ymmC
    vmovdqu     YMMWORD [rdi+3*SIZEOF_YMMWORD], ymmH
.out0:
    add         rdi, RGB_PIXELSIZE*SIZEOF_YMMWORD  ; outptr
    sub         rcx, byte SIZEOF_YMMWORD
    jz          near .endcolumn

    add         rsi, byte SIZEOF_YMMWORD     ; inptr0
    dec         al
    jnz         near .Yloop_2nd

    add         rbx, byte SIZEOF_YMMWORD     ; inptr1
    add         rdx, byte SIZEOF_YMMWORD     ; inptr2
    jmp         near .columnloop
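
    ; Tail handling for the 4-byte-per-pixel case: here rcx still counts
    ; pixels, so the thresholds are expressed as fractions of a YMM register
    ; -- SIZEOF_YMMWORD/2 = 16 pixels (two YMM stores), /4 = 8 pixels (one
    ; YMM store), /8 = 4 pixels (one XMM store), and /16 = 2 pixels (one
    ; 8-byte store) -- before the final single pixel, if any, is written
    ; with a 4-byte store.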

.column_st64:
    cmp         rcx, byte SIZEOF_YMMWORD/2
    jb          short .column_st32
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymmD
    add         rdi, byte 2*SIZEOF_YMMWORD   ; outptr
    vmovdqa     ymmA, ymmC
    vmovdqa     ymmD, ymmH
    sub         rcx, byte SIZEOF_YMMWORD/2
.column_st32:
    cmp         rcx, byte SIZEOF_YMMWORD/4
    jb          short .column_st16
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymmA
    add         rdi, byte SIZEOF_YMMWORD     ; outptr
    vmovdqa     ymmA, ymmD
    sub         rcx, byte SIZEOF_YMMWORD/4
.column_st16:
    cmp         rcx, byte SIZEOF_YMMWORD/8
    jb          short .column_st15
    vmovdqu     XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
    add         rdi, byte SIZEOF_XMMWORD     ; outptr
    vperm2i128  ymmA, ymmA, ymmA, 1
    sub         rcx, byte SIZEOF_YMMWORD/8
.column_st15:
    ; Store two pixels (8 bytes) of ymmA to the output when it has enough
    ; space.
    cmp         rcx, byte SIZEOF_YMMWORD/16
    jb          short .column_st7
    vmovq       MMWORD [rdi], xmmA
    add         rdi, byte SIZEOF_YMMWORD/16*4
    sub         rcx, byte SIZEOF_YMMWORD/16
    vpsrldq     xmmA, SIZEOF_YMMWORD/16*4
.column_st7:
    ; Store one pixel (4 bytes) of ymmA to the output when it has enough
    ; space.
    test        rcx, rcx
    jz          short .endcolumn
    vmovd       XMM_DWORD [rdi], xmmA

%endif  ; RGB_PIXELSIZE ; ---------------

.endcolumn:
    sfence                                   ; flush the write buffer

.return:
    pop         rbx
    vzeroupper
    uncollect_args 4
    mov         rsp, rbp                     ; rsp <- aligned rbp
    pop         rsp                          ; rsp <- original rbp
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Upsample and color convert for the case of 2:1 horizontal and 2:1 vertical.
;
; GLOBAL(void)
; jsimd_h2v2_merged_upsample_avx2(JDIMENSION output_width,
;                                 JSAMPIMAGE input_buf,
;                                 JDIMENSION in_row_group_ctr,
;                                 JSAMPARRAY output_buf);
;
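; The h2v2 case is, in effect, handled by calling the h2v1 routine above
; twice: a temporary input_buf is built on the stack for each call, the
; first call produces the even output row from the first luma row of the
; row group, and the second call produces the odd output row from the next
; luma row; both calls reuse the same Cb and Cr rows.
;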

; r10d = JDIMENSION output_width
; r11 = JSAMPIMAGE input_buf
; r12d = JDIMENSION in_row_group_ctr
; r13 = JSAMPARRAY output_buf

    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_merged_upsample_avx2)

EXTN(jsimd_h2v2_merged_upsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 4
    push        rbx

    mov         eax, r10d

    mov         rdi, r11
    mov         ecx, r12d
    mov         rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY]
    mov         rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY]
    mov         rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY]
    mov         rdi, r13
    lea         rsi, [rsi+rcx*SIZEOF_JSAMPROW]

    push        rdx                          ; inptr2
    push        rbx                          ; inptr1
    push        rsi                          ; inptr00
    mov         rbx, rsp

    push        rdi
    push        rcx
    push        rax

%ifdef WIN64
    mov         r8, rcx
    mov         r9, rdi
    mov         rcx, rax
    mov         rdx, rbx
%else
    mov         rdx, rcx
    mov         rcx, rdi
    mov         rdi, rax
    mov         rsi, rbx
%endif

    call        EXTN(jsimd_h2v1_merged_upsample_avx2)

    pop         rax
    pop         rcx
    pop         rdi
    pop         rsi
    pop         rbx
    pop         rdx

    add         rdi, byte SIZEOF_JSAMPROW    ; outptr1
    add         rsi, byte SIZEOF_JSAMPROW    ; inptr01

    push        rdx                          ; inptr2
    push        rbx                          ; inptr1
    push        rsi                          ; inptr00
    mov         rbx, rsp

    push        rdi
    push        rcx
    push        rax

%ifdef WIN64
    mov         r8, rcx
    mov         r9, rdi
    mov         rcx, rax
    mov         rdx, rbx
%else
    mov         rdx, rcx
    mov         rcx, rdi
    mov         rdi, rax
    mov         rsi, rbx
%endif

    call        EXTN(jsimd_h2v1_merged_upsample_avx2)

    pop         rax
    pop         rcx
    pop         rdi
    pop         rsi
    pop         rbx
    pop         rdx

    pop         rbx
    uncollect_args 4
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32