; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+ssse3 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+ssse3 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512

define <16 x i8> @test_x86_ssse3_pabs_b_128(<16 x i8> %a0) {
; SSE-LABEL: test_x86_ssse3_pabs_b_128:
; SSE: ## %bb.0:
; SSE-NEXT: pabsb %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x1c,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_ssse3_pabs_b_128:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpabsb %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_ssse3_pabs_b_128:
; AVX512: ## %bb.0:
; AVX512-NEXT: vpabsb %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0) ; <<16 x i8>> [#uses=1]
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone


define <4 x i32> @test_x86_ssse3_pabs_d_128(<4 x i32> %a0) {
; SSE-LABEL: test_x86_ssse3_pabs_d_128:
; SSE: ## %bb.0:
; SSE-NEXT: pabsd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x1e,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_ssse3_pabs_d_128:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_ssse3_pabs_d_128:
; AVX512: ## %bb.0:
; AVX512-NEXT: vpabsd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone


define <8 x i16> @test_x86_ssse3_pabs_w_128(<8 x i16> %a0) {
; SSE-LABEL: test_x86_ssse3_pabs_w_128:
; SSE: ## %bb.0:
; SSE-NEXT: pabsw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x1d,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_ssse3_pabs_w_128:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpabsw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_ssse3_pabs_w_128:
; AVX512: ## %bb.0:
; AVX512-NEXT: vpabsw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone


define <4 x i32> @test_x86_ssse3_phadd_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_ssse3_phadd_d_128:
; SSE: ## %bb.0:
; SSE-NEXT: phaddd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x02,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_phadd_d_128:
; AVX: ## %bb.0:
; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x02,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind readnone


define <8 x i16> @test_x86_ssse3_phadd_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phadd_sw_128:
; SSE: ## %bb.0:
; SSE-NEXT: phaddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x03,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_phadd_sw_128:
; AVX: ## %bb.0:
; AVX-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x03,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind readnone


define <8 x i16> @test_x86_ssse3_phadd_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phadd_w_128:
; SSE: ## %bb.0:
; SSE-NEXT: phaddw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x01,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_phadd_w_128:
; AVX: ## %bb.0:
; AVX-NEXT: vphaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x01,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind readnone


define <4 x i32> @test_x86_ssse3_phsub_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_ssse3_phsub_d_128:
; SSE: ## %bb.0:
; SSE-NEXT: phsubd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x06,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_phsub_d_128:
; AVX: ## %bb.0:
; AVX-NEXT: vphsubd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x06,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind readnone


define <8 x i16> @test_x86_ssse3_phsub_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phsub_sw_128:
; SSE: ## %bb.0:
; SSE-NEXT: phsubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x07,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_phsub_sw_128:
; AVX: ## %bb.0:
; AVX-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x07,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind readnone


define <8 x i16> @test_x86_ssse3_phsub_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phsub_w_128:
; SSE: ## %bb.0:
; SSE-NEXT: phsubw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x05,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_phsub_w_128:
; AVX: ## %bb.0:
; AVX-NEXT: vphsubw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x05,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind readnone


define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
; SSE: ## %bb.0:
; SSE-NEXT: pmaddubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x04,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x04,0xc1]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
; AVX512: ## %bb.0:
; AVX512-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x04,0xc1]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone


; Make sure we don't commute this operation.
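; (pmaddubsw treats its first operand as unsigned bytes and its second as signed
; bytes, so the operands cannot be swapped; the loaded value must stay in operand 0.)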
define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128_load_op0(<16 x i8>* %ptr, <16 x i8> %a1) {
; X86-SSE-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT: movdqa (%eax), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x08]
; X86-SSE-NEXT: pmaddubsw %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x38,0x04,0xc8]
; X86-SSE-NEXT: movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovdqa (%eax), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX1-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0x04,0xc0]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovdqa (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX512-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x04,0xc0]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x0f]
; X64-SSE-NEXT: pmaddubsw %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x38,0x04,0xc8]
; X64-SSE-NEXT: movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vmovdqa (%rdi), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX1-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0x04,0xc0]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX512-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x04,0xc0]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
  %a0 = load <16 x i8>, <16 x i8>* %ptr
  %res = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}


define <8 x i16> @test_x86_ssse3_pmul_hr_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_pmul_hr_sw_128:
; SSE: ## %bb.0:
; SSE-NEXT: pmulhrsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x0b,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_ssse3_pmul_hr_sw_128:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_ssse3_pmul_hr_sw_128:
; AVX512: ## %bb.0:
; AVX512-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind readnone


define <16 x i8> @test_x86_ssse3_pshuf_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_pshuf_b_128:
; SSE: ## %bb.0:
; SSE-NEXT: pshufb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x00,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_ssse3_pshuf_b_128:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_ssse3_pshuf_b_128:
; AVX512: ## %bb.0:
; AVX512-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone


define <16 x i8> @test_x86_ssse3_psign_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_psign_b_128:
; SSE: ## %bb.0:
; SSE-NEXT: psignb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x08,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_psign_b_128:
; AVX: ## %bb.0:
; AVX-NEXT: vpsignb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x08,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind readnone


define <4 x i32> @test_x86_ssse3_psign_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_ssse3_psign_d_128:
; SSE: ## %bb.0:
; SSE-NEXT: psignd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x0a,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_psign_d_128:
; AVX: ## %bb.0:
; AVX-NEXT: vpsignd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0a,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind readnone


define <8 x i16> @test_x86_ssse3_psign_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_psign_w_128:
; SSE: ## %bb.0:
; SSE-NEXT: psignw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x09,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_ssse3_psign_w_128:
; AVX: ## %bb.0:
; AVX-NEXT: vpsignw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x09,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16>, <8 x i16>) nounwind readnone