; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
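
; All element widths share the same per-byte popcount core, as the expected
; sequences below show: mask the low nibble of each byte, look up its bit
; count in the in-register table [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] with
; (v)pshufb, do the same for the high nibble, and vpaddb the two lookups.
; For example, byte 0xe7 gives LUT[0x7] + LUT[0xe] = 3 + 3 = 6 set bits.
; Only the widening of the byte counts differs per element type:
;  * v4i64:  vpsadbw against zero sums the eight bytes of each quadword.
;  * v8i32:  interleave with zero (vpunpckldq/vpunpckhdq), vpsadbw each
;            half, then repack the dword sums with vpackuswb.
;  * v16i16: add adjacent byte counts via vpsllw $8 / vpaddb / vpsrlw $8.
;  * v32i8:  the per-byte counts are already the result.
; On AVX1 the ymm input is split with vextractf128 and each xmm half is
; processed separately; AVX2 runs the same steps on full ymm registers.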

define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
; AVX1:       # BB#0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT:    vandps %xmm2, %xmm1, %xmm3
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
; AVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpsadbw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vandps %xmm2, %xmm0, %xmm5
; AVX1-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
; AVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
; AVX1-NEXT:    vpsadbw %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: testv4i64:
; AVX2:       # BB#0:
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %in)
  ret <4 x i64> %out
}

define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
; AVX1:       # BB#0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT:    vandps %xmm2, %xmm1, %xmm3
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
; AVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-NEXT:    vpsadbw %xmm3, %xmm5, %xmm5
; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; AVX1-NEXT:    vpsadbw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpackuswb %xmm5, %xmm1, %xmm1
; AVX1-NEXT:    vandps %xmm2, %xmm0, %xmm5
; AVX1-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
; AVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; AVX1-NEXT:    vpsadbw %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; AVX1-NEXT:    vpsadbw %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: testv8i32:
; AVX2:       # BB#0:
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT:    vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT:    vpsadbw %ymm1, %ymm2, %ymm2
; AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %in)
  ret <8 x i32> %out
}

define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
; AVX1:       # BB#0:
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm4
; AVX1-NEXT:    vpand %xmm1, %xmm4, %xmm4
; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpsllw $8, %xmm2, %xmm4
; AVX1-NEXT:    vpaddb %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm4
; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vpsllw $8, %xmm0, %xmm1
; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: testv16i16:
; AVX2:       # BB#0:
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpsllw $8, %ymm0, %ymm1
; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
  ret <16 x i16> %out
}

define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
; AVX1:       # BB#0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT:    vandps %xmm2, %xmm1, %xmm3
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
; AVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vandps %xmm2, %xmm0, %xmm3
; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: testv32i8:
; AVX2:       # BB#0:
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX2-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
  ret <32 x i8> %out
}
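
; ctpop of a constant vector should be constant folded, so each of the
; remaining tests is expected to compile to a single constant-pool load.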
define <4 x i64> @foldv4i64() nounwind {
; ALL-LABEL: foldv4i64:
; ALL:       # BB#0:
; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [1,64,0,8]
; ALL-NEXT:    retq
  %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>)
  ret <4 x i64> %out
}

define <8 x i32> @foldv8i32() nounwind {
; ALL-LABEL: foldv8i32:
; ALL:       # BB#0:
; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [1,32,0,8,16,3,2,3]
; ALL-NEXT:    retq
  %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>)
  ret <8 x i32> %out
}

define <16 x i16> @foldv16i16() nounwind {
; ALL-LABEL: foldv16i16:
; ALL:       # BB#0:
; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [1,16,0,8,0,3,2,3,15,7,1,1,1,1,1,1]
; ALL-NEXT:    retq
  %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>)
  ret <16 x i16> %out
}

define <32 x i8> @foldv32i8() nounwind {
; ALL-LABEL: foldv32i8:
; ALL:       # BB#0:
; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1,1,1,0,0,1,2,3,4,5,6,7,8,2,2,3,7]
; ALL-NEXT:    retq
  %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>)
  ret <32 x i8> %out
}

declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)