; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+vpclmulqdq | FileCheck %s
; FIXME: The remaining vpclmulqdq should be eliminated: once the commuted
; calls are CSE'd, the xor folds to zero and the multiply result is dead.
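;
; PCLMULQDQ carry-lessly multiplies one quadword selected from each source:
; imm8 bit 0 picks the quadword of the first source and bit 4 picks the
; quadword of the second (applied per 128-bit lane by the 256-bit form).
; Carry-less multiplication is commutative, so swapping the two sources while
; also swapping the two imm8 selector bits yields an identical product. Each
; test below xors a pair of such equivalent calls, so every result is zero.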

declare <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64>, <4 x i64>, i8) nounwind readnone

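; Both calls use imm8 0 (low qword of each source), so a plain operand swap is
; value-preserving: %1 == %2 and the xor folds to zero.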
define <4 x i64> @commute_v1(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_v1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %1 = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a0, <4 x i64> %a1, i8 0)
  %2 = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a1, <4 x i64> %a0, i8 0)
  %3 = xor <4 x i64> %1, %2
  ret <4 x i64> %3
}

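; imm8 16 (0x10) with (%a0, %a1) selects a0.lo * a1.hi; swapping the operands
; and the selector bits to imm8 1 (0x01) selects the same quadword pair.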
define <4 x i64> @commute_v2(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_v2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpclmulqdq $16, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %1 = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a0, <4 x i64> %a1, i8 16)
  %2 = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a1, <4 x i64> %a0, i8 1)
  %3 = xor <4 x i64> %2, %1
  ret <4 x i64> %3
}

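; imm8 17 (0x11) selects the high qword of both sources, so, as in commute_v1,
; the operand swap alone is value-preserving.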
define <4 x i64> @commute_v3(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_v3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %1 = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a0, <4 x i64> %a1, i8 17)
  %2 = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a1, <4 x i64> %a0, i8 17)
  %3 = xor <4 x i64> %2, %1
  ret <4 x i64> %3
}