; (Stripped web-viewer navigation header; this test lives under llvm/test/CodeGen/X86.)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+sse2,+pclmul | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+avx2,+pclmul | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+avx512vl,+vpclmulqdq | FileCheck %s --check-prefix=AVX

; Carry-less multiply of one qword selected from each 128-bit source.
; Immediate bit 0 selects the qword of operand 1 (0 = low, 1 = high);
; bit 4 selects the qword of operand 2.
declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone

; The loaded value is the FIRST intrinsic argument, so folding it as the
; instruction's memory operand requires llc to commute the operands
; (pclmulqdq is commutable if the immediate's selector bits are swapped).
; imm 0 = low qword of both sources; bit0<->bit4 swap is a no-op, so the
; emitted immediate stays $0.
define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_lq_lq:
; SSE:       # %bb.0:
; SSE-NEXT:    pclmulqdq $0, (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: commute_lq_lq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpclmulqdq $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = load <2 x i64>, <2 x i64>* %a0
  %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 0)
  ret <2 x i64> %2
}
     22 
; Memory operand is arg 1, so llc commutes the operands to fold the load.
; IR immediate 16 (0b10000: high qword of arg 2, low of arg 1) becomes $1
; after the commute swaps selector bits 0 and 4 — verified by the CHECK lines.
define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_lq_hq:
; SSE:       # %bb.0:
; SSE-NEXT:    pclmulqdq $1, (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: commute_lq_hq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpclmulqdq $1, (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = load <2 x i64>, <2 x i64>* %a0
  %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 16)
  ret <2 x i64> %2
}
     37 
; Mirror of commute_lq_hq: IR immediate 1 (high qword of arg 1, low of arg 2)
; becomes $16 once llc commutes the operands to fold the load of arg 1 into
; the instruction's memory operand.
define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_hq_lq:
; SSE:       # %bb.0:
; SSE-NEXT:    pclmulqdq $16, (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: commute_hq_lq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpclmulqdq $16, (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = load <2 x i64>, <2 x i64>* %a0
  %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 1)
  ret <2 x i64> %2
}
     52 
; imm 17 (0b10001) selects the high qword of both sources; swapping selector
; bits 0 and 4 during the commute leaves it unchanged, so $17 is emitted and
; the load of arg 1 is still folded as the memory operand.
define <2 x i64> @commute_hq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_hq_hq:
; SSE:       # %bb.0:
; SSE-NEXT:    pclmulqdq $17, (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: commute_hq_hq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpclmulqdq $17, (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = load <2 x i64>, <2 x i64>* %a0
  %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 17)
  ret <2 x i64> %2
}
     67