; (Code-viewer navigation header, not part of the test:) Home | History | Annotate | Download | only in X86
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
      3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64
      4 
      5 %v4_varying_complex = type { <4 x float>, <4 x float> } ; 32-byte struct of two <4 x float> fields -- presumably {re, im} lanes (TODO confirm); its 32-byte size is what yields the 128-byte store offset in @FFT (GEP index 4 * 32 = 128).
      6 
      7 define void @FFT(%v4_varying_complex* noalias nocapture %destination, float* noalias %re, <4 x i32>* noalias nocapture %ptr_cast_for_load) nounwind {
; What this function does (see IR after the CHECK blocks):
;   1. load a <4 x i32> index vector from %ptr_cast_for_load,
;   2. shl each lane by 4 (i.e. scale every index by 16),
;   3. extractelement each lane, sext it to i64, and use it as a float* GEP
;      index into %re -- a scalarized 4-lane gather of floats,
;   4. store the four loaded floats into %destination at fixed byte offsets
;      128, 164, 200, 236 (lane stride 36 bytes, expressed via i8 GEPs; the
;      first store instead uses an inbounds GEP to element 4 of a
;      %v4_varying_complex array, 4 * 32 = 128 bytes).
; NOTE(review): the CHECK blocks below were produced by
; utils/update_llc_test_checks.py (see NOTE at the top of the file) -- do not
; edit them by hand; regenerate with that script if the IR changes.
; NOTE(review): presumably this is a regression test that the
; shl/extractelement/sext index computation lowers cleanly (pslld + pextrd,
; with sign-extension folded into movslq/cltq on x86-64) -- confirm against
; the originating commit/PR.
      8 ; X86-LABEL: FFT:
      9 ; X86:       # %bb.0: # %begin
     10 ; X86-NEXT:    pushl %ebx
     11 ; X86-NEXT:    pushl %edi
     12 ; X86-NEXT:    pushl %esi
     13 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
     14 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
     15 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
     16 ; X86-NEXT:    movdqu (%edx), %xmm0
     17 ; X86-NEXT:    pslld $4, %xmm0
     18 ; X86-NEXT:    movd %xmm0, %edx
     19 ; X86-NEXT:    pextrd $1, %xmm0, %esi
     20 ; X86-NEXT:    pextrd $2, %xmm0, %edi
     21 ; X86-NEXT:    pextrd $3, %xmm0, %ebx
     22 ; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
     23 ; X86-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
     24 ; X86-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
     25 ; X86-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
     26 ; X86-NEXT:    movss %xmm0, 128(%eax)
     27 ; X86-NEXT:    movss %xmm1, 164(%eax)
     28 ; X86-NEXT:    movss %xmm2, 200(%eax)
     29 ; X86-NEXT:    movss %xmm3, 236(%eax)
     30 ; X86-NEXT:    popl %esi
     31 ; X86-NEXT:    popl %edi
     32 ; X86-NEXT:    popl %ebx
     33 ; X86-NEXT:    retl
     34 ;
     35 ; X64-LABEL: FFT:
     36 ; X64:       # %bb.0: # %begin
     37 ; X64-NEXT:    movdqu (%rdx), %xmm0
     38 ; X64-NEXT:    pslld $4, %xmm0
     39 ; X64-NEXT:    movd %xmm0, %eax
     40 ; X64-NEXT:    movslq %eax, %r8
     41 ; X64-NEXT:    pextrd $1, %xmm0, %ecx
     42 ; X64-NEXT:    movslq %ecx, %rcx
     43 ; X64-NEXT:    pextrd $2, %xmm0, %edx
     44 ; X64-NEXT:    movslq %edx, %rdx
     45 ; X64-NEXT:    pextrd $3, %xmm0, %eax
     46 ; X64-NEXT:    cltq
     47 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
     48 ; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
     49 ; X64-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
     50 ; X64-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
     51 ; X64-NEXT:    movss %xmm0, 128(%rdi)
     52 ; X64-NEXT:    movss %xmm1, 164(%rdi)
     53 ; X64-NEXT:    movss %xmm2, 200(%rdi)
     54 ; X64-NEXT:    movss %xmm3, 236(%rdi)
     55 ; X64-NEXT:    retq
     56 begin:
; Load the 4 indices and scale each by 16 (shl 4).
     57   %ptr_masked_load79 = load <4 x i32>, <4 x i32>* %ptr_cast_for_load, align 4
     58   %mul__bitReversedProgramIndex_load = shl <4 x i32> %ptr_masked_load79, <i32 4, i32 4, i32 4, i32 4>
     59 
; Extract each lane and sign-extend to i64 for pointer arithmetic.
     60   %offset32_1 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 0
     61   %ptroffset_1 = sext i32 %offset32_1 to i64
     62   %offset32_2 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 1
     63   %ptroffset_2 = sext i32 %offset32_2 to i64
     64   %offset32_3 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 2
     65   %ptroffset_3 = sext i32 %offset32_3 to i64
     66   %offset32_4 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 3
     67   %ptroffset_4 = sext i32 %offset32_4 to i64
     68 
; Scalarized gather: one indexed float load from %re per lane.
     69   %ptrcast_1 = getelementptr float, float* %re, i64 %ptroffset_1
     70   %val_1 = load float, float* %ptrcast_1, align 4
     71   %ptrcast_2 = getelementptr float, float* %re, i64 %ptroffset_2
     72   %val_2 = load float, float* %ptrcast_2, align 4
     73   %ptrcast_3 = getelementptr float, float* %re, i64 %ptroffset_3
     74   %val_3 = load float, float* %ptrcast_3, align 4
     75   %ptrcast_4 = getelementptr float, float* %re, i64 %ptroffset_4
     76   %val_4 = load float, float* %ptrcast_4, align 4
     77 
; Scatter to fixed offsets 128/164/200/236 (36-byte lane stride). The first
; store addresses element 4 of a %v4_varying_complex array (4 * 32 = 128
; bytes) via an inbounds struct GEP; the other three use raw i8 GEPs.
     78   %destination_load_ptr2int_2void = bitcast %v4_varying_complex* %destination to i8*
     79   %ptrcast1_1 = getelementptr inbounds %v4_varying_complex, %v4_varying_complex* %destination, i64 4, i32 0, i64 0
     80   store float %val_1, float* %ptrcast1_1, align 4
     81   %finalptr_2 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 164
     82   %ptrcast1_2 = bitcast i8* %finalptr_2 to float*
     83   store float %val_2, float* %ptrcast1_2, align 4
     84   %finalptr_3 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 200
     85   %ptrcast1_3 = bitcast i8* %finalptr_3 to float*
     86   store float %val_3, float* %ptrcast1_3, align 4
     87   %finalptr_4 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 236
     88   %ptrcast1_4 = bitcast i8* %finalptr_4 to float*
     89   store float %val_4, float* %ptrcast1_4, align 4
     90   ret void
     91 }
     92