; Use CPU parameters to ensure that a CPU-specific attribute is not overriding the AVX definition.

; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  -mattr=+avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx             | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2                 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  -mattr=-avx | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2     -mattr=-avx | FileCheck %s --check-prefix=SSE

; With AVX, there is no need to load the unaligned operand from memory with an
; explicit move instruction; the operand should be folded directly into the AND.

; With SSE, folding a memory operand into a math/logic op requires 16-byte
; alignment, unless the target is specially configured to allow unaligned folds
; (as on some CPUs such as AMD Family 10H). A sketch of the aligned SSE case
; appears after @test1 below.

define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
  %in0 = load <4 x i32>, <4 x i32>* %p0, align 2
  %a = and <4 x i32> %in0, %in1
  ret <4 x i32> %a

; CHECK-LABEL: @test1
; CHECK-NOT:   vmovups
; CHECK:       vandps (%rdi), %xmm0, %xmm0
; CHECK-NEXT:  ret

; SSE-LABEL: @test1
; SSE:       movups (%rdi), %xmm1
; SSE-NEXT:  andps %xmm1, %xmm0
; SSE-NEXT:  ret
}
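
; A minimal sketch of the aligned counterpart, not part of the original test:
; with a 16-byte-aligned load, SSE should also be able to fold the memory
; operand, so no separate movups is needed for either prefix. The name
; @test1_aligned and the expected instruction sequences below are assumptions
; for illustration, not verified output of the RUN lines above.

define <4 x i32> @test1_aligned(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
  %in0 = load <4 x i32>, <4 x i32>* %p0, align 16
  %a = and <4 x i32> %in0, %in1
  ret <4 x i32> %a

; CHECK-LABEL: @test1_aligned
; CHECK:       vandps (%rdi), %xmm0, %xmm0

; SSE-LABEL: @test1_aligned
; SSE:       andps (%rdi), %xmm0
}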