; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=ALL --check-prefix=AVX
;
; Verify that fast-isel doesn't select legacy SSE instructions on targets that
; feature AVX.
;
; Test cases are obtained from the following code snippet:
; ///
; double single_to_double_rr(float x) {
;   return (double)x;
; }
; float double_to_single_rr(double x) {
;   return (float)x;
; }
; double single_to_double_rm(float *x) {
;   return (double)*x;
; }
; float double_to_single_rm(double *x) {
;   return (float)*x;
; }
; ///
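;
; The *_rr tests exercise the register-register form of each conversion, while
; the *_rm tests load the source operand from memory. As the CHECK lines below
; show, with SSE the load is folded into cvtss2sd/cvtsd2ss, whereas with AVX
; fast-isel emits a separate vmovss/vmovsd load followed by the
; register-register vcvtss2sd/vcvtsd2ss.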

define double @single_to_double_rr(float %x) {
; ALL-LABEL: single_to_double_rr:
; SSE-NOT: vcvtss2sd
; AVX: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL: ret
entry:
  %conv = fpext float %x to double
  ret double %conv
}

define float @double_to_single_rr(double %x) {
; ALL-LABEL: double_to_single_rr:
; SSE-NOT: vcvtsd2ss
; AVX: vcvtsd2ss %xmm0, %xmm0, %xmm0
; ALL: ret
entry:
  %conv = fptrunc double %x to float
  ret float %conv
}

define double @single_to_double_rm(float* %x) {
; ALL-LABEL: single_to_double_rm:
; SSE: cvtss2sd (%rdi), %xmm0
; AVX: vmovss (%rdi), %xmm0
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: ret
entry:
  %0 = load float, float* %x, align 4
  %conv = fpext float %0 to double
  ret double %conv
}

define float @double_to_single_rm(double* %x) {
; ALL-LABEL: double_to_single_rm:
; SSE: cvtsd2ss (%rdi), %xmm0
; AVX: vmovsd (%rdi), %xmm0
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; ALL-NEXT: ret
entry:
  %0 = load double, double* %x, align 8
  %conv = fptrunc double %0 to float
  ret float %conv
}