# NOTE(review): this copy came through a code-browser export — the original first
# line was viewer navigation ("Home | History | Annotate | Download | only in
# GlobalISel"), and each line below carries the viewer's line-number gutter.
      1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
      2 # RUN: llc -mtriple=x86_64-linux-gnu                                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
      3 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
      4 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
      5 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
      6 --- |
      7 
          ; Minimal f32 fsub used only to give the MIR function below an IR anchor
          ; (%ir-block.0); the interesting selection happens on the MIR body.
      8   define float @test_fsub_float(float %arg1, float %arg2) {
      9     %ret = fsub float %arg1, %arg2
     10     ret float %ret
     11   }
     12 
          ; f64 twin of the function above; exercises the SUBSD family instead of SUBSS.
     13   define double @test_fsub_double(double %arg1, double %arg2) {
     14     %ret = fsub double %arg1, %arg2
     15     ret double %ret
     16   }
     17 
     18 ...
     19 ---
      20 name:            test_fsub_float
      21 alignment:       4
      22 legalized:       true
      23 regBankSelected: true
      24 # Checks that a scalar s32 G_FSUB selects to SUBSSrr (SSE), VSUBSSrr (AVX),
      25 registers:
      26   - { id: 0, class: vecr, preferred-register: '' }
      27   - { id: 1, class: vecr, preferred-register: '' }
      28   - { id: 2, class: vecr, preferred-register: '' }
      29   - { id: 3, class: vecr, preferred-register: '' }
      30   - { id: 4, class: vecr, preferred-register: '' }
      31   - { id: 5, class: vecr, preferred-register: '' }
      32 liveins:
      33 fixedStack:
      34 stack:
      35 constants:
      36 # or VSUBSSZrr (AVX512F/AVX512VL). The s128 COPY/G_TRUNC and G_ANYEXT/COPY pairs
      37 # model the float travelling in $xmm0/$xmm1; they must constrain to fr32/fr32x.
      38 body:             |
      39   bb.1 (%ir-block.0):
      40     liveins: $xmm0, $xmm1
      41 
      42     ; SSE-LABEL: name: test_fsub_float
      43     ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
      44     ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
      45     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
      46     ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
      47     ; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY1]], [[COPY3]]
      48     ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSSrr]]
      49     ; SSE: $xmm0 = COPY [[COPY4]]
      50     ; SSE: RET 0, implicit $xmm0
      51     ; AVX-LABEL: name: test_fsub_float
      52     ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
      53     ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
      54     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
      55     ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
      56     ; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY1]], [[COPY3]]
      57     ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSSrr]]
      58     ; AVX: $xmm0 = COPY [[COPY4]]
      59     ; AVX: RET 0, implicit $xmm0
      60     ; AVX512F-LABEL: name: test_fsub_float
      61     ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
      62     ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
      63     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
      64     ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
      65     ; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
      66     ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
      67     ; AVX512F: $xmm0 = COPY [[COPY4]]
      68     ; AVX512F: RET 0, implicit $xmm0
      69     ; AVX512VL-LABEL: name: test_fsub_float
      70     ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
      71     ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
      72     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
      73     ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
      74     ; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
      75     ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
      76     ; AVX512VL: $xmm0 = COPY [[COPY4]]
      77     ; AVX512VL: RET 0, implicit $xmm0
      78     %2:vecr(s128) = COPY $xmm0
      79     %0:vecr(s32) = G_TRUNC %2(s128)
      80     %3:vecr(s128) = COPY $xmm1
      81     %1:vecr(s32) = G_TRUNC %3(s128)
      82     %4:vecr(s32) = G_FSUB %0, %1
      83     %5:vecr(s128) = G_ANYEXT %4(s32)
      84     $xmm0 = COPY %5(s128)
      85     RET 0, implicit $xmm0
     86 
     87 ...
     88 ---
      89 name:            test_fsub_double
      90 alignment:       4
      91 legalized:       true
      92 regBankSelected: true
      93 # Checks that a scalar s64 G_FSUB selects to SUBSDrr (SSE), VSUBSDrr (AVX),
      94 registers:
      95   - { id: 0, class: vecr, preferred-register: '' }
      96   - { id: 1, class: vecr, preferred-register: '' }
      97   - { id: 2, class: vecr, preferred-register: '' }
      98   - { id: 3, class: vecr, preferred-register: '' }
      99   - { id: 4, class: vecr, preferred-register: '' }
     100   - { id: 5, class: vecr, preferred-register: '' }
     101 liveins:
     102 fixedStack:
     103 stack:
     104 constants:
     105 # or VSUBSDZrr (AVX512F/AVX512VL). The s128 COPY/G_TRUNC and G_ANYEXT/COPY pairs
     106 # model the double travelling in $xmm0/$xmm1; they must constrain to fr64/fr64x.
     107 body:             |
     108   bb.1 (%ir-block.0):
     109     liveins: $xmm0, $xmm1
     110 
     111     ; SSE-LABEL: name: test_fsub_double
     112     ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     113     ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     114     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     115     ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
     116     ; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY1]], [[COPY3]]
     117     ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSDrr]]
     118     ; SSE: $xmm0 = COPY [[COPY4]]
     119     ; SSE: RET 0, implicit $xmm0
     120     ; AVX-LABEL: name: test_fsub_double
     121     ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     122     ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     123     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     124     ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
     125     ; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY1]], [[COPY3]]
     126     ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSDrr]]
     127     ; AVX: $xmm0 = COPY [[COPY4]]
     128     ; AVX: RET 0, implicit $xmm0
     129     ; AVX512F-LABEL: name: test_fsub_double
     130     ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
     131     ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     132     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     133     ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
     134     ; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
     135     ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
     136     ; AVX512F: $xmm0 = COPY [[COPY4]]
     137     ; AVX512F: RET 0, implicit $xmm0
     138     ; AVX512VL-LABEL: name: test_fsub_double
     139     ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
     140     ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     141     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     142     ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
     143     ; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
     144     ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
     145     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     146     ; AVX512VL: RET 0, implicit $xmm0
     147     %2:vecr(s128) = COPY $xmm0
     148     %0:vecr(s64) = G_TRUNC %2(s128)
     149     %3:vecr(s128) = COPY $xmm1
     150     %1:vecr(s64) = G_TRUNC %3(s128)
     151     %4:vecr(s64) = G_FSUB %0, %1
     152     %5:vecr(s128) = G_ANYEXT %4(s64)
     153     $xmm0 = COPY %5(s128)
     154     RET 0, implicit $xmm0
    155 
    156 ...
    157