; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=x86-64 -mattr=+3dnowa | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC

; Verify the sched annotation emitted for FEMMS (fast exit from MMX state).
define void @test_femms() optsize {
; CHECK-LABEL: test_femms:
; CHECK:       # %bb.0:
; CHECK-NEXT:    femms # sched: [31:10.33]
; CHECK-NEXT:    retq # sched: [1:1.00]
  call void @llvm.x86.mmx.femms()
  ret void
}
declare void @llvm.x86.mmx.femms() nounwind readnone
     13 
; PAVGUSB: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pavgusb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pavgusb:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pavgusb %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pavgusb (%rdi), %mm0 # sched: [8:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx, x86_mmx) nounwind readnone
     28 
; PF2ID: reg-mem form (folded load) followed by the reg-reg form.
define i64 @test_pf2id(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pf2id:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pf2id (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    pf2id %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnow.pf2id(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnow.pf2id(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pf2id(x86_mmx) nounwind readnone
     43 
; PF2IW (3DNowA intrinsic): reg-mem form then reg-reg form.
define i64 @test_pf2iw(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pf2iw:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pf2iw (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    pf2iw %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnowa.pf2iw(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnowa.pf2iw(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnowa.pf2iw(x86_mmx) nounwind readnone
     58 
; PFACC: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfacc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfacc %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfacc (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfacc(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfacc(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfacc(x86_mmx, x86_mmx) nounwind readnone
     73 
; PFADD: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfadd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfadd %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfadd (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx, x86_mmx) nounwind readnone
     88 
; PFCMPEQ: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfcmpeq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfcmpeq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfcmpeq %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfcmpeq (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx, x86_mmx) nounwind readnone
    103 
; PFCMPGE: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfcmpge(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfcmpge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfcmpge %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfcmpge (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfcmpge(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfcmpge(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfcmpge(x86_mmx, x86_mmx) nounwind readnone
    118 
; PFCMPGT: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfcmpgt(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfcmpgt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfcmpgt %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfcmpgt (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfcmpgt(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfcmpgt(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfcmpgt(x86_mmx, x86_mmx) nounwind readnone
    133 
; PFMAX: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfmax(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfmax:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfmax %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfmax (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx, x86_mmx) nounwind readnone
    148 
; PFMIN: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfmin(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfmin:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfmin %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfmin (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx, x86_mmx) nounwind readnone
    163 
; PFMUL: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfmul(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfmul:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfmul %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfmul (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx, x86_mmx) nounwind readnone
    178 
; PFNACC (3DNowA intrinsic): reg-reg form, then a folded reg-mem form.
define i64 @test_pfnacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfnacc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfnacc %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfnacc (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnowa.pfnacc(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnowa.pfnacc(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnowa.pfnacc(x86_mmx, x86_mmx) nounwind readnone
    193 
; PFPNACC (3DNowA intrinsic): reg-reg form, then a folded reg-mem form.
define i64 @test_pfpnacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfpnacc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfpnacc %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfpnacc (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnowa.pfpnacc(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnowa.pfpnacc(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnowa.pfpnacc(x86_mmx, x86_mmx) nounwind readnone
    208 
; PFRCP: reg-mem form (folded load) followed by the reg-reg form.
define i64 @test_pfrcp(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pfrcp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfrcp (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    pfrcp %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnow.pfrcp(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnow.pfrcp(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfrcp(x86_mmx) nounwind readnone
    223 
; PFRCPIT1: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfrcpit1(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfrcpit1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfrcpit1 %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfrcpit1 (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfrcpit1(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfrcpit1(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfrcpit1(x86_mmx, x86_mmx) nounwind readnone
    238 
; PFRCPIT2: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfrcpit2(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfrcpit2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfrcpit2 %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfrcpit2 (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfrcpit2(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfrcpit2(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfrcpit2(x86_mmx, x86_mmx) nounwind readnone
    253 
; PFRSQIT1: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfrsqit1(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfrsqit1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfrsqit1 %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfrsqit1 (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfrsqit1(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfrsqit1(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfrsqit1(x86_mmx, x86_mmx) nounwind readnone
    268 
; PFRSQRT: reg-mem form (folded load) followed by the reg-reg form.
define i64 @test_pfrsqrt(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pfrsqrt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfrsqrt (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    pfrsqrt %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnow.pfrsqrt(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnow.pfrsqrt(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfrsqrt(x86_mmx) nounwind readnone
    283 
; PFSUB: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfsub(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfsub:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfsub %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfsub (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx, x86_mmx) nounwind readnone
    298 
; PFSUBR: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pfsubr(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfsubr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pfsubr %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    pfsubr (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx, x86_mmx) nounwind readnone
    313 
; PI2FD: reg-mem form (folded load) followed by the reg-reg form.
define i64 @test_pi2fd(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pi2fd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pi2fd (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    pi2fd %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnow.pi2fd(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnow.pi2fd(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pi2fd(x86_mmx) nounwind readnone
    328 
; PI2FW (3DNowA intrinsic): reg-mem form then reg-reg form.
define i64 @test_pi2fw(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pi2fw:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pi2fw (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT:    pi2fw %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnowa.pi2fw(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnowa.pi2fw(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnowa.pi2fw(x86_mmx) nounwind readnone
    343 
; PMULHRW: reg-reg form, then the load is folded into a reg-mem form.
define i64 @test_pmulhrw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pmulhrw:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pmulhrw %mm1, %mm0 # sched: [5:1.00]
; CHECK-NEXT:    pmulhrw (%rdi), %mm0 # sched: [10:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = call x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx %a0, x86_mmx %a1)
  %2 = load x86_mmx, x86_mmx *%a2, align 8
  %3 = call x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx, x86_mmx) nounwind readnone
    358 
; PREFETCH via inline asm (appears between the #APP/#NO_APP markers).
define void @test_prefetch(i8* %a0) optsize {
; CHECK-LABEL: test_prefetch:
; CHECK:       # %bb.0:
; CHECK-NEXT:    #APP
; CHECK-NEXT:    prefetch (%rdi) # sched: [5:0.50]
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    retq # sched: [1:1.00]
  tail call void asm sideeffect "prefetch $0", "*m"(i8 *%a0) nounwind
  ret void
}
    369 
; PREFETCHW via inline asm (appears between the #APP/#NO_APP markers).
define void @test_prefetchw(i8* %a0) optsize {
; CHECK-LABEL: test_prefetchw:
; CHECK:       # %bb.0:
; CHECK-NEXT:    #APP
; CHECK-NEXT:    prefetchw (%rdi) # sched: [5:0.50]
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    retq # sched: [1:1.00]
  tail call void asm sideeffect "prefetchw $0", "*m"(i8 *%a0) nounwind
  ret void
}
    380 
; PSWAPD (3DNowA intrinsic): reg-mem form then reg-reg form; the shuffle
; comment ("= mem[1,0]") is part of the expected llc output.
define i64 @test_pswapd(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pswapd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pswapd (%rdi), %mm0 # mm0 = mem[1,0] sched: [6:1.00]
; CHECK-NEXT:    pswapd %mm0, %mm0 # mm0 = mm0[1,0] sched: [1:1.00]
; CHECK-NEXT:    movq %mm0, %rax # sched: [2:1.00]
; CHECK-NEXT:    retq # sched: [1:1.00]
  %1 = load x86_mmx, x86_mmx *%a0, align 8
  %2 = call x86_mmx @llvm.x86.3dnowa.pswapd(x86_mmx %1)
  %3 = call x86_mmx @llvm.x86.3dnowa.pswapd(x86_mmx %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}
declare x86_mmx @llvm.x86.3dnowa.pswapd(x86_mmx) nounwind readnone
    395