; RUN: llc -O0 < %s | FileCheck %s
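
; Check that operations on types with no native MSP430 support are
; lowered to the corresponding MSPABI runtime library calls.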

target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"

@g_double = global double 123.0, align 8
@g_float = global float 123.0, align 8
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8

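; Floating-point truncation and extension have no MSP430 instructions and
; are lowered to the __mspabi_cvtdf / __mspabi_cvtfd helpers. Roughly, the
; C equivalent of @d2f below would be (illustration only, not part of the
; original test):
;
;   extern volatile double g_double;
;   float d2f(void) { return (float)g_double; }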
define float @d2f() #0 {
entry:
; CHECK: d2f:

; CHECK: call #__mspabi_cvtdf
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptrunc double %0 to float

  ret float %1
}

define double @f2d() #0 {
entry:
; CHECK: f2d:

; CHECK: call #__mspabi_cvtfd
  %0 = load volatile float, float* @g_float, align 8
  %1 = fpext float %0 to double

  ret double %1
}

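; Floating-point to integer conversions use the __mspabi_fix* family. The
; suffix encodes the source type (d or f) and the destination: li for long,
; lli for long long, ul/ull for their unsigned counterparts.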
define i32 @d2l() #0 {
entry:
; CHECK: d2l:

; CHECK: call #__mspabi_fixdli
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptosi double %0 to i32

  ret i32 %1
}

define i64 @d2ll() #0 {
entry:
; CHECK: d2ll:

; CHECK: call #__mspabi_fixdlli
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptosi double %0 to i64

  ret i64 %1
}

define i32 @d2ul() #0 {
entry:
; CHECK: d2ul:

; CHECK: call #__mspabi_fixdul
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptoui double %0 to i32

  ret i32 %1
}

define i64 @d2ull() #0 {
entry:
; CHECK: d2ull:

; CHECK: call #__mspabi_fixdull
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptoui double %0 to i64

  ret i64 %1
}

define i32 @f2l() #0 {
entry:
; CHECK: f2l:

; CHECK: call #__mspabi_fixfli
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptosi float %0 to i32

  ret i32 %1
}

define i64 @f2ll() #0 {
entry:
; CHECK: f2ll:

; CHECK: call #__mspabi_fixflli
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptosi float %0 to i64

  ret i64 %1
}

define i32 @f2ul() #0 {
entry:
; CHECK: f2ul:

; CHECK: call #__mspabi_fixful
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptoui float %0 to i32

  ret i32 %1
}

define i64 @f2ull() #0 {
entry:
; CHECK: f2ull:

; CHECK: call #__mspabi_fixfull
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptoui float %0 to i64

  ret i64 %1
}

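; Integer to floating-point conversions use the __mspabi_flt* family, with
; the same width suffixes plus a trailing d or f for the result type.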
define double @l2d() #0 {
entry:
; CHECK: l2d:

; CHECK: call #__mspabi_fltlid
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sitofp i32 %0 to double

  ret double %1
}

define double @ll2d() #0 {
entry:
; CHECK: ll2d:

; CHECK: call #__mspabi_fltllid
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sitofp i64 %0 to double

  ret double %1
}

define double @ul2d() #0 {
entry:
; CHECK: ul2d:

; CHECK: call #__mspabi_fltuld
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = uitofp i32 %0 to double

  ret double %1
}

define double @ull2d() #0 {
entry:
; CHECK: ull2d:

; CHECK: call #__mspabi_fltulld
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = uitofp i64 %0 to double

  ret double %1
}

define float @l2f() #0 {
entry:
; CHECK: l2f:

; CHECK: call #__mspabi_fltlif
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sitofp i32 %0 to float

  ret float %1
}

define float @ll2f() #0 {
entry:
; CHECK: ll2f:

; CHECK: call #__mspabi_fltllif
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sitofp i64 %0 to float

  ret float %1
}

define float @ul2f() #0 {
entry:
; CHECK: ul2f:

; CHECK: call #__mspabi_fltulf
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = uitofp i32 %0 to float

  ret float %1
}

define float @ull2f() #0 {
entry:
; CHECK: ull2f:

; CHECK: call #__mspabi_fltullf
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = uitofp i64 %0 to float

  ret float %1
}

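; All floating-point comparison predicates funnel through the same two
; helpers, __mspabi_cmpd and __mspabi_cmpf; the predicate itself is then
; decided by testing the integer result of the call.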
define i1 @cmpd_oeq() #0 {
entry:
; CHECK: cmpd_oeq:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp oeq double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_une() #0 {
entry:
; CHECK: cmpd_une:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp une double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_oge() #0 {
entry:
; CHECK: cmpd_oge:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp oge double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_olt() #0 {
entry:
; CHECK: cmpd_olt:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp olt double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_ole() #0 {
entry:
; CHECK: cmpd_ole:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp ole double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_ogt() #0 {
entry:
; CHECK: cmpd_ogt:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp ogt double %0, 123.0

  ret i1 %1
}

define i1 @cmpf_oeq() #0 {
entry:
; CHECK: cmpf_oeq:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp oeq float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_une() #0 {
entry:
; CHECK: cmpf_une:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp une float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_oge() #0 {
entry:
; CHECK: cmpf_oge:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp oge float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_olt() #0 {
entry:
; CHECK: cmpf_olt:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp olt float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_ole() #0 {
entry:
; CHECK: cmpf_ole:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp ole float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_ogt() #0 {
entry:
; CHECK: cmpf_ogt:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp ogt float %0, 123.0

  ret i1 %1
}

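; Floating-point arithmetic lowers to __mspabi_add*, __mspabi_sub*,
; __mspabi_mpy* and __mspabi_div*, again with a d or f type suffix.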
define double @addd() #0 {
entry:
; CHECK: addd:

; CHECK: call #__mspabi_addd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fadd double %0, 123.0

  ret double %1
}

define float @addf() #0 {
entry:
; CHECK: addf:

; CHECK: call #__mspabi_addf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fadd float %0, 123.0

  ret float %1
}

define double @divd() #0 {
entry:
; CHECK: divd:

; CHECK: call #__mspabi_divd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fdiv double %0, 123.0

  ret double %1
}

define float @divf() #0 {
entry:
; CHECK: divf:

; CHECK: call #__mspabi_divf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fdiv float %0, 123.0

  ret float %1
}

define double @mpyd() #0 {
entry:
; CHECK: mpyd:

; CHECK: call #__mspabi_mpyd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fmul double %0, 123.0

  ret double %1
}

define float @mpyf() #0 {
entry:
; CHECK: mpyf:

; CHECK: call #__mspabi_mpyf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fmul float %0, 123.0

  ret float %1
}

define double @subd() #0 {
entry:
; CHECK: subd:

; CHECK: call #__mspabi_subd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fsub double %0, %0

  ret double %1
}

define float @subf() #0 {
entry:
; CHECK: subf:

; CHECK: call #__mspabi_subf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fsub float %0, %0

  ret float %1
}

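; MSP430 has no hardware divide, so signed and unsigned division and
; remainder lower to the __mspabi_div* and __mspabi_rem* helpers.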
define i16 @divi() #0 {
entry:
; CHECK: divi:

; CHECK: call #__mspabi_divi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = sdiv i16 %0, %0

  ret i16 %1
}

define i32 @divli() #0 {
entry:
; CHECK: divli:

; CHECK: call #__mspabi_divli
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sdiv i32 %0, %0

  ret i32 %1
}

define i64 @divlli() #0 {
entry:
; CHECK: divlli:

; CHECK: call #__mspabi_divlli
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sdiv i64 %0, %0

  ret i64 %1
}

define i16 @divu() #0 {
entry:
; CHECK: divu:

; CHECK: call #__mspabi_divu
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = udiv i16 %0, %0

  ret i16 %1
}

define i32 @divul() #0 {
entry:
; CHECK: divul:

; CHECK: call #__mspabi_divul
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = udiv i32 %0, %0

  ret i32 %1
}

define i64 @divull() #0 {
entry:
; CHECK: divull:

; CHECK: call #__mspabi_divull
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = udiv i64 %0, %0

  ret i64 %1
}

define i16 @remi() #0 {
entry:
; CHECK: remi:

; CHECK: call #__mspabi_remi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = srem i16 %0, %0

  ret i16 %1
}

define i32 @remli() #0 {
entry:
; CHECK: remli:

; CHECK: call #__mspabi_remli
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = srem i32 %0, %0

  ret i32 %1
}

define i64 @remlli() #0 {
entry:
; CHECK: remlli:

; CHECK: call #__mspabi_remlli
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = srem i64 %0, %0

  ret i64 %1
}

define i16 @remu() #0 {
entry:
; CHECK: remu:

; CHECK: call #__mspabi_remu
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = urem i16 %0, %0

  ret i16 %1
}

define i32 @remul() #0 {
entry:
; CHECK: remul:

; CHECK: call #__mspabi_remul
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = urem i32 %0, %0

  ret i32 %1
}

define i64 @remull() #0 {
entry:
; CHECK: remull:

; CHECK: call #__mspabi_remull
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = urem i64 %0, %0

  ret i64 %1
}

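; Integer multiplication lowers to the __mspabi_mpy* helpers. Note the
; asymmetric naming checked below: 16-bit is mpyi, while the 32- and
; 64-bit variants are mpyl and mpyll (no trailing i).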
define i16 @mpyi() #0 {
entry:
; CHECK: mpyi:

; CHECK: call #__mspabi_mpyi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = mul i16 %0, %0

  ret i16 %1
}

define i32 @mpyli() #0 {
entry:
; CHECK: mpyli:

; CHECK: call #__mspabi_mpyl
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = mul i32 %0, %0

  ret i32 %1
}

define i64 @mpylli() #0 {
entry:
; CHECK: mpylli:

; CHECK: call #__mspabi_mpyll
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = mul i64 %0, %0

  ret i64 %1
}

attributes #0 = { nounwind }