//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
#if defined(_WIN32)
# On Windows, the 'this' pointer is passed in ecx instead of on the stack
  movl   %ecx, %eax
#else
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
  movl   4(%esp), %eax
#endif
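  # Offsets into Registers_x86 assumed by the loads below (derived from this
  # routine, not an authoritative layout): eax=0, ebx=4, ecx=8, edx=12,
  # edi=16, esi=20, ebp=24, esp=28, eip=40.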
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # the saved eax and eip now sit at the top of the new stack
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret        # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
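  # Offsets into Registers_x86_64 assumed by the loads below (derived from
  # this routine): rax=0, rbx=8, rcx=16, rdx=24, rdi=32, rsi=40, rbp=48,
  # rsp=56, r8-r15=64..120, rip=128.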

  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

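  # On Win64 the context also carries xmm0-xmm15 starting at offset 176.
  # movdqu (unaligned load) is used, so the saved area need not be
  # 16-byte aligned.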
#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here


#elif defined(__ppc__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

  ; restore integral registers
  ; skip r0 for now
  ; skip r1 for now
  lwz     r2, 16(r3)
  ; skip r3 for now
  ; skip r4 for now
  ; skip r5 for now
  lwz     r6, 32(r3)
  lwz     r7, 36(r3)
  lwz     r8, 40(r3)
  lwz     r9, 44(r3)
  lwz    r10, 48(r3)
  lwz    r11, 52(r3)
  lwz    r12, 56(r3)
  lwz    r13, 60(r3)
  lwz    r14, 64(r3)
  lwz    r15, 68(r3)
  lwz    r16, 72(r3)
  lwz    r17, 76(r3)
  lwz    r18, 80(r3)
  lwz    r19, 84(r3)
  lwz    r20, 88(r3)
  lwz    r21, 92(r3)
  lwz    r22, 96(r3)
  lwz    r23,100(r3)
  lwz    r24,104(r3)
  lwz    r25,108(r3)
  lwz    r26,112(r3)
  lwz    r27,116(r3)
  lwz    r28,120(r3)
  lwz    r29,124(r3)
  lwz    r30,128(r3)
  lwz    r31,132(r3)

  ; restore float registers
  lfd    f0, 160(r3)
  lfd    f1, 168(r3)
  lfd    f2, 176(r3)
  lfd    f3, 184(r3)
  lfd    f4, 192(r3)
  lfd    f5, 200(r3)
  lfd    f6, 208(r3)
  lfd    f7, 216(r3)
  lfd    f8, 224(r3)
  lfd    f9, 232(r3)
  lfd    f10,240(r3)
  lfd    f11,248(r3)
  lfd    f12,256(r3)
  lfd    f13,264(r3)
  lfd    f14,272(r3)
  lfd    f15,280(r3)
  lfd    f16,288(r3)
  lfd    f17,296(r3)
  lfd    f18,304(r3)
  lfd    f19,312(r3)
  lfd    f20,320(r3)
  lfd    f21,328(r3)
  lfd    f22,336(r3)
  lfd    f23,344(r3)
  lfd    f24,352(r3)
  lfd    f25,360(r3)
  lfd    f26,368(r3)
  lfd    f27,376(r3)
  lfd    f28,384(r3)
  lfd    f29,392(r3)
  lfd    f30,400(r3)
  lfd    f31,408(r3)

  ; restore vector registers if any are in use
  lwz    r5,156(r3)  ; test VRsave
  cmpwi  r5,0
  beq    Lnovec

  subi  r4,r1,16
  rlwinm  r4,r4,0,0,27  ; mask low 4-bits
  ; r4 is now a 16-byte aligned pointer into the red zone
  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

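  ; Each LOAD_VECTOR_UNALIGNED{l,h} below tests the VRsave bit for one vector
  ; register; if the bit is clear the register is skipped, otherwise its 16
  ; bytes are copied word-by-word into the aligned red-zone buffer at r4 and
  ; loaded from there with lvx.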
#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis.  r0,r5,(1<<(15-_index))  @\
  beq    Ldone  ## _index     @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3)@\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi.  r0,r5,(1<<(31-_index))  @\
  beq    Ldone  ## _index    @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3)@\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
  Ldone  ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

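; Lnovec: restore cr and ctr, load srr0 (the resume pc) into ctr for the
; final bctr, then restore the remaining GPRs and branch.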
Lnovec:
  lwz    r0, 136(r3) ; __cr
  mtocrf  255,r0
  lwz    r0, 148(r3) ; __ctr
  mtctr  r0
  lwz    r0, 0(r3)  ; __srr0
  mtctr  r0
  lwz    r0, 8(r3)  ; do r0 now
  lwz    r5,28(r3)  ; do r5 now
  lwz    r4,24(r3)  ; do r4 now
  lwz    r1,12(r3)  ; do sp now
  lwz    r3,20(r3)  ; do r3 last
  bctr

#elif defined(__arm64__) || defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
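  // Layout assumed by the loads below: x0-x29 stored as consecutive pairs
  // starting at offset 0x000, sp at 0x0F8, pc at 0x100 (loaded into lr),
  // and d0-d31 starting at 0x110.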
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp,x1                  // restore sp

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
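  @ As used below, r0-r15 are stored as 16 consecutive words starting at
  @ offset 0, so the saved sp (r13) is read from offset 0x34 and the saved
  @ pc (r15) from offset 0x3c.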
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32-bit Thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPy)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to build the library with those
  @ flags (the compiler could then emit such instructions elsewhere), but
  @ this code is only reached when the personality routine actually needs
  @ these registers, which implies they are available on the target, so it
  @ is safe to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPy)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPy)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPy)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

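  # Registers r0-r31 are stored as consecutive 32-bit words starting at
  # offset 0; the saved r9 (the OpenRISC link register) is used as the
  # resume address by the l.jr at the end.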
  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  l.lwz     r9, 36(r3)
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__mips__) && defined(_ABIO32) && defined(__mips_soft_float)

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
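  // The saved-register array holds one 4-byte slot per GPR ($0-$31),
  // followed by pc at index 32, hi at index 33 and lo at index 34.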
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips__) && defined(_ABI64) && defined(__mips_soft_float)

//
// void libunwind::Registers_mips_n64::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind18Registers_mips_n646jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
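  // Same layout as the O32 variant above, but with 8-byte slots: $0-$31,
  // then pc at index 32, hi at index 33 and lo at index 34.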
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE