//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text
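// Each routine below restores a saved Registers_<arch> state for one
// architecture and branches to the saved instruction pointer; none of
// these routines return to their caller.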
#if __i386__
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
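#
# The offsets below assume the Registers_x86 saved state lays out its
# fields as consecutive 32-bit words in the order eax, ebx, ecx, edx,
# edi, esi, ebp, esp, ss, eflags, eip (so esp is at offset 28 and eip
# at offset 40), mirroring i386_thread_state_t.
#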
  movl   4(%esp), %eax
  # store eax and the return eip just below the new stack top
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # eax and the return eip are now in place at the top of the new stack
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret        # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif __x86_64__

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
# On entry, thread_state pointer is in rdi

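  # The offsets below assume the Registers_x86_64 saved state stores the
  # GPRs as consecutive 64-bit words in the order rax, rbx, rcx, rdx, rdi,
  # rsi, rbp, rsp, r8-r15, rip (so rsp is at offset 56 and rip at offset
  # 128), mirroring x86_thread_state64_t.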
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here


#elif __ppc__

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

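  ; The offsets below assume the Registers_ppc layout: srr0 at offset 0,
  ; r0-r31 as 32-bit words starting at offset 8, cr at 136, ctr at 148,
  ; vrsave at 156, f0-f31 as doubles starting at 160, and v0-v31 at 424.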
  ; restore integral registers
  ; skip r0 for now
  ; skip r1 for now
  lwz     r2, 16(r3)
  ; skip r3 for now
  ; skip r4 for now
  ; skip r5 for now
  lwz     r6, 32(r3)
  lwz     r7, 36(r3)
  lwz     r8, 40(r3)
  lwz     r9, 44(r3)
  lwz    r10, 48(r3)
  lwz    r11, 52(r3)
  lwz    r12, 56(r3)
  lwz    r13, 60(r3)
  lwz    r14, 64(r3)
  lwz    r15, 68(r3)
  lwz    r16, 72(r3)
  lwz    r17, 76(r3)
  lwz    r18, 80(r3)
  lwz    r19, 84(r3)
  lwz    r20, 88(r3)
  lwz    r21, 92(r3)
  lwz    r22, 96(r3)
  lwz    r23,100(r3)
  lwz    r24,104(r3)
  lwz    r25,108(r3)
  lwz    r26,112(r3)
  lwz    r27,116(r3)
  lwz    r28,120(r3)
  lwz    r29,124(r3)
  lwz    r30,128(r3)
  lwz    r31,132(r3)

  ; restore float registers
  lfd    f0, 160(r3)
  lfd    f1, 168(r3)
  lfd    f2, 176(r3)
  lfd    f3, 184(r3)
  lfd    f4, 192(r3)
  lfd    f5, 200(r3)
  lfd    f6, 208(r3)
  lfd    f7, 216(r3)
  lfd    f8, 224(r3)
  lfd    f9, 232(r3)
  lfd    f10,240(r3)
  lfd    f11,248(r3)
  lfd    f12,256(r3)
  lfd    f13,264(r3)
  lfd    f14,272(r3)
  lfd    f15,280(r3)
  lfd    f16,288(r3)
  lfd    f17,296(r3)
  lfd    f18,304(r3)
  lfd    f19,312(r3)
  lfd    f20,320(r3)
  lfd    f21,328(r3)
  lfd    f22,336(r3)
  lfd    f23,344(r3)
  lfd    f24,352(r3)
  lfd    f25,360(r3)
  lfd    f26,368(r3)
  lfd    f27,376(r3)
  lfd    f28,384(r3)
  lfd    f29,392(r3)
  lfd    f30,400(r3)
  lfd    f31,408(r3)

  ; restore vector registers if any are in use
  lwz    r5,156(r3)  ; test VRsave
  cmpwi  r5,0
  beq    Lnovec

  subi  r4,r1,16
  rlwinm  r4,r4,0,0,27  ; clear the low 4 bits
  ; r4 is now a 16-byte aligned pointer into the red zone
  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

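; Each LOAD_VECTOR_UNALIGNED{l,h} expansion tests the VRsave bit for one
; vector register (bits for v0-v15 live in the upper halfword, hence the
; andis./andi. split) and, only if that register was live, copies its 16
; bytes word-by-word into the aligned red-zone buffer and reloads it with lvx.
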
#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis.  r0,r5,(1<<(15-_index))  @\
  beq    Ldone ## _index      @\
  lwz    r0, 424+_index*16(r3)    @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3) @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi.  r0,r5,(1<<(31-_index))  @\
  beq    Ldone ## _index      @\
  lwz    r0, 424+_index*16(r3)    @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3) @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

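; Note: the __ctr value restored below is immediately overwritten with srr0,
; which the final bctr then uses as the resume address; ctr is a volatile
; register in the PPC ABI, so the saved value need not survive.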
Lnovec:
  lwz    r0, 136(r3) ; __cr
  mtocrf  255,r0
  lwz    r0, 148(r3) ; __ctr
  mtctr  r0
  lwz    r0, 0(r3)  ; __ssr0
  mtctr  r0
  lwz    r0, 8(r3)  ; do r0 now
  lwz    r5,28(r3)  ; do r5 now
  lwz    r4,24(r3)  ; do r4 now
  lwz    r1,12(r3)  ; do sp now
  lwz    r3,20(r3)  ; do r3 last
  bctr

#elif __arm64__

;
; void libunwind::Registers_arm64::jumpto()
;
; On entry:
;  thread_state pointer is in x0
;
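; The offsets below assume the Registers_arm64 layout: x0-x28, fp, lr as
; consecutive 64-bit words from offset 0, sp at 0xF8, pc at 0x100, and
; d0-d31 as doubles starting at 0x110.
;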
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  ; skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,fp,  [x0, #0x0E0]
  ldr    lr,      [x0, #0x100]  ; restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp,x1          ; restore sp

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  ; restore x0,x1
  ret    lr            ; jump to pc

#elif __arm__ && !__APPLE__

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32-bit Thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
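  @ r0-r15 are saved as sixteen consecutive 32-bit words, so sp (r13) is
  @ loaded from offset 52 and the resume pc (r15) from offset 60.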
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#if __ARM_ARCH > 4
  bx lr
#else
  mov pc, lr
#endif

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPy)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We don't want to build the library that way
  @ (because the compiler could then emit those instructions elsewhere), but
  @ this code only runs when the personality routine needs these registers,
  @ which implies they really are available on the target, so it is safe to
  @ execute. Therefore, generate the instruction using the corresponding
  @ coprocessor mnemonic.
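  @ In the generic coprocessor form, p11 is the double-precision VFP bank,
  @ cr0 names d0, and the {#0x20} option is the transfer length in 32-bit
  @ words (32 words = 16 doubles).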
  ldc p11, cr0, [r0], {#0x20}  @ fldmiad r0, {d0-d15}
  mov pc, lr

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPy)
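  @ The FLDMX format transfers one extra status word, hence {#0x21}
  @ (33 words) rather than {#0x20}.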
  ldc p11, cr0, [r0], {#0x21}  @ fldmiax r0, {d0-d15}
  mov pc, lr

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPy)
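  @ ldcl (the "long" form) selects the upper VFPv3 register bank, so cr0
  @ here names d16 rather than d0.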
  ldcl p11, cr0, [r0], {#0x20}  @ vldm r0, {d16-d31}
  mov pc, lr

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPy)
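  @ Each ldcl p1, crN below is the generic encoding of wldrd wRN: it loads
  @ one 64-bit iwMMX data register and post-increments r0 by 8.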
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  mov pc, lr

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
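  @ Each ldc2 p1, crN below is the generic encoding of wldrw wCGRn: it loads
  @ one 32-bit iwMMX control register and post-increments r0 by 4.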
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  mov pc, lr

#endif