/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>

	.globl	runtime_exceptions
	.globl	el3_exit

	/* -----------------------------------------------------
	 * Handle SMC exceptions separately from other
	 * synchronous exceptions.
	 * -----------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT
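	/*
	 * DAIF_ABT_BIT corresponds to the PSTATE.A mask, so an SError that
	 * occurs while executing in EL3 is taken to the serror vectors below
	 * instead of being left pending.
	 */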

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
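	/*
	 * Extract the exception class (EC) field from ESR_EL3 to determine
	 * whether this synchronous exception is an SMC issued from AArch32
	 * or AArch64, or some other unexpected trap.
	 */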
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* -----------------------------------------------------
	 * The following code handles any synchronous exception
	 * that is not an SMC.
	 * -----------------------------------------------------
	 */

	bl	report_unhandled_exception
	.endm


	/* -----------------------------------------------------
	 * This macro handles FIQ and IRQ interrupts, i.e. EL3,
	 * S-EL1 and NS interrupts.
	 * -----------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2
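	/*
	 * x20 preserves a pointer to the interrupted context (the original
	 * SP_EL3), so that the EL3 state can be captured into it and it can
	 * later be passed to the handler as the 'handle' argument.
	 */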

	/*
	 * Find out whether this is a valid interrupt type. If the
	 * interrupt controller reports a spurious interrupt then
	 * return to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type. A
	 * NULL return value implies that an interrupt was generated
	 * for which there is no handler registered, or that the
	 * interrupt was routed incorrectly. This is a problem with
	 * the framework, so report it as an error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_error_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE
#if IMF_READ_INTERRUPT_ID
	/*
	 * Read the id of the highest priority pending interrupt. If
	 * no interrupt is asserted then return to where we came from.
	 */
	mov	x19, #INTR_ID_UNAVAILABLE
	bl	plat_ic_get_pending_interrupt_id
	cmp	x19, x0
	b.eq	interrupt_exit_\label
#endif

	/*
	 * Save the EL3 system registers needed to return from
	 * this exception.
	 */
	mrs	x3, spsr_el3
	mrs	x4, elr_el3
	stp	x3, x4, [x20, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

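	/*
	 * The arguments now follow the interrupt type handler convention
	 * used by the interrupt management framework:
	 * x0 = interrupt id, x1 = flags (security state in bit[0]),
	 * x2 = handle (pointer to the interrupted cpu_context),
	 * x3 = cookie (currently unused).
	 */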
	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	/*
	 * This label signifies a problem with the interrupt management
	 * framework where it is not safe to go back to the instruction
	 * where the interrupt was generated.
	 */
interrupt_error_\label:
	bl	report_unhandled_interrupt
	.endm


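	/* -----------------------------------------------------
	 * Helper macro to save x18-x29 and SP_EL0 into the
	 * general purpose register area of the 'cpu_context'
	 * structure pointed to by SP_EL3. x18 is reused as a
	 * scratch register once its original value has been
	 * saved.
	 * -----------------------------------------------------
	 */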
	.macro save_x18_to_x29_sp_el0
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	.endm

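	/* -----------------------------------------------------
	 * The vector table must be 2KB aligned as required by
	 * VBAR_EL3 (hence the '.align 11'), and each vector
	 * entry is 128 bytes, i.e. 32 instructions (hence the
	 * '.align 7' before each entry and the check_vector_size
	 * assertions).
	 * -----------------------------------------------------
	 */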
	.section	.vectors, "ax"; .align 11
	.align	7
runtime_exceptions:
	/* -----------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * -----------------------------------------------------
	 */
sync_exception_sp_el0:
	/* -----------------------------------------------------
	 * We don't expect any synchronous exceptions from EL3
	 * -----------------------------------------------------
	 */
	bl	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

	.align	7
	/* -----------------------------------------------------
	 * EL3 code is non-reentrant. Any asynchronous exception
	 * is a serious error. Loop infinitely.
	 * -----------------------------------------------------
	 */
irq_sp_el0:
	bl	report_unhandled_interrupt
	check_vector_size irq_sp_el0

	.align	7
fiq_sp_el0:
	bl	report_unhandled_interrupt
	check_vector_size fiq_sp_el0

	.align	7
serror_sp_el0:
	bl	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* -----------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_sp_elx:
	/* -----------------------------------------------------
	 * This exception will trigger if anything went wrong
	 * during a previous exception entry or exit or while
	 * handling an earlier unexpected synchronous exception.
	 * There is a high probability that SP_EL3 is corrupted.
	 * -----------------------------------------------------
	 */
	bl	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

	.align	7
irq_sp_elx:
	bl	report_unhandled_interrupt
	check_vector_size irq_sp_elx

	.align	7
fiq_sp_elx:
	bl	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

	.align	7
serror_sp_elx:
	bl	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_aarch64:
	/* -----------------------------------------------------
	 * This exception vector is most commonly the entry
	 * point for SMCs and for traps that are unhandled at
	 * lower ELs. SP_EL3 should point to a valid cpu context
	 * where the general purpose and system register state
	 * can be saved.
	 * -----------------------------------------------------
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

	.align	7
	/* -----------------------------------------------------
	 * IRQ and FIQ interrupts from lower ELs are handed off
	 * to the interrupt management framework. SError
	 * exceptions are not currently supported and are simply
	 * reported.
	 * -----------------------------------------------------
	 */
irq_aarch64:
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

	.align	7
fiq_aarch64:
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

	.align	7
serror_aarch64:
	bl	report_unhandled_exception
	check_vector_size serror_aarch64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_aarch32:
	/* -----------------------------------------------------
	 * This exception vector is most commonly the entry
	 * point for SMCs and for traps that are unhandled at
	 * lower ELs. SP_EL3 should point to a valid cpu context
	 * where the general purpose and system register state
	 * can be saved.
	 * -----------------------------------------------------
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

	.align	7
	/* -----------------------------------------------------
	 * IRQ and FIQ interrupts from lower ELs are handed off
	 * to the interrupt management framework. SError
	 * exceptions are not currently supported and are simply
	 * reported.
	 * -----------------------------------------------------
	 */
irq_aarch32:
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

	.align	7
fiq_aarch32:
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

	.align	7
serror_aarch32:
	bl	report_unhandled_exception
	check_vector_size serror_aarch32

	.align	7

	/* -----------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC
	 * has been invoked, it frees some general purpose
	 * registers to perform the remaining tasks. These
	 * involve finding the runtime service handler that is
	 * the target of the SMC and switching to the runtime
	 * stack (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be
	 * used here.
	 * -----------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

	/* -----------------------------------------------------
	 * Since we are coming from aarch32, x8-x18 need to be
	 * saved as per the SMC32 calling convention. If a lower
	 * EL in aarch64 is making an SMC32 call then it must
	 * have saved x8-x17 already therein.
	 * -----------------------------------------------------
	 */
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]

	/* x4-x7, x18, sp_el0 are saved below */

smc_handler64:
	/* -----------------------------------------------------
	 * Populate the parameters for the SMC handler. We
	 * already have x0-x4 in place. x5 will point to a
	 * cookie (not used now), x6 will point to the context
	 * structure (SP_EL3) and x7 will contain flags we need
	 * to pass to the handler. Hence save x5-x7. Note that x4
	 * only needs to be preserved for AArch32 callers but we
	 * do it for AArch64 callers as well for convenience.
	 * -----------------------------------------------------
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]

	/* Save the rest of the general purpose registers and SP_EL0 */
	save_x18_to_x29_sp_el0

	mov	x5, xzr
	mov	x6, sp
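	/*
	 * The register assignments above and below follow the runtime
	 * service handler prototype declared in runtime_svc.h: x0-x4 carry
	 * the SMC arguments, x5 the cookie, x6 the 'handle' (this cpu
	 * context) and x7 the flags.
	 */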

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
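	/*
	 * x16 = (call type << FUNCID_OEN_WIDTH) | OEN. For example, assuming
	 * a 6-bit OEN field, a fast (type 1) call to OEN 4 produces the
	 * index 0x44 into the rt_svc_descs_indices array used below.
	 */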

	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
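	/*
	 * x11 points to the 'handle' field of the first runtime service
	 * descriptor. The descriptors are laid out contiguously, so the
	 * handler pointer of descriptor 'n' is loaded from
	 * x11 + (n << RT_SVC_SIZE_LOG2).
	 */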

	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]
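	/*
	 * w15 now holds the index of the descriptor that owns this OEN, or
	 * a value with bit 7 set if no runtime service has registered for
	 * it (the indices array is initialised to -1).
	 */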

	/* -----------------------------------------------------
	 * Restore the saved C runtime stack value which will
	 * become the new SP_EL0 i.e. the EL3 runtime stack. It
	 * was saved in the 'cpu_context' structure prior to the
	 * last ERET from EL3.
	 * -----------------------------------------------------
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/*
	 * Any index greater than 127 is invalid. Check bit 7 for
	 * a valid index.
	 */
	tbnz	w15, 7, smc_unknown

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/* -----------------------------------------------------
	 * Get the descriptor using the index
	 * x11 = (base + off), x15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 * -----------------------------------------------------
	 */
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/* -----------------------------------------------------
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is
	 * a world switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved
	 * later.
	 * -----------------------------------------------------
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate the caller's security state */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/* -----------------------------------------------------
	 * Call the Secure Monitor Call handler and then drop
	 * directly into el3_exit() which will program any
	 * remaining architectural state prior to issuing the
	 * ERET to the desired lower EL.
	 * -----------------------------------------------------
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	/* -----------------------------------------------------
	 * This routine assumes that SP_EL3 is pointing to a
	 * valid context structure from which the gp regs and
	 * other special registers can be retrieved.
	 *
	 * Keep it in the same section as smc_handler as this
	 * function uses a fall-through to el3_exit.
	 * -----------------------------------------------------
	 */
el3_exit: ; .type el3_exit, %function
	/* -----------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack,
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3.
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* Restore saved general purpose registers and return */
	b	restore_gp_registers_eret

smc_unknown:
	/*
	 * Here we restore x4-x18 regardless of where we came from. AArch32
	 * callers will find the register contents unchanged, but AArch64
	 * callers will find the registers modified (with stale earlier NS
	 * content). Either way, we aren't leaking any secure information
	 * through them.
	 */
	mov	w0, #SMC_UNK
	b	restore_gp_registers_callee_eret

smc_prohibited:
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	w0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	msr	spsel, #1 /* Switch to SP_ELx */
	bl	report_unhandled_exception

	/* -----------------------------------------------------
	 * The following functions are used to save and restore
	 * all the general purpose registers. Ideally we would
	 * only save and restore the callee saved registers when
	 * a world switch occurs, but that type of implementation
	 * is more complex. So currently we always save and
	 * restore these registers on entry to and exit from EL3.
	 * These are not macros, to ensure that their invocation
	 * fits within the 32 instructions per exception vector.
	 * -----------------------------------------------------
	 */
func save_gp_registers
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	save_x18_to_x29_sp_el0
	ret

func restore_gp_registers_eret
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

restore_gp_registers_callee_eret:
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
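	/*
	 * x30 (LR) and SP_EL0 are stored at adjacent offsets in the context,
	 * so they are reloaded as a pair; x17 temporarily carries the saved
	 * SP_EL0 value, which is why x16 and x17 are restored last.
	 */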
	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	msr	sp_el0, x17
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	eret