/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el1
	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el1
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift
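
/* -----------------------------------------------------------------------
 * For reference, a C-level sketch of get_afflvl_shift (illustrative only;
 * the function name below is hypothetical and not part of this file). MPIDR
 * affinity fields are 8 bits wide and Aff3 lives at bits [39:32], so level 3
 * maps to one extra field's worth of shift:
 *
 *	unsigned int get_afflvl_shift_c(unsigned int aff_lvl)
 *	{
 *		if (aff_lvl == 3U)
 *			aff_lvl++;
 *		return aff_lvl << MPIDR_AFFLVL_SHIFT;
 *	}
 * -----------------------------------------------------------------------
 */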

func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls
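
/* -----------------------------------------------------------------------
 * Similarly, a hedged C-level sketch of mpidr_mask_lower_afflvls: it clears
 * every affinity field below the requested level (names are hypothetical,
 * reusing the sketch above):
 *
 *	unsigned long long mpidr_mask_lower_afflvls_c(unsigned long long mpidr,
 *						      unsigned int aff_lvl)
 *	{
 *		unsigned int shift = get_afflvl_shift_c(aff_lvl);
 *
 *		return (mpidr >> shift) << shift;
 *	}
 * -----------------------------------------------------------------------
 */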


func eret
	eret
endfunc eret


func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * NOTE: This function is deprecated and zeromem should be used instead.
 * -----------------------------------------------------------------------
 */
.equ	zeromem16, zeromem

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: The MMU must be enabled when using this function as it can only
 *       operate on normal memory. It is mainly intended to be used from C
 *       code, where the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Use the fallback path that does not rely on the DC ZVA instruction
	 * and therefore does not require the MMU to be enabled.
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem
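
/* -----------------------------------------------------------------------
 * Illustrative caller-side sketch (not part of this file; the buffer and
 * function below are made up) showing when each variant applies:
 *
 *	static unsigned char scratch[256];
 *
 *	void clear_scratch(void)
 *	{
 *		// Safe with the MMU off: uses plain stores only.
 *		zeromem(scratch, sizeof(scratch));
 *
 *		// Faster DC ZVA path: requires the MMU to be on and scratch
 *		// to be mapped as Normal memory.
 *		zero_normalmem(scratch, sizeof(scratch));
 *	}
 * -----------------------------------------------------------------------
 */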

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement for the MMU to be enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time, or using the DC ZVA instruction to
	 * zero aligned blocks of bytes, whose size is assumed to be at least
	 * 16 bytes. If the DC ZVA instruction cannot be used, or if the first
	 * 16-byte loop would overflow, there is a fallback path that does not
	 * use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function, which
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *       +--------+  |              |
	 *       |        |  |              |
	 *       |        v  v              |
	 *       |   +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *       |   | 16 bytes loop |      |
	 *       |   +-------+-------+      |
	 *       |           |              |
	 *       |           v              |
	 *       |    +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *       |    | 1 byte loop |       |
	 *       |    +-------------+       |
	 *       |           |              |
	 *       |           v              |
	 *       |       +---+--+           |
	 *       |       | exit |           |
	 *       |       +------+           |
	 *       |                          |
	 *       |           +--------------+    +------------------+ zeromem
	 *       |           |  +----------------| zeromem function |
	 *       |           |  |                +------------------+
	 *       |           v  v
	 *       |    +-------------+ .Lzeromem_dczva_fallback_entry
	 *       |    | 1 byte loop |
	 *       |    +------+------+
	 *       |           |
	 *       +-----------+
	 */

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Read dczid_el0, whose BS field (lowest 4 bits) holds
	 * log2(<block size in words>), i.e. log2(<block size in bytes>) - 2
	 * (see the encoding of the dczid_el0 register).
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size
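
	/*
	 * Equivalent C for the three instructions above (sketch only):
	 *   block_size = 4 << (dczid_el0 & 0xf);
	 * e.g. a BS field of 4 gives 4 << 4 = 64-byte zero blocks.
	 */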

#if ENABLE_ASSERTIONS
	/*
	 * Assumes block size is at least 16 bytes to avoid manual realignment
	 * of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block, and this check also protects against zeroing a whole block
	 * when the area to zero is smaller than one. In addition, as the
	 * block size is assumed to be at least 16 bytes, it protects the
	 * initial aligning loops from trying to zero 16 bytes when length is
	 * less than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * The length alias should not be used after this point unless it is
	 * redefined as a register other than block_mask's.
	 */
	 .unreq length

	/*
	 * If the start address is already aligned to zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1
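
	/*
	 * In C terms (sketch only): tmp1 = (cursor | block_mask) + 1, i.e.
	 * round cursor up to the next block boundary. For example, with a
	 * 64-byte block, cursor = 0x1010 gives (0x1010 | 0x3f) + 1 = 0x1040.
	 * The aligned case was already filtered out above, so this never
	 * needs to return cursor itself.
	 */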

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * Next aligned address cannot be after the stop address because the
	 * length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using the DC ZVA cache block
	 * zeroing instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
	 * remaining area
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes so that the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte per byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
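
/* --------------------------------------------------------------------------
 * Illustrative caller-side sketch (hypothetical buffers, not part of this
 * file): both pointers must be 16-byte aligned and the regions must not
 * overlap.
 *
 *	static unsigned long long src_buf[8] __attribute__((aligned(16)));
 *	static unsigned long long dst_buf[8] __attribute__((aligned(16)));
 *
 *	void copy_example(void)
 *	{
 *		memcpy16(dst_buf, src_buf, sizeof(src_buf));
 *	}
 * --------------------------------------------------------------------------
 */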

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el3:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el3
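
/* ---------------------------------------------------------------------------
 * Hedged C-level sketch of the read-modify-write above, assuming the
 * read_sctlr_el3()/write_sctlr_el3(), isb() and dsbsy() helpers from
 * arch_helpers.h (the function name is hypothetical). The icache variant
 * below additionally clears SCTLR_I_BIT:
 *
 *	void disable_mmu_el3_c(void)
 *	{
 *		write_sctlr_el3(read_sctlr_el3() &
 *				~(SCTLR_M_BIT | SCTLR_C_BIT));
 *		isb();		// ensure MMU is off
 *		dsbsy();
 *	}
 * ---------------------------------------------------------------------------
 */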


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el3
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL1
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el1


func disable_mmu_icache_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el1
endfunc disable_mmu_icache_el1

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
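
/* ---------------------------------------------------------------------------
 * Hedged C-level sketch of enable_vfp, assuming the cpacr_el1/cptr_el3
 * accessors and isb() from arch_helpers.h (the function name is
 * hypothetical):
 *
 *	void enable_vfp_c(void)
 *	{
 *		write_cpacr_el1(read_cpacr_el1() | CPACR_VFP_BITS);
 *		write_cptr_el3(read_cptr_el3() & ~AARCH64_CPTR_TFP);
 *		isb();
 *	}
 * ---------------------------------------------------------------------------
 */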
#endif