/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Kumar Gala <kumar.gala@freescale.com>
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

/* To boot secondary cpus, we need a place for them to start up.
 * Normally, they start at 0xfffffffc, but that's usually the
 * firmware, and we don't want to have to run the firmware again.
 * Instead, the primary cpu will set the BPTR to point here to
 * this page.  We then set up the core, and head to
 * start_secondary.  Note that this means that the code below
 * must never exceed 1023 instructions (the branch at the end
 * would then be the 1024th).
 */
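
/*
 * Note on that budget: the boot page is 4096 bytes, and the reset vector
 * (the branch at __secondary_reset_vector below) occupies the last word
 * at 0xfffffffc.  That leaves 4092 bytes for everything above it, i.e.
 * 1023 4-byte instructions, matching the .space directive at the end of
 * this page.
 */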
	.globl	__secondary_start_page
	.align	12
__secondary_start_page:
#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
	msync
	isync
	mfspr	r3, SPRN_HDBCR0
	oris	r3, r3, 0x0080
	mtspr	SPRN_HDBCR0, r3
#endif
/* First do some preliminary setup */
	lis	r3, HID0_EMCP@h		/* enable machine check */
#ifndef CONFIG_E500MC
	ori	r3,r3,HID0_TBEN@l	/* enable Timebase */
#endif
#ifdef CONFIG_PHYS_64BIT
	ori	r3,r3,HID0_ENMAS7@l	/* enable MAS7 updates */
#endif
	mtspr	SPRN_HID0,r3

#ifndef CONFIG_E500MC
	li	r3,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r0,PVR
	andi.	r0,r0,0xff
	cmpwi	r0,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt	1f
	/* Set MBDD bit also */
	ori	r3, r3, HID1_MBDD@l
1:
	mtspr	SPRN_HID1,r3
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	mfspr	r3,SPRN_HDBCR1
	oris	r3,r3,0x0100
	mtspr	SPRN_HDBCR1,r3
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	b	2f

1:	/* Erratum says set bits 55:60 to 001001 */
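	/*
	 * Note: bits 55:60 of the 64-bit SPR are bits 23:28 of the 32-bit
	 * register, i.e. mask 0x1f8.  The field value 0b001001 (9) shifted
	 * into position is 9 << 3 = 0x48, which is what the rlwimi below
	 * inserts under that mask.
	 */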
	msync
	isync
	mfspr	r3,SPRN_HDBCR0
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8
	mtspr	SPRN_HDBCR0,r3
	isync
2:
#endif

	/* Enable branch prediction */
	lis	r3,BUCSR_ENABLE@h
	ori	r3,r3,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r3

	/* Ensure TB is 0 */
	li	r3,0
	mttbl	r3
	mttbu	r3

	/* Enable/invalidate the I-Cache */
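	/*
	 * Two-phase sequence (mirrored for the D-cache below): first
	 * request a flash invalidate and spin until the invalidate bits
	 * read back as zero (they clear when the operation completes),
	 * then enable the cache and spin until the enable bit reads back
	 * as set.
	 */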
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b

#define toreset(x) (x - __secondary_start_page + 0xfffff000)
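
/*
 * toreset() rebases a link-time symbol onto the copy of this page that
 * secondary cores actually execute: the page is mapped at 0xfffff000 via
 * BPTR, so a symbol's offset from __secondary_start_page is added to
 * that base.
 */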

	/* get our PIR to figure out our table entry */
	lis	r3,toreset(__spin_table_addr)@h
	ori	r3,r3,toreset(__spin_table_addr)@l
	lwz	r3,0(r3)

	mfspr	r0,SPRN_PIR
#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
/*
 * PIR definition for Chassis 2
 * 0-17   Reserved (logic 0s)
 * 18-19  CHIP_ID          2'b00  - SoC 1
 *                         all others - reserved
 * 20-24  CLUSTER_ID       5'b00000 - CCM 1
 *                         all others - reserved
 * 25-26  CORE_CLUSTER_ID  2'b00  - cluster 1
 *                         2'b01  - cluster 2
 *                         2'b10  - cluster 3
 *                         2'b11  - cluster 4
 * 27-28  CORE_ID          2'b00  - core 0
 *                         2'b01  - core 1
 *                         2'b10  - core 2
 *                         2'b11  - core 3
 * 29-31  THREAD_ID        3'b000 - thread 0
 *                         3'b001 - thread 1
 *
 * Power-on PIR increments threads by 0x01, cores within a cluster by 0x08
 * and clusters by 0x20.
 *
 * We renumber PIR so that all threads in the system are consecutive.
 */
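
/*
 * Worked example (assuming CONFIG_SYS_FSL_CORES_PER_CLUSTER=4 and
 * CONFIG_SYS_FSL_THREADS_PER_CORE=2): cluster 1, core 1, thread 0 powers
 * on with PIR = 0x20 + 0x08 = 0x28.  Below, r8 = (0x28 >> 3) & 3 = 1 and
 * r10 = 0x28 >> 5 = 1, so the spin table index is r5 = 1*4 + 1 = 5 and
 * the renumbered PIR is r4 = 5*2 = 10.
 */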

	rlwinm	r8,r0,29,0x03	/* r8 = core within cluster */
	srwi	r10,r0,5	/* r10 = cluster */

	mulli	r5,r10,CONFIG_SYS_FSL_CORES_PER_CLUSTER
	add	r5,r5,r8	/* for spin table index */
	mulli	r4,r5,CONFIG_SYS_FSL_THREADS_PER_CORE	/* for PIR */
#elif	defined(CONFIG_E500MC)
	rlwinm	r4,r0,27,27,31
	mr	r5,r4
#else
	mr	r4,r0
	mr	r5,r4
#endif

	/*
	 * r10 has the base address for the entry.
	 * We cannot access it until a new TLB entry is set up.
	 */
	slwi	r8,r5,6	/* each spin table entry is padded to 64 bytes */
	add	r10,r3,r8

	mtspr	SPRN_PIR,r4	/* write to PIR register */

#ifdef CONFIG_SYS_FSL_ERRATUM_A007907
	mfspr	r8, L1CSR2
	clrrwi	r8, r8, 10	/* clear bits 54:63 (DCSTASHID) */
	mtspr	L1CSR2, r8
#else
#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
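	/* e.g. coreID 3 yields L1 stash id 3*2 + 32 = 38; the matching L2
	 * stash id programmed further down is that value + 1 = 39 */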
	slwi	r8,r4,1
	addi	r8,r8,32
	mtspr	L1CSR2,r8
#endif
#endif	/* CONFIG_SYS_FSL_ERRATUM_A007907 */

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
	defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
	/*
	 * CPU22 applies to P4080 rev 1.0, 2.0, fixed in 3.0.
	 * NMG_CPU_A011 applies to P4080 rev 1.0, 2.0, fixed in 3.0;
	 * it also applies to P3041 rev 1.0, 1.1 and P2041 rev 1.0, 1.1.
	 */
	mfspr	r3,SPRN_SVR
	rlwinm	r6,r3,24,~0x800		/* clear E bit */

	lis	r5,SVR_P4080@h
	ori	r5,r5,SVR_P4080@l
	cmpw	r6,r5
	bne	1f

	rlwinm	r3,r3,0,0xf0
	li	r5,0x30
	cmpw	r3,r5
	bge	2f
1:
#ifdef	CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011
	lis	r3,toreset(enable_cpu_a011_workaround)@ha
	lwz	r3,toreset(enable_cpu_a011_workaround)@l(r3)
	cmpwi	r3,0
	beq	2f
#endif
	mfspr	r3,L1CSR2
	oris	r3,r3,(L1CSR2_DCWS)@h
	mtspr	L1CSR2,r3
2:
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A005812
	/*
	 * The A-005812 workaround sets bit 32 of SPR 976 for SoCs running
	 * in write shadow mode.  This code should run after other code
	 * setting DCWS.
	 */
	mfspr	r3,L1CSR2
	andis.	r3,r3,(L1CSR2_DCWS)@h
	beq	1f
	mfspr	r3, SPRN_HDBCR0
	oris	r3, r3, 0x8000
	mtspr	SPRN_HDBCR0, r3
1:
#endif

#ifdef CONFIG_BACKSIDE_L2_CACHE
	/* skip L2 setup on P2040/P2040E as they have no L2 */
	mfspr	r3,SPRN_SVR
	rlwinm	r6,r3,24,~0x800		/* clear E bit of SVR */

	lis	r3,SVR_P2040@h
	ori	r3,r3,SVR_P2040@l
	cmpw	r6,r3
	beq	3f

	/* Enable/invalidate the L2 cache */
	msync
	lis	r2,(L2CSR0_L2FI|L2CSR0_L2LFC)@h
	ori	r2,r2,(L2CSR0_L2FI|L2CSR0_L2LFC)@l
	mtspr	SPRN_L2CSR0,r2
1:
	mfspr	r3,SPRN_L2CSR0
	and.	r1,r3,r2
	bne	1b

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	addi	r3,r8,1
	mtspr	SPRN_L2CSR1,r3
#endif

	lis	r3,CONFIG_SYS_INIT_L2CSR0@h
	ori	r3,r3,CONFIG_SYS_INIT_L2CSR0@l
	mtspr	SPRN_L2CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L2CSR0
	andis.	r1,r3,L2CSR0_L2E@h
	beq	2b
#endif
3:
	/* setup mapping for the spin table, WIMGE=0b00110 (M and G set) */
	lis	r13,toreset(__spin_table_addr)@h
	ori	r13,r13,toreset(__spin_table_addr)@l
	lwz	r13,0(r13)
	/* mask by 4K */
	rlwinm	r13,r13,0,0,19

	lis	r11,(MAS0_TLBSEL(1)|MAS0_ESEL(1))@h
	mtspr	SPRN_MAS0,r11
	lis	r11,(MAS1_VALID|MAS1_IPROT)@h
	ori	r11,r11,(MAS1_TS|MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r11
	oris	r11,r13,(MAS2_M|MAS2_G)@h
	ori	r11,r13,(MAS2_M|MAS2_G)@l
	mtspr	SPRN_MAS2,r11
	oris	r11,r13,(MAS3_SX|MAS3_SW|MAS3_SR)@h
	ori	r11,r13,(MAS3_SX|MAS3_SW|MAS3_SR)@l
	mtspr	SPRN_MAS3,r11
	li	r11,0
	mtspr	SPRN_MAS7,r11
	tlbwe

	/*
	 * __bootpg_addr has the address of __second_half_boot_page;
	 * jump there in AS=1 space with the cache enabled.
	 */
	lis	r13,toreset(__bootpg_addr)@h
	ori	r13,r13,toreset(__bootpg_addr)@l
	lwz	r11,0(r13)
	mtspr	SPRN_SRR0,r11
	mfmsr	r13
	ori	r12,r13,MSR_IS|MSR_DS@l
	mtspr	SPRN_SRR1,r12
	rfi

	/*
	 * Allocate some space for the SDRAM address of the bootpg.
	 * This variable has to be in the boot page so that it can
	 * be accessed by secondary cores when they come out of reset.
	 */
	.align L1_CACHE_SHIFT
	.globl __bootpg_addr
__bootpg_addr:
	.long	0

	.global __spin_table_addr
__spin_table_addr:
	.long	0

	/*
	 * This variable is set by cpu_init_r() after parsing hwconfig
	 * to enable the workaround for erratum NMG_CPU_A011.
	 */
	.align L1_CACHE_SHIFT
	.global enable_cpu_a011_workaround
enable_cpu_a011_workaround:
	.long	1

	/* Fill in the empty space.  The actual reset vector is
	 * the last word of the page. */
__secondary_start_code_end:
	.space 4092 - (__secondary_start_code_end - __secondary_start_page)
__secondary_reset_vector:
	b	__secondary_start_page


/* This is a separate page for the spin table and cacheable boot code. */
	.align L1_CACHE_SHIFT
	.global __second_half_boot_page
__second_half_boot_page:
#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
	lis	r3,(spin_table_compat - __second_half_boot_page)@h
	ori	r3,r3,(spin_table_compat - __second_half_boot_page)@l
	add	r3,r3,r11 /* r11 has the address of __second_half_boot_page */
	lwz	r14,0(r3)
#endif

#define ENTRY_ADDR_UPPER	0
#define ENTRY_ADDR_LOWER	4
#define ENTRY_R3_UPPER		8
#define ENTRY_R3_LOWER		12
#define ENTRY_RESV		16
#define ENTRY_PIR		20
#define ENTRY_SIZE		64
	/*
	 * Set up the entry.
	 * r10 has the base address of this core's spin table entry.
	 * The spin table is defined as
	 * struct {
	 *	uint64_t entry_addr;
	 *	uint64_t r3;
	 *	uint32_t rsvd1;
	 *	uint32_t pir;
	 * };
	 * We pad this struct to 64 bytes so each entry is in its own
	 * cache line.
	 */
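	/*
	 * Note: on this big-endian machine the 64-bit entry_addr splits
	 * into ENTRY_ADDR_UPPER at offset 0 and ENTRY_ADDR_LOWER at
	 * offset 4 (likewise for r3), which is why the release handshake
	 * below operates on ENTRY_ADDR_LOWER: its low bit serves as the
	 * "not yet released" flag.
	 */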
	li	r3,0
	li	r8,1
	mfspr	r4,SPRN_PIR
	stw	r3,ENTRY_ADDR_UPPER(r10)
	stw	r3,ENTRY_R3_UPPER(r10)
	stw	r4,ENTRY_R3_LOWER(r10)
	stw	r3,ENTRY_RESV(r10)
	stw	r4,ENTRY_PIR(r10)
	msync
	stw	r8,ENTRY_ADDR_LOWER(r10)
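
	/*
	 * At this point the entry is published in the "hold" state: the
	 * msync above orders the field stores before entry_addr_lower is
	 * set to 1.  The OS releases this core by writing the entry
	 * address with bit 0 clear; the loop below spins until it sees
	 * that, and we then store 3 to mark the entry as released.
	 */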

	/* spin waiting for addr */
3:
/*
 * To comply with ePAPR 1.1, the spin table has been moved to cache-enabled
 * memory.  Older OSes may not work with this change.  A patch is waiting to
 * be accepted into the Linux kernel; other OSes need a similar fix to their
 * spin table code.  For OSes with old spin table code, this temporary fix
 * can be enabled by setting the environment variable "spin_table_compat".
 * For new OSes, set "spin_table_compat=no".  After Linux is fixed, we can
 * remove this macro and the related code.  For now, it is enabled by
 * default.
 */
#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
	cmpwi	r14,0
	beq	4f
	dcbf	0, r10
	sync
4:
#endif
	lwz	r4,ENTRY_ADDR_LOWER(r10)
	andi.	r11,r4,1
	bne	3b
	isync

	/* get the upper bits of the addr */
	lwz	r11,ENTRY_ADDR_UPPER(r10)

	/* set up the branch addr */
	mtspr	SPRN_SRR0,r4

	/* mark the entry as released */
	li	r8,3
	stw	r8,ENTRY_ADDR_LOWER(r10)

	/* mask by ~64M to set up the tlb we will jump to */
	rlwinm	r12,r4,0,0,5
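	/* e.g. a release address of 0x23456789 masked with 0xfc000000
	 * (bits 0:5 kept) gives a 64M-aligned base of 0x20000000 */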

	/*
	 * Set up r3, r4, r5, r6, r7, r8, r9.
	 * r3 contains the value to put in the r3 register at secondary cpu
	 * entry.  The high 32 bits are ignored on 32-bit chip
	 * implementations; 64-bit chip implementations, however, shall
	 * load all 64 bits.
	 */
#ifdef CONFIG_SYS_PPC64
	ld	r3,ENTRY_R3_UPPER(r10)
#else
	lwz	r3,ENTRY_R3_LOWER(r10)
#endif
	li	r4,0
	li	r5,0
	li	r6,0
	lis	r7,(64*1024*1024)@h
	li	r8,0
	li	r9,0

	/* load up the pir */
	lwz	r0,ENTRY_PIR(r10)
	mtspr	SPRN_PIR,r0
	mfspr	r0,SPRN_PIR
	stw	r0,ENTRY_PIR(r10)

	mtspr	IVPR,r12	/* interrupt vectors at the 64M-aligned base */
/*
 * Coming here, we know the cpu has one TLB mapping in TLB1[0]
 * which maps 0xfffff000-0xffffffff one-to-one.  We set up a
 * second mapping that maps the release address 1:1 for 64M,
 * and then we jump to it.
 */
	lis	r10,(MAS0_TLBSEL(1)|MAS0_ESEL(0))@h
	mtspr	SPRN_MAS0,r10
	lis	r10,(MAS1_VALID|MAS1_IPROT)@h
	ori	r10,r10,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
	mtspr	SPRN_MAS1,r10
	/* WIMGE = 0b00000 for now */
	mtspr	SPRN_MAS2,r12
	ori	r12,r12,(MAS3_SX|MAS3_SW|MAS3_SR)
	mtspr	SPRN_MAS3,r12
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	SPRN_MAS7,r11
#endif
	tlbwe

/* Now we have another mapping for this page, so we jump to that
 * mapping.
 */
	mtspr	SPRN_SRR1,r13
	rfi


	.align 6
	.globl __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*ENTRY_SIZE

#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
	.align L1_CACHE_SHIFT
	.global spin_table_compat
spin_table_compat:
	.long	1

#endif

__spin_table_end:
	.space 4096 - (__spin_table_end - __spin_table)
    499