/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the entry point for the 64-bit kernel along
 *  with some early initialization code common to all 64-bit powerpc
 *  variants.
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/head-64.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif

/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The primary CPU enters at __start.
 *   3. If the RTAS supports "query-cpu-stopped-state", then secondary
 *      CPUs will enter as directed by "start-cpu" RTAS call, which is
 *      generic_secondary_smp_init, with PIR in r3.
 *   4. Else the secondary CPUs will enter at secondary_hold (0x60) as
 *      directed by the "start-cpu" RTAS call, with PIR in r3.
 * -or- For OPAL entry:
 *   1. The MMU is off, processor in HV mode.
 *   2. The primary CPU enters at 0 with device-tree in r3, OPAL base
 *      in r8, and entry in r9 for debugging purposes.
 *   3. Secondary CPUs enter as directed by OPAL_START_CPU call, which
 *      is at generic_secondary_smp_init, with PIR in r3.
 *
 *  For Book3E processors:
 *   1. The MMU is on running in AS0 in a state defined in ePAPR
 *   2. The kernel is entered at __start
 */

/*
 * boot_from_prom and prom_init run at the physical address. Everything
 * after prom and kexec entry runs at the virtual address (PAGE_OFFSET).
 * Secondaries run at the virtual address from generic_secondary_common_init
 * onward.
 */

OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
	/*
	 * Offsets are relative to the start of the fixed section, and
	 * first_256B starts at 0. Offsets are a bit easier to use here
	 * than the fixed section entry macros.
	 */
	. = 0x0
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	FIXUP_ENDIAN
	b	__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
	 */
	.balign 8
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.8byte	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.8byte	0x0

#ifdef CONFIG_RELOCATABLE
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */

#ifdef CONFIG_RELOCATABLE_TEST
#define RUN_AT_LOAD_DEFAULT 1		/* Test relocation, do not copy to 0 */
#else
#define RUN_AT_LOAD_DEFAULT 0x72756e30  /* "run0" -- relocate to 0 by default */
#endif

	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
	.long	RUN_AT_LOAD_DEFAULT
#endif

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
	FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E_64
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
#endif
	/* Grab our physical cpu number */
	mr	r24,r3
	/* stash r4 for book3e */
	mr	r25,r4

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so we only need to grab the low order offset. */
	std	r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(0)
	cmpdi	0,r12,0
	beq	100b
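	/*
	 * Release protocol: once the master cpu stores the real address of
	 * the function to run (typically generic_secondary_smp_init, per the
	 * entry notes at the top of this file) into
	 * __secondary_hold_spinloop, the load above picks it up and we fall
	 * through; r24 still holds our physical cpu id and is moved back to
	 * r3 before branching via CTR below.
	 */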

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
#ifdef CONFIG_PPC_BOOK3E_64
	tovirt(r12,r12)
#endif
	mtctr	r12
	mr	r3,r24
	/*
	 * It may be the case that other platforms have r4 right to
	 * begin with; this gives us some safety in case it is not.
	 */
#ifdef CONFIG_PPC_BOOK3E_64
	mr	r4,r25
#else
	li	r4,0
#endif
	/* Make sure that patched code is visible */
	isync
	bctr
#else
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
#endif
CLOSE_FIXED_SECTION(first_256B)

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif

USE_TEXT_SECTION()

#include "interrupt_64.S"

#ifdef CONFIG_PPC_BOOK3E_64
/*
 * The booting_thread_hwid holds the thread id we want to boot in the cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long  INVALID_THREAD_HWID
	.align 3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 */
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	11f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
	b	12f
11:
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
12:
	isync
	li	r6, 1
	sld	r6, r6, r3
	mtspr	SPRN_TENS, r6
13:
	blr
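/*
 * Note: IMSRn/INIAn hold the initial MSR and initial instruction address
 * for thread n of the core, and writing the thread's bit to TENS (Thread
 * Enable Set) starts it from that state; TENC (Thread Enable Clear), used
 * below, disables it again. This is the thread management model of the
 * e6500-style cores this code targets.
 */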

/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 */
_GLOBAL(book3e_stop_thread)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	10f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	li	r4, 1
	sld	r4, r4, r3
	mtspr	SPRN_TENC, r4
13:
	blr

_GLOBAL(fsl_secondary_thread_init)
	mfspr	r4,SPRN_BUCSR

	/* Enable branch prediction */
	lis     r3,BUCSR_INIT@h
	ori     r3,r3,BUCSR_INIT@l
	mtspr   SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number.  There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before.  Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value.  This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill.  We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
	 */
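	/*
	 * Illustrative example of the renumbering below: core 1, thread 1
	 * comes out of reset with PIR = (1 << 3) | 1 = 9; the rlwimi keeps
	 * the low bit and shifts the rest right by two, giving
	 * (1 << 1) | 1 = 3, which matches the linear cpu numbering.
	 */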

	mfspr	r3, SPRN_PIR
	cmpwi	r4,0
	bne	1f
	rlwimi	r3, r3, 30, 2, 30
	mtspr	SPRN_PIR, r3
1:
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* Book3E initialization */
	mr	r3,r24
	bl	book3e_secondary_thread_init
	bl	relative_toc

	b	generic_secondary_common_init

#endif /* CONFIG_PPC_BOOK3E_64 */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	FIXUP_ENDIAN

	li	r13,0

	/* Poison TOC */
	li	r2,-1

	mr	r24,r3
	mr	r25,r4

	/* turn on 64-bit mode */
	bl	enable_64b_mode

#ifdef CONFIG_PPC_BOOK3E_64
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	book3e_secondary_core_init
	/* Now NIA and r2 are relocated to PAGE_OFFSET if not already */
/*
 * After common core init has finished, check if the current thread is the
 * one we wanted to boot. If not, start the specified thread and stop the
 * current thread.
 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz     r3, 0(r4)
	li	r5, INVALID_THREAD_HWID
	cmpw	r3, r5
	beq	20f

	/*
	 * The value of booting_thread_hwid has been stored in r3,
	 * so make it invalid.
	 */
	stw	r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr	r8, SPRN_TIR
	cmpw	r3, r8
	beq	20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
	ld	r4, 0(r5)
	bl	book3e_start_thread

	/* stop the current thread */
	mr	r3, r8
	bl	book3e_stop_thread
10:
	b	10b
20:
#else
	/* Now the MMU is off, can branch to our PAGE_OFFSET address */
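	/*
	 * The bcl 20,31,$+4 below is the usual position-independent idiom:
	 * a branch-and-link to the very next instruction (a form that should
	 * not disturb the link-stack predictor), so mflr yields our current
	 * real address; tovirt() converts it to the PAGE_OFFSET alias and
	 * the mtctr/bctr continues execution from there.
	 */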
	bcl	20,31,$+4
1:	mflr	r11
	addi	r11,r11,(2f - 1b)
	tovirt(r11, r11)
	mtctr	r11
	bctr
2:
	bl	relative_toc
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
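	/*
	 * Roughly, the search below is (illustrative C only):
	 *
	 *	for (i = 0; i < nr_cpu_ids; i++)
	 *		if (paca_ptrs[i]->hw_cpu_id == our physical id)
	 *			break;
	 *
	 * r5 counts the logical id and r13 ends up pointing at our paca;
	 * if nothing matches we give up and go to kexec_wait.
	 */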
#ifndef CONFIG_SMP
	b	kexec_wait		/* wait for next kernel if !SMP	 */
#else
	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointer	 */
	ld	r8,0(r8)		/* Get base vaddr of array	 */
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
	LOAD_REG_IMMEDIATE(r7, NR_CPUS)
#else
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
	lwz	r7,0(r7)		/* also the max paca allocated	 */
#endif
	li	r5,0			/* logical cpu id                */
1:
	sldi	r9,r5,3			/* get paca_ptrs[] index from cpu id */
	ldx	r13,r9,r8		/* r13 = paca_ptrs[cpu id]       */
	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r5,r5,1
	cmpw	r5,r7			/* Check if more pacas exist     */
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	kexec_wait		/* next kernel might do better	 */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E_64
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_MIN_SIZE

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r12,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r12,0
	beq	3f
#ifdef CONFIG_PPC64_ELF_ABI_V1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
	lwarx	r4,0,r3
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b
	isync

4:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
	beq	4b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */
	isync				/* In case code patching happened */

	b	__secondary_start
#endif /* SMP */

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
SYM_FUNC_START_LOCAL(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	mflr	r4
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */
SYM_FUNC_END(__mmu_off)

SYM_FUNC_START_LOCAL(start_initialization_book3s)
	mflr	r25

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39		/* 970 */
	beq	1f
	cmpwi	r0,0x3c		/* 970FX */
	beq	1f
	cmpwi	r0,0x44		/* 970MP */
	beq	1f
	cmpwi	r0,0x45		/* 970GX */
	bne	2f
1:	bl	__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	__mmu_off

	/* Now the MMU is off, can return to our PAGE_OFFSET address */
	tovirt(r25,r25)
	mtlr	r25
	blr
SYM_FUNC_END(start_initialization_book3s)
#endif

/*
 * Here is our main kernel entry point. We currently support 2 kinds of
 * entries depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
__start_initialization_multiplatform:
	/* Make sure we are running in 64 bits mode */
	bl	enable_64b_mode

	/* Zero r13 (paca) so early program check / mce don't use it */
	li	r13,0

	/* Poison TOC */
	li	r2,-1

	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif

	/* Get TOC pointer (current runtime address) */
	bl	relative_toc

	/* These functions return to the virtual (PAGE_OFFSET) address */
#ifdef CONFIG_PPC_BOOK3E_64
	bl	start_initialization_book3e
#else
	bl	start_initialization_book3s
#endif /* CONFIG_PPC_BOOK3E_64 */

	/* Get TOC pointer, virtual */
	bl	relative_toc

	/* find out where we are now */

	/* OPAL doesn't pass base address in r4, have to derive it. */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
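	/*
	 * The bcl/mflr pair gives the runtime address of label 0 above;
	 * adding (_stext - 0b) turns that into the address _stext is
	 * currently running at, which __after_prom_start uses to decide
	 * whether, and from where, the kernel needs to be copied.
	 */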

	b	__after_prom_start

__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Get TOC pointer, non-virtual */
	bl	relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to a 16-byte boundary.
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	CFUNC(prom_init)
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap
	.previous

__after_prom_start:
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1	/* flagged to stay where we are? */
	mr	r25,r26		/* then use current kernel base */
	beq	1f
	LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) /* else use static kernel base */
1:	mr	r3,r25
	bl	relocate
#if defined(CONFIG_PPC_BOOK3E_64)
	/* IVPR needs to be set after relocation. */
	bl	init_core_book3e
#endif
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which is reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
	mr.	r4,r26			/* In some cases the loader may  */
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_RELOCATABLE
/*
 * Check whether the kernel has to run as a relocatable kernel, based on the
 * variable __run_at_load. If it is set, the kernel is treated as relocatable;
 * otherwise it will be moved to PHYSICAL_START.
 */
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1
	bne	3f

#ifdef CONFIG_PPC_BOOK3E_64
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub	r5,r5,r11
#else
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
	b	5f
3:
#endif
	/* # bytes of memory to copy */
	lis	r5,(ABS_ADDR(copy_to_here, text))@ha
	addi	r5,r5,(ABS_ADDR(copy_to_here, text))@l

	bl	copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	/* Jump to the copy of this code that we just made */
	addis	r8,r3,(ABS_ADDR(4f, text))@ha
	addi	r12,r8,(ABS_ADDR(4f, text))@l
	mtctr	r12
	bctr

.balign 8
p_end: .8byte _end - copy_to_here

4:
	/*
	 * Now copy the rest of the kernel up to _end, add
	 * _end - copy_to_here to the copy limit and run again.
	 */
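	/*
	 * By now we are executing from the copy at the destination (the bctr
	 * above jumped to the relocated label 4), so copying the remainder
	 * of the image cannot disturb the instructions currently running.
	 */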
	addis   r8,r26,(ABS_ADDR(p_end, text))@ha
	ld      r8,(ABS_ADDR(p_end, text))@l(r8)
	add	r5,r5,r8
5:	bl	copy_and_flush		/* copy the rest */

9:	b	start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	isync
	blr

_ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */
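/*
 * Note: with r0 = 8 the inner loop above copies 8 doublewords (64 bytes)
 * between each dcbst/icbi pair; CPUs with larger cache lines just see some
 * redundant flushes, as the comment in the loop says.
 */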

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* Branch to our PAGE_OFFSET address */
	bcl	20,31,$+4
1:	mflr	r11
	addi	r11,r11,(2f - 1b)
	tovirt(r11, r11)
	mtctr	r11
	bctr
2:
	bl	relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca_ptrs)	/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca_ptrs array */
	sldi	r5,r24,3		/* get paca_ptrs[] index from cpu id */
	ldx	r13,r5,r4		/* r13 = paca_ptrs[cpu id]       */
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_MIN_SIZE

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	       = stack pointer (real addr of temp stack)
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/*
	 * Do early setup for this CPU, in particular initialising the MMU so we
	 * can turn it on below. This is a call to C, which is OK, we're still
	 * running on the emergency stack.
	 */
	bl	CFUNC(early_setup_secondary)

	/*
	 * The primary has initialized our kernel stack for us in the paca, grab
	 * it and put it in r1. We must *not* use it until we turn on the MMU
	 * below, because it may not be inside the RMO.
	 */
	ld	r1, PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r7,IRQS_DISABLED
	stb	r7,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
start_secondary_prolog:
	LOAD_PACA_TOC()
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	CFUNC(start_secondary)
	b	.
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	CFUNC(start_secondary)
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
SYM_FUNC_START_LOCAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E_64
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E_64 */
	LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr
SYM_FUNC_END(enable_64b_mode)
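/*
 * On Book3E the oris sets MSR[CM] (0x80000000), switching the current
 * context to 64-bit computation mode (ICM, for interrupts, is set up later
 * as the comment notes). On Book3S, MSR_64BIT is MSR_SF in the upper word
 * of the MSR, hence the 64-bit immediate and the mtmsrd.
 */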

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on. We branch to the virtual address
 * while still in real mode then call relative_toc again to handle
 * this.
 */
_GLOBAL(relative_toc)
#ifdef CONFIG_PPC_KERNEL_PCREL
	tdnei	r2,-1
	blr
#else
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r11
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11
	mtlr	r0
	blr

.balign 8
p_toc:	.8byte	.TOC. - 0b
#endif
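/*
 * p_toc holds the link-time offset of the TOC base (.TOC.) from label 0b;
 * adding the runtime address of 0b (obtained via bcl/mflr) gives the TOC
 * base for wherever we are currently executing, physical or virtual. With
 * CONFIG_PPC_KERNEL_PCREL there is no TOC: r2 should still hold the -1
 * poison value set at entry, and the tdnei traps if something clobbered it.
 */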

/*
 * This is where the main kernel code starts.
 */
__REF
start_here_multiplatform:
	/* Adjust TOC for moved kernel. Could adjust when moving it instead. */
	bl	relative_toc

	/* Clear out the BSS. It may have been done in prom_init already,
	 * but that's irrelevant since prom_init will soon be detached
	 * from the kernel completely. Besides, we need to clear it now
	 * for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
	LOAD_REG_ADDR(r11, opal)
	std	r28,0(r11);
	std	r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E_64
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* set up a stack pointer */
	LOAD_REG_ADDR(r3,init_thread_union)
	LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
	add	r1,r3,r1
	li	r0,0
	stdu	r0,-STACK_FRAME_MIN_SIZE(r1)
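	/*
	 * init_thread_union is the initial task's stack area: r1 now points
	 * THREAD_SIZE above its base (the top of that stack), and the stdu
	 * stores a zero back-chain word while carving out the first
	 * STACK_FRAME_MIN_SIZE frame.
	 */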

	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */

#ifdef CONFIG_KASAN
	bl	CFUNC(kasan_early_init)
#endif
	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
	mtctr	r12
	bctrl		/* also sets r13 and SPRG_PACA */

	LOAD_REG_ADDR(r3, start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

	/* This is where all platforms converge execution */

start_here_common:
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	LOAD_PACA_TOC()

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl	CFUNC(start_kernel)

	/* Not reached */
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
	.previous
1047