xref: /linux/arch/powerpc/kernel/head_64.S (revision 7b12b9137930eb821b68e1bfa11e9de692208620)
1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 *  Adapted for Power Macintosh by Paul Mackerras.
8 *  Low-level exception handlers and MMU support
9 *  rewritten by Paul Mackerras.
10 *    Copyright (C) 1996 Paul Mackerras.
11 *
12 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
13 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
14 *
15 *  This file contains the low-level support and setup for the
16 *  PowerPC-64 platform, including trap and interrupt dispatch.
17 *
18 *  This program is free software; you can redistribute it and/or
19 *  modify it under the terms of the GNU General Public License
20 *  as published by the Free Software Foundation; either version
21 *  2 of the License, or (at your option) any later version.
22 */
23
24#include <linux/config.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31#include <asm/bug.h>
32#include <asm/cputable.h>
33#include <asm/setup.h>
34#include <asm/hvcall.h>
35#include <asm/iseries/lpar_map.h>
36#include <asm/thread_info.h>
37
38#ifdef CONFIG_PPC_ISERIES
39#define DO_SOFT_DISABLE
40#endif
41
42/*
43 * We lay out physical memory as follows:
44 * 0x0000 - 0x00ff : Secondary processor spin code
45 * 0x0100 - 0x2fff : pSeries Interrupt prologs
46 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
47 * 0x6000 - 0x6fff : Initial (CPU0) segment table
48 * 0x7000 - 0x7fff : FWNMI data area
49 * 0x8000 -        : Early init and support code
50 */
51
52/*
53 *   SPRG Usage
54 *
55 *   Register	Definition
56 *
57 *   SPRG0	reserved for hypervisor
58 *   SPRG1	temp - used to save gpr
59 *   SPRG2	temp - used to save gpr
60 *   SPRG3	virt addr of paca
61 */
62
63/*
64 * Entering into this code we make the following assumptions:
65 *  For pSeries:
66 *   1. The MMU is off & Open Firmware is running in real mode.
67 *   2. The kernel is entered at __start
68 *
69 *  For iSeries:
70 *   1. The MMU is on (as it always is for iSeries)
71 *   2. The kernel is entered at system_reset_iSeries
72 */
73
74	.text
75	.globl  _stext
76_stext:
77#ifdef CONFIG_PPC_MULTIPLATFORM
78_GLOBAL(__start)
79	/* NOP this out unconditionally */
80BEGIN_FTR_SECTION
81	b	.__start_initialization_multiplatform
82END_FTR_SECTION(0, 1)
83#endif /* CONFIG_PPC_MULTIPLATFORM */
84
85	/* Catch branch to 0 in real mode */
86	trap
87
88#ifdef CONFIG_PPC_ISERIES
89	/*
90	 * At offset 0x20, there is a pointer to iSeries LPAR data.
91	 * This is required by the hypervisor
92	 */
93	. = 0x20
94	.llong hvReleaseData-KERNELBASE
95
96	/*
97 * At offsets 0x28 and 0x30 are offsets to the mschunks_map
98	 * array (used by the iSeries LPAR debugger to do translation
99	 * between physical addresses and absolute addresses) and
100	 * to the pidhash table (also used by the debugger)
101	 */
102	.llong mschunks_map-KERNELBASE
103	.llong 0	/* pidhash-KERNELBASE SFRXXX */
104
105	/* Offset 0x38 - Pointer to start of embedded System.map */
106	.globl	embedded_sysmap_start
107embedded_sysmap_start:
108	.llong	0
109	/* Offset 0x40 - Pointer to end of embedded System.map */
110	.globl	embedded_sysmap_end
111embedded_sysmap_end:
112	.llong	0
113
114#endif /* CONFIG_PPC_ISERIES */
115
116	/* Secondary processors spin on this value until it goes to 1. */
117	.globl  __secondary_hold_spinloop
118__secondary_hold_spinloop:
119	.llong	0x0
120
121	/* Secondary processors write this value with their cpu # */
122	/* after they enter the spin loop immediately below.	  */
123	.globl	__secondary_hold_acknowledge
124__secondary_hold_acknowledge:
125	.llong	0x0
126
127	. = 0x60
128/*
129 * The following code is used on pSeries to hold secondary processors
130 * in a spin loop after they have been freed from OpenFirmware, but
131 * before the bulk of the kernel has been relocated.  This code
132 * is relocated to physical address 0x60 before prom_init is run.
133 * All of it must fit below the first exception vector at 0x100.
134 */
135_GLOBAL(__secondary_hold)
136	mfmsr	r24
137	ori	r24,r24,MSR_RI
138	mtmsrd	r24			/* RI on */
139
140	/* Grab our physical cpu number */
141	mr	r24,r3
142
143	/* Tell the master cpu we're here */
144	/* Relocation is off & we are located at an address less */
145	/* than 0x100, so only need to grab low order offset.    */
146	std	r24,__secondary_hold_acknowledge@l(0)
147	sync
148
149	/* All secondary cpus wait here until told to start. */
150100:	ld	r4,__secondary_hold_spinloop@l(0)
151	cmpdi	0,r4,1
152	bne	100b
153
154#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
155	LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
156	mtctr	r4
157	mr	r3,r24
158	bctr
159#else
160	BUG_OPCODE
161#endif
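/*
 * C-level sketch of the handshake above (illustrative only; the names
 * mirror the labels in this file):
 *
 *	// secondary, in __secondary_hold:
 *	__secondary_hold_acknowledge = cpu;
 *	while (__secondary_hold_spinloop != 1)
 *		;
 *	pSeries_secondary_smp_init(cpu);
 *
 *	// master, once the kernel is relocated:
 *	__secondary_hold_spinloop = 1;	// releases all secondaries
 */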
162
163/* This value is used to mark exception frames on the stack. */
164	.section ".toc","aw"
165exception_marker:
166	.tc	ID_72656773_68657265[TC],0x7265677368657265
167	.text
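/*
 * The marker value is just ASCII: 0x7265677368657265 decodes byte by
 * byte to 'r' 'e' 'g' 's' 'h' 'e' 'r' 'e' -- "regs here" -- so the tag
 * is human-readable in a raw stack dump.  Quick check (illustrative):
 *
 *	unsigned long long m = 0x7265677368657265ULL;
 *	// bytes, most significant first: 72 65 67 73 68 65 72 65
 */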
168
169/*
170 * The following macros define the code that appears as
171 * the prologue to each of the exception handlers.  They
172 * are split into two parts to allow a single kernel binary
173 * to be used for pSeries and iSeries.
174 * LOL.  One day... - paulus
175 */
176
177/*
178 * We make as much of the exception code common between native
179 * exception handlers (including pSeries LPAR) and iSeries LPAR
180 * implementations as possible.
181 */
182
183/*
184 * This is the start of the interrupt handlers for pSeries
185 * This code runs with relocation off.
186 */
187#define EX_R9		0
188#define EX_R10		8
189#define EX_R11		16
190#define EX_R12		24
191#define EX_R13		32
192#define EX_SRR0		40
193#define EX_DAR		48
194#define EX_DSISR	56
195#define EX_CCR		60
196#define EX_R3		64
197#define EX_LR		72
198
199/*
200 * We're short on space and time in the exception prolog, so we can't
201 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
202 * low halfword of the address, but for Kdump we need the whole low
203 * word.
204 */
205#ifdef CONFIG_CRASH_DUMP
206#define LOAD_HANDLER(reg, label)					\
207	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
208	ori	reg,reg,(label)@l;	/* .. and the rest */
209#else
210#define LOAD_HANDLER(reg, label)					\
211	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
212#endif
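/*
 * Sketch of how the prolog forms a handler's virtual address (a C
 * illustration, not kernel API).  r13 holds the paca's virtual address,
 * which lives in the kernel region, so clearing its low 32 bits
 * (clrrdi r12,r13,32 below) yields the high part shared by all kernel
 * addresses; LOAD_HANDLER then ORs in the label's low word:
 *
 *	unsigned long handler_vaddr(unsigned long paca_vaddr,
 *				    unsigned long label)
 *	{
 *		unsigned long hi = paca_vaddr & ~0xffffffffUL;
 *		return hi | (label & 0xffffffffUL);
 *	}
 *
 * Without CONFIG_CRASH_DUMP the handlers sit in the first 64KB, so a
 * single ori with label@l suffices; a kdump kernel runs higher in
 * memory, hence the extra oris.
 */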
213
214#define EXCEPTION_PROLOG_PSERIES(area, label)				\
215	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
216	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
217	std	r10,area+EX_R10(r13);					\
218	std	r11,area+EX_R11(r13);					\
219	std	r12,area+EX_R12(r13);					\
220	mfspr	r9,SPRN_SPRG1;						\
221	std	r9,area+EX_R13(r13);					\
222	mfcr	r9;							\
223	clrrdi	r12,r13,32;		/* get high part of &label */	\
224	mfmsr	r10;							\
225	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
226	LOAD_HANDLER(r12,label)						\
227	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
228	mtspr	SPRN_SRR0,r12;						\
229	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
230	mtspr	SPRN_SRR1,r10;						\
231	rfid;								\
232	b	.	/* prevent speculative execution */
233
234/*
235 * This is the start of the interrupt handlers for iSeries
236 * This code runs with relocation on.
237 */
238#define EXCEPTION_PROLOG_ISERIES_1(area)				\
239	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
240	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
241	std	r10,area+EX_R10(r13);					\
242	std	r11,area+EX_R11(r13);					\
243	std	r12,area+EX_R12(r13);					\
244	mfspr	r9,SPRN_SPRG1;						\
245	std	r9,area+EX_R13(r13);					\
246	mfcr	r9
247
248#define EXCEPTION_PROLOG_ISERIES_2					\
249	mfmsr	r10;							\
250	ld	r12,PACALPPACAPTR(r13);					\
251	ld	r11,LPPACASRR0(r12);					\
252	ld	r12,LPPACASRR1(r12);					\
253	ori	r10,r10,MSR_RI;						\
254	mtmsrd	r10,1
255
256/*
257 * The common exception prolog is used for all exceptions except a few,
258 * such as a segment miss on a kernel address.  We have to be prepared
259 * to take another exception from the point where we first touch the
260 * kernel stack onwards.
261 *
262 * On entry r13 points to the paca, r9-r13 are saved in the paca,
263 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
264 * SRR1, and relocation is on.
265 */
266#define EXCEPTION_PROLOG_COMMON(n, area)				   \
267	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
268	mr	r10,r1;			/* Save r1			*/ \
269	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
270	beq-	1f;							   \
271	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
2721:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
273	bge-	cr1,bad_stack;		/* abort if it is		*/ \
274	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
275	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
276	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
277	std	r10,0(r1);		/* make stack chain pointer	*/ \
278	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
279	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
280	ACCOUNT_CPU_USER_ENTRY(r9, r10);				   \
281	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
282	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
283	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
284	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
285	ld	r10,area+EX_R10(r13);					   \
286	std	r9,GPR9(r1);						   \
287	std	r10,GPR10(r1);						   \
288	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
289	ld	r10,area+EX_R12(r13);					   \
290	ld	r11,area+EX_R13(r13);					   \
291	std	r9,GPR11(r1);						   \
292	std	r10,GPR12(r1);						   \
293	std	r11,GPR13(r1);						   \
294	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
295	mflr	r9;			/* save LR in stackframe	*/ \
296	std	r9,_LINK(r1);						   \
297	mfctr	r10;			/* save CTR in stackframe	*/ \
298	std	r10,_CTR(r1);						   \
299	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
300	std	r11,_XER(r1);						   \
301	li	r9,(n)+1;						   \
302	std	r9,_TRAP(r1);		/* set trap number		*/ \
303	li	r10,0;							   \
304	ld	r11,exception_marker@toc(r2);				   \
305	std	r10,RESULT(r1);		/* clear regs->result		*/ \
306	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
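/*
 * For reference, the frame built by EXCEPTION_PROLOG_COMMON mirrors the
 * C-side struct pt_regs of this era (a sketch for orientation; the
 * offsets used above come from asm-offsets):
 *
 *	struct pt_regs {
 *		unsigned long gpr[32];
 *		unsigned long nip, msr, orig_gpr3, ctr, link, xer, ccr;
 *		unsigned long softe, trap, dar, dsisr, result;
 *	};
 *
 * Note the trap field is set to (n)+1: the low bit flags that the
 * non-volatile GPRs are not yet saved; .save_nvgprs clears it.
 */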
307
308/*
309 * Exception vectors.
310 */
311#define STD_EXCEPTION_PSERIES(n, label)			\
312	. = n;						\
313	.globl label##_pSeries;				\
314label##_pSeries:					\
315	HMT_MEDIUM;					\
316	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
317	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
318
319#define STD_EXCEPTION_ISERIES(n, label, area)		\
320	.globl label##_iSeries;				\
321label##_iSeries:					\
322	HMT_MEDIUM;					\
323	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
324	EXCEPTION_PROLOG_ISERIES_1(area);		\
325	EXCEPTION_PROLOG_ISERIES_2;			\
326	b	label##_common
327
328#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
329	.globl label##_iSeries;						\
330label##_iSeries:							\
331	HMT_MEDIUM;							\
332	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
333	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
334	lbz	r10,PACAPROCENABLED(r13);				\
335	cmpwi	0,r10,0;						\
336	beq-	label##_iSeries_masked;					\
337	EXCEPTION_PROLOG_ISERIES_2;					\
338	b	label##_common
339
340#ifdef DO_SOFT_DISABLE
341#define DISABLE_INTS				\
342	lbz	r10,PACAPROCENABLED(r13);	\
343	li	r11,0;				\
344	std	r10,SOFTE(r1);			\
345	mfmsr	r10;				\
346	stb	r11,PACAPROCENABLED(r13);	\
347	ori	r10,r10,MSR_EE;			\
348	mtmsrd	r10,1
349
350#define ENABLE_INTS				\
351	lbz	r10,PACAPROCENABLED(r13);	\
352	mfmsr	r11;				\
353	std	r10,SOFTE(r1);			\
354	ori	r11,r11,MSR_EE;			\
355	mtmsrd	r11,1
356
357#else	/* hard enable/disable interrupts */
358#define DISABLE_INTS
359
360#define ENABLE_INTS				\
361	ld	r12,_MSR(r1);			\
362	mfmsr	r11;				\
363	rlwimi	r11,r12,0,MSR_EE;		\
364	mtmsrd	r11,1
365
366#endif
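/*
 * C-level sketch of DISABLE_INTS above (illustrative; the paca field
 * name is assumed from the PACAPROCENABLED asm-offset).  On iSeries a
 * shared processor must keep hardware interrupts on for the hypervisor,
 * so "disabling" only clears a per-cpu flag; masked interrupts are
 * noted and replayed when the flag is set again:
 *
 *	regs->softe = paca->proc_enabled;  // remember state for this frame
 *	paca->proc_enabled = 0;            // soft-disable
 *	mtmsrd(mfmsr() | MSR_EE);          // hard interrupts stay enabled
 */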
367
368#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
369	.align	7;					\
370	.globl label##_common;				\
371label##_common:						\
372	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
373	DISABLE_INTS;					\
374	bl	.save_nvgprs;				\
375	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
376	bl	hdlr;					\
377	b	.ret_from_except
378
379/*
380 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
381 * in the idle task and therefore need the special idle handling.
382 */
383#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr)	\
384	.align	7;					\
385	.globl label##_common;				\
386label##_common:						\
387	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
388	FINISH_NAP;					\
389	DISABLE_INTS;					\
390	bl	.save_nvgprs;				\
391	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
392	bl	hdlr;					\
393	b	.ret_from_except
394
395#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
396	.align	7;					\
397	.globl label##_common;				\
398label##_common:						\
399	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
400	FINISH_NAP;					\
401	DISABLE_INTS;					\
402	bl	.ppc64_runlatch_on;			\
403	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
404	bl	hdlr;					\
405	b	.ret_from_except_lite
406
407/*
408 * When the idle code in power4_idle puts the CPU into NAP mode,
409 * it has to do so in a loop, and relies on the external interrupt
410 * and decrementer interrupt entry code to get it out of the loop.
411 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
412 * to signal that it is in the loop and needs help to get out.
413 */
414#ifdef CONFIG_PPC_970_NAP
415#define FINISH_NAP				\
416BEGIN_FTR_SECTION				\
417	clrrdi	r11,r1,THREAD_SHIFT;		\
418	ld	r9,TI_LOCAL_FLAGS(r11);		\
419	andi.	r10,r9,_TLF_NAPPING;		\
420	bnel	power4_fixup_nap;		\
421END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
422#else
423#define FINISH_NAP
424#endif
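/*
 * The idle side of this protocol, as a sketch (not the actual
 * power4_idle source): the idle loop sets the flag and naps; on an
 * external or decrementer interrupt, FINISH_NAP clears the flag and
 * power4_fixup_nap rewrites the interrupt return so the nap function
 * appears to return to its caller.
 *
 *	while (!need_resched()) {
 *		current_thread_info()->local_flags |= _TLF_NAPPING;
 *		nap();			// wakes up in the exception code
 *	}
 */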
425
426/*
427 * Start of pSeries system interrupt routines
428 */
429	. = 0x100
430	.globl __start_interrupts
431__start_interrupts:
432
433	STD_EXCEPTION_PSERIES(0x100, system_reset)
434
435	. = 0x200
436_machine_check_pSeries:
437	HMT_MEDIUM
438	mtspr	SPRN_SPRG1,r13		/* save r13 */
439	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
440
441	. = 0x300
442	.globl data_access_pSeries
443data_access_pSeries:
444	HMT_MEDIUM
445	mtspr	SPRN_SPRG1,r13
446BEGIN_FTR_SECTION
447	mtspr	SPRN_SPRG2,r12
448	mfspr	r13,SPRN_DAR
449	mfspr	r12,SPRN_DSISR
450	srdi	r13,r13,60
451	rlwimi	r13,r12,16,0x20
452	mfcr	r12
453	cmpwi	r13,0x2c
454	beq	.do_stab_bolted_pSeries
455	mtcrf	0x80,r12
456	mfspr	r12,SPRN_SPRG2
457END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
458	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
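/*
 * C sketch of the feature-section test above (illustrative; this path
 * only exists on segment-table CPUs, where CPU_FTR_SLB is clear): take
 * the do_stab_bolted path only for a kernel-region address (top nibble
 * of DAR == 0xc) whose DSISR reports a segment-table miss (the
 * 0x00200000 bit, shifted into position by the rlwimi).
 *
 *	int is_bolted_stab_miss(unsigned long dar, unsigned int dsisr)
 *	{
 *		unsigned int sel = (dar >> 60) | ((dsisr >> 16) & 0x20);
 *		return sel == 0x2c;	// 0x20 (seg miss) | 0xc (kernel)
 *	}
 */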
459
460	. = 0x380
461	.globl data_access_slb_pSeries
462data_access_slb_pSeries:
463	HMT_MEDIUM
464	mtspr	SPRN_SPRG1,r13
465	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
466	std	r3,PACA_EXSLB+EX_R3(r13)
467	mfspr	r3,SPRN_DAR
468	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
469	mfcr	r9
470#ifdef __DISABLED__
471	/* Keep that around for when we re-implement dynamic VSIDs */
472	cmpdi	r3,0
473	bge	slb_miss_user_pseries
474#endif /* __DISABLED__ */
475	std	r10,PACA_EXSLB+EX_R10(r13)
476	std	r11,PACA_EXSLB+EX_R11(r13)
477	std	r12,PACA_EXSLB+EX_R12(r13)
478	mfspr	r10,SPRN_SPRG1
479	std	r10,PACA_EXSLB+EX_R13(r13)
480	mfspr	r12,SPRN_SRR1		/* and SRR1 */
481	b	.slb_miss_realmode	/* Rel. branch works in real mode */
482
483	STD_EXCEPTION_PSERIES(0x400, instruction_access)
484
485	. = 0x480
486	.globl instruction_access_slb_pSeries
487instruction_access_slb_pSeries:
488	HMT_MEDIUM
489	mtspr	SPRN_SPRG1,r13
490	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
491	std	r3,PACA_EXSLB+EX_R3(r13)
492	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
493	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
494	mfcr	r9
495#ifdef __DISABLED__
496	/* Keep that around for when we re-implement dynamic VSIDs */
497	cmpdi	r3,0
498	bge	slb_miss_user_pseries
499#endif /* __DISABLED__ */
500	std	r10,PACA_EXSLB+EX_R10(r13)
501	std	r11,PACA_EXSLB+EX_R11(r13)
502	std	r12,PACA_EXSLB+EX_R12(r13)
503	mfspr	r10,SPRN_SPRG1
504	std	r10,PACA_EXSLB+EX_R13(r13)
505	mfspr	r12,SPRN_SRR1		/* and SRR1 */
506	b	.slb_miss_realmode	/* Rel. branch works in real mode */
507
508	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
509	STD_EXCEPTION_PSERIES(0x600, alignment)
510	STD_EXCEPTION_PSERIES(0x700, program_check)
511	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
512	STD_EXCEPTION_PSERIES(0x900, decrementer)
513	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
514	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
515
516	. = 0xc00
517	.globl	system_call_pSeries
518system_call_pSeries:
519	HMT_MEDIUM
520	mr	r9,r13
521	mfmsr	r10
522	mfspr	r13,SPRN_SPRG3
523	mfspr	r11,SPRN_SRR0
524	clrrdi	r12,r13,32
525	oris	r12,r12,system_call_common@h
526	ori	r12,r12,system_call_common@l
527	mtspr	SPRN_SRR0,r12
528	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
529	mfspr	r12,SPRN_SRR1
530	mtspr	SPRN_SRR1,r10
531	rfid
532	b	.	/* prevent speculative execution */
533
534	STD_EXCEPTION_PSERIES(0xd00, single_step)
535	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
536
537	/* We need to deal with the Altivec unavailable exception
538	 * here, which is at 0xf20 and thus lands in the middle of the
539	 * prolog code of the performance monitor exception at 0xf00.
540	 * A little trickery is thus necessary.
541	 */
542	. = 0xf00
543	b	performance_monitor_pSeries
544
545	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
546
547	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
548	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
549
550	. = 0x3000
551
552/*** pSeries interrupt support ***/
553
554	/* moved from 0xf00 */
555	STD_EXCEPTION_PSERIES(., performance_monitor)
556
557	.align	7
558_GLOBAL(do_stab_bolted_pSeries)
559	mtcrf	0x80,r12
560	mfspr	r12,SPRN_SPRG2
561	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
562
563/*
564 * We have some room here, so we use it to put
565 * the pSeries SLB miss user trampoline code reasonably far
566 * away from slb_miss_user_common to avoid problems with rfid
567 *
568 * This is used when the SLB miss handler has to go virtual,
569 * which doesn't happen anymore for now but will again once we
570 * re-implement dynamic VSIDs for shared page tables
571 */
572#ifdef __DISABLED__
573slb_miss_user_pseries:
574	std	r10,PACA_EXGEN+EX_R10(r13)
575	std	r11,PACA_EXGEN+EX_R11(r13)
576	std	r12,PACA_EXGEN+EX_R12(r13)
577	mfspr	r10,SPRG1
578	ld	r11,PACA_EXSLB+EX_R9(r13)
579	ld	r12,PACA_EXSLB+EX_R3(r13)
580	std	r10,PACA_EXGEN+EX_R13(r13)
581	std	r11,PACA_EXGEN+EX_R9(r13)
582	std	r12,PACA_EXGEN+EX_R3(r13)
583	clrrdi	r12,r13,32
584	mfmsr	r10
585	mfspr	r11,SRR0			/* save SRR0 */
586	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
587	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
588	mtspr	SRR0,r12
589	mfspr	r12,SRR1			/* and SRR1 */
590	mtspr	SRR1,r10
591	rfid
592	b	.				/* prevent spec. execution */
593#endif /* __DISABLED__ */
594
595/*
596 * Vectors for the FWNMI option.  Share common code.
597 */
598	.globl system_reset_fwnmi
599	.align	7
600system_reset_fwnmi:
601	HMT_MEDIUM
602	mtspr	SPRN_SPRG1,r13		/* save r13 */
603	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
604
605	.globl machine_check_fwnmi
606	.align	7
607machine_check_fwnmi:
608	HMT_MEDIUM
609	mtspr	SPRN_SPRG1,r13		/* save r13 */
610	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
611
612#ifdef CONFIG_PPC_ISERIES
613/*** iSeries-LPAR interrupt handlers ***/
614
615	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
616
617	.globl data_access_iSeries
618data_access_iSeries:
619	mtspr	SPRN_SPRG1,r13
620BEGIN_FTR_SECTION
621	mtspr	SPRN_SPRG2,r12
622	mfspr	r13,SPRN_DAR
623	mfspr	r12,SPRN_DSISR
624	srdi	r13,r13,60
625	rlwimi	r13,r12,16,0x20
626	mfcr	r12
627	cmpwi	r13,0x2c
628	beq	.do_stab_bolted_iSeries
629	mtcrf	0x80,r12
630	mfspr	r12,SPRN_SPRG2
631END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
632	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
633	EXCEPTION_PROLOG_ISERIES_2
634	b	data_access_common
635
636.do_stab_bolted_iSeries:
637	mtcrf	0x80,r12
638	mfspr	r12,SPRN_SPRG2
639	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
640	EXCEPTION_PROLOG_ISERIES_2
641	b	.do_stab_bolted
642
643	.globl	data_access_slb_iSeries
644data_access_slb_iSeries:
645	mtspr	SPRN_SPRG1,r13		/* save r13 */
646	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
647	std	r3,PACA_EXSLB+EX_R3(r13)
648	mfspr	r3,SPRN_DAR
649	std	r9,PACA_EXSLB+EX_R9(r13)
650	mfcr	r9
651#ifdef __DISABLED__
652	cmpdi	r3,0
653	bge	slb_miss_user_iseries
654#endif
655	std	r10,PACA_EXSLB+EX_R10(r13)
656	std	r11,PACA_EXSLB+EX_R11(r13)
657	std	r12,PACA_EXSLB+EX_R12(r13)
658	mfspr	r10,SPRN_SPRG1
659	std	r10,PACA_EXSLB+EX_R13(r13)
660	ld	r12,PACALPPACAPTR(r13)
661	ld	r12,LPPACASRR1(r12)
662	b	.slb_miss_realmode
663
664	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
665
666	.globl	instruction_access_slb_iSeries
667instruction_access_slb_iSeries:
668	mtspr	SPRN_SPRG1,r13		/* save r13 */
669	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
670	std	r3,PACA_EXSLB+EX_R3(r13)
671	ld	r3,PACALPPACAPTR(r13)
672	ld	r3,LPPACASRR0(r3)	/* get SRR0 value */
673	std	r9,PACA_EXSLB+EX_R9(r13)
674	mfcr	r9
675#ifdef __DISABLED__
676	cmpdi	r3,0
677	bge	.slb_miss_user_iseries
678#endif
679	std	r10,PACA_EXSLB+EX_R10(r13)
680	std	r11,PACA_EXSLB+EX_R11(r13)
681	std	r12,PACA_EXSLB+EX_R12(r13)
682	mfspr	r10,SPRN_SPRG1
683	std	r10,PACA_EXSLB+EX_R13(r13)
684	ld	r12,PACALPPACAPTR(r13)
685	ld	r12,LPPACASRR1(r12)
686	b	.slb_miss_realmode
687
688#ifdef __DISABLED__
689slb_miss_user_iseries:
690	std	r10,PACA_EXGEN+EX_R10(r13)
691	std	r11,PACA_EXGEN+EX_R11(r13)
692	std	r12,PACA_EXGEN+EX_R12(r13)
693	mfspr	r10,SPRG1
694	ld	r11,PACA_EXSLB+EX_R9(r13)
695	ld	r12,PACA_EXSLB+EX_R3(r13)
696	std	r10,PACA_EXGEN+EX_R13(r13)
697	std	r11,PACA_EXGEN+EX_R9(r13)
698	std	r12,PACA_EXGEN+EX_R3(r13)
699	EXCEPTION_PROLOG_ISERIES_2
700	b	slb_miss_user_common
701#endif
702
703	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
704	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
705	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
706	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
707	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
708	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
709	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
710
711	.globl	system_call_iSeries
712system_call_iSeries:
713	mr	r9,r13
714	mfspr	r13,SPRN_SPRG3
715	EXCEPTION_PROLOG_ISERIES_2
716	b	system_call_common
717
718	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
719	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
720	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)
721
722	.globl system_reset_iSeries
723system_reset_iSeries:
724	mfspr	r13,SPRN_SPRG3		/* Get paca address */
725	mfmsr	r24
726	ori	r24,r24,MSR_RI
727	mtmsrd	r24			/* RI on */
728	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
729	cmpwi	0,r24,0			/* Are we processor 0? */
730	beq	.__start_initialization_iSeries	/* Start up the first processor */
731	mfspr	r4,SPRN_CTRLF
732	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
733	andc	r4,r4,r5
734	mtspr	SPRN_CTRLT,r4
735
7361:
737	HMT_LOW
738#ifdef CONFIG_SMP
739	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
740					 * should start */
741	sync
742	LOAD_REG_IMMEDIATE(r3,current_set)
743	sldi	r28,r24,3		/* get current_set[cpu#] */
744	ldx	r3,r3,r28
745	addi	r1,r3,THREAD_SIZE
746	subi	r1,r1,STACK_FRAME_OVERHEAD
747
748	cmpwi	0,r23,0
749	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
750	bne	.__secondary_start		/* We have been told to go */
751iSeries_secondary_smp_loop:
752	/* Let the Hypervisor know we are alive */
753	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
754	lis	r3,0x8002
755	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
756#else /* CONFIG_SMP */
757	/* Yield the processor.  This is required for non-SMP kernels
758	 * which are running on multi-threaded machines. */
759	lis	r3,0x8000
760	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
761	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
762	li	r4,0			/* "yield timed" */
763	li	r5,-1			/* "yield forever" */
764#endif /* CONFIG_SMP */
765	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
766	sc				/* Invoke the hypervisor via a system call */
767	mfspr	r13,SPRN_SPRG3		/* Put r13 back ???? */
768	b	1b			/* If SMP not configured, secondaries
769					 * loop forever */
770
771	.globl decrementer_iSeries_masked
772decrementer_iSeries_masked:
773	/* We may not have a valid TOC pointer in here. */
774	li	r11,1
775	ld	r12,PACALPPACAPTR(r13)
776	stb	r11,LPPACADECRINT(r12)
777	LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
778	lwz	r12,0(r12)
779	mtspr	SPRN_DEC,r12
780	/* fall through */
781
782	.globl hardware_interrupt_iSeries_masked
783hardware_interrupt_iSeries_masked:
784	mtcrf	0x80,r9		/* Restore regs */
785	ld	r12,PACALPPACAPTR(r13)
786	ld	r11,LPPACASRR0(r12)
787	ld	r12,LPPACASRR1(r12)
788	mtspr	SPRN_SRR0,r11
789	mtspr	SPRN_SRR1,r12
790	ld	r9,PACA_EXGEN+EX_R9(r13)
791	ld	r10,PACA_EXGEN+EX_R10(r13)
792	ld	r11,PACA_EXGEN+EX_R11(r13)
793	ld	r12,PACA_EXGEN+EX_R12(r13)
794	ld	r13,PACA_EXGEN+EX_R13(r13)
795	rfid
796	b	.	/* prevent speculative execution */
797#endif /* CONFIG_PPC_ISERIES */
798
799/*** Common interrupt handlers ***/
800
801	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
802
803	/*
804	 * Machine check is different because we use a different
805	 * save area: PACA_EXMC instead of PACA_EXGEN.
806	 */
807	.align	7
808	.globl machine_check_common
809machine_check_common:
810	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
811	FINISH_NAP
812	DISABLE_INTS
813	bl	.save_nvgprs
814	addi	r3,r1,STACK_FRAME_OVERHEAD
815	bl	.machine_check_exception
816	b	.ret_from_except
817
818	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
819	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
820	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
821	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
822	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
823	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
824	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
825#ifdef CONFIG_ALTIVEC
826	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
827#else
828	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
829#endif
830
831/*
832 * Here we have detected that the kernel stack pointer is bad.
833 * R9 contains the saved CR, r13 points to the paca,
834 * r10 contains the (bad) kernel stack pointer,
835 * r11 and r12 contain the saved SRR0 and SRR1.
836 * We switch to using an emergency stack, save the registers there,
837 * and call kernel_bad_stack(), which panics.
838 */
839bad_stack:
840	ld	r1,PACAEMERGSP(r13)
841	subi	r1,r1,64+INT_FRAME_SIZE
842	std	r9,_CCR(r1)
843	std	r10,GPR1(r1)
844	std	r11,_NIP(r1)
845	std	r12,_MSR(r1)
846	mfspr	r11,SPRN_DAR
847	mfspr	r12,SPRN_DSISR
848	std	r11,_DAR(r1)
849	std	r12,_DSISR(r1)
850	mflr	r10
851	mfctr	r11
852	mfxer	r12
853	std	r10,_LINK(r1)
854	std	r11,_CTR(r1)
855	std	r12,_XER(r1)
856	SAVE_GPR(0,r1)
857	SAVE_GPR(2,r1)
858	SAVE_4GPRS(3,r1)
859	SAVE_2GPRS(7,r1)
860	SAVE_10GPRS(12,r1)
861	SAVE_10GPRS(22,r1)
862	addi	r11,r1,INT_FRAME_SIZE
863	std	r11,0(r1)
864	li	r12,0
865	std	r12,0(r11)
866	ld	r2,PACATOC(r13)
8671:	addi	r3,r1,STACK_FRAME_OVERHEAD
868	bl	.kernel_bad_stack
869	b	1b
870
871/*
872 * Return from an exception with minimal checks.
873 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
874 * If interrupts have been enabled, or anything has been
875 * done that might have changed the scheduling status of
876 * any task or sent any task a signal, you should use
877 * ret_from_except or ret_from_except_lite instead of this.
878 */
879	.globl	fast_exception_return
880fast_exception_return:
881	ld	r12,_MSR(r1)
882	ld	r11,_NIP(r1)
883	andi.	r3,r12,MSR_RI		/* check if RI is set */
884	beq-	unrecov_fer
885
886#ifdef CONFIG_VIRT_CPU_ACCOUNTING
887	andi.	r3,r12,MSR_PR
888	beq	2f
889	ACCOUNT_CPU_USER_EXIT(r3, r4)
8902:
891#endif
892
893	ld	r3,_CCR(r1)
894	ld	r4,_LINK(r1)
895	ld	r5,_CTR(r1)
896	ld	r6,_XER(r1)
897	mtcr	r3
898	mtlr	r4
899	mtctr	r5
900	mtxer	r6
901	REST_GPR(0, r1)
902	REST_8GPRS(2, r1)
903
904	mfmsr	r10
905	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
906	mtmsrd	r10,1
907
908	mtspr	SPRN_SRR1,r12
909	mtspr	SPRN_SRR0,r11
910	REST_4GPRS(10, r1)
911	ld	r1,GPR1(r1)
912	rfid
913	b	.	/* prevent speculative execution */
914
915unrecov_fer:
916	bl	.save_nvgprs
9171:	addi	r3,r1,STACK_FRAME_OVERHEAD
918	bl	.unrecoverable_exception
919	b	1b
920
921/*
922 * Here r13 points to the paca, r9 contains the saved CR,
923 * SRR0 and SRR1 are saved in r11 and r12,
924 * r9 - r13 are saved in paca->exgen.
925 */
926	.align	7
927	.globl data_access_common
928data_access_common:
929	mfspr	r10,SPRN_DAR
930	std	r10,PACA_EXGEN+EX_DAR(r13)
931	mfspr	r10,SPRN_DSISR
932	stw	r10,PACA_EXGEN+EX_DSISR(r13)
933	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
934	ld	r3,PACA_EXGEN+EX_DAR(r13)
935	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
936	li	r5,0x300
937	b	.do_hash_page	 	/* Try to handle as hpte fault */
938
939	.align	7
940	.globl instruction_access_common
941instruction_access_common:
942	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
943	ld	r3,_NIP(r1)
944	andis.	r4,r12,0x5820
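	/* For an ISI the fault status lives in SRR1 rather than DSISR; the
	 * bits kept here (0x58200000) sit in the same positions as the
	 * corresponding DSISR bits, so do_hash_page can treat instruction
	 * and data faults uniformly. */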
945	li	r5,0x400
946	b	.do_hash_page		/* Try to handle as hpte fault */
947
948/*
949 * Here is the common SLB miss handler for user addresses, used when
950 * going to virtual mode for SLB misses; it is currently unused
951 */
952#ifdef __DISABLED__
953	.align	7
954	.globl	slb_miss_user_common
955slb_miss_user_common:
956	mflr	r10
957	std	r3,PACA_EXGEN+EX_DAR(r13)
958	stw	r9,PACA_EXGEN+EX_CCR(r13)
959	std	r10,PACA_EXGEN+EX_LR(r13)
960	std	r11,PACA_EXGEN+EX_SRR0(r13)
961	bl	.slb_allocate_user
962
963	ld	r10,PACA_EXGEN+EX_LR(r13)
964	ld	r3,PACA_EXGEN+EX_R3(r13)
965	lwz	r9,PACA_EXGEN+EX_CCR(r13)
966	ld	r11,PACA_EXGEN+EX_SRR0(r13)
967	mtlr	r10
968	beq-	slb_miss_fault
969
970	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
971	beq-	unrecov_user_slb
972	mfmsr	r10
973
974.machine push
975.machine "power4"
976	mtcrf	0x80,r9
977.machine pop
978
979	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
980	mtmsrd	r10,1
981
982	mtspr	SRR0,r11
983	mtspr	SRR1,r12
984
985	ld	r9,PACA_EXGEN+EX_R9(r13)
986	ld	r10,PACA_EXGEN+EX_R10(r13)
987	ld	r11,PACA_EXGEN+EX_R11(r13)
988	ld	r12,PACA_EXGEN+EX_R12(r13)
989	ld	r13,PACA_EXGEN+EX_R13(r13)
990	rfid
991	b	.
992
993slb_miss_fault:
994	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
995	ld	r4,PACA_EXGEN+EX_DAR(r13)
996	li	r5,0
997	std	r4,_DAR(r1)
998	std	r5,_DSISR(r1)
999	b	.handle_page_fault
1000
1001unrecov_user_slb:
1002	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1003	DISABLE_INTS
1004	bl	.save_nvgprs
10051:	addi	r3,r1,STACK_FRAME_OVERHEAD
1006	bl	.unrecoverable_exception
1007	b	1b
1008
1009#endif /* __DISABLED__ */
1010
1011
1012/*
1013 * r13 points to the PACA, r9 contains the saved CR,
1014 * r12 contains the saved SRR1, SRR0 is still ready for return
1015 * r3 has the faulting address
1016 * r9 - r13 are saved in paca->exslb.
1017 * r3 is saved in paca->exslb (EX_R3)
1018 * We assume we aren't going to take any exceptions during this procedure.
1019 */
1020_GLOBAL(slb_miss_realmode)
1021	mflr	r10
1022
1023	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1024	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1025
1026	bl	.slb_allocate_realmode
1027
1028	/* All done -- return from exception. */
1029
1030	ld	r10,PACA_EXSLB+EX_LR(r13)
1031	ld	r3,PACA_EXSLB+EX_R3(r13)
1032	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1033#ifdef CONFIG_PPC_ISERIES
1034	ld	r11,PACALPPACAPTR(r13)
1035	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
1036#endif /* CONFIG_PPC_ISERIES */
1037
1038	mtlr	r10
1039
1040	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1041	beq-	unrecov_slb
1042
1043.machine	push
1044.machine	"power4"
1045	mtcrf	0x80,r9
1046	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1047.machine	pop
1048
1049#ifdef CONFIG_PPC_ISERIES
1050	mtspr	SPRN_SRR0,r11
1051	mtspr	SPRN_SRR1,r12
1052#endif /* CONFIG_PPC_ISERIES */
1053	ld	r9,PACA_EXSLB+EX_R9(r13)
1054	ld	r10,PACA_EXSLB+EX_R10(r13)
1055	ld	r11,PACA_EXSLB+EX_R11(r13)
1056	ld	r12,PACA_EXSLB+EX_R12(r13)
1057	ld	r13,PACA_EXSLB+EX_R13(r13)
1058	rfid
1059	b	.	/* prevent speculative execution */
1060
1061unrecov_slb:
1062	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1063	DISABLE_INTS
1064	bl	.save_nvgprs
10651:	addi	r3,r1,STACK_FRAME_OVERHEAD
1066	bl	.unrecoverable_exception
1067	b	1b
1068
1069	.align	7
1070	.globl hardware_interrupt_common
1071	.globl hardware_interrupt_entry
1072hardware_interrupt_common:
1073	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
1074	FINISH_NAP
1075hardware_interrupt_entry:
1076	DISABLE_INTS
1077	bl	.ppc64_runlatch_on
1078	addi	r3,r1,STACK_FRAME_OVERHEAD
1079	bl	.do_IRQ
1080	b	.ret_from_except_lite
1081
1082#ifdef CONFIG_PPC_970_NAP
1083power4_fixup_nap:
1084	andc	r9,r9,r10
1085	std	r9,TI_LOCAL_FLAGS(r11)
1086	ld	r10,_LINK(r1)		/* make idle task do the */
1087	std	r10,_NIP(r1)		/* equivalent of a blr */
1088	blr
1089#endif
1090
1091	.align	7
1092	.globl alignment_common
1093alignment_common:
1094	mfspr	r10,SPRN_DAR
1095	std	r10,PACA_EXGEN+EX_DAR(r13)
1096	mfspr	r10,SPRN_DSISR
1097	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1098	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1099	ld	r3,PACA_EXGEN+EX_DAR(r13)
1100	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1101	std	r3,_DAR(r1)
1102	std	r4,_DSISR(r1)
1103	bl	.save_nvgprs
1104	addi	r3,r1,STACK_FRAME_OVERHEAD
1105	ENABLE_INTS
1106	bl	.alignment_exception
1107	b	.ret_from_except
1108
1109	.align	7
1110	.globl program_check_common
1111program_check_common:
1112	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1113	bl	.save_nvgprs
1114	addi	r3,r1,STACK_FRAME_OVERHEAD
1115	ENABLE_INTS
1116	bl	.program_check_exception
1117	b	.ret_from_except
1118
1119	.align	7
1120	.globl fp_unavailable_common
1121fp_unavailable_common:
1122	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1123	bne	.load_up_fpu		/* if from user, just load it up */
1124	bl	.save_nvgprs
1125	addi	r3,r1,STACK_FRAME_OVERHEAD
1126	ENABLE_INTS
1127	bl	.kernel_fp_unavailable_exception
1128	BUG_OPCODE
1129
1130	.align	7
1131	.globl altivec_unavailable_common
1132altivec_unavailable_common:
1133	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1134#ifdef CONFIG_ALTIVEC
1135BEGIN_FTR_SECTION
1136	bne	.load_up_altivec	/* if from user, just load it up */
1137END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1138#endif
1139	bl	.save_nvgprs
1140	addi	r3,r1,STACK_FRAME_OVERHEAD
1141	ENABLE_INTS
1142	bl	.altivec_unavailable_exception
1143	b	.ret_from_except
1144
1145#ifdef CONFIG_ALTIVEC
1146/*
1147 * load_up_altivec(unused, unused, tsk)
1148 * Disable VMX for the task which had it previously,
1149 * and save its vector registers in its thread_struct.
1150 * Enables the VMX for use in the kernel on return.
1151 * On SMP we know the VMX is free, since we give it up every
1152 * switch (i.e., no lazy save of the vector registers).
1153 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1154 */
1155_STATIC(load_up_altivec)
1156	mfmsr	r5			/* grab the current MSR */
1157	oris	r5,r5,MSR_VEC@h
1158	mtmsrd	r5			/* enable use of VMX now */
1159	isync
1160
1161/*
1162 * For SMP, we don't do lazy VMX switching because it just gets too
1163 * horrendously complex, especially when a task switches from one CPU
1164 * to another.  Instead we call giveup_altivec in switch_to.
1165 * VRSAVE isn't dealt with here; that is done in the normal context
1166 * switch code. Note that we could rely on the VRSAVE value to eventually
1167 * avoid saving all of the VREGs here...
1168 */
1169#ifndef CONFIG_SMP
1170	ld	r3,last_task_used_altivec@got(r2)
1171	ld	r4,0(r3)
1172	cmpdi	0,r4,0
1173	beq	1f
1174	/* Save VMX state to last_task_used_altivec's THREAD struct */
1175	addi	r4,r4,THREAD
1176	SAVE_32VRS(0,r5,r4)
1177	mfvscr	vr0
1178	li	r10,THREAD_VSCR
1179	stvx	vr0,r10,r4
1180	/* Disable VMX for last_task_used_altivec */
1181	ld	r5,PT_REGS(r4)
1182	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1183	lis	r6,MSR_VEC@h
1184	andc	r4,r4,r6
1185	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
11861:
1187#endif /* CONFIG_SMP */
1188	/* Hack: if we get an altivec unavailable trap with VRSAVE
1189	 * set to all zeros, we assume this is a broken application
1190	 * that fails to set it properly, and thus we switch it to
1191	 * all 1's
1192	 */
1193	mfspr	r4,SPRN_VRSAVE
1194	cmpdi	0,r4,0
1195	bne+	1f
1196	li	r4,-1
1197	mtspr	SPRN_VRSAVE,r4
11981:
1199	/* enable use of VMX after return */
1200	ld	r4,PACACURRENT(r13)
1201	addi	r5,r4,THREAD		/* Get THREAD */
1202	oris	r12,r12,MSR_VEC@h
1203	std	r12,_MSR(r1)
1204	li	r4,1
1205	li	r10,THREAD_VSCR
1206	stw	r4,THREAD_USED_VR(r5)
1207	lvx	vr0,r10,r5
1208	mtvscr	vr0
1209	REST_32VRS(0,r4,r5)
1210#ifndef CONFIG_SMP
1211	/* Update last_task_used_altivec to 'current' */
1212	subi	r4,r5,THREAD		/* Back to 'current' */
1213	std	r4,0(r3)
1214#endif /* CONFIG_SMP */
1215	/* restore registers and return */
1216	b	fast_exception_return
1217#endif /* CONFIG_ALTIVEC */
1218
1219/*
1220 * Hash table stuff
1221 */
1222	.align	7
1223_GLOBAL(do_hash_page)
1224	std	r3,_DAR(r1)
1225	std	r4,_DSISR(r1)
1226
1227	andis.	r0,r4,0xa450		/* weird error? */
1228	bne-	.handle_page_fault	/* if so; otherwise try to insert a HPTE */
1229BEGIN_FTR_SECTION
1230	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1231	bne-	.do_ste_alloc		/* If so handle it */
1232END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1233
1234	/*
1235	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1236	 * accessing a userspace segment (even from the kernel). We assume
1237	 * kernel addresses always have the high bit set.
1238	 */
1239	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1240	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1241	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1242	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1243	ori	r4,r4,1			/* add _PAGE_PRESENT */
1244	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
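	/*
	 * C sketch of the rotate/insert tricks above, with the bit values
	 * they encode (illustrative; the real definitions live in
	 * pgtable.h):
	 *
	 *	access = 0x1;				// _PAGE_PRESENT
	 *	if (dsisr & 0x02000000)			// store fault
	 *		access |= 0x200;		// _PAGE_RW
	 *	if ((msr & MSR_PR) || !(dar >> 63))	// user or user segment
	 *		access |= 0x2;			// _PAGE_USER
	 *	if (trap == 0x400)			// ISI
	 *		access |= 0x4;			// _PAGE_EXEC
	 */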
1245
1246	/*
1247	 * On iSeries, we soft-disable interrupts here, then
1248	 * hard-enable interrupts so that the hash_page code can spin on
1249	 * the hash_table_lock without problems on a shared processor.
1250	 */
1251	DISABLE_INTS
1252
1253	/*
1254	 * r3 contains the faulting address
1255	 * r4 contains the required access permissions
1256	 * r5 contains the trap number
1257	 *
1258	 * at return r3 = 0 for success
1259	 */
1260	bl	.hash_page		/* build HPTE if possible */
1261	cmpdi	r3,0			/* see if hash_page succeeded */
1262
1263#ifdef DO_SOFT_DISABLE
1264	/*
1265	 * If we had interrupts soft-enabled at the point where the
1266	 * DSI/ISI occurred, and an interrupt came in during hash_page,
1267	 * handle it now.
1268	 * We jump to ret_from_except_lite rather than fast_exception_return
1269	 * because ret_from_except_lite will check for and handle pending
1270	 * interrupts if necessary.
1271	 */
1272	beq	.ret_from_except_lite
1273	/* For a hash failure, we don't bother re-enabling interrupts */
1274	ble-	12f
1275
1276	/*
1277	 * hash_page couldn't handle it, set soft interrupt enable back
1278	 * to what it was before the trap.  Note that .local_irq_restore
1279	 * handles any interrupts pending at this point.
1280	 */
1281	ld	r3,SOFTE(r1)
1282	bl	.local_irq_restore
1283	b	11f
1284#else
1285	beq	fast_exception_return   /* Return from exception on success */
1286	ble-	12f			/* Failure return from hash_page */
1287
1288	/* fall through */
1289#endif
1290
1291/* Here we have a page fault that hash_page can't handle. */
1292_GLOBAL(handle_page_fault)
1293	ENABLE_INTS
129411:	ld	r4,_DAR(r1)
1295	ld	r5,_DSISR(r1)
1296	addi	r3,r1,STACK_FRAME_OVERHEAD
1297	bl	.do_page_fault
1298	cmpdi	r3,0
1299	beq+	.ret_from_except_lite
1300	bl	.save_nvgprs
1301	mr	r5,r3
1302	addi	r3,r1,STACK_FRAME_OVERHEAD
1303	lwz	r4,_DAR(r1)
1304	bl	.bad_page_fault
1305	b	.ret_from_except
1306
1307/* We have a page fault that hash_page could handle but HV refused
1308 * the PTE insertion
1309 */
131012:	bl	.save_nvgprs
1311	addi	r3,r1,STACK_FRAME_OVERHEAD
1312	lwz	r4,_DAR(r1)
1313	bl	.low_hash_fault
1314	b	.ret_from_except
1315
1316	/* here we have a segment miss */
1317_GLOBAL(do_ste_alloc)
1318	bl	.ste_allocate		/* try to insert stab entry */
1319	cmpdi	r3,0
1320	beq+	fast_exception_return
1321	b	.handle_page_fault
1322
1323/*
1324 * r13 points to the PACA, r9 contains the saved CR,
1325 * r11 and r12 contain the saved SRR0 and SRR1.
1326 * r9 - r13 are saved in paca->exslb.
1327 * We assume we aren't going to take any exceptions during this procedure.
1328 * We assume (DAR >> 60) == 0xc.
1329 */
1330	.align	7
1331_GLOBAL(do_stab_bolted)
1332	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1333	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1334
1335	/* Hash to the primary group */
1336	ld	r10,PACASTABVIRT(r13)
1337	mfspr	r11,SPRN_DAR
1338	srdi	r11,r11,28
1339	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1340
1341	/* Calculate VSID */
1342	/* This is a kernel address, so protovsid = ESID */
1343	ASM_VSID_SCRAMBLE(r11, r9)
1344	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
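	/*
	 * In C terms (a sketch; the scramble constants are those used by
	 * ASM_VSID_SCRAMBLE and are assumed here, not restated):
	 *
	 *	esid  = dar >> 28;		// kernel address: protovsid == esid
	 *	vsid  = vsid_scramble(esid);	// multiplicative hash
	 *	group = stab_virt | ((esid & 0x1f) << 7);
	 *
	 * Each group holds eight 16-byte STEs (128 bytes), so the low five
	 * ESID bits select one of the 32 groups in the 4K segment table.
	 */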
1345
1346	/* Search the primary group for a free entry */
13471:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1348	andi.	r11,r11,0x80
1349	beq	2f
1350	addi	r10,r10,16
1351	andi.	r11,r10,0x70
1352	bne	1b
1353
1354	/* Stick to only searching the primary group for now.		*/
1355	/* At least for now, we use a very simple random castout scheme */
1356	/* Use the TB as a random number; OR in 1 to avoid entry 0	*/
1357	mftb	r11
1358	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1359	ori	r11,r11,0x10
1360
1361	/* r10 currently points to an ste one past the group of interest */
1362	/* make it point to the randomly selected entry			*/
1363	subi	r10,r10,128
1364	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1365
1366	isync			/* mark the entry invalid		*/
1367	ld	r11,0(r10)
1368	rldicl	r11,r11,56,1	/* clear the valid bit */
1369	rotldi	r11,r11,8
1370	std	r11,0(r10)
1371	sync
1372
1373	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1374	slbie	r11
1375
13762:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1377	eieio
1378
1379	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
1380	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1381	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1382	std	r11,0(r10)	/* Put new entry back into the stab	*/
1383
1384	sync
1385
1386	/* All done -- return from exception. */
1387	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1388	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1389
1390	andi.	r10,r12,MSR_RI
1391	beq-	unrecov_slb
1392
1393	mtcrf	0x80,r9			/* restore CR */
1394
1395	mfmsr	r10
1396	clrrdi	r10,r10,2
1397	mtmsrd	r10,1
1398
1399	mtspr	SPRN_SRR0,r11
1400	mtspr	SPRN_SRR1,r12
1401	ld	r9,PACA_EXSLB+EX_R9(r13)
1402	ld	r10,PACA_EXSLB+EX_R10(r13)
1403	ld	r11,PACA_EXSLB+EX_R11(r13)
1404	ld	r12,PACA_EXSLB+EX_R12(r13)
1405	ld	r13,PACA_EXSLB+EX_R13(r13)
1406	rfid
1407	b	.	/* prevent speculative execution */
1408
1409/*
1410 * Space for CPU0's segment table.
1411 *
1412 * On iSeries, the hypervisor must fill in at least one entry before
1413 * we get control (with relocation on).  The address is given to the hv
1414 * as a page number (see xLparMap in lpardata.c), so this must be at a
1415 * fixed address (the linker can't compute (u64)&initial_stab >>
1416 * PAGE_SHIFT).
1417 */
1418	. = STAB0_OFFSET	/* 0x6000 */
1419	.globl initial_stab
1420initial_stab:
1421	.space	4096
1422
1423/*
1424 * Data area reserved for FWNMI option.
1425 * This address (0x7000) is fixed by the RPA.
1426 */
1427	. = 0x7000
1428	.globl fwnmi_data_area
1429fwnmi_data_area:
1430
1431	/* iSeries does not use the FWNMI stuff, so it is safe to put
1432	 * this here, even if we later allow kernels that will boot on
1433	 * both pSeries and iSeries */
1434#ifdef CONFIG_PPC_ISERIES
1435	. = LPARMAP_PHYS
1436#include "lparmap.s"
1437/*
1438 * This ".text" is here for old compilers that generate a trailing
1439 * .note section when compiling .c files to .s
1440 */
1441	.text
1442#endif /* CONFIG_PPC_ISERIES */
1443
1444	. = 0x8000
1445
1446/*
1447 * On pSeries, secondary processors spin in the following code.
1448 * At entry, r3 = this processor's number (physical cpu id)
1449 */
1450_GLOBAL(pSeries_secondary_smp_init)
1451	mr	r24,r3
1452
1453	/* turn on 64-bit mode */
1454	bl	.enable_64b_mode
1455	isync
1456
1457	/* Copy some CPU settings from CPU 0 */
1458	bl	.__restore_cpu_setup
1459
1460	/* Set up a paca value for this processor. Since we have the
1461	 * physical cpu id in r24, we need to search the pacas to find
1462	 * which logical id maps to our physical one.
1463	 */
1464	LOAD_REG_IMMEDIATE(r13, paca)	/* Get base vaddr of paca array	 */
1465	li	r5,0			/* logical cpu id                */
14661:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
1467	cmpw	r6,r24			/* Compare to our id             */
1468	beq	2f
1469	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
1470	addi	r5,r5,1
1471	cmpwi	r5,NR_CPUS
1472	blt	1b
1473
1474	mr	r3,r24			/* not found, copy phys to r3	 */
1475	b	.kexec_wait		/* next kernel might do better	 */
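/*
 * Equivalent C for the search above (a sketch; the field name is
 * assumed from the PACAHWCPUID asm-offset):
 *
 *	for (cpu = 0; cpu < NR_CPUS; cpu++)
 *		if (paca[cpu].hw_cpu_id == hwid)
 *			return &paca[cpu];
 *	kexec_wait(hwid);	// no match: park for a future kernel
 */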
1476
14772:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
1478	/* From now on, r24 is expected to be logical cpuid */
1479	mr	r24,r5
14803:	HMT_LOW
1481	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
1482					/* start.			 */
1483	sync
1484
1485	/* Create a temp kernel stack for use before relocation is on.	*/
1486	ld	r1,PACAEMERGSP(r13)
1487	subi	r1,r1,STACK_FRAME_OVERHEAD
1488
1489	cmpwi	0,r23,0
1490#ifdef CONFIG_SMP
1491	bne	.__secondary_start
1492#endif
1493	b 	3b			/* Loop until told to go	 */
1494
1495#ifdef CONFIG_PPC_ISERIES
1496_STATIC(__start_initialization_iSeries)
1497	/* Clear out the BSS */
1498	LOAD_REG_IMMEDIATE(r11,__bss_stop)
1499	LOAD_REG_IMMEDIATE(r8,__bss_start)
1500	sub	r11,r11,r8		/* bss size			*/
1501	addi	r11,r11,7		/* round up to a doubleword multiple */
1502	rldicl. r11,r11,61,3		/* shift right by 3		*/
1503	beq	4f
1504	addi	r8,r8,-8
1505	li	r0,0
1506	mtctr	r11			/* zero this many doublewords	*/
15073:	stdu	r0,8(r8)
1508	bdnz	3b
15094:
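	/* The loop above amounts to
	 *	memset(__bss_start, 0, __bss_stop - __bss_start)
	 * a doubleword at a time, size rounded up to a multiple of 8. */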
1510	LOAD_REG_IMMEDIATE(r1,init_thread_union)
1511	addi	r1,r1,THREAD_SIZE
1512	li	r0,0
1513	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1514
1515	LOAD_REG_IMMEDIATE(r3,cpu_specs)
1516	LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
1517	li	r5,0
1518	bl	.identify_cpu
1519
1520	LOAD_REG_IMMEDIATE(r2,__toc_start)
1521	addi	r2,r2,0x4000
1522	addi	r2,r2,0x4000
1523
1524	bl	.iSeries_early_setup
1525	bl	.early_setup
1526
1527	/* relocation is on at this point */
1528
1529	b	.start_here_common
1530#endif /* CONFIG_PPC_ISERIES */
1531
1532#ifdef CONFIG_PPC_MULTIPLATFORM
1533
1534_STATIC(__mmu_off)
1535	mfmsr	r3
1536	andi.	r0,r3,MSR_IR|MSR_DR
1537	beqlr
1538	andc	r3,r3,r0
1539	mtspr	SPRN_SRR0,r4
1540	mtspr	SPRN_SRR1,r3
1541	sync
1542	rfid
1543	b	.	/* prevent speculative execution */
1544
1545
1546/*
1547 * Here is our main kernel entry point. We currently support two kinds of
1548 * entries depending on the value of r5.
1549 *
1550 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1551 *                 in r3...r7
1552 *
1553 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1554 *                 DT block, r4 is a physical pointer to the kernel itself
1555 *
1556 */
1557_GLOBAL(__start_initialization_multiplatform)
1558#ifdef CONFIG_PPC_MULTIPLATFORM
1559	/*
1560	 * Are we booted from a PROM OF-type client interface?
1561	 */
1562	cmpldi	cr0,r5,0
1563	bne	.__boot_from_prom		/* yes -> prom */
1564#endif
1565
1566	/* Save parameters */
1567	mr	r31,r3
1568	mr	r30,r4
1569
1570	/* Make sure we are running in 64-bit mode */
1571	bl	.enable_64b_mode
1572
1573	/* Set up some critical 970 SPRs before switching the MMU off */
1574	bl	.__970_cpu_preinit
1575
1576	/* cpu # */
1577	li	r24,0
1578
1579	/* Switch off MMU if not already */
1580	LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
1581	add	r4,r4,r30
1582	bl	.__mmu_off
1583	b	.__after_prom_start
1584
1585#ifdef CONFIG_PPC_MULTIPLATFORM
1586_STATIC(__boot_from_prom)
1587	/* Save parameters */
1588	mr	r31,r3
1589	mr	r30,r4
1590	mr	r29,r5
1591	mr	r28,r6
1592	mr	r27,r7
1593
1594	/*
1595	 * Align the stack to a 16-byte boundary.
1596	 * Depending on the size and layout of the ELF sections in the initial
1597	 * boot binary, the stack pointer will be unaligned on PowerMac
1598	 */
1599	rldicr	r1,r1,0,59
1600
1601	/* Make sure we are running in 64-bit mode */
1602	bl	.enable_64b_mode
1603
1604	/* put a relocation offset into r3 */
1605	bl	.reloc_offset
1606
1607	LOAD_REG_IMMEDIATE(r2,__toc_start)
1608	addi	r2,r2,0x4000
1609	addi	r2,r2,0x4000
1610
1611	/* Relocate the TOC from a virt addr to a real addr */
1612	add	r2,r2,r3
1613
1614	/* Restore parameters */
1615	mr	r3,r31
1616	mr	r4,r30
1617	mr	r5,r29
1618	mr	r6,r28
1619	mr	r7,r27
1620
1621	/* Do all of the interaction with OF client interface */
1622	bl	.prom_init
1623	/* We never return */
1624	trap
1625#endif
1626
1627/*
1628 * At this point, r3 contains the physical address we are running at,
1629 * returned by prom_init()
1630 */
1631_STATIC(__after_prom_start)
1632
1633/*
1634 * We need to run with __start at physical address PHYSICAL_START.
1635 * This will leave some code in the first 256B of
1636 * real memory, which is reserved for software use.
1637 * The remainder of the first page is loaded with the fixed
1638 * interrupt vectors.  The next two pages are filled with
1639 * unknown exception placeholders.
1640 *
1641 * Note: This process overwrites the OF exception vectors.
1642 *	r26 == relocation offset
1643 *	r27 == KERNELBASE
1644 */
1645	bl	.reloc_offset
1646	mr	r26,r3
1647	LOAD_REG_IMMEDIATE(r27, KERNELBASE)
1648
1649	LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)	/* target addr */
1650
1651	// XXX FIXME: Use phys returned by OF (r30)
1652	add	r4,r27,r26 		/* source addr			 */
1653					/* current address of _start	 */
1654					/*   i.e. where we are running	 */
1655					/*	the source addr		 */
1656
1657	LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
1658	sub	r5,r5,r27
1659
1660	li	r6,0x100		/* Start offset, the first 0x100 */
1661					/* bytes were copied earlier.	 */
1662
1663	bl	.copy_and_flush		/* copy the first n bytes	 */
1664					/* this includes the code being	 */
1665					/* executed here.		 */
1666
1667	LOAD_REG_IMMEDIATE(r0, 4f)	/* Jump to the copy of this code */
1668	mtctr	r0			/* that we just made/relocated	 */
1669	bctr
1670
16714:	LOAD_REG_IMMEDIATE(r5,klimit)
1672	add	r5,r5,r26
1673	ld	r5,0(r5)		/* get the value of klimit */
1674	sub	r5,r5,r27
1675	bl	.copy_and_flush		/* copy the rest */
1676	b	.start_here_multiplatform
1677
1678#endif /* CONFIG_PPC_MULTIPLATFORM */
1679
1680/*
1681 * Copy routine used to copy the kernel to start at physical address 0
1682 * and flush and invalidate the caches as needed.
1683 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1684 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1685 *
1686 * Note: this routine *only* clobbers r0, r6 and lr
1687 */
1688_GLOBAL(copy_and_flush)
1689	addi	r5,r5,-8
1690	addi	r6,r6,-8
16914:	li	r0,16			/* Use the least common		*/
1692					/* denominator cache line	*/
1693					/* size.  This results in	*/
1694					/* extra cache line flushes	*/
1695					/* but operation is correct.	*/
1696					/* Can't get cache line size	*/
1697					/* from NACA as it is being	*/
1698					/* moved too.			*/
1699
1700	mtctr	r0			/* put # words/line in ctr	*/
17013:	addi	r6,r6,8			/* copy a cache line		*/
1702	ldx	r0,r6,r4
1703	stdx	r0,r6,r3
1704	bdnz	3b
1705	dcbst	r6,r3			/* write it to memory		*/
1706	sync
1707	icbi	r6,r3			/* flush the icache line	*/
1708	cmpld	0,r6,r5
1709	blt	4b
1710	sync
1711	addi	r5,r5,8
1712	addi	r6,r6,8
1713	blr
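/*
 * C-level sketch of copy_and_flush (illustrative; dcbst/sync/icbi stand
 * for the instructions of the same name): copy 128-byte blocks -- 16
 * doublewords, the "least common denominator" line size above -- then
 * push each block to memory and toss it from the icache so the copied
 * kernel can be executed safely.
 *
 *	for (off = start; off < limit; ) {
 *		for (i = 0; i < 16; i++, off += 8)
 *			*(u64 *)(dest + off) = *(u64 *)(src + off);
 *		dcbst(dest + off - 8);	// write the line to memory
 *		sync();
 *		icbi(dest + off - 8);	// invalidate the icache line
 *	}
 */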
1714
1715	.align	8
1716copy_to_here:
1717
1718#ifdef CONFIG_SMP
1719#ifdef CONFIG_PPC_PMAC
1720/*
1721 * On PowerMac, secondary processors start from the reset vector, which
1722 * is temporarily turned into a call to one of the functions below.
1723 */
1724	.section ".text";
1725	.align 2 ;
1726
1727	.globl	__secondary_start_pmac_0
1728__secondary_start_pmac_0:
1729	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1730	li	r24,0
1731	b	1f
1732	li	r24,1
1733	b	1f
1734	li	r24,2
1735	b	1f
1736	li	r24,3
17371:
1738
1739_GLOBAL(pmac_secondary_start)
1740	/* turn on 64-bit mode */
1741	bl	.enable_64b_mode
1742	isync
1743
1744	/* Copy some CPU settings from CPU 0 */
1745	bl	.__restore_cpu_setup
1746
1747	/* pSeries does this early, though I don't think we really need it */
1748	mfmsr	r3
1749	ori	r3,r3,MSR_RI
1750	mtmsrd	r3			/* RI on */
1751
1752	/* Set up a paca value for this processor. */
1753	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
1754	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
1755	add	r13,r13,r4		/* for this processor.		*/
1756	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
1757
1758	/* Create a temp kernel stack for use before relocation is on.	*/
1759	ld	r1,PACAEMERGSP(r13)
1760	subi	r1,r1,STACK_FRAME_OVERHEAD
1761
1762	b	.__secondary_start
1763
1764#endif /* CONFIG_PPC_PMAC */
1765
1766/*
1767 * This function is called after the master CPU has released the
1768 * secondary processors.  The execution environment is relocation off.
1769 * The paca for this processor has the following fields initialized at
1770 * this point:
1771 *   1. Processor number
1772 *   2. Segment table pointer (virtual address)
1773 * On entry the following are set:
1774 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
1775 *   r24   = cpu# (in Linux terms)
1776 *   r13   = paca virtual address
1777 *   SPRG3 = paca virtual address
1778 */
1779_GLOBAL(__secondary_start)
1780	/* Set thread priority to MEDIUM */
1781	HMT_MEDIUM
1782
1783	/* Load TOC */
1784	ld	r2,PACATOC(r13)
1785
1786	/* Do early setup for that CPU (stab, slb, hash table pointer) */
1787	bl	.early_setup_secondary
1788
1789	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1790	LOAD_REG_ADDR(r3, current_set)
1791	sldi	r28,r24,3		/* get current_set[cpu#]	 */
1792	ldx	r1,r3,r28
1793	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1794	std	r1,PACAKSAVE(r13)
1795
1796	/* Clear backchain so we get nice backtraces */
1797	li	r7,0
1798	mtlr	r7
1799
1800	/* enable MMU and jump to start_secondary */
1801	LOAD_REG_ADDR(r3, .start_secondary_prolog)
1802	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1803#ifdef DO_SOFT_DISABLE
1804	ori	r4,r4,MSR_EE
1805#endif
1806	mtspr	SPRN_SRR0,r3
1807	mtspr	SPRN_SRR1,r4
1808	rfid
1809	b	.	/* prevent speculative execution */
1810
1811/*
1812 * Running with relocation on at this point.  All we want to do is
1813 * zero the stack back-chain pointer before going into C code.
1814 */
1815_GLOBAL(start_secondary_prolog)
1816	li	r3,0
1817	std	r3,0(r1)		/* Zero the stack frame pointer	*/
1818	bl	.start_secondary
1819	b	.
1820#endif
1821
1822/*
1823 * This subroutine clobbers r11 and r12
1824 */
1825_GLOBAL(enable_64b_mode)
1826	mfmsr	r11			/* grab the current MSR */
1827	li	r12,1
1828	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1829	or	r11,r11,r12
1830	li	r12,1
1831	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1832	or	r11,r11,r12
1833	mtmsrd	r11
1834	isync
1835	blr
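	/* C equivalent (sketch):
	 *	mtmsrd(mfmsr() | (1UL << MSR_SF_LG) | (1UL << MSR_ISF_LG));
	 *	isync();
	 * i.e. turn on 64-bit mode (SF) and 64-bit interrupts (ISF). */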
1836
1837#ifdef CONFIG_PPC_MULTIPLATFORM
1838/*
1839 * This is where the main kernel code starts.
1840 */
1841_STATIC(start_here_multiplatform)
1842	/* get a new offset, now that the kernel has moved. */
1843	bl	.reloc_offset
1844	mr	r26,r3
1845
1846	/* Clear out the BSS. It may have been done in prom_init
1847	 * already, but that's irrelevant since prom_init will soon
1848	 * be detached from the kernel completely. Besides, we need
1849	 * to clear it now for kexec-style entry.
1850	 */
1851	LOAD_REG_IMMEDIATE(r11,__bss_stop)
1852	LOAD_REG_IMMEDIATE(r8,__bss_start)
1853	sub	r11,r11,r8		/* bss size			*/
1854	addi	r11,r11,7		/* round up to a doubleword multiple */
1855	rldicl. r11,r11,61,3		/* shift right by 3		*/
1856	beq	4f
1857	addi	r8,r8,-8
1858	li	r0,0
1859	mtctr	r11			/* zero this many doublewords	*/
18603:	stdu	r0,8(r8)
1861	bdnz	3b
18624:
1863
1864	mfmsr	r6
1865	ori	r6,r6,MSR_RI
1866	mtmsrd	r6			/* RI on */
1867
1868	/* The following gets the stack and TOC set up with the regs  */
1869	/* pointing to the real (physical) addr of the kernel stack.  */
1870	/* This is all done to support the C function call below,     */
1871	/* which sets up the htab; we have relocated the kernel but   */
1872	/* are still running in real mode. */
1873
1874	LOAD_REG_IMMEDIATE(r3,init_thread_union)
1875	add	r3,r3,r26
1876
1877	/* set up a stack pointer (physical address) */
1878	addi	r1,r3,THREAD_SIZE
1879	li	r0,0
1880	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1881
1882	/* set up the TOC (physical address) */
1883	LOAD_REG_IMMEDIATE(r2,__toc_start)
1884	addi	r2,r2,0x4000
1885	addi	r2,r2,0x4000
1886	add	r2,r2,r26
1887
1888	LOAD_REG_IMMEDIATE(r3, cpu_specs)
1889	add	r3,r3,r26
1890	LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
1891	add	r4,r4,r26
1892	mr	r5,r26
1893	bl	.identify_cpu
1894
1895	/* Save some low-level config HIDs of CPU0 to be copied to
1896	 * other CPUs later on, or used for suspend/resume
1897	 */
1898	bl	.__save_cpu_setup
1899	sync
1900
1901	/* Do very early kernel initializations, including initial hash table,
1902	 * stab and slb setup before we turn on relocation.	*/
1903
1904	/* Restore parameters passed from prom_init/kexec */
1905	mr	r3,r31
1906	bl	.early_setup
1907
1908	LOAD_REG_IMMEDIATE(r3, .start_here_common)
1909	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1910	mtspr	SPRN_SRR0,r3
1911	mtspr	SPRN_SRR1,r4
1912	rfid
1913	b	.	/* prevent speculative execution */
1914#endif /* CONFIG_PPC_MULTIPLATFORM */
1915
1916	/* This is where all platforms converge execution */
1917_STATIC(start_here_common)
1918	/* relocation is on at this point */
1919
1920	/* The following code sets up the SP and TOC now that we are */
1921	/* running with translation enabled. */
1922
1923	LOAD_REG_IMMEDIATE(r3,init_thread_union)
1924
1925	/* set up the stack */
1926	addi	r1,r3,THREAD_SIZE
1927	li	r0,0
1928	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1929
1930	/* Apply the CPU-specific fixups (nop out sections not relevant
1931	 * to this CPU)
1932	 */
1933	li	r3,0
1934	bl	.do_cpu_ftr_fixups
1935
1936	LOAD_REG_IMMEDIATE(r26, boot_cpuid)
1937	lwz	r26,0(r26)
1938
1939	LOAD_REG_IMMEDIATE(r24, paca)	/* Get base vaddr of paca array  */
1940	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
1941	add	r13,r13,r24		/* for this processor.		 */
1942	mtspr	SPRN_SPRG3,r13
1943
1944	/* ptr to current */
1945	LOAD_REG_IMMEDIATE(r4, init_task)
1946	std	r4,PACACURRENT(r13)
1947
1948	/* Load the TOC */
1949	ld	r2,PACATOC(r13)
1950	std	r1,PACAKSAVE(r13)
1951
1952	bl	.setup_system
1953
1954	/* Load up the kernel context */
19555:
1956#ifdef DO_SOFT_DISABLE
1957	li	r5,0
1958	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
1959	mfmsr	r5
1960	ori	r5,r5,MSR_EE		/* Hard Enabled */
1961	mtmsrd	r5
1962#endif
1963
1964	bl .start_kernel
1965
1966	/* Not reached */
1967	BUG_OPCODE
1968
1969/* Put the paca pointer into r13 and SPRG3 */
1970_GLOBAL(setup_boot_paca)
1971	LOAD_REG_IMMEDIATE(r3, boot_cpuid)
1972	lwz	r3,0(r3)
1973	LOAD_REG_IMMEDIATE(r4, paca) 	/* Get base vaddr of paca array	 */
1974	mulli	r3,r3,PACA_SIZE		/* Calculate vaddr of right paca */
1975	add	r13,r3,r4		/* for this processor.		 */
1976	mtspr	SPRN_SPRG3,r13
1977
1978	blr
1979
1980/*
1981 * We put a few things here that have to be page-aligned.
1982 * This stuff goes at the beginning of the bss, which is page-aligned.
1983 */
1984	.section ".bss"
1985
1986	.align	PAGE_SHIFT
1987
1988	.globl	empty_zero_page
1989empty_zero_page:
1990	.space	PAGE_SIZE
1991
1992	.globl	swapper_pg_dir
1993swapper_pg_dir:
1994	.space	PAGE_SIZE
1995
1996/*
1997 * This space gets a copy of optional info passed to us by the bootstrap.
1998 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1999 */
2000	.globl	cmd_line
2001cmd_line:
2002	.space	COMMAND_LINE_SIZE
2003