/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 *   SPRG Usage
 *
 *   Register	Definition
 *
 *   SPRG0	reserved for hypervisor
 *   SPRG1	temp - used to save gpr
 *   SPRG2	temp - used to save gpr
 *   SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl  _stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong mschunks_map-KERNELBASE
	.llong 0	/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif

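/*
 * Illustrative C sketch of the spin-loop handshake above (an
 * approximation for readers, not kernel code; secondary_smp_init
 * stands in for .pSeries_secondary_smp_init):
 *
 *	extern volatile u64 __secondary_hold_spinloop;
 *	extern volatile u64 __secondary_hold_acknowledge;
 *
 *	void secondary_hold(u64 cpu)
 *	{
 *		__secondary_hold_acknowledge = cpu;	// tell master we're here
 *		while (__secondary_hold_spinloop != 1)
 *			;				// spin until released
 *		secondary_smp_init(cpu);
 *	}
 */
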
/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_R3		64
#define EX_LR		72

#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SPRN_SRR0,r12;						\
	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
	mtspr	SPRN_SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
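
/*
 * What the rfid at the end of the prolog does, roughly, in C terms
 * (a sketch of the architectural effect, not real code): SRR0/SRR1
 * have been loaded with the handler's virtual address and an MSR
 * value with IR/DR/RI set, so
 *
 *	pc  = SRR0;	// jump to the handler's virtual address
 *	msr = SRR1;	// ...with instruction/data relocation now on
 *
 * i.e. the prolog uses the return-from-interrupt machinery to switch
 * from real mode to virtual mode while branching.
 */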

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				   \
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							   \
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
	bge-	cr1,bad_stack;		/* abort if it is		*/ \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					   \
	std	r9,GPR9(r1);						   \
	std	r10,GPR10(r1);						   \
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					   \
	ld	r11,area+EX_R13(r13);					   \
	std	r9,GPR11(r1);						   \
	std	r10,GPR12(r1);						   \
	std	r11,GPR13(r1);						   \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	mflr	r9;			/* save LR in stackframe	*/ \
	std	r9,_LINK(r1);						   \
	mfctr	r10;			/* save CTR in stackframe	*/ \
	std	r10,_CTR(r1);						   \
	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
	std	r11,_XER(r1);						   \
	li	r9,(n)+1;						   \
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							   \
	ld	r11,exception_marker@toc(r2);				   \
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/

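/*
 * A C-flavoured sketch of what EXCEPTION_PROLOG_COMMON leaves on the
 * kernel stack (illustrative only; see struct pt_regs for the real
 * layout).  Note the exception_marker TOC value above is the ASCII
 * string "regshere".
 *
 *	frame->backchain = old_r1;		// std r10,0(r1)
 *	frame->gpr[0..13] = saved GPRs;		// r9-r13 come from the paca
 *	frame->nip  = SRR0;  frame->msr  = SRR1;
 *	frame->ccr  = CR;    frame->link = LR;
 *	frame->ctr  = CTR;   frame->xer  = XER;
 *	frame->trap = n + 1; frame->result = 0;
 *	marker_slot = exception_marker;		// "regshere"
 */
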
/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
	.globl label##_iSeries;						\
label##_iSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
	RUNLATCH_ON(r13);						\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
	lbz	r10,PACAPROCENABLED(r13);				\
	cmpwi	0,r10,0;						\
	beq-	label##_iSeries_masked;					\
	EXCEPTION_PROLOG_ISERIES_2;					\
	b	label##_common;						\

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif

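/*
 * Soft-disable in a nutshell (C sketch of what DISABLE_INTS does when
 * DO_SOFT_DISABLE is set; the names mirror the asm, the code is
 * illustrative only):
 *
 *	regs->softe = paca->proc_enabled;  // remember old soft state
 *	paca->proc_enabled = 0;            // soft-disable: handlers see
 *	                                   // "interrupts off"
 *	msr |= MSR_EE;                     // but hard-ENABLE in the MSR so
 *	                                   // the hypervisor stays responsive
 *
 * Interrupts arriving while soft-disabled are noted by the masked
 * handlers below and replayed when the soft state is restored.
 */
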
#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRN_SPRG3
	mfspr	r11,SPRN_SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SPRN_SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * We have some room here; we use it to put the pSeries SLB miss user
 * trampoline code so it's reasonably far away from slb_miss_user_common,
 * to avoid problems with rfid.
 *
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
#ifdef __DISABLED__
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#ifdef CONFIG_PPC_ISERIES
/***  ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl	data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)
	mfcr	r9
#ifdef __DISABLED__
	cmpdi	r3,0
	bge	slb_miss_user_iseries
#endif
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13);
	b	.slb_miss_realmode

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl	instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
	std	r9,PACA_EXSLB+EX_R9(r13)
	mfcr	r9
#ifdef __DISABLED__
	cmpdi	r3,0
	bge	.slb_miss_user_iseries
#endif
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13);
	b	.slb_miss_realmode

#ifdef __DISABLED__
slb_miss_user_iseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	EXCEPTION_PROLOG_ISERIES_2
	b	slb_miss_user_common
#endif

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRN_SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRN_SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* We have been told to go */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	 * which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRN_SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page	 	/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

/*
 * Here is the common SLB miss handler that is used when going to virtual
 * mode for SLB misses. It is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	.handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
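/*
 * The lazy switch done below on UP, as a C sketch (illustrative only;
 * see giveup_altivec for the real SMP code path):
 *
 *	if (last_task_used_altivec && last_task_used_altivec != current) {
 *		save_vrs(&last_task_used_altivec->thread);  // spill VR0-31, VSCR
 *		last_task_used_altivec->thread.regs->msr &= ~MSR_VEC;
 *	}
 *	last_task_used_altivec = current;  // UP only; SMP saves at switch_to
 */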
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

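	/*
	 * The access-flag computation above, restated as C (a sketch of
	 * the same logic, not kernel code):
	 *
	 *	access  = (dsisr & DSISR_STORE) ? _PAGE_RW : 0;
	 *	if ((msr & MSR_PR) || !(ea >> 63))  // user mode, or user address
	 *		access |= _PAGE_USER;
	 *	access |= _PAGE_PRESENT;
	 *	if (trap == 0x400)                  // instruction fault
	 *		access |= _PAGE_EXEC;
	 */
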
	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return   /* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number;  OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid			*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

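/*
 * The STE group selection in do_stab_bolted, in C terms (illustrative
 * sketch of the rldimi above; 8 entries * 16 bytes = 128-byte groups):
 *
 *	ste_group = stab_base | ((esid & 0x1f) << 7);
 *
 * and the random castout picks a victim at offset
 * ((mftb() << 4) & 0x70) | 0x10 within the group, i.e. one of
 * entries 1..7, so entry 0 is never cast out.
 */
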
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_PHYS_ADDR	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096

/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
#include "lparmap.s"
/*
 * This ".text" is here for old compilers that generate a trailing
 * .note section when compiling .c files to .s
 */
	.text
#endif /* CONFIG_PPC_ISERIES */

	. = 0x8000

/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

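/*
 * The paca search above, as a C sketch (illustrative only):
 *
 *	for (i = 0; i < NR_CPUS; i++)
 *		if (paca[i].hw_cpu_id == phys_id)
 *			break;			// found our logical id
 *	if (i == NR_CPUS)
 *		kexec_wait(phys_id);		// no match: park the cpu
 */
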
2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
#ifdef CONFIG_SMP
	bne	.__secondary_start
#endif
	b	3b			/* Loop until told to go	 */

#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	LOADADDR(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
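	/* The two addi's add 0x8000 in total: the TOC pointer is biased
	 * to the middle of the TOC so 16-bit signed offsets reach both
	 * halves, and a single addi can only add up to 0x7fff.
	 */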

	bl	.iSeries_early_setup
	bl	.early_setup

	/* relocation is on at this point */

	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_MULTIPLATFORM

_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */


/*
 * Here is our main kernel entry point. We currently support two kinds
 * of entries, depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom		/* yes -> prom */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	bl	.__970_cpu_preinit

	/* cpu # */
	li	r24,0

	/* Switch off MMU if not already */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30
	bl	.__mmu_off
	b	.__after_prom_start

_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	add	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256 bytes of
 * real memory, which is reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	add	r4,r27,r26 		/* source addr			 */
					/* current address of _start	 */
					/*   i.e. where we are running	 */
					/*	the source addr		 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	add	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_multiplatform

#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

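/*
 * copy_and_flush in rough C terms (illustrative sketch; the asm copies
 * 16 doublewords, i.e. 128 bytes, between flushes):
 *
 *	void copy_and_flush(char *dst, char *src, long limit, long off)
 *	{
 *		for (; off < limit; off += 128) {
 *			memcpy(dst + off, src + off, 128);
 *			dcbst(dst + off);   // push the copied line to memory
 *			sync();
 *			icbi(dst + off);    // discard stale icache copy
 *		}
 *	}
 */
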
.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does that early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Load TOC */
	ld	r2,PACATOC(r13)

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
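
/*
 * In MSR terms the routine above is simply (C-flavoured sketch):
 *
 *	msr |= MSR_SF | MSR_ISF;   // 64-bit mode, 64-bit interrupt mode
 *	mtmsrd(msr); isync();
 */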

#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS. It may have been done in prom_init
	 * already, but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:
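	/* The loop above is just (C sketch):
	 *	memset(__bss_start, 0, __bss_stop - __bss_start);
	 * rounded up to a whole number of doublewords.
	 */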

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	add	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	add	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	add	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	add	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	add	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	add	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRN_SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRN_SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl .start_kernel

_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,SPRN_PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,SPRN_PIR
	mfspr	r8,SPRN_PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	SPRN_NIADORM, r4
	mfspr	r4, SPRN_MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	SPRN_MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	SPRN_TSC, r4
	li	r4,0x1f4
	mtspr	SPRN_TST, r4
	mfspr	r4, SPRN_HID0
	ori	r4, r4, 0x1
	mtspr	SPRN_HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE