xref: /linux/arch/powerpc/kernel/head_64.S (revision 87c2ce3b9305b9b723faeedf6e32ef703ec9b33a)
1/*
2 *  arch/ppc64/kernel/head.S
3 *
4 *  PowerPC version
5 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 *  Adapted for Power Macintosh by Paul Mackerras.
10 *  Low-level exception handlers and MMU support
11 *  rewritten by Paul Mackerras.
12 *    Copyright (C) 1996 Paul Mackerras.
13 *
14 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 *  This file contains the low-level support and setup for the
18 *  PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 *  This program is free software; you can redistribute it and/or
21 *  modify it under the terms of the GNU General Public License
22 *  as published by the Free Software Foundation; either version
23 *  2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/reg.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/bug.h>
34#include <asm/cputable.h>
35#include <asm/setup.h>
36#include <asm/hvcall.h>
37#include <asm/iseries/lpar_map.h>
38#include <asm/thread_info.h>
39
40#ifdef CONFIG_PPC_ISERIES
41#define DO_SOFT_DISABLE
42#endif
43
44/*
45 * We lay out physical memory as follows:
46 * 0x0000 - 0x00ff : Secondary processor spin code
47 * 0x0100 - 0x2fff : pSeries Interrupt prologs
48 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
49 * 0x6000 - 0x6fff : Initial (CPU0) segment table
50 * 0x7000 - 0x7fff : FWNMI data area
51 * 0x8000 -        : Early init and support code
52 */
53
54/*
55 *   SPRG Usage
56 *
57 *   Register	Definition
58 *
59 *   SPRG0	reserved for hypervisor
60 *   SPRG1	temp - used to save gpr
61 *   SPRG2	temp - used to save gpr
62 *   SPRG3	virt addr of paca
63 */
64
65/*
66 * Entering into this code we make the following assumptions:
67 *  For pSeries:
68 *   1. The MMU is off & open firmware is running in real mode.
69 *   2. The kernel is entered at __start
70 *
71 *  For iSeries:
72 *   1. The MMU is on (as it always is for iSeries)
73 *   2. The kernel is entered at system_reset_iSeries
74 */
75
76	.text
77	.globl  _stext
78_stext:
79#ifdef CONFIG_PPC_MULTIPLATFORM
80_GLOBAL(__start)
81	/* NOP this out unconditionally */
82BEGIN_FTR_SECTION
83	b	.__start_initialization_multiplatform
84END_FTR_SECTION(0, 1)
85#endif /* CONFIG_PPC_MULTIPLATFORM */
86
87	/* Catch branch to 0 in real mode */
88	trap
89
90#ifdef CONFIG_PPC_ISERIES
91	/*
92	 * At offset 0x20, there is a pointer to iSeries LPAR data.
93	 * This is required by the hypervisor
94	 */
95	. = 0x20
96	.llong hvReleaseData-KERNELBASE
97
98	/*
99	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
100	 * array (used by the iSeries LPAR debugger to do translation
101	 * between physical addresses and absolute addresses) and
102	 * to the pidhash table (also used by the debugger)
103	 */
104	.llong mschunks_map-KERNELBASE
105	.llong 0	/* pidhash-KERNELBASE SFRXXX */
106
107	/* Offset 0x38 - Pointer to start of embedded System.map */
108	.globl	embedded_sysmap_start
109embedded_sysmap_start:
110	.llong	0
111	/* Offset 0x40 - Pointer to end of embedded System.map */
112	.globl	embedded_sysmap_end
113embedded_sysmap_end:
114	.llong	0
115
116#endif /* CONFIG_PPC_ISERIES */
117
118	/* Secondary processors spin on this value until it goes to 1. */
119	.globl  __secondary_hold_spinloop
120__secondary_hold_spinloop:
121	.llong	0x0
122
123	/* Secondary processors write this value with their cpu # */
124	/* after they enter the spin loop immediately below.	  */
125	.globl	__secondary_hold_acknowledge
126__secondary_hold_acknowledge:
127	.llong	0x0
128
129	. = 0x60
130/*
131 * The following code is used on pSeries to hold secondary processors
132 * in a spin loop after they have been freed from OpenFirmware, but
133 * before the bulk of the kernel has been relocated.  This code
134 * is relocated to physical address 0x60 before prom_init is run.
135 * All of it must fit below the first exception vector at 0x100.
136 */
137_GLOBAL(__secondary_hold)
138	mfmsr	r24
139	ori	r24,r24,MSR_RI
140	mtmsrd	r24			/* RI on */
141
142	/* Grab our linux cpu number */
143	mr	r24,r3
144
145	/* Tell the master cpu we're here */
146	/* Relocation is off & we are located at an address less */
147	/* than 0x100, so we only need to grab the low order offset. */
148	std	r24,__secondary_hold_acknowledge@l(0)
149	sync
150
151	/* All secondary cpus wait here until told to start. */
152100:	ld	r4,__secondary_hold_spinloop@l(0)
153	cmpdi	0,r4,1
154	bne	100b
155
156#ifdef CONFIG_HMT
157	LOADADDR(r4, .hmt_init)
158	mtctr	r4
159	bctr
160#else
161#ifdef CONFIG_SMP
162	LOADADDR(r4, .pSeries_secondary_smp_init)
163	mtctr	r4
164	mr	r3,r24
165	bctr
166#else
167	BUG_OPCODE
168#endif
169#endif
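/*
 * Release protocol: each secondary writes its cpu number to
 * __secondary_hold_acknowledge and then polls __secondary_hold_spinloop.
 * Once the kernel image is in place, the boot cpu stores 1 to the
 * spinloop word and the secondaries branch (via an absolute address in
 * ctr) to .pSeries_secondary_smp_init or .hmt_init, with the cpu number
 * in r24 (copied to r3 for the SMP path).
 */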
170
171/* This value is used to mark exception frames on the stack. */
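/*
 * The TC value below is the 64-bit constant 0x7265677368657265, i.e. the
 * ASCII string "regshere".  EXCEPTION_PROLOG_COMMON stores it at offset
 * STACK_FRAME_OVERHEAD-16 in every exception frame, so stack-walking code
 * can tell exception frames apart from ordinary C stack frames.
 */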
172	.section ".toc","aw"
173exception_marker:
174	.tc	ID_72656773_68657265[TC],0x7265677368657265
175	.text
176
177/*
178 * The following macros define the code that appears as
179 * the prologue to each of the exception handlers.  They
180 * are split into two parts to allow a single kernel binary
181 * to be used for pSeries and iSeries.
182 * LOL.  One day... - paulus
183 */
184
185/*
186 * We make as much of the exception code common between native
187 * exception handlers (including pSeries LPAR) and iSeries LPAR
188 * implementations as possible.
189 */
190
191/*
192 * This is the start of the interrupt handlers for pSeries
193 * This code runs with relocation off.
194 */
195#define EX_R9		0
196#define EX_R10		8
197#define EX_R11		16
198#define EX_R12		24
199#define EX_R13		32
200#define EX_SRR0		40
201#define EX_DAR		48
202#define EX_DSISR	56
203#define EX_CCR		60
204#define EX_R3		64
205#define EX_LR		72
206
207/*
208 * We're short on space and time in the exception prolog, so we can't use
209 * the normal LOADADDR macro. Normally we just need the low halfword of the
210 * address, but for Kdump we need the whole low word.
211 */
212#ifdef CONFIG_CRASH_DUMP
213#define LOAD_HANDLER(reg, label)					\
214	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
215	ori	reg,reg,(label)@l;	/* .. and the rest */
216#else
217#define LOAD_HANDLER(reg, label)					\
218	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
219#endif
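/*
 * A single ori is normally enough because the common handlers linked
 * below all sit within the first 64kB of the kernel image, so label@l is
 * the complete offset from the base of the kernel.  A kdump
 * (CONFIG_CRASH_DUMP) kernel runs at a non-zero offset, so the offset no
 * longer fits in 16 bits and the whole low word (@h and @l) is needed.
 * The high 32 bits of the handler address come from the paca pointer;
 * see EXCEPTION_PROLOG_PSERIES below.
 */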
220
221#define EXCEPTION_PROLOG_PSERIES(area, label)				\
222	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
223	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
224	std	r10,area+EX_R10(r13);					\
225	std	r11,area+EX_R11(r13);					\
226	std	r12,area+EX_R12(r13);					\
227	mfspr	r9,SPRN_SPRG1;						\
228	std	r9,area+EX_R13(r13);					\
229	mfcr	r9;							\
230	clrrdi	r12,r13,32;		/* get high part of &label */	\
231	mfmsr	r10;							\
232	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
233	LOAD_HANDLER(r12,label)						\
234	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
235	mtspr	SPRN_SRR0,r12;						\
236	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
237	mtspr	SPRN_SRR1,r10;						\
238	rfid;								\
239	b	.	/* prevent speculative execution */
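/*
 * The pSeries prolog runs in real mode with only SPRG1 (holding the old
 * r13) and the paca save area to play with: r9-r13 are stashed in the
 * paca, the handler's virtual address is built in r12 (high half taken
 * from the paca pointer, low part via LOAD_HANDLER), and the rfid both
 * turns relocation (IR/DR) back on and branches to the virtual mode
 * common handler, with the original SRR0/SRR1 carried in r11/r12.
 */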
240
241/*
242 * This is the start of the interrupt handlers for iSeries
243 * This code runs with relocation on.
244 */
245#define EXCEPTION_PROLOG_ISERIES_1(area)				\
246	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
247	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
248	std	r10,area+EX_R10(r13);					\
249	std	r11,area+EX_R11(r13);					\
250	std	r12,area+EX_R12(r13);					\
251	mfspr	r9,SPRN_SPRG1;						\
252	std	r9,area+EX_R13(r13);					\
253	mfcr	r9
254
255#define EXCEPTION_PROLOG_ISERIES_2					\
256	mfmsr	r10;							\
257	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
258	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
259	ori	r10,r10,MSR_RI;						\
260	mtmsrd	r10,1
261
262/*
263 * The common exception prolog is used for all exceptions except a few,
264 * such as a segment miss on a kernel address.  We have to be prepared
265 * to take another exception from the point where we first touch the
266 * kernel stack onwards.
267 *
268 * On entry r13 points to the paca, r9-r13 are saved in the paca,
269 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
270 * SRR1, and relocation is on.
271 */
272#define EXCEPTION_PROLOG_COMMON(n, area)				   \
273	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
274	mr	r10,r1;			/* Save r1			*/ \
275	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
276	beq-	1f;							   \
277	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
2781:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
279	bge-	cr1,bad_stack;		/* abort if it is		*/ \
280	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
281	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
282	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
283	std	r10,0(r1);		/* make stack chain pointer	*/ \
284	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
285	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
286	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
287	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
288	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
289	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
290	ld	r10,area+EX_R10(r13);					   \
291	std	r9,GPR9(r1);						   \
292	std	r10,GPR10(r1);						   \
293	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
294	ld	r10,area+EX_R12(r13);					   \
295	ld	r11,area+EX_R13(r13);					   \
296	std	r9,GPR11(r1);						   \
297	std	r10,GPR12(r1);						   \
298	std	r11,GPR13(r1);						   \
299	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
300	mflr	r9;			/* save LR in stackframe	*/ \
301	std	r9,_LINK(r1);						   \
302	mfctr	r10;			/* save CTR in stackframe	*/ \
303	std	r10,_CTR(r1);						   \
304	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
305	std	r11,_XER(r1);						   \
306	li	r9,(n)+1;						   \
307	std	r9,_TRAP(r1);		/* set trap number		*/ \
308	li	r10,0;							   \
309	ld	r11,exception_marker@toc(r2);				   \
310	std	r10,RESULT(r1);		/* clear regs->result		*/ \
311	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
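/*
 * This builds a full INT_FRAME_SIZE (pt_regs) frame on the kernel stack:
 * the volatile GPRs, CR, LR, CTR, XER, SRR0/SRR1 (as NIP/MSR), the trap
 * number and the exception_marker.  The trap number is stored as (n)+1;
 * the low bit being set means the non-volatile GPRs have not been saved
 * yet, and .save_nvgprs saves them and clears that bit for handlers that
 * need the full register set.
 */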
312
313/*
314 * Exception vectors.
315 */
316#define STD_EXCEPTION_PSERIES(n, label)			\
317	. = n;						\
318	.globl label##_pSeries;				\
319label##_pSeries:					\
320	HMT_MEDIUM;					\
321	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
322	RUNLATCH_ON(r13);				\
323	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
324
325#define STD_EXCEPTION_ISERIES(n, label, area)		\
326	.globl label##_iSeries;				\
327label##_iSeries:					\
328	HMT_MEDIUM;					\
329	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
330	RUNLATCH_ON(r13);				\
331	EXCEPTION_PROLOG_ISERIES_1(area);		\
332	EXCEPTION_PROLOG_ISERIES_2;			\
333	b	label##_common
334
335#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
336	.globl label##_iSeries;						\
337label##_iSeries:							\
338	HMT_MEDIUM;							\
339	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
340	RUNLATCH_ON(r13);						\
341	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
342	lbz	r10,PACAPROCENABLED(r13);				\
343	cmpwi	0,r10,0;						\
344	beq-	label##_iSeries_masked;					\
345	EXCEPTION_PROLOG_ISERIES_2;					\
346	b	label##_common;						\
347
348#ifdef DO_SOFT_DISABLE
349#define DISABLE_INTS				\
350	lbz	r10,PACAPROCENABLED(r13);	\
351	li	r11,0;				\
352	std	r10,SOFTE(r1);			\
353	mfmsr	r10;				\
354	stb	r11,PACAPROCENABLED(r13);	\
355	ori	r10,r10,MSR_EE;			\
356	mtmsrd	r10,1
357
358#define ENABLE_INTS				\
359	lbz	r10,PACAPROCENABLED(r13);	\
360	mfmsr	r11;				\
361	std	r10,SOFTE(r1);			\
362	ori	r11,r11,MSR_EE;			\
363	mtmsrd	r11,1
364
365#else	/* hard enable/disable interrupts */
366#define DISABLE_INTS
367
368#define ENABLE_INTS				\
369	ld	r12,_MSR(r1);			\
370	mfmsr	r11;				\
371	rlwimi	r11,r12,0,MSR_EE;		\
372	mtmsrd	r11,1
373
374#endif
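/*
 * With DO_SOFT_DISABLE (iSeries), DISABLE_INTS only soft-disables: the
 * previous soft-enable state is saved in the SOFTE word of the frame and
 * PACAPROCENABLED is cleared, while EE is left hard-enabled so that a
 * shared processor still takes the interrupt.  Interrupts that arrive
 * while soft-disabled are noted by the *_iSeries_masked stubs further
 * down so they can be handled once interrupts are soft-enabled again.
 */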
375
376#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
377	.align	7;					\
378	.globl label##_common;				\
379label##_common:						\
380	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
381	DISABLE_INTS;					\
382	bl	.save_nvgprs;				\
383	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
384	bl	hdlr;					\
385	b	.ret_from_except
386
387#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
388	.align	7;					\
389	.globl label##_common;				\
390label##_common:						\
391	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
392	DISABLE_INTS;					\
393	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
394	bl	hdlr;					\
395	b	.ret_from_except_lite
396
397/*
398 * Start of pSeries system interrupt routines
399 */
400	. = 0x100
401	.globl __start_interrupts
402__start_interrupts:
403
404	STD_EXCEPTION_PSERIES(0x100, system_reset)
405
406	. = 0x200
407_machine_check_pSeries:
408	HMT_MEDIUM
409	mtspr	SPRN_SPRG1,r13		/* save r13 */
410	RUNLATCH_ON(r13)
411	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
412
413	. = 0x300
414	.globl data_access_pSeries
415data_access_pSeries:
416	HMT_MEDIUM
417	mtspr	SPRN_SPRG1,r13
418BEGIN_FTR_SECTION
419	mtspr	SPRN_SPRG2,r12
420	mfspr	r13,SPRN_DAR
421	mfspr	r12,SPRN_DSISR
422	srdi	r13,r13,60
423	rlwimi	r13,r12,16,0x20
424	mfcr	r12
425	cmpwi	r13,0x2c
426	beq	.do_stab_bolted_pSeries
427	mtcrf	0x80,r12
428	mfspr	r12,SPRN_SPRG2
429END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
430	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
431
432	. = 0x380
433	.globl data_access_slb_pSeries
434data_access_slb_pSeries:
435	HMT_MEDIUM
436	mtspr	SPRN_SPRG1,r13
437	RUNLATCH_ON(r13)
438	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
439	std	r3,PACA_EXSLB+EX_R3(r13)
440	mfspr	r3,SPRN_DAR
441	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
442	mfcr	r9
443#ifdef __DISABLED__
444	/* Keep that around for when we re-implement dynamic VSIDs */
445	cmpdi	r3,0
446	bge	slb_miss_user_pseries
447#endif /* __DISABLED__ */
448	std	r10,PACA_EXSLB+EX_R10(r13)
449	std	r11,PACA_EXSLB+EX_R11(r13)
450	std	r12,PACA_EXSLB+EX_R12(r13)
451	mfspr	r10,SPRN_SPRG1
452	std	r10,PACA_EXSLB+EX_R13(r13)
453	mfspr	r12,SPRN_SRR1		/* and SRR1 */
454	b	.slb_miss_realmode	/* Rel. branch works in real mode */
455
456	STD_EXCEPTION_PSERIES(0x400, instruction_access)
457
458	. = 0x480
459	.globl instruction_access_slb_pSeries
460instruction_access_slb_pSeries:
461	HMT_MEDIUM
462	mtspr	SPRN_SPRG1,r13
463	RUNLATCH_ON(r13)
464	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
465	std	r3,PACA_EXSLB+EX_R3(r13)
466	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
467	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
468	mfcr	r9
469#ifdef __DISABLED__
470	/* Keep that around for when we re-implement dynamic VSIDs */
471	cmpdi	r3,0
472	bge	slb_miss_user_pseries
473#endif /* __DISABLED__ */
474	std	r10,PACA_EXSLB+EX_R10(r13)
475	std	r11,PACA_EXSLB+EX_R11(r13)
476	std	r12,PACA_EXSLB+EX_R12(r13)
477	mfspr	r10,SPRN_SPRG1
478	std	r10,PACA_EXSLB+EX_R13(r13)
479	mfspr	r12,SPRN_SRR1		/* and SRR1 */
480	b	.slb_miss_realmode	/* Rel. branch works in real mode */
481
482	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
483	STD_EXCEPTION_PSERIES(0x600, alignment)
484	STD_EXCEPTION_PSERIES(0x700, program_check)
485	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
486	STD_EXCEPTION_PSERIES(0x900, decrementer)
487	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
488	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
489
490	. = 0xc00
491	.globl	system_call_pSeries
492system_call_pSeries:
493	HMT_MEDIUM
494	RUNLATCH_ON(r9)
495	mr	r9,r13
496	mfmsr	r10
497	mfspr	r13,SPRN_SPRG3
498	mfspr	r11,SPRN_SRR0
499	clrrdi	r12,r13,32
500	oris	r12,r12,system_call_common@h
501	ori	r12,r12,system_call_common@l
502	mtspr	SPRN_SRR0,r12
503	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
504	mfspr	r12,SPRN_SRR1
505	mtspr	SPRN_SRR1,r10
506	rfid
507	b	.	/* prevent speculative execution */
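/*
 * The system call path above is a hand-unrolled, lighter version of
 * EXCEPTION_PROLOG_PSERIES: nothing is saved in a paca save area (the
 * old r13 is simply parked in r9 for system_call_common to save), and
 * the usual SRR0/SRR1 + rfid sequence turns relocation on and enters
 * system_call_common.
 */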
508
509	STD_EXCEPTION_PSERIES(0xd00, single_step)
510	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
511
512	/* We need to deal with the Altivec unavailable exception
513	 * here, which is at 0xf20 and thus lands in the middle of the
514	 * prolog code of the performance monitor one. A little
515	 * trickery is thus necessary
516	 */
517	. = 0xf00
518	b	performance_monitor_pSeries
519
520	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
521
522	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
523	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
524
525	. = 0x3000
526
527/*** pSeries interrupt support ***/
528
529	/* moved from 0xf00 */
530	STD_EXCEPTION_PSERIES(., performance_monitor)
531
532	.align	7
533_GLOBAL(do_stab_bolted_pSeries)
534	mtcrf	0x80,r12
535	mfspr	r12,SPRN_SPRG2
536	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
537
538/*
539 * We have some room here; we use it to put the pSeries SLB miss user
540 * trampoline code reasonably far away from slb_miss_user_common, to
541 * avoid problems with rfid
542 *
543 * This is used when the SLB miss handler has to go virtual, which
544 * doesn't happen for now but will once we re-implement dynamic VSIDs
545 * for shared page tables
546 */
547#ifdef __DISABLED__
548slb_miss_user_pseries:
549	std	r10,PACA_EXGEN+EX_R10(r13)
550	std	r11,PACA_EXGEN+EX_R11(r13)
551	std	r12,PACA_EXGEN+EX_R12(r13)
552	mfspr	r10,SPRG1
553	ld	r11,PACA_EXSLB+EX_R9(r13)
554	ld	r12,PACA_EXSLB+EX_R3(r13)
555	std	r10,PACA_EXGEN+EX_R13(r13)
556	std	r11,PACA_EXGEN+EX_R9(r13)
557	std	r12,PACA_EXGEN+EX_R3(r13)
558	clrrdi	r12,r13,32
559	mfmsr	r10
560	mfspr	r11,SRR0			/* save SRR0 */
561	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
562	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
563	mtspr	SRR0,r12
564	mfspr	r12,SRR1			/* and SRR1 */
565	mtspr	SRR1,r10
566	rfid
567	b	.				/* prevent spec. execution */
568#endif /* __DISABLED__ */
569
570/*
571 * Vectors for the FWNMI option.  Share common code.
572 */
573	.globl system_reset_fwnmi
574	.align 7
575system_reset_fwnmi:
576	HMT_MEDIUM
577	mtspr	SPRN_SPRG1,r13		/* save r13 */
578	RUNLATCH_ON(r13)
579	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
580
581	.globl machine_check_fwnmi
582	.align 7
583machine_check_fwnmi:
584	HMT_MEDIUM
585	mtspr	SPRN_SPRG1,r13		/* save r13 */
586	RUNLATCH_ON(r13)
587	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
588
589#ifdef CONFIG_PPC_ISERIES
590/***  ISeries-LPAR interrupt handlers ***/
591
592	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
593
594	.globl data_access_iSeries
595data_access_iSeries:
596	mtspr	SPRN_SPRG1,r13
597BEGIN_FTR_SECTION
598	mtspr	SPRN_SPRG2,r12
599	mfspr	r13,SPRN_DAR
600	mfspr	r12,SPRN_DSISR
601	srdi	r13,r13,60
602	rlwimi	r13,r12,16,0x20
603	mfcr	r12
604	cmpwi	r13,0x2c
605	beq	.do_stab_bolted_iSeries
606	mtcrf	0x80,r12
607	mfspr	r12,SPRN_SPRG2
608END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
609	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
610	EXCEPTION_PROLOG_ISERIES_2
611	b	data_access_common
612
613.do_stab_bolted_iSeries:
614	mtcrf	0x80,r12
615	mfspr	r12,SPRN_SPRG2
616	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
617	EXCEPTION_PROLOG_ISERIES_2
618	b	.do_stab_bolted
619
620	.globl	data_access_slb_iSeries
621data_access_slb_iSeries:
622	mtspr	SPRN_SPRG1,r13		/* save r13 */
623	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
624	std	r3,PACA_EXSLB+EX_R3(r13)
625	mfspr	r3,SPRN_DAR
626	std	r9,PACA_EXSLB+EX_R9(r13)
627	mfcr	r9
628#ifdef __DISABLED__
629	cmpdi	r3,0
630	bge	slb_miss_user_iseries
631#endif
632	std	r10,PACA_EXSLB+EX_R10(r13)
633	std	r11,PACA_EXSLB+EX_R11(r13)
634	std	r12,PACA_EXSLB+EX_R12(r13)
635	mfspr	r10,SPRN_SPRG1
636	std	r10,PACA_EXSLB+EX_R13(r13)
637	ld	r12,PACALPPACA+LPPACASRR1(r13);
638	b	.slb_miss_realmode
639
640	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
641
642	.globl	instruction_access_slb_iSeries
643instruction_access_slb_iSeries:
644	mtspr	SPRN_SPRG1,r13		/* save r13 */
645	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
646	std	r3,PACA_EXSLB+EX_R3(r13)
647	ld	r3,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
648	std	r9,PACA_EXSLB+EX_R9(r13)
649	mfcr	r9
650#ifdef __DISABLED__
651	cmpdi	r3,0
652	bge	.slb_miss_user_iseries
653#endif
654	std	r10,PACA_EXSLB+EX_R10(r13)
655	std	r11,PACA_EXSLB+EX_R11(r13)
656	std	r12,PACA_EXSLB+EX_R12(r13)
657	mfspr	r10,SPRN_SPRG1
658	std	r10,PACA_EXSLB+EX_R13(r13)
659	ld	r12,PACALPPACA+LPPACASRR1(r13);
660	b	.slb_miss_realmode
661
662#ifdef __DISABLED__
663slb_miss_user_iseries:
664	std	r10,PACA_EXGEN+EX_R10(r13)
665	std	r11,PACA_EXGEN+EX_R11(r13)
666	std	r12,PACA_EXGEN+EX_R12(r13)
667	mfspr	r10,SPRG1
668	ld	r11,PACA_EXSLB+EX_R9(r13)
669	ld	r12,PACA_EXSLB+EX_R3(r13)
670	std	r10,PACA_EXGEN+EX_R13(r13)
671	std	r11,PACA_EXGEN+EX_R9(r13)
672	std	r12,PACA_EXGEN+EX_R3(r13)
673	EXCEPTION_PROLOG_ISERIES_2
674	b	slb_miss_user_common
675#endif
676
677	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
678	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
679	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
680	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
681	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
682	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
683	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
684
685	.globl	system_call_iSeries
686system_call_iSeries:
687	mr	r9,r13
688	mfspr	r13,SPRN_SPRG3
689	EXCEPTION_PROLOG_ISERIES_2
690	b	system_call_common
691
692	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
693	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
694	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
695
696	.globl system_reset_iSeries
697system_reset_iSeries:
698	mfspr	r13,SPRN_SPRG3		/* Get paca address */
699	mfmsr	r24
700	ori	r24,r24,MSR_RI
701	mtmsrd	r24			/* RI on */
702	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
703	cmpwi	0,r24,0			/* Are we processor 0? */
704	beq	.__start_initialization_iSeries	/* Start up the first processor */
705	mfspr	r4,SPRN_CTRLF
706	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
707	andc	r4,r4,r5
708	mtspr	SPRN_CTRLT,r4
709
7101:
711	HMT_LOW
712#ifdef CONFIG_SMP
713	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
714					 * should start */
715	sync
716	LOADADDR(r3,current_set)
717	sldi	r28,r24,3		/* get current_set[cpu#] */
718	ldx	r3,r3,r28
719	addi	r1,r3,THREAD_SIZE
720	subi	r1,r1,STACK_FRAME_OVERHEAD
721
722	cmpwi	0,r23,0
723	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
724	bne	.__secondary_start		/* Loop until told to go */
725iSeries_secondary_smp_loop:
726	/* Let the Hypervisor know we are alive */
727	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
728	lis	r3,0x8002
729	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
730#else /* CONFIG_SMP */
731	/* Yield the processor.  This is required for non-SMP kernels
732	 * which are running on multi-threaded machines. */
733	lis	r3,0x8000
734	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
735	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
736	li	r4,0			/* "yield timed" */
737	li	r5,-1			/* "yield forever" */
738#endif /* CONFIG_SMP */
739	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
740	sc				/* Invoke the hypervisor via a system call */
741	mfspr	r13,SPRN_SPRG3		/* Put r13 back ???? */
742	b	1b			/* If SMP not configured, secondaries
743					 * loop forever */
744
745	.globl decrementer_iSeries_masked
746decrementer_iSeries_masked:
747	li	r11,1
748	stb	r11,PACALPPACA+LPPACADECRINT(r13)
749	LOADBASE(r12,tb_ticks_per_jiffy)
750	lwz	r12,OFF(tb_ticks_per_jiffy)(r12)
751	mtspr	SPRN_DEC,r12
752	/* fall through */
753
754	.globl hardware_interrupt_iSeries_masked
755hardware_interrupt_iSeries_masked:
756	mtcrf	0x80,r9		/* Restore regs */
757	ld	r11,PACALPPACA+LPPACASRR0(r13)
758	ld	r12,PACALPPACA+LPPACASRR1(r13)
759	mtspr	SPRN_SRR0,r11
760	mtspr	SPRN_SRR1,r12
761	ld	r9,PACA_EXGEN+EX_R9(r13)
762	ld	r10,PACA_EXGEN+EX_R10(r13)
763	ld	r11,PACA_EXGEN+EX_R11(r13)
764	ld	r12,PACA_EXGEN+EX_R12(r13)
765	ld	r13,PACA_EXGEN+EX_R13(r13)
766	rfid
767	b	.	/* prevent speculative execution */
768#endif /* CONFIG_PPC_ISERIES */
769
770/*** Common interrupt handlers ***/
771
772	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
773
774	/*
775	 * Machine check is different because we use a different
776	 * save area: PACA_EXMC instead of PACA_EXGEN.
777	 */
778	.align	7
779	.globl machine_check_common
780machine_check_common:
781	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
782	DISABLE_INTS
783	bl	.save_nvgprs
784	addi	r3,r1,STACK_FRAME_OVERHEAD
785	bl	.machine_check_exception
786	b	.ret_from_except
787
788	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
789	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
790	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
791	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
792	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
793	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
794	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
795#ifdef CONFIG_ALTIVEC
796	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
797#else
798	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
799#endif
800
801/*
802 * Here we have detected that the kernel stack pointer is bad.
803 * R9 contains the saved CR, r13 points to the paca,
804 * r10 contains the (bad) kernel stack pointer,
805 * r11 and r12 contain the saved SRR0 and SRR1.
806 * We switch to using an emergency stack, save the registers there,
807 * and call kernel_bad_stack(), which panics.
808 */
809bad_stack:
810	ld	r1,PACAEMERGSP(r13)
811	subi	r1,r1,64+INT_FRAME_SIZE
812	std	r9,_CCR(r1)
813	std	r10,GPR1(r1)
814	std	r11,_NIP(r1)
815	std	r12,_MSR(r1)
816	mfspr	r11,SPRN_DAR
817	mfspr	r12,SPRN_DSISR
818	std	r11,_DAR(r1)
819	std	r12,_DSISR(r1)
820	mflr	r10
821	mfctr	r11
822	mfxer	r12
823	std	r10,_LINK(r1)
824	std	r11,_CTR(r1)
825	std	r12,_XER(r1)
826	SAVE_GPR(0,r1)
827	SAVE_GPR(2,r1)
828	SAVE_4GPRS(3,r1)
829	SAVE_2GPRS(7,r1)
830	SAVE_10GPRS(12,r1)
831	SAVE_10GPRS(22,r1)
832	addi	r11,r1,INT_FRAME_SIZE
833	std	r11,0(r1)
834	li	r12,0
835	std	r12,0(r11)
836	ld	r2,PACATOC(r13)
8371:	addi	r3,r1,STACK_FRAME_OVERHEAD
838	bl	.kernel_bad_stack
839	b	1b
840
841/*
842 * Return from an exception with minimal checks.
843 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
844 * If interrupts have been enabled, or anything has been
845 * done that might have changed the scheduling status of
846 * any task or sent any task a signal, you should use
847 * ret_from_except or ret_from_except_lite instead of this.
848 */
849	.globl	fast_exception_return
850fast_exception_return:
851	ld	r12,_MSR(r1)
852	ld	r11,_NIP(r1)
853	andi.	r3,r12,MSR_RI		/* check if RI is set */
854	beq-	unrecov_fer
855	ld	r3,_CCR(r1)
856	ld	r4,_LINK(r1)
857	ld	r5,_CTR(r1)
858	ld	r6,_XER(r1)
859	mtcr	r3
860	mtlr	r4
861	mtctr	r5
862	mtxer	r6
863	REST_GPR(0, r1)
864	REST_8GPRS(2, r1)
865
866	mfmsr	r10
867	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
868	mtmsrd	r10,1
869
870	mtspr	SPRN_SRR1,r12
871	mtspr	SPRN_SRR0,r11
872	REST_4GPRS(10, r1)
873	ld	r1,GPR1(r1)
874	rfid
875	b	.	/* prevent speculative execution */
876
877unrecov_fer:
878	bl	.save_nvgprs
8791:	addi	r3,r1,STACK_FRAME_OVERHEAD
880	bl	.unrecoverable_exception
881	b	1b
882
883/*
884 * Here r13 points to the paca, r9 contains the saved CR,
885 * SRR0 and SRR1 are saved in r11 and r12,
886 * r9 - r13 are saved in paca->exgen.
887 */
888	.align	7
889	.globl data_access_common
890data_access_common:
891	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */
892	mfspr	r10,SPRN_DAR
893	std	r10,PACA_EXGEN+EX_DAR(r13)
894	mfspr	r10,SPRN_DSISR
895	stw	r10,PACA_EXGEN+EX_DSISR(r13)
896	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
897	ld	r3,PACA_EXGEN+EX_DAR(r13)
898	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
899	li	r5,0x300
900	b	.do_hash_page	 	/* Try to handle as hpte fault */
901
902	.align	7
903	.globl instruction_access_common
904instruction_access_common:
905	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
906	ld	r3,_NIP(r1)
907	andis.	r4,r12,0x5820
908	li	r5,0x400
909	b	.do_hash_page		/* Try to handle as hpte fault */
910
911/*
912 * Here is the common SLB miss user handler. It is used when going to
913 * virtual mode for SLB misses; it is currently not used
914 */
915#ifdef __DISABLED__
916	.align	7
917	.globl	slb_miss_user_common
918slb_miss_user_common:
919	mflr	r10
920	std	r3,PACA_EXGEN+EX_DAR(r13)
921	stw	r9,PACA_EXGEN+EX_CCR(r13)
922	std	r10,PACA_EXGEN+EX_LR(r13)
923	std	r11,PACA_EXGEN+EX_SRR0(r13)
924	bl	.slb_allocate_user
925
926	ld	r10,PACA_EXGEN+EX_LR(r13)
927	ld	r3,PACA_EXGEN+EX_R3(r13)
928	lwz	r9,PACA_EXGEN+EX_CCR(r13)
929	ld	r11,PACA_EXGEN+EX_SRR0(r13)
930	mtlr	r10
931	beq-	slb_miss_fault
932
933	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
934	beq-	unrecov_user_slb
935	mfmsr	r10
936
937.machine push
938.machine "power4"
939	mtcrf	0x80,r9
940.machine pop
941
942	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
943	mtmsrd	r10,1
944
945	mtspr	SRR0,r11
946	mtspr	SRR1,r12
947
948	ld	r9,PACA_EXGEN+EX_R9(r13)
949	ld	r10,PACA_EXGEN+EX_R10(r13)
950	ld	r11,PACA_EXGEN+EX_R11(r13)
951	ld	r12,PACA_EXGEN+EX_R12(r13)
952	ld	r13,PACA_EXGEN+EX_R13(r13)
953	rfid
954	b	.
955
956slb_miss_fault:
957	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
958	ld	r4,PACA_EXGEN+EX_DAR(r13)
959	li	r5,0
960	std	r4,_DAR(r1)
961	std	r5,_DSISR(r1)
962	b	.handle_page_fault
963
964unrecov_user_slb:
965	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
966	DISABLE_INTS
967	bl	.save_nvgprs
9681:	addi	r3,r1,STACK_FRAME_OVERHEAD
969	bl	.unrecoverable_exception
970	b	1b
971
972#endif /* __DISABLED__ */
973
974
975/*
976 * r13 points to the PACA, r9 contains the saved CR,
977 * r12 contains the saved SRR1, SRR0 is still ready for return
978 * r3 has the faulting address
979 * r9 - r13 are saved in paca->exslb.
980 * r3 is saved in paca->exslb (EX_R3)
981 * We assume we aren't going to take any exceptions during this procedure.
982 */
983_GLOBAL(slb_miss_realmode)
984	mflr	r10
985
986	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
987	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
988
989	bl	.slb_allocate_realmode
990
991	/* All done -- return from exception. */
992
993	ld	r10,PACA_EXSLB+EX_LR(r13)
994	ld	r3,PACA_EXSLB+EX_R3(r13)
995	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
996#ifdef CONFIG_PPC_ISERIES
997	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
998#endif /* CONFIG_PPC_ISERIES */
999
1000	mtlr	r10
1001
1002	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1003	beq-	unrecov_slb
1004
1005.machine	push
1006.machine	"power4"
1007	mtcrf	0x80,r9
1008	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1009.machine	pop
1010
1011#ifdef CONFIG_PPC_ISERIES
1012	mtspr	SPRN_SRR0,r11
1013	mtspr	SPRN_SRR1,r12
1014#endif /* CONFIG_PPC_ISERIES */
1015	ld	r9,PACA_EXSLB+EX_R9(r13)
1016	ld	r10,PACA_EXSLB+EX_R10(r13)
1017	ld	r11,PACA_EXSLB+EX_R11(r13)
1018	ld	r12,PACA_EXSLB+EX_R12(r13)
1019	ld	r13,PACA_EXSLB+EX_R13(r13)
1020	rfid
1021	b	.	/* prevent speculative execution */
1022
1023unrecov_slb:
1024	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1025	DISABLE_INTS
1026	bl	.save_nvgprs
10271:	addi	r3,r1,STACK_FRAME_OVERHEAD
1028	bl	.unrecoverable_exception
1029	b	1b
1030
1031	.align	7
1032	.globl hardware_interrupt_common
1033	.globl hardware_interrupt_entry
1034hardware_interrupt_common:
1035	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
1036hardware_interrupt_entry:
1037	DISABLE_INTS
1038	addi	r3,r1,STACK_FRAME_OVERHEAD
1039	bl	.do_IRQ
1040	b	.ret_from_except_lite
1041
1042	.align	7
1043	.globl alignment_common
1044alignment_common:
1045	mfspr	r10,SPRN_DAR
1046	std	r10,PACA_EXGEN+EX_DAR(r13)
1047	mfspr	r10,SPRN_DSISR
1048	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1049	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1050	ld	r3,PACA_EXGEN+EX_DAR(r13)
1051	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1052	std	r3,_DAR(r1)
1053	std	r4,_DSISR(r1)
1054	bl	.save_nvgprs
1055	addi	r3,r1,STACK_FRAME_OVERHEAD
1056	ENABLE_INTS
1057	bl	.alignment_exception
1058	b	.ret_from_except
1059
1060	.align	7
1061	.globl program_check_common
1062program_check_common:
1063	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1064	bl	.save_nvgprs
1065	addi	r3,r1,STACK_FRAME_OVERHEAD
1066	ENABLE_INTS
1067	bl	.program_check_exception
1068	b	.ret_from_except
1069
1070	.align	7
1071	.globl fp_unavailable_common
1072fp_unavailable_common:
1073	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1074	bne	.load_up_fpu		/* if from user, just load it up */
1075	bl	.save_nvgprs
1076	addi	r3,r1,STACK_FRAME_OVERHEAD
1077	ENABLE_INTS
1078	bl	.kernel_fp_unavailable_exception
1079	BUG_OPCODE
1080
1081	.align	7
1082	.globl altivec_unavailable_common
1083altivec_unavailable_common:
1084	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1085#ifdef CONFIG_ALTIVEC
1086BEGIN_FTR_SECTION
1087	bne	.load_up_altivec	/* if from user, just load it up */
1088END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1089#endif
1090	bl	.save_nvgprs
1091	addi	r3,r1,STACK_FRAME_OVERHEAD
1092	ENABLE_INTS
1093	bl	.altivec_unavailable_exception
1094	b	.ret_from_except
1095
1096#ifdef CONFIG_ALTIVEC
1097/*
1098 * load_up_altivec(unused, unused, tsk)
1099 * Disable VMX for the task which had it previously,
1100 * and save its vector registers in its thread_struct.
1101 * Enables the VMX for use in the kernel on return.
1102 * On SMP we know the VMX is free, since we give it up every
1103 * switch (ie, no lazy save of the vector registers).
1104 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1105 */
1106_STATIC(load_up_altivec)
1107	mfmsr	r5			/* grab the current MSR */
1108	oris	r5,r5,MSR_VEC@h
1109	mtmsrd	r5			/* enable use of VMX now */
1110	isync
1111
1112/*
1113 * For SMP, we don't do lazy VMX switching because it just gets too
1114 * horrendously complex, especially when a task switches from one CPU
1115 * to another.  Instead we call giveup_altivec in switch_to.
1116 * VRSAVE isn't dealt with here, that is done in the normal context
1117 * switch code. Note that we could rely on vrsave value to eventually
1118 * avoid saving all of the VREGs here...
1119 */
1120#ifndef CONFIG_SMP
1121	ld	r3,last_task_used_altivec@got(r2)
1122	ld	r4,0(r3)
1123	cmpdi	0,r4,0
1124	beq	1f
1125	/* Save VMX state to last_task_used_altivec's THREAD struct */
1126	addi	r4,r4,THREAD
1127	SAVE_32VRS(0,r5,r4)
1128	mfvscr	vr0
1129	li	r10,THREAD_VSCR
1130	stvx	vr0,r10,r4
1131	/* Disable VMX for last_task_used_altivec */
1132	ld	r5,PT_REGS(r4)
1133	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1134	lis	r6,MSR_VEC@h
1135	andc	r4,r4,r6
1136	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
11371:
1138#endif /* CONFIG_SMP */
1139	/* Hack: if we get an altivec unavailable trap with VRSAVE
1140	 * set to all zeros, we assume this is a broken application
1141	 * that fails to set it properly, and thus we switch it to
1142	 * all 1's
1143	 */
1144	mfspr	r4,SPRN_VRSAVE
1145	cmpdi	0,r4,0
1146	bne+	1f
1147	li	r4,-1
1148	mtspr	SPRN_VRSAVE,r4
11491:
1150	/* enable use of VMX after return */
1151	ld	r4,PACACURRENT(r13)
1152	addi	r5,r4,THREAD		/* Get THREAD */
1153	oris	r12,r12,MSR_VEC@h
1154	std	r12,_MSR(r1)
1155	li	r4,1
1156	li	r10,THREAD_VSCR
1157	stw	r4,THREAD_USED_VR(r5)
1158	lvx	vr0,r10,r5
1159	mtvscr	vr0
1160	REST_32VRS(0,r4,r5)
1161#ifndef CONFIG_SMP
1162	/* Update last_task_used_altivec to 'current' */
1163	subi	r4,r5,THREAD		/* Back to 'current' */
1164	std	r4,0(r3)
1165#endif /* CONFIG_SMP */
1166	/* restore registers and return */
1167	b	fast_exception_return
1168#endif /* CONFIG_ALTIVEC */
1169
1170/*
1171 * Hash table stuff
1172 */
1173	.align	7
1174_GLOBAL(do_hash_page)
1175	std	r3,_DAR(r1)
1176	std	r4,_DSISR(r1)
1177
1178	andis.	r0,r4,0xa450		/* weird error? */
1179	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
1180BEGIN_FTR_SECTION
1181	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
1182	bne-	.do_ste_alloc		/* If so handle it */
1183END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1184
1185	/*
1186	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1187	 * accessing a userspace segment (even from the kernel). We assume
1188	 * kernel addresses always have the high bit set.
1189	 */
1190	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1191	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1192	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1193	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1194	ori	r4,r4,1			/* add _PAGE_PRESENT */
1195	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
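	/*
	 * In other words: r4 arrives holding the DSISR (or the ISI status
	 * bits from SRR1) and leaves holding a Linux access mask for
	 * hash_page: the store bit becomes _PAGE_RW, _PAGE_USER is set
	 * when MSR_PR was set or the faulting address lacks the kernel
	 * high bit, _PAGE_PRESENT is always set, and _PAGE_EXEC is set
	 * when the trap number in r5 is 0x400.
	 */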
1196
1197	/*
1198	 * On iSeries, we soft-disable interrupts here, then
1199	 * hard-enable interrupts so that the hash_page code can spin on
1200	 * the hash_table_lock without problems on a shared processor.
1201	 */
1202	DISABLE_INTS
1203
1204	/*
1205	 * r3 contains the faulting address
1206	 * r4 contains the required access permissions
1207	 * r5 contains the trap number
1208	 *
1209	 * at return r3 = 0 for success
1210	 */
1211	bl	.hash_page		/* build HPTE if possible */
1212	cmpdi	r3,0			/* see if hash_page succeeded */
1213
1214#ifdef DO_SOFT_DISABLE
1215	/*
1216	 * If we had interrupts soft-enabled at the point where the
1217	 * DSI/ISI occurred, and an interrupt came in during hash_page,
1218	 * handle it now.
1219	 * We jump to ret_from_except_lite rather than fast_exception_return
1220	 * because ret_from_except_lite will check for and handle pending
1221	 * interrupts if necessary.
1222	 */
1223	beq	.ret_from_except_lite
1224	/* For a hash failure, we don't bother re-enabling interrupts */
1225	ble-	12f
1226
1227	/*
1228	 * hash_page couldn't handle it, set soft interrupt enable back
1229	 * to what it was before the trap.  Note that .local_irq_restore
1230	 * handles any interrupts pending at this point.
1231	 */
1232	ld	r3,SOFTE(r1)
1233	bl	.local_irq_restore
1234	b	11f
1235#else
1236	beq	fast_exception_return   /* Return from exception on success */
1237	ble-	12f			/* Failure return from hash_page */
1238
1239	/* fall through */
1240#endif
1241
1242/* Here we have a page fault that hash_page can't handle. */
1243_GLOBAL(handle_page_fault)
1244	ENABLE_INTS
124511:	ld	r4,_DAR(r1)
1246	ld	r5,_DSISR(r1)
1247	addi	r3,r1,STACK_FRAME_OVERHEAD
1248	bl	.do_page_fault
1249	cmpdi	r3,0
1250	beq+	.ret_from_except_lite
1251	bl	.save_nvgprs
1252	mr	r5,r3
1253	addi	r3,r1,STACK_FRAME_OVERHEAD
1254	lwz	r4,_DAR(r1)
1255	bl	.bad_page_fault
1256	b	.ret_from_except
1257
1258/* We have a page fault that hash_page could handle but HV refused
1259 * the PTE insertion
1260 */
126112:	bl	.save_nvgprs
1262	addi	r3,r1,STACK_FRAME_OVERHEAD
1263	lwz	r4,_DAR(r1)
1264	bl	.low_hash_fault
1265	b	.ret_from_except
1266
1267	/* here we have a segment miss */
1268_GLOBAL(do_ste_alloc)
1269	bl	.ste_allocate		/* try to insert stab entry */
1270	cmpdi	r3,0
1271	beq+	fast_exception_return
1272	b	.handle_page_fault
1273
1274/*
1275 * r13 points to the PACA, r9 contains the saved CR,
1276 * r11 and r12 contain the saved SRR0 and SRR1.
1277 * r9 - r13 are saved in paca->exslb.
1278 * We assume we aren't going to take any exceptions during this procedure.
1279 * We assume (DAR >> 60) == 0xc.
1280 */
1281	.align	7
1282_GLOBAL(do_stab_bolted)
1283	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1284	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1285
1286	/* Hash to the primary group */
1287	ld	r10,PACASTABVIRT(r13)
1288	mfspr	r11,SPRN_DAR
1289	srdi	r11,r11,28
1290	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1291
1292	/* Calculate VSID */
1293	/* This is a kernel address, so protovsid = ESID */
1294	ASM_VSID_SCRAMBLE(r11, r9)
1295	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
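	/*
	 * ASM_VSID_SCRAMBLE (see asm/mmu.h) multiplies the proto-VSID by a
	 * large constant modulo 2^36-1 to spread segments evenly; for a
	 * kernel address the proto-VSID is just the ESID.  The rldimi
	 * above picked the STE group: the stab is a 4kB page of 32 groups
	 * of eight 16-byte STEs, indexed by the low 5 bits of the ESID
	 * (hence the insert at a 128-byte granularity).
	 */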
1296
1297	/* Search the primary group for a free entry */
12981:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
1299	andi.	r11,r11,0x80
1300	beq	2f
1301	addi	r10,r10,16
1302	andi.	r11,r10,0x70
1303	bne	1b
1304
1305	/* Stick to only searching the primary group for now.		*/
1306	/* At least for now, we use a very simple random castout scheme */
1307	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
1308	mftb	r11
1309	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1310	ori	r11,r11,0x10
1311
1312	/* r10 currently points to an ste one past the group of interest */
1313	/* make it point to the randomly selected entry			*/
1314	subi	r10,r10,128
1315	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
1316
1317	isync			/* mark the entry invalid		*/
1318	ld	r11,0(r10)
1319	rldicl	r11,r11,56,1	/* clear the valid bit */
1320	rotldi	r11,r11,8
1321	std	r11,0(r10)
1322	sync
1323
1324	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
1325	slbie	r11
1326
13272:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
1328	eieio
1329
1330	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
1331	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
1332	ori	r11,r11,0x90	/* Turn on valid and kp			*/
1333	std	r11,0(r10)	/* Put new entry back into the stab	*/
1334
1335	sync
1336
1337	/* All done -- return from exception. */
1338	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1339	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1340
1341	andi.	r10,r12,MSR_RI
1342	beq-	unrecov_slb
1343
1344	mtcrf	0x80,r9			/* restore CR */
1345
1346	mfmsr	r10
1347	clrrdi	r10,r10,2
1348	mtmsrd	r10,1
1349
1350	mtspr	SPRN_SRR0,r11
1351	mtspr	SPRN_SRR1,r12
1352	ld	r9,PACA_EXSLB+EX_R9(r13)
1353	ld	r10,PACA_EXSLB+EX_R10(r13)
1354	ld	r11,PACA_EXSLB+EX_R11(r13)
1355	ld	r12,PACA_EXSLB+EX_R12(r13)
1356	ld	r13,PACA_EXSLB+EX_R13(r13)
1357	rfid
1358	b	.	/* prevent speculative execution */
1359
1360/*
1361 * Space for CPU0's segment table.
1362 *
1363 * On iSeries, the hypervisor must fill in at least one entry before
1364 * we get control (with relocation on).  The address is given to the hv
1365 * as a page number (see xLparMap in lpardata.c), so this must be at a
1366 * fixed address (the linker can't compute (u64)&initial_stab >>
1367 * PAGE_SHIFT).
1368 */
1369	. = STAB0_OFFSET	/* 0x6000 */
1370	.globl initial_stab
1371initial_stab:
1372	.space	4096
1373
1374/*
1375 * Data area reserved for FWNMI option.
1376 * This address (0x7000) is fixed by the RPA.
1377 */
1378	. = 0x7000
1379	.globl fwnmi_data_area
1380fwnmi_data_area:
1381
1382	/* iSeries does not use the FWNMI stuff, so it is safe to put
1383	 * this here, even if we later allow kernels that will boot on
1384	 * both pSeries and iSeries */
1385#ifdef CONFIG_PPC_ISERIES
1386        . = LPARMAP_PHYS
1387#include "lparmap.s"
1388/*
1389 * This ".text" is here for old compilers that generate a trailing
1390 * .note section when compiling .c files to .s
1391 */
1392	.text
1393#endif /* CONFIG_PPC_ISERIES */
1394
1395        . = 0x8000
1396
1397/*
1398 * On pSeries, secondary processors spin in the following code.
1399 * At entry, r3 = this processor's number (physical cpu id)
1400 */
1401_GLOBAL(pSeries_secondary_smp_init)
1402	mr	r24,r3
1403
1404	/* turn on 64-bit mode */
1405	bl	.enable_64b_mode
1406	isync
1407
1408	/* Copy some CPU settings from CPU 0 */
1409	bl	.__restore_cpu_setup
1410
1411	/* Set up a paca value for this processor. Since we have the
1412	 * physical cpu id in r24, we need to search the pacas to find
1413	 * which logical id maps to our physical one.
1414	 */
1415	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
1416	li	r5,0			/* logical cpu id                */
14171:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
1418	cmpw	r6,r24			/* Compare to our id             */
1419	beq	2f
1420	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
1421	addi	r5,r5,1
1422	cmpwi	r5,NR_CPUS
1423	blt	1b
1424
1425	mr	r3,r24			/* not found, copy phys to r3	 */
1426	b	.kexec_wait		/* next kernel might do better	 */
1427
14282:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
1429	/* From now on, r24 is expected to be logical cpuid */
1430	mr	r24,r5
14313:	HMT_LOW
1432	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
1433					/* start.			 */
1434	sync
1435
1436	/* Create a temp kernel stack for use before relocation is on.	*/
1437	ld	r1,PACAEMERGSP(r13)
1438	subi	r1,r1,STACK_FRAME_OVERHEAD
1439
1440	cmpwi	0,r23,0
1441#ifdef CONFIG_SMP
1442	bne	.__secondary_start
1443#endif
1444	b 	3b			/* Loop until told to go	 */
1445
1446#ifdef CONFIG_PPC_ISERIES
1447_STATIC(__start_initialization_iSeries)
1448	/* Clear out the BSS */
1449	LOADADDR(r11,__bss_stop)
1450	LOADADDR(r8,__bss_start)
1451	sub	r11,r11,r8		/* bss size			*/
1452	addi	r11,r11,7		/* round up to an even double word */
1453	rldicl. r11,r11,61,3		/* shift right by 3		*/
1454	beq	4f
1455	addi	r8,r8,-8
1456	li	r0,0
1457	mtctr	r11			/* zero this many doublewords	*/
14583:	stdu	r0,8(r8)
1459	bdnz	3b
14604:
1461	LOADADDR(r1,init_thread_union)
1462	addi	r1,r1,THREAD_SIZE
1463	li	r0,0
1464	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1465
1466	LOADADDR(r3,cpu_specs)
1467	LOADADDR(r4,cur_cpu_spec)
1468	li	r5,0
1469	bl	.identify_cpu
1470
1471	LOADADDR(r2,__toc_start)
1472	addi	r2,r2,0x4000
1473	addi	r2,r2,0x4000
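	/* The TOC pointer conventionally points 0x8000 bytes past the
	 * start of the .toc section so the whole 64kB can be reached with
	 * signed 16-bit offsets; it is added as two 0x4000 increments
	 * because addi only takes a signed 16-bit immediate.
	 */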
1474
1475	bl	.iSeries_early_setup
1476	bl	.early_setup
1477
1478	/* relocation is on at this point */
1479
1480	b	.start_here_common
1481#endif /* CONFIG_PPC_ISERIES */
1482
1483#ifdef CONFIG_PPC_MULTIPLATFORM
1484
1485_STATIC(__mmu_off)
1486	mfmsr	r3
1487	andi.	r0,r3,MSR_IR|MSR_DR
1488	beqlr
1489	andc	r3,r3,r0
1490	mtspr	SPRN_SRR0,r4
1491	mtspr	SPRN_SRR1,r3
1492	sync
1493	rfid
1494	b	.	/* prevent speculative execution */
1495
1496
1497/*
1498 * Here is our main kernel entry point. We currently support 2 kinds of entries
1499 * depending on the value of r5.
1500 *
1501 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1502 *                 in r3...r7
1503 *
1504 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1505 *                 DT block, r4 is a physical pointer to the kernel itself
1506 *
1507 */
1508_GLOBAL(__start_initialization_multiplatform)
1509#ifdef CONFIG_PPC_MULTIPLATFORM
1510	/*
1511	 * Are we booted from a PROM OF-type client interface?
1512	 */
1513	cmpldi	cr0,r5,0
1514	bne	.__boot_from_prom		/* yes -> prom */
1515#endif
1516
1517	/* Save parameters */
1518	mr	r31,r3
1519	mr	r30,r4
1520
1521	/* Make sure we are running in 64 bits mode */
1522	bl	.enable_64b_mode
1523
1524	/* Setup some critical 970 SPRs before switching MMU off */
1525	bl	.__970_cpu_preinit
1526
1527	/* cpu # */
1528	li	r24,0
1529
1530	/* Switch off MMU if not already */
1531	LOADADDR(r4, .__after_prom_start - KERNELBASE)
1532	add	r4,r4,r30
1533	bl	.__mmu_off
1534	b	.__after_prom_start
1535
1536#ifdef CONFIG_PPC_MULTIPLATFORM
1537_STATIC(__boot_from_prom)
1538	/* Save parameters */
1539	mr	r31,r3
1540	mr	r30,r4
1541	mr	r29,r5
1542	mr	r28,r6
1543	mr	r27,r7
1544
1545	/* Make sure we are running in 64 bits mode */
1546	bl	.enable_64b_mode
1547
1548	/* put a relocation offset into r3 */
1549	bl	.reloc_offset
1550
1551	LOADADDR(r2,__toc_start)
1552	addi	r2,r2,0x4000
1553	addi	r2,r2,0x4000
1554
1555	/* Relocate the TOC from a virt addr to a real addr */
1556	add	r2,r2,r3
1557
1558	/* Restore parameters */
1559	mr	r3,r31
1560	mr	r4,r30
1561	mr	r5,r29
1562	mr	r6,r28
1563	mr	r7,r27
1564
1565	/* Do all of the interaction with OF client interface */
1566	bl	.prom_init
1567	/* We never return */
1568	trap
1569#endif
1570
1571/*
1572 * At this point, r3 contains the physical address we are running at,
1573 * returned by prom_init()
1574 */
1575_STATIC(__after_prom_start)
1576
1577/*
1578 * We need to run with __start at physical address PHYSICAL_START.
1579 * This will leave some code in the first 256B of
1580 * real memory, which are reserved for software use.
1581 * The remainder of the first page is loaded with the fixed
1582 * interrupt vectors.  The next two pages are filled with
1583 * unknown exception placeholders.
1584 *
1585 * Note: This process overwrites the OF exception vectors.
1586 *	r26 == relocation offset
1587 *	r27 == KERNELBASE
1588 */
1589	bl	.reloc_offset
1590	mr	r26,r3
1591	SET_REG_TO_CONST(r27,KERNELBASE)
1592
1593	LOADADDR(r3, PHYSICAL_START)	/* target addr */
1594
1595	// XXX FIXME: Use phys returned by OF (r30)
1596	add	r4,r27,r26 		/* source addr			 */
1597					/* current address of _start	 */
1598					/*   i.e. where we are running	 */
1599					/*	the source addr		 */
1600
1601	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
1602	sub	r5,r5,r27
1603
1604	li	r6,0x100		/* Start offset, the first 0x100 */
1605					/* bytes were copied earlier.	 */
1606
1607	bl	.copy_and_flush		/* copy the first n bytes	 */
1608					/* this includes the code being	 */
1609					/* executed here.		 */
1610
1611	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
1612	mtctr	r0			/* that we just made/relocated	 */
1613	bctr
1614
16154:	LOADADDR(r5,klimit)
1616	add	r5,r5,r26
1617	ld	r5,0(r5)		/* get the value of klimit */
1618	sub	r5,r5,r27
1619	bl	.copy_and_flush		/* copy the rest */
1620	b	.start_here_multiplatform
1621
1622#endif /* CONFIG_PPC_MULTIPLATFORM */
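/*
 * __after_prom_start uses copy_and_flush (below) in two stages: the first
 * call copies everything up to copy_to_here -- which includes the copy
 * loop itself -- to PHYSICAL_START; we then branch into the fresh copy
 * and copy the remainder of the kernel up to klimit from there.
 */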
1623
1624/*
1625 * Copy routine used to copy the kernel to start at physical address 0
1626 * and flush and invalidate the caches as needed.
1627 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1628 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1629 *
1630 * Note: this routine *only* clobbers r0, r6 and lr
1631 */
1632_GLOBAL(copy_and_flush)
1633	addi	r5,r5,-8
1634	addi	r6,r6,-8
16354:	li	r0,16			/* Use the least common		*/
1636					/* denominator cache line	*/
1637					/* size.  This results in	*/
1638					/* extra cache line flushes	*/
1639					/* but operation is correct.	*/
1640					/* Can't get cache line size	*/
1641					/* from NACA as it is being	*/
1642					/* moved too.			*/
1643
1644	mtctr	r0			/* put # words/line in ctr	*/
16453:	addi	r6,r6,8			/* copy a cache line		*/
1646	ldx	r0,r6,r4
1647	stdx	r0,r6,r3
1648	bdnz	3b
1649	dcbst	r6,r3			/* write it to memory		*/
1650	sync
1651	icbi	r6,r3			/* flush the icache line	*/
1652	cmpld	0,r6,r5
1653	blt	4b
1654	sync
1655	addi	r5,r5,8
1656	addi	r6,r6,8
1657	blr
1658
1659.align 8
1660copy_to_here:
1661
1662#ifdef CONFIG_SMP
1663#ifdef CONFIG_PPC_PMAC
1664/*
1665 * On PowerMac, secondary processors start from the reset vector, which
1666 * is temporarily turned into a call to one of the functions below.
1667 */
1668	.section ".text";
1669	.align 2 ;
1670
1671	.globl	__secondary_start_pmac_0
1672__secondary_start_pmac_0:
1673	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1674	li	r24,0
1675	b	1f
1676	li	r24,1
1677	b	1f
1678	li	r24,2
1679	b	1f
1680	li	r24,3
16811:
1682
1683_GLOBAL(pmac_secondary_start)
1684	/* turn on 64-bit mode */
1685	bl	.enable_64b_mode
1686	isync
1687
1688	/* Copy some CPU settings from CPU 0 */
1689	bl	.__restore_cpu_setup
1690
1691	/* pSeries does this early, though I don't think we really need it */
1692	mfmsr	r3
1693	ori	r3,r3,MSR_RI
1694	mtmsrd	r3			/* RI on */
1695
1696	/* Set up a paca value for this processor. */
1697	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
1698	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
1699	add	r13,r13,r4		/* for this processor.		*/
1700	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
1701
1702	/* Create a temp kernel stack for use before relocation is on.	*/
1703	ld	r1,PACAEMERGSP(r13)
1704	subi	r1,r1,STACK_FRAME_OVERHEAD
1705
1706	b	.__secondary_start
1707
1708#endif /* CONFIG_PPC_PMAC */
1709
1710/*
1711 * This function is called after the master CPU has released the
1712 * secondary processors.  The execution environment is relocation off.
1713 * The paca for this processor has the following fields initialized at
1714 * this point:
1715 *   1. Processor number
1716 *   2. Segment table pointer (virtual address)
1717 * On entry the following are set:
1718 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
1719 *   r24   = cpu# (in Linux terms)
1720 *   r13   = paca virtual address
1721 *   SPRG3 = paca virtual address
1722 */
1723_GLOBAL(__secondary_start)
1724	/* Set thread priority to MEDIUM */
1725	HMT_MEDIUM
1726
1727	/* Load TOC */
1728	ld	r2,PACATOC(r13)
1729
1730	/* Do early setup for that CPU (stab, slb, hash table pointer) */
1731	bl	.early_setup_secondary
1732
1733	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1734	LOADADDR(r3,current_set)
1735	sldi	r28,r24,3		/* get current_set[cpu#]	 */
1736	ldx	r1,r3,r28
1737	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1738	std	r1,PACAKSAVE(r13)
1739
1740	/* Clear backchain so we get nice backtraces */
1741	li	r7,0
1742	mtlr	r7
1743
1744	/* enable MMU and jump to start_secondary */
1745	LOADADDR(r3,.start_secondary_prolog)
1746	SET_REG_TO_CONST(r4, MSR_KERNEL)
1747#ifdef DO_SOFT_DISABLE
1748	ori	r4,r4,MSR_EE
1749#endif
1750	mtspr	SPRN_SRR0,r3
1751	mtspr	SPRN_SRR1,r4
1752	rfid
1753	b	.	/* prevent speculative execution */
1754
1755/*
1756 * Running with relocation on at this point.  All we want to do is
1757 * zero the stack back-chain pointer before going into C code.
1758 */
1759_GLOBAL(start_secondary_prolog)
1760	li	r3,0
1761	std	r3,0(r1)		/* Zero the stack frame pointer	*/
1762	bl	.start_secondary
1763	b	.
1764#endif
1765
1766/*
1767 * This subroutine clobbers r11 and r12
1768 */
1769_GLOBAL(enable_64b_mode)
1770	mfmsr	r11			/* grab the current MSR */
1771	li	r12,1
1772	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1773	or	r11,r11,r12
1774	li	r12,1
1775	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1776	or	r11,r11,r12
1777	mtmsrd	r11
1778	isync
1779	blr
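/*
 * MSR_SF (64-bit mode) and MSR_ISF live in the upper word of the MSR, so
 * they cannot be encoded as 16-bit immediates; the li/rldicr pairs above
 * build the masks instead.  Setting SF switches the processor into
 * 64-bit mode for subsequent instructions.
 */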
1780
1781#ifdef CONFIG_PPC_MULTIPLATFORM
1782/*
1783 * This is where the main kernel code starts.
1784 */
1785_STATIC(start_here_multiplatform)
1786	/* get a new offset, now that the kernel has moved. */
1787	bl	.reloc_offset
1788	mr	r26,r3
1789
1790	/* Clear out the BSS. It may have been done in prom_init
1791	 * already, but that's irrelevant since prom_init will soon
1792	 * be detached from the kernel completely. Besides, we need
1793	 * to clear it now for kexec-style entry.
1794	 */
1795	LOADADDR(r11,__bss_stop)
1796	LOADADDR(r8,__bss_start)
1797	sub	r11,r11,r8		/* bss size			*/
1798	addi	r11,r11,7		/* round up to an even double word */
1799	rldicl. r11,r11,61,3		/* shift right by 3		*/
1800	beq	4f
1801	addi	r8,r8,-8
1802	li	r0,0
1803	mtctr	r11			/* zero this many doublewords	*/
18043:	stdu	r0,8(r8)
1805	bdnz	3b
18064:
1807
1808	mfmsr	r6
1809	ori	r6,r6,MSR_RI
1810	mtmsrd	r6			/* RI on */
1811
1812#ifdef CONFIG_HMT
1813	/* Start up the second thread on cpu 0 */
1814	mfspr	r3,SPRN_PVR
1815	srwi	r3,r3,16
1816	cmpwi	r3,0x34			/* Pulsar  */
1817	beq	90f
1818	cmpwi	r3,0x36			/* Icestar */
1819	beq	90f
1820	cmpwi	r3,0x37			/* SStar   */
1821	beq	90f
1822	b	91f			/* HMT not supported */
182390:	li	r3,0
1824	bl	.hmt_start_secondary
182591:
1826#endif
1827
1828	/* The following gets the stack and TOC set up with the regs */
1829	/* pointing to the real addr of the kernel stack.  This is   */
1830	/* all done to support the C function call below which sets  */
1831	/* up the htab.  This is done because we have relocated the  */
1832	/* kernel but are still running in real mode. */
1833
1834	LOADADDR(r3,init_thread_union)
1835	add	r3,r3,r26
1836
1837	/* set up a stack pointer (physical address) */
1838	addi	r1,r3,THREAD_SIZE
1839	li	r0,0
1840	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1841
1842	/* set up the TOC (physical address) */
1843	LOADADDR(r2,__toc_start)
1844	addi	r2,r2,0x4000
1845	addi	r2,r2,0x4000
1846	add	r2,r2,r26
1847
1848	LOADADDR(r3,cpu_specs)
1849	add	r3,r3,r26
1850	LOADADDR(r4,cur_cpu_spec)
1851	add	r4,r4,r26
1852	mr	r5,r26
1853	bl	.identify_cpu
1854
1855	/* Save some low level config HIDs of CPU0 to be copied to
1856	 * other CPUs later on, or used for suspend/resume
1857	 */
1858	bl	.__save_cpu_setup
1859	sync
1860
1861	/* Set up a valid physical PACA pointer in SPRG3 for early_setup.
1862	 * Note that boot_cpuid can always be 0 nowadays since there is
1863	 * nowhere it can be initialized differently before we reach this
1864	 * code
1865	 */
1866	LOADADDR(r27, boot_cpuid)
1867	add	r27,r27,r26
1868	lwz	r27,0(r27)
1869
1870	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
1871	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
1872	add	r13,r13,r24		/* for this processor.		 */
1873	add	r13,r13,r26		/* convert to physical addr	 */
1874	mtspr	SPRN_SPRG3,r13
1875
1876	/* Do very early kernel initializations, including initial hash table,
1877	 * stab and slb setup before we turn on relocation.	*/
1878
1879	/* Restore parameters passed from prom_init/kexec */
1880	mr	r3,r31
1881 	bl	.early_setup
1882
1883	LOADADDR(r3,.start_here_common)
1884	SET_REG_TO_CONST(r4, MSR_KERNEL)
1885	mtspr	SPRN_SRR0,r3
1886	mtspr	SPRN_SRR1,r4
1887	rfid
1888	b	.	/* prevent speculative execution */
1889#endif /* CONFIG_PPC_MULTIPLATFORM */
1890
1891	/* This is where all platforms converge execution */
1892_STATIC(start_here_common)
1893	/* relocation is on at this point */
1894
1895	/* The following code sets up the SP and TOC now that we are */
1896	/* running with translation enabled. */
1897
1898	LOADADDR(r3,init_thread_union)
1899
1900	/* set up the stack */
1901	addi	r1,r3,THREAD_SIZE
1902	li	r0,0
1903	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1904
1905	/* Apply the CPU-specific fixups (nop out sections not relevant
1906	 * to this CPU).
1907	 */
1908	li	r3,0
1909	bl	.do_cpu_ftr_fixups
1910
1911	LOADADDR(r26, boot_cpuid)
1912	lwz	r26,0(r26)
1913
1914	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
1915	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
1916	add	r13,r13,r24		/* for this processor.		 */
1917	mtspr	SPRN_SPRG3,r13
1918
1919	/* ptr to current */
1920	LOADADDR(r4,init_task)
1921	std	r4,PACACURRENT(r13)
1922
1923	/* Load the TOC */
1924	ld	r2,PACATOC(r13)
1925	std	r1,PACAKSAVE(r13)
1926
1927	bl	.setup_system
1928
1929	/* Load up the kernel context */
19305:
1931#ifdef DO_SOFT_DISABLE
1932	li	r5,0
1933	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
1934	mfmsr	r5
1935	ori	r5,r5,MSR_EE		/* Hard Enabled */
1936	mtmsrd	r5
1937#endif
1938
1939	bl .start_kernel
1940
1941_GLOBAL(hmt_init)
1942#ifdef CONFIG_HMT
1943	LOADADDR(r5, hmt_thread_data)
1944	mfspr	r7,SPRN_PVR
1945	srwi	r7,r7,16
1946	cmpwi	r7,0x34			/* Pulsar  */
1947	beq	90f
1948	cmpwi	r7,0x36			/* Icestar */
1949	beq	91f
1950	cmpwi	r7,0x37			/* SStar   */
1951	beq	91f
1952	b	101f
195390:	mfspr	r6,SPRN_PIR
1954	andi.	r6,r6,0x1f
1955	b	92f
195691:	mfspr	r6,SPRN_PIR
1957	andi.	r6,r6,0x3ff
195892:	sldi	r4,r24,3
1959	stwx	r6,r5,r4
1960	bl	.hmt_start_secondary
1961	b	101f
1962
1963__hmt_secondary_hold:
1964	LOADADDR(r5, hmt_thread_data)
1965	clrldi	r5,r5,4
1966	li	r7,0
1967	mfspr	r6,SPRN_PIR
1968	mfspr	r8,SPRN_PVR
1969	srwi	r8,r8,16
1970	cmpwi	r8,0x34
1971	bne	93f
1972	andi.	r6,r6,0x1f
1973	b	103f
197493:	andi.	r6,r6,0x3f
1975
1976103:	lwzx	r8,r5,r7
1977	cmpw	r8,r6
1978	beq	104f
1979	addi	r7,r7,8
1980	b	103b
1981
1982104:	addi	r7,r7,4
1983	lwzx	r9,r5,r7
1984	mr	r24,r9
1985101:
1986#endif
1987	mr	r3,r24
1988	b	.pSeries_secondary_smp_init
1989
1990#ifdef CONFIG_HMT
1991_GLOBAL(hmt_start_secondary)
1992	LOADADDR(r4,__hmt_secondary_hold)
1993	clrldi	r4,r4,4
1994	mtspr	SPRN_NIADORM, r4
1995	mfspr	r4, SPRN_MSRDORM
1996	li	r5, -65
1997	and	r4, r4, r5
1998	mtspr	SPRN_MSRDORM, r4
1999	lis	r4,0xffef
2000	ori	r4,r4,0x7403
2001	mtspr	SPRN_TSC, r4
2002	li	r4,0x1f4
2003	mtspr	SPRN_TST, r4
2004	mfspr	r4, SPRN_HID0
2005	ori	r4, r4, 0x1
2006	mtspr	SPRN_HID0, r4
2007	mfspr	r4, SPRN_CTRLF
2008	oris	r4, r4, 0x40
2009	mtspr	SPRN_CTRLT, r4
2010	blr
2011#endif
2012
2013/*
2014 * We put a few things here that have to be page-aligned.
2015 * This stuff goes at the beginning of the bss, which is page-aligned.
2016 */
2017	.section ".bss"
2018
2019	.align	PAGE_SHIFT
2020
2021	.globl	empty_zero_page
2022empty_zero_page:
2023	.space	PAGE_SIZE
2024
2025	.globl	swapper_pg_dir
2026swapper_pg_dir:
2027	.space	PAGE_SIZE
2028
2029/*
2030 * This space gets a copy of optional info passed to us by the bootstrap.
2031 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
2032 */
2033	.globl	cmd_line
2034cmd_line:
2035	.space	COMMAND_LINE_SIZE
2036