/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications by Dan Malek
 *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains low-level support and setup for PowerPC 8xx
 *  embedded processors, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
#define DO_8xx_CPU6(val, reg)	\
	li	reg, val;	\
	stw	reg, 12(r0);	\
	lwz	reg, 12(r0);
#else
#define DO_8xx_CPU6(val, reg)
#endif
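/*
 * Background note: on parts affected by the CPU6 errata, writes to certain
 * SPRs must be preceded by storing (and reloading) a per-register "code
 * word" to a scratch location; that is what DO_8xx_CPU6 does, using offset
 * 12 of memory location 0.  As a purely mechanical illustration, the
 * invocation DO_8xx_CPU6(0x3f80, r3) used below expands to:
 *
 *	li	r3, 0x3f80
 *	stw	r3, 12(r0)
 *	lwz	r3, 12(r0)
 *
 * which is why the TLB handlers save r3 at 8(r0) before using it as the
 * scratch register.  On non-CPU6 builds the macro expands to nothing and
 * the following mtspr executes directly.
 */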
	.text
	.globl	_stext
_stext:
	.text
	.globl	_start
_start:

/* MPC8xx
 * This port was done on an MBX board with an 860.  Right now I only
 * support an ELF compressed (zImage) boot from EPPC-Bug because the
 * code there loads up some registers before calling us:
 *   r3: ptr to board info data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * I decided to use conditional compilation instead of checking PVR and
 * adding more processor specific branches around code I don't need.
 * Since this is an embedded processor, I also appreciate any memory
 * savings I can get.
 *
 * The MPC8xx does not have any BATs, but it supports large page sizes.
 * We first initialize the MMU to support 8M byte pages, then load one
 * entry into each of the instruction and data TLBs to map the first
 * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
 * the "internal" processor registers before MMU_init is called.
 *
 * The TLB code currently contains a major hack.  Since I use the condition
 * code register, I have to save and restore it.  I am out of registers, so
 * I just store it in memory location 0 (the TLB handlers are not reentrant).
 * To avoid making any decisions, I need to use the "segment" valid bit
 * in the first level table, but that would require many changes to the
 * Linux page directory/table functions that I don't want to do right now.
 *
 * I used to use SPRG2 for a temporary register in the TLB handler, but it
 * has since been put to other uses.  I now use a hack to save a register
 * and the CCR at memory location 0.....Someday I'll fix this.....
 *	-- Dan
 */
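/*
 * Scratch layout at physical address 0 used by the TLB handlers below
 * (derived from the code; the handlers are not reentrant):
 *	 0(r0) - saved CR
 *	 4(r0) - saved r11
 *	 8(r0) - saved r3 (CONFIG_8xx_CPU6 builds only)
 *	12(r0) - DO_8xx_CPU6 errata scratch word
 */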
	.globl	__start
__start:
	mr	r31,r3			/* save parameters */
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 8 Meg mapped into TLB entries, and the caches
 * ready to work.
 */

turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi				/* enables MMU */

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG0,r10;	\
	mtspr	SPRN_SPRG1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG3;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


#define EXCEPTION_PROLOG_2	\
	CLR_TOP32(r11);		\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
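
/*
 * For orientation, a purely mechanical expansion of one of the vector
 * entries below, EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE),
 * using the macros above:
 *
 *	. = 0xa00
 * Trap_0a:
 *	EXCEPTION_PROLOG
 *	addi	r3,r1,STACK_FRAME_OVERHEAD
 *	li	r10,0xa00
 *	stw	r10,_TRAP(r11)
 *	li	r10,MSR_KERNEL
 *	rlwimi	r10,r9,0,16,16		(COPY_EE: carry EE over from SRR1)
 *	bl	transfer_to_handler_full
 * i0xa00:
 *	.long	unknown_exception
 *	.long	ret_from_except_full
 *
 * transfer_to_handler* picks up the handler address and the return path
 * from the two .long words that follow the bl.  The *_LITE variants pass
 * trap number n+1 and use the lighter transfer/return path.
 */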
/* System reset */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
	. = 0x200
MachineCheck:
	EXCEPTION_PROLOG
	mfspr r4,SPRN_DAR
	stw r4,_DAR(r11)
	mfspr r5,SPRN_DSISR
	stw r5,_DSISR(r11)
	addi r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x200, machine_check_exception)

/* Data access exception.
 * This is "never generated" by the MPC8xx.  We jump to it for other
 * translation errors.
 */
	. = 0x300
DataAccess:
	EXCEPTION_PROLOG
	mfspr	r10,SPRN_DSISR
	stw	r10,_DSISR(r11)
	mr	r5,r10
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

/* Instruction access exception.
 * This is "never generated" by the MPC8xx.  We jump to it for other
 * translation errors.
 */
	. = 0x400
InstructionAccess:
	EXCEPTION_PROLOG
	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* No FPU on MPC8xx.  This exception is not supposed to happen.
*/
	EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 * for all unimplemented and illegal instructions.
 */
	EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)

	. = 0x1100
/*
 * For the MPC8xx, this is a software tablewalk to load the instruction
 * TLB.  It is modelled after the example in the Motorola manual.  The task
 * switch loads the M_TWB register with the pointer to the first level table.
 * If we discover there is no second level table (value is zero) or if there
 * is an invalid pte, we load that into the TLB, which causes another fault
 * into the TLB Error interrupt where we can handle such problems.
 * We have to use the MD_xxx registers for the tablewalk because the
 * equivalent MI_xxx registers only perform the attribute functions.
 */
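/*
 * In rough C, what the handler below does (illustrative only; the MMU
 * tablewalk registers perform the actual address arithmetic):
 *
 *	pgd = (ea >= 0x80000000) ? swapper_pg_dir : current pgd (via M_TWB);
 *	pmd = pgd[ea >> 22];
 *	if (pmd present) {
 *		pte = pte_table(pmd)[(ea >> 12) & 0x3ff];
 *		pte |= _PAGE_ACCESSED;
 *		write back pte;
 *	}
 *	load MI_RPN with the (possibly invalid) pte, so an invalid entry
 *	simply refaults into the TLB Error handler;
 */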
InstructionTLBMiss:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
	DO_8xx_CPU6(0x3780, r3)
	mtspr	SPRN_MD_EPN, r10	/* Have to use MD_EPN for walk, MI_EPN can't be used */
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, don't try to find a pte */

	/* We have a pte table, so load the MI_TWC with the attributes
	 * for this "segment."
	 */
	ori	r11,r11,1		/* Set valid bit */
	DO_8xx_CPU6(0x2b80, r3)
	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r11)	/* Get the pte */

	ori	r10, r10, _PAGE_ACCESSED
	stw	r10, 0(r11)

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
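	/* In big-endian bit numbering, bit 24 = 0x80 ... bit 28 = 0x08,
	 * so inserting 0x00f0 under an rlwimi mask of bits 24-28 sets
	 * bits 24-27 and clears bit 28 in one instruction.  The same
	 * trick is used in the data-side handlers below.
	 */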
2:	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x2d80, r3)
	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi

	. = 0x1200
DataStoreTLBMiss:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, don't try to find a pte */

	/* We have a pte table, so fetch the pte from the table.
	 */
	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
	mfspr	r10, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r10)	/* Get the pte */

	/* Insert the Guarded flag into the TWC from the Linux PTE.
	 * It is bit 27 of both the Linux PTE and the TWC (at least
	 * I got that right :-).  It will be better when we can put
	 * this into the Linux pgd/pmd and load it in the operation
	 * above.
	 */
	rlwimi	r11, r10, 0, 27, 27
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11

	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
	ori	r10, r10, _PAGE_ACCESSED
	stw	r10, 0(r11)

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
2:	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x3d80, r3)
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi

/* This is an instruction TLB error on the MPC8xx.  This could be due
 * to many reasons, such as executing guarded memory or illegal instruction
 * addresses.  There is nothing to do but handle a big time error fault.
 */
	. = 0x1300
InstructionTLBError:
	b	InstructionAccess

/* This is the data TLB error on the MPC8xx.  This could be due to
 * many reasons, including a dirty update to a pte.  We can catch that
 * one here, but anything else is an error.  First, we track down the
 * Linux pte.  If it is valid and write access is allowed, but the
 * page dirty bit is not set, we set it and reload the TLB.  For
 * any other case, we bail out to a higher level function that can
 * handle it.
 */
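/*
 * Roughly, in C (illustrative only):
 *
 *	if (!(DSISR indicates a store))
 *		goto DataAccess;
 *	pte = walk(DAR);		(same two-level walk as above)
 *	if (no level 1 entry || !(pte & _PAGE_RW))
 *		goto DataAccess;
 *	pte |= _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;
 *	write back pte and reload MD_RPN;	(no Linux-level fault taken)
 */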
	. = 0x1400
DataTLBError:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)

	/* First, make sure this was a store operation.
	*/
	mfspr	r10, SPRN_DSISR
	andis.	r11, r10, 0x0200	/* If set, indicates store op */
	beq	2f

	/* The EA of a data TLB miss is automatically stored in the MD_EPN
	 * register.  The EA of a data TLB error is automatically stored in
	 * the DAR, but not the MD_EPN register.  We must copy the 20 most
	 * significant bits of the EA from the DAR to MD_EPN before we
	 * start walking the page tables.  We also need to copy the CASID
	 * value from the M_CASID register.
	 * Addendum:  The EA of a data TLB error is _supposed_ to be stored
	 * in DAR, but it seems that this doesn't happen in some cases, such
	 * as when the error is due to a dcbi instruction to a page with a
	 * TLB that doesn't have the changed bit set.  In such cases, there
	 * does not appear to be any way to recover the EA of the error
	 * since it is neither in DAR nor MD_EPN.  As a workaround, the
	 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
	 * are initialized in mapin_ram().  This will avoid the problem,
	 * assuming we only use the dcbi instruction on kernel addresses.
	 */
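	/* MD_EPN layout assumed by the next few instructions: the page
	 * EPN in the upper 20 bits, MD_EVALID to mark the entry valid,
	 * and the ASID in the low four bits (taken from M_CASID so the
	 * walk matches the current context).
	 */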
	mfspr	r10, SPRN_DAR
	rlwinm	r11, r10, 0, 0, 19
	ori	r11, r11, MD_EVALID
	mfspr	r10, SPRN_M_CASID
	rlwimi	r11, r10, 0, 28, 31
	DO_8xx_CPU6(0x3780, r3)
	mtspr	SPRN_MD_EPN, r11

	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, bail */

	/* We have a pte table, so fetch the pte from the table.
	 */
	ori	r11, r11, 1		/* Set valid bit in physical L2 page */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11		/* Load pte table base address */
	mfspr	r11, SPRN_MD_TWC		/* ....and get the pte address */
	lwz	r10, 0(r11)		/* Get the pte */

	andi.	r11, r10, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail out if not */

	/* Update 'changed', among others.
	*/
	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	mfspr	r11, SPRN_MD_TWC		/* Get pte address again */
	stw	r10, 0(r11)		/* and update pte in table */

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x3d80, r3)
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi
2:
	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	b	DataAccess

	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)

	. = 0x2000

	.globl	giveup_fpu
giveup_fpu:
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
	lis	r4, cpu6_errata_word@h
	ori	r4, r4, cpu6_errata_word@l
	li	r3, 0x3980
	stw	r3, 12(r4)
	lwz	r3, 12(r4)
#endif
	mtspr	SPRN_M_TWB, r6
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also, set the cache mode since that is defined
 * by TLB entries and perform any additional mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 */
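/*
 * Summary of the entries loaded below (derived from the code): one 8M
 * instruction/data entry mapping KERNELBASE 1:1 to physical 0 with the
 * boot cache mode, one cache-inhibited 8M data entry covering the IMMR
 * block at whatever 8M boundary the IMMR register reports, and, with
 * CONFIG_PIN_TLB, two further 8M data entries extending the 1:1 kernel
 * data mapping to 24M.  MI_CTR/MD_CTR are programmed first so these
 * entries land in the reserved (pinned) TLB slots.
 */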
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
#ifdef CONFIG_PIN_TLB
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00
#else
	li	r8, 0
#endif
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */

#ifdef CONFIG_PIN_TLB
	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
	ori	r10, r10, 0x1c00
	mr	r8, r10
#else
	lis	r10, MD_RESETVAL@h
#endif
#ifndef CONFIG_8xx_COPYBACK
	oris	r10, r10, MD_WTDEF@h
#endif
	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */

	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
	 * we can load the instruction and data TLB registers with the
	 * same values.
	 */
	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MI_EPN, r8
	mtspr	SPRN_MD_EPN, r8
	li	r8, MI_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MI_TWC, r8
	mtspr	SPRN_MD_TWC, r8
	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
	mtspr	SPRN_MD_RPN, r8
	lis	r8, MI_Kp@h		/* Set the protection mode */
	mtspr	SPRN_MI_AP, r8
	mtspr	SPRN_MD_AP, r8

	/* Map another 8 MByte at the IMMR to get the processor
	 * internal registers (among other things).
	 */
#ifdef CONFIG_PIN_TLB
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10
#endif
	mfspr	r9, 638			/* Get current IMMR */
	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */

	mr	r8, r9			/* Create vaddr for TLB */
	ori	r8, r8, MD_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r8, MD_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MD_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r8
	mr	r8, r9			/* Create paddr for TLB */
	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
	mtspr	SPRN_MD_RPN, r8

#ifdef CONFIG_PIN_TLB
	/* Map two more 8M kernel data pages.
	*/
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10

	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	addis	r8, r8, 0x0080		/* Add 8M */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r9, MI_PS8MEG		/* Set 8M byte page */
	ori	r9, r9, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r9
	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11

	addis	r8, r8, 0x0080		/* Add 8M */
	mtspr	SPRN_MD_EPN, r8
	mtspr	SPRN_MD_TWC, r9
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11
#endif

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
	mtspr	SPRN_DC_CST, r8
#else
	/* For a debug option, I left this here to easily enable
	 * the write through cache mode
	 */
	lis	r8, DC_SFWT@h
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_DC_CST, r8
#endif
	blr


/*
 * Set up to use a given MMU context.
 * r3 is context number, r4 is PGD pointer.
 *
 * We place the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register with the
 * new "context."
 */
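/*
 * Roughly equivalent to (illustrative only):
 *
 *	void set_context(unsigned long ctx, pgd_t *pgd)
 *	{
 *		M_TWB   = __pa(pgd);	(tablewalk base used by the TLB miss code)
 *		M_CASID = ctx;		(ASID compared against TLB entries)
 *	}
 *
 * with the CPU6 errata dance wrapped around each mtspr when
 * CONFIG_8xx_CPU6 is enabled, and the Abatron PTE pointer updated first
 * when CONFIG_BDI_SWITCH is enabled.
 */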
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif

#ifdef CONFIG_8xx_CPU6
	lis	r6, cpu6_errata_word@h
	ori	r6, r6, cpu6_errata_word@l
	tophys	(r4, r4)
	li	r7, 0x3980
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_TWB, r4		/* Update MMU base address */
	li	r7, 0x3380
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_CASID, r3	/* Update context */
#else
	mtspr	SPRN_M_CASID,r3		/* Update context */
	tophys	(r4, r4)
	mtspr	SPRN_M_TWB, r4		/* and pgd */
#endif
	SYNC
	blr

#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
 * It is important we get called with interrupts disabled.  I used to
 * do that, but it appears that all code that calls this already has
 * interrupts disabled.
 */
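/* Same open-coded CPU6 errata sequence as DO_8xx_CPU6 above, but using
 * offset 8 of cpu6_errata_word as the scratch slot, followed by the
 * write to the decrementer (SPR 22).
 */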
	.globl	set_dec_cpu6
set_dec_cpu6:
	lis	r7, cpu6_errata_word@h
	ori	r7, r7, cpu6_errata_word@l
	li	r4, 0x2c00
	stw	r4, 8(r7)
	lwz	r4, 8(r7)
	mtspr	22, r3		/* Update Decrementer */
	SYNC
	blr
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * and is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Room for two PTE table pointers, usually the kernel and current user
 * pointers to their respective root page tables (pgdir).
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_8xx_CPU6
	.globl	cpu6_errata_word
cpu6_errata_word:
	.space	16
#endif