xref: /linux/arch/powerpc/kernel/head_44x.S (revision 2c1ba398ac9da3305815f6ae8e95ae2b9fd3b5ff)
1/*
2 * Kernel execution entry point code.
3 *
4 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
5 *      Initial PowerPC version.
6 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
7 *      Rewritten for PReP
8 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
9 *      Low-level exception handlers, MMU support, and rewrite.
10 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
11 *      PowerPC 8xx modifications.
12 *    Copyright (c) 1998-1999 TiVo, Inc.
13 *      PowerPC 403GCX modifications.
14 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
15 *      PowerPC 403GCX/405GP modifications.
16 *    Copyright 2000 MontaVista Software Inc.
17 *	PPC405 modifications
18 *      PowerPC 403GCX/405GP modifications.
19 * 	Author: MontaVista Software, Inc.
20 *         	frank_rowand@mvista.com or source@mvista.com
21 * 	   	debbie_chu@mvista.com
22 *    Copyright 2002-2005 MontaVista Software, Inc.
23 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
24 *
25 * This program is free software; you can redistribute  it and/or modify it
26 * under  the terms of  the GNU General  Public License as published by the
27 * Free Software Foundation;  either version 2 of the  License, or (at your
28 * option) any later version.
29 */
30
31#include <linux/init.h>
32#include <asm/processor.h>
33#include <asm/page.h>
34#include <asm/mmu.h>
35#include <asm/pgtable.h>
36#include <asm/cputable.h>
37#include <asm/thread_info.h>
38#include <asm/ppc_asm.h>
39#include <asm/asm-offsets.h>
40#include <asm/ptrace.h>
41#include <asm/synch.h>
42#include "head_booke.h"
43
44
45/* As with the other PowerPC ports, it is expected that when code
46 * execution begins here, the following registers contain valid, yet
47 * optional, information:
48 *
49 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
50 *   r4 - Starting address of the init RAM disk
51 *   r5 - Ending address of the init RAM disk
52 *   r6 - Start of kernel command line string (e.g. "mem=128")
53 *   r7 - End of kernel command line string
54 *
55 */
56	__HEAD
57_ENTRY(_stext);
58_ENTRY(_start);
59	/*
60	 * Reserve a word at a fixed location to store the address
61	 * of abatron_pteptrs
62	 */
63	nop
64/*
65 * Save parameters we are passed
66 */
	/* Stash the firmware-provided r3-r7 in high callee-preserved regs so
	 * they survive the calls below; they are copied back to r3-r7 just
	 * before machine_init is called. */
67	mr	r31,r3
68	mr	r30,r4
69	mr	r29,r5
70	mr	r28,r6
71	mr	r27,r7
72	li	r24,0		/* CPU number */
73
74	bl	init_cpu_state
75
76	/*
77	 * This is where the main kernel code starts.
78	 */
79
80	/* ptr to current */
81	lis	r2,init_task@h
82	ori	r2,r2,init_task@l
83
84	/* ptr to current thread */
85	addi	r4,r2,THREAD	/* init task's THREAD */
86	mtspr	SPRN_SPRG_THREAD,r4
87
88	/* stack */
89	lis	r1,init_thread_union@h
90	ori	r1,r1,init_thread_union@l
91	li	r0,0
	/* Carve the first stack frame out of init_thread_union and zero its
	 * back-chain word so stack unwinding terminates here. */
92	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
93
94	bl	early_init
95
96#ifdef CONFIG_RELOCATABLE
97	/*
98	 * r25 will contain RPN/ERPN for the start address of memory
99	 *
100	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
101	 * start of physical memory to get kernstart_addr.
102	 */
103	lis	r3,kernstart_addr@ha
104	la	r3,kernstart_addr@l(r3)
105
106	lis	r4,KERNELBASE@h
107	ori	r4,r4,KERNELBASE@l
108	lis	r5,PAGE_OFFSET@h
109	ori	r5,r5,PAGE_OFFSET@l
110	subf	r4,r5,r4
111
112	rlwinm	r6,r25,0,28,31	/* ERPN */
113	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
114	add	r7,r7,r4
115
	/* kernstart_addr is a 64-bit quantity: ERPN in the high word,
	 * adjusted RPN in the low word. */
116	stw	r6,0(r3)
117	stw	r7,4(r3)
118#endif
119
120/*
121 * Decide what sort of machine this is and initialize the MMU.
122 */
123	mr	r3,r31
124	mr	r4,r30
125	mr	r5,r29
126	mr	r6,r28
127	mr	r7,r27
128	bl	machine_init
129	bl	MMU_init
130
131	/* Setup PTE pointers for the Abatron bdiGDB */
132	lis	r6, swapper_pg_dir@h
133	ori	r6, r6, swapper_pg_dir@l
134	lis	r5, abatron_pteptrs@h
135	ori	r5, r5, abatron_pteptrs@l
136	lis	r4, KERNELBASE@h
137	ori	r4, r4, KERNELBASE@l
138	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
139	stw	r6, 0(r5)
140
141	/* Clear the Machine Check Syndrome Register */
142	li	r0,0
143	mtspr	SPRN_MCSR,r0
144
	/* Enter start_kernel via rfi so the MSR is atomically switched to
	 * MSR_KERNEL at the same time as the jump. */
145	/* Let's move on */
146	lis	r4,start_kernel@h
147	ori	r4,r4,start_kernel@l
148	lis	r3,MSR_KERNEL@h
149	ori	r3,r3,MSR_KERNEL@l
150	mtspr	SPRN_SRR0,r4
151	mtspr	SPRN_SRR1,r3
152	rfi			/* change context and jump to start_kernel */
153
154/*
155 * Interrupt vector entry code
156 *
157 * The Book E MMUs are always on so we don't need to handle
158 * interrupts in real mode as with previous PPC processors. In
159 * this case we handle interrupts in the kernel virtual address
160 * space.
161 *
162 * Interrupt vectors are dynamically placed relative to the
163 * interrupt prefix as determined by the address of interrupt_base.
164 * The interrupt vectors offsets are programmed using the labels
165 * for each interrupt vector entry.
166 *
167 * Interrupt vectors must be aligned on a 16 byte boundary.
168 * We align on a 32 byte cache line boundary for good measure.
169 */
170
/*
 * Table of interrupt vector stubs. IVPR is pointed at interrupt_base by
 * head_start_common; the SET_IVOR() calls in init_cpu_state/head_start_47x
 * program each vector's offset from the labels these macros define.
 * The macros themselves come from "head_booke.h".
 */
171interrupt_base:
172	/* Critical Input Interrupt */
173	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
174
175	/* Machine Check Interrupt */
176	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
177	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
178
179	/* Data Storage Interrupt */
180	DATA_STORAGE_EXCEPTION
181
182		/* Instruction Storage Interrupt */
183	INSTRUCTION_STORAGE_EXCEPTION
184
185	/* External Input Interrupt */
186	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
187
188	/* Alignment Interrupt */
189	ALIGNMENT_EXCEPTION
190
191	/* Program Interrupt */
192	PROGRAM_EXCEPTION
193
194	/* Floating Point Unavailable Interrupt */
195#ifdef CONFIG_PPC_FPU
196	FP_UNAVAILABLE_EXCEPTION
197#else
198	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
199#endif
200	/* System Call Interrupt */
201	START_EXCEPTION(SystemCall)
202	NORMAL_EXCEPTION_PROLOG
203	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
204
205	/* Auxiliary Processor Unavailable Interrupt */
206	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
207
208	/* Decrementer Interrupt */
209	DECREMENTER_EXCEPTION
210
211	/* Fixed Internal Timer Interrupt */
212	/* TODO: Add FIT support */
213	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
214
215	/* Watchdog Timer Interrupt */
216	/* TODO: Add watchdog support */
217#ifdef CONFIG_BOOKE_WDT
218	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
219#else
220	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
221#endif
222
223	/* Data TLB Error Interrupt */
	/*
	 * 44x data TLB miss fast path: walk the (2-level) page tables by hand
	 * and install a TLB entry, or bail to the full DataStorage handler.
	 * Works entirely in r10-r13 saved in the SPRG scratch registers.
	 */
224	START_EXCEPTION(DataTLBError44x)
225	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
226	mtspr	SPRN_SPRG_WSCRATCH1, r11
227	mtspr	SPRN_SPRG_WSCRATCH2, r12
228	mtspr	SPRN_SPRG_WSCRATCH3, r13
229	mfcr	r11
230	mtspr	SPRN_SPRG_WSCRATCH4, r11
231	mfspr	r10, SPRN_DEAR		/* Get faulting address */
232
233	/* If we are faulting a kernel address, we have to use the
234	 * kernel page tables.
235	 */
236	lis	r11, PAGE_OFFSET@h
237	cmplw	r10, r11
238	blt+	3f
239	lis	r11, swapper_pg_dir@h
240	ori	r11, r11, swapper_pg_dir@l
241
242	mfspr	r12,SPRN_MMUCR
243	rlwinm	r12,r12,0,0,23		/* Clear TID */
244
245	b	4f
246
247	/* Get the PGD for the current thread */
2483:
249	mfspr	r11,SPRN_SPRG_THREAD
250	lwz	r11,PGDIR(r11)
251
252	/* Load PID into MMUCR TID */
253	mfspr	r12,SPRN_MMUCR
254	mfspr   r13,SPRN_PID		/* Get PID */
255	rlwimi	r12,r13,0,24,31		/* Set TID */
256
2574:
258	mtspr	SPRN_MMUCR,r12
259
260	/* Mask of required permission bits. Note that while we
261	 * do copy ESR:ST to _PAGE_RW position as trying to write
262	 * to an RO page is pretty common, we don't do it with
263	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
264	 * event so I'd rather take the overhead when it happens
265	 * rather than adding an instruction here. We should measure
266	 * whether the whole thing is worth it in the first place
267	 * as we could avoid loading SPRN_ESR completely in the first
268	 * place...
269	 *
270	 * TODO: Is it worth doing that mfspr & rlwimi in the first
271	 *       place or can we save a couple of instructions here ?
272	 */
273	mfspr	r12,SPRN_ESR
274	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
275	rlwimi	r13,r12,10,30,30
276
277	/* Load the PTE */
278	/* Compute pgdir/pmd offset */
279	rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
280	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
281	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
282	beq	2f			/* Bail if no table */
283
284	/* Compute pte address */
285	rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
286	lwz	r11, 0(r12)		/* Get high word of pte entry */
287	lwz	r12, 4(r12)		/* Get low word of pte entry */
288
289	lis	r10,tlb_44x_index@ha
290
291	andc.	r13,r13,r12		/* Check permission */
292
293	/* Load the next available TLB index */
294	lwz	r13,tlb_44x_index@l(r10)
295
296	bne	2f			/* Bail if permission mismatch */
297
298	/* Increment, rollover, and store TLB index */
299	addi	r13,r13,1
300
301	/* Compare with watermark (instruction gets patched) */
302	.globl tlb_44x_patch_hwater_D
303tlb_44x_patch_hwater_D:
304	cmpwi	0,r13,1			/* reserve entries */
305	ble	5f
306	li	r13,0
3075:
308	/* Store the next available TLB index */
309	stw	r13,tlb_44x_index@l(r10)
310
311	/* Re-load the faulting address */
312	mfspr	r10,SPRN_DEAR
313
314	 /* Jump to common tlb load */
315	b	finish_tlb_load_44x
316
3172:
318	/* The bailout.  Restore registers to pre-exception conditions
319	 * and call the heavyweights to help us out.
320	 */
321	mfspr	r11, SPRN_SPRG_RSCRATCH4
322	mtcr	r11
323	mfspr	r13, SPRN_SPRG_RSCRATCH3
324	mfspr	r12, SPRN_SPRG_RSCRATCH2
325	mfspr	r11, SPRN_SPRG_RSCRATCH1
326	mfspr	r10, SPRN_SPRG_RSCRATCH0
327	b	DataStorage
328
329	/* Instruction TLB Error Interrupt */
330	/*
331	 * Nearly the same as above, except we get our
332	 * information from different registers and bailout
333	 * to a different point.
	 * (Faulting address comes from SRR0 rather than DEAR, the required
	 * permission is _PAGE_EXEC, and the bailout target is
	 * InstructionStorage.)
334	 */
335	START_EXCEPTION(InstructionTLBError44x)
336	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
337	mtspr	SPRN_SPRG_WSCRATCH1, r11
338	mtspr	SPRN_SPRG_WSCRATCH2, r12
339	mtspr	SPRN_SPRG_WSCRATCH3, r13
340	mfcr	r11
341	mtspr	SPRN_SPRG_WSCRATCH4, r11
342	mfspr	r10, SPRN_SRR0		/* Get faulting address */
343
344	/* If we are faulting a kernel address, we have to use the
345	 * kernel page tables.
346	 */
347	lis	r11, PAGE_OFFSET@h
348	cmplw	r10, r11
349	blt+	3f
350	lis	r11, swapper_pg_dir@h
351	ori	r11, r11, swapper_pg_dir@l
352
353	mfspr	r12,SPRN_MMUCR
354	rlwinm	r12,r12,0,0,23		/* Clear TID */
355
356	b	4f
357
358	/* Get the PGD for the current thread */
3593:
360	mfspr	r11,SPRN_SPRG_THREAD
361	lwz	r11,PGDIR(r11)
362
363	/* Load PID into MMUCR TID */
364	mfspr	r12,SPRN_MMUCR
365	mfspr   r13,SPRN_PID		/* Get PID */
366	rlwimi	r12,r13,0,24,31		/* Set TID */
367
3684:
369	mtspr	SPRN_MMUCR,r12
370
371	/* Make up the required permissions */
372	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
373
374	/* Compute pgdir/pmd offset */
375	rlwinm 	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
376	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
377	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
378	beq	2f			/* Bail if no table */
379
380	/* Compute pte address */
381	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
382	lwz	r11, 0(r12)		/* Get high word of pte entry */
383	lwz	r12, 4(r12)		/* Get low word of pte entry */
384
385	lis	r10,tlb_44x_index@ha
386
387	andc.	r13,r13,r12		/* Check permission */
388
389	/* Load the next available TLB index */
390	lwz	r13,tlb_44x_index@l(r10)
391
392	bne	2f			/* Bail if permission mismatch */
393
394	/* Increment, rollover, and store TLB index */
395	addi	r13,r13,1
396
397	/* Compare with watermark (instruction gets patched) */
398	.globl tlb_44x_patch_hwater_I
399tlb_44x_patch_hwater_I:
400	cmpwi	0,r13,1			/* reserve entries */
401	ble	5f
402	li	r13,0
4035:
404	/* Store the next available TLB index */
405	stw	r13,tlb_44x_index@l(r10)
406
407	/* Re-load the faulting address */
408	mfspr	r10,SPRN_SRR0
409
410	/* Jump to common TLB load point */
411	b	finish_tlb_load_44x
412
4132:
414	/* The bailout.  Restore registers to pre-exception conditions
415	 * and call the heavyweights to help us out.
416	 */
417	mfspr	r11, SPRN_SPRG_RSCRATCH4
418	mtcr	r11
419	mfspr	r13, SPRN_SPRG_RSCRATCH3
420	mfspr	r12, SPRN_SPRG_RSCRATCH2
421	mfspr	r11, SPRN_SPRG_RSCRATCH1
422	mfspr	r10, SPRN_SPRG_RSCRATCH0
423	b	InstructionStorage
424
425/*
426 * Both the instruction and data TLB miss get to this
427 * point to load the TLB.
428 * 	r10 - EA of fault
429 * 	r11 - PTE high word value
430 *	r12 - PTE low word value
431 *	r13 - TLB index
432 *	MMUCR - loaded with proper value when we get here
433 *	Upon exit, we reload everything and RFI.
434 */
435finish_tlb_load_44x:
436	/* Combine RPN & ERPN and write WS 0 */
437	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
438	tlbwe	r11,r13,PPC44x_TLB_XLAT
439
440	/*
441	 * Create WS1. This is the faulting address (EPN),
442	 * page size, and valid flag.
443	 */
444	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
445	/* Insert valid and page size */
446	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
447	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */
448
449	/* And WS 2 */
450	li	r10,0xf85			/* Mask to apply from PTE */
451	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
452	and	r11,r12,r10			/* Mask PTE bits to keep */
453	andi.	r10,r12,_PAGE_USER		/* User page ? */
454	beq	1f				/* nope, leave U bits empty */
455	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
4561:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
457
458	/* Done...restore registers and get out of here.
459	*/
460	mfspr	r11, SPRN_SPRG_RSCRATCH4
461	mtcr	r11
462	mfspr	r13, SPRN_SPRG_RSCRATCH3
463	mfspr	r12, SPRN_SPRG_RSCRATCH2
464	mfspr	r11, SPRN_SPRG_RSCRATCH1
465	mfspr	r10, SPRN_SPRG_RSCRATCH0
466	rfi					/* Force context change */
467
468/* TLB error interrupts for 476
469 */
470#ifdef CONFIG_PPC_47x
471	START_EXCEPTION(DataTLBError47x)
472	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
473	mtspr	SPRN_SPRG_WSCRATCH1,r11
474	mtspr	SPRN_SPRG_WSCRATCH2,r12
475	mtspr	SPRN_SPRG_WSCRATCH3,r13
476	mfcr	r11
477	mtspr	SPRN_SPRG_WSCRATCH4,r11
478	mfspr	r10,SPRN_DEAR		/* Get faulting address */
479
480	/* If we are faulting a kernel address, we have to use the
481	 * kernel page tables.
482	 */
483	lis	r11,PAGE_OFFSET@h
484	cmplw	cr0,r10,r11
485	blt+	3f
486	lis	r11,swapper_pg_dir@h
487	ori	r11,r11, swapper_pg_dir@l
488	li	r12,0			/* MMUCR = 0 */
489	b	4f
490
491	/* Get the PGD for the current thread and setup MMUCR */
4923:	mfspr	r11,SPRN_SPRG3
493	lwz	r11,PGDIR(r11)
494	mfspr   r12,SPRN_PID		/* Get PID */
4954:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
496
497	/* Mask of required permission bits. Note that while we
498	 * do copy ESR:ST to _PAGE_RW position as trying to write
499	 * to an RO page is pretty common, we don't do it with
500	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
501	 * event so I'd rather take the overhead when it happens
502	 * rather than adding an instruction here. We should measure
503	 * whether the whole thing is worth it in the first place
504	 * as we could avoid loading SPRN_ESR completely in the first
505	 * place...
506	 *
507	 * TODO: Is it worth doing that mfspr & rlwimi in the first
508	 *       place or can we save a couple of instructions here ?
509	 */
510	mfspr	r12,SPRN_ESR
511	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
512	rlwimi	r13,r12,10,30,30
513
514	/* Load the PTE */
515	/* Compute pgdir/pmd offset */
516	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
517	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
518
519	/* Word 0 is EPN,V,TS,DSIZ */
520	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
521	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
522	li	r12,0
523	tlbwe	r10,r12,0
524
525	/* XXX can we do better ? Need to make sure tlbwe has established
526	 * latch V bit in MMUCR0 before the PTE is loaded further down */
527#ifdef CONFIG_SMP
528	isync
529#endif
530
531	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
532	/* Compute pte address */
533	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
534	beq	2f			/* Bail if no table */
535	lwz	r11,0(r12)		/* Get high word of pte entry */
536
537	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
538	 * bottom of r12 to create a data dependency... We can also use r10
539	 * as destination nowadays
540	 */
541#ifdef CONFIG_SMP
542	lwsync
543#endif
544	lwz	r12,4(r12)		/* Get low word of pte entry */
545
546	andc.	r13,r13,r12		/* Check permission */
547
548	 /* Jump to common tlb load */
549	beq	finish_tlb_load_47x
550
5512:	/* The bailout.  Restore registers to pre-exception conditions
552	 * and call the heavyweights to help us out.
553	 */
554	mfspr	r11,SPRN_SPRG_RSCRATCH4
555	mtcr	r11
556	mfspr	r13,SPRN_SPRG_RSCRATCH3
557	mfspr	r12,SPRN_SPRG_RSCRATCH2
558	mfspr	r11,SPRN_SPRG_RSCRATCH1
559	mfspr	r10,SPRN_SPRG_RSCRATCH0
560	b	DataStorage
561
562	/* Instruction TLB Error Interrupt */
563	/*
564	 * Nearly the same as above, except we get our
565	 * information from different registers and bailout
566	 * to a different point.
	 * (Same shape as DataTLBError47x: faulting address from SRR0 instead
	 * of DEAR, required permission is _PAGE_EXEC, bailout goes to
	 * InstructionStorage.)
567	 */
568	START_EXCEPTION(InstructionTLBError47x)
569	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
570	mtspr	SPRN_SPRG_WSCRATCH1,r11
571	mtspr	SPRN_SPRG_WSCRATCH2,r12
572	mtspr	SPRN_SPRG_WSCRATCH3,r13
573	mfcr	r11
574	mtspr	SPRN_SPRG_WSCRATCH4,r11
575	mfspr	r10,SPRN_SRR0		/* Get faulting address */
576
577	/* If we are faulting a kernel address, we have to use the
578	 * kernel page tables.
579	 */
580	lis	r11,PAGE_OFFSET@h
581	cmplw	cr0,r10,r11
582	blt+	3f
583	lis	r11,swapper_pg_dir@h
584	ori	r11,r11, swapper_pg_dir@l
585	li	r12,0			/* MMUCR = 0 */
586	b	4f
587
588	/* Get the PGD for the current thread and setup MMUCR */
5893:	mfspr	r11,SPRN_SPRG_THREAD
590	lwz	r11,PGDIR(r11)
591	mfspr   r12,SPRN_PID		/* Get PID */
5924:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
593
594	/* Make up the required permissions */
595	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
596
597	/* Load PTE */
598	/* Compute pgdir/pmd offset */
599	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
600	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
601
602	/* Word 0 is EPN,V,TS,DSIZ */
603	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
604	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
605	li	r12,0
606	tlbwe	r10,r12,0
607
608	/* XXX can we do better ? Need to make sure tlbwe has established
609	 * latch V bit in MMUCR0 before the PTE is loaded further down */
610#ifdef CONFIG_SMP
611	isync
612#endif
613
614	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
615	/* Compute pte address */
616	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
617	beq	2f			/* Bail if no table */
618
619	lwz	r11,0(r12)		/* Get high word of pte entry */
620	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
621	 * bottom of r12 to create a data dependency... We can also use r10
622	 * as destination nowadays
623	 */
624#ifdef CONFIG_SMP
625	lwsync
626#endif
627	lwz	r12,4(r12)		/* Get low word of pte entry */
628
629	andc.	r13,r13,r12		/* Check permission */
630
631	/* Jump to common TLB load point */
632	beq	finish_tlb_load_47x
633
6342:	/* The bailout.  Restore registers to pre-exception conditions
635	 * and call the heavyweights to help us out.
636	 */
637	mfspr	r11, SPRN_SPRG_RSCRATCH4
638	mtcr	r11
639	mfspr	r13, SPRN_SPRG_RSCRATCH3
640	mfspr	r12, SPRN_SPRG_RSCRATCH2
641	mfspr	r11, SPRN_SPRG_RSCRATCH1
642	mfspr	r10, SPRN_SPRG_RSCRATCH0
643	b	InstructionStorage
644
645/*
646 * Both the instruction and data TLB miss get to this
647 * point to load the TLB.
648 * 	r10 - free to use
649 * 	r11 - PTE high word value
650 *	r12 - PTE low word value
651 *      r13 - free to use
652 *	MMUCR - loaded with proper value when we get here
653 *	Upon exit, we reload everything and RFI.
654 */
655finish_tlb_load_47x:
656	/* Combine RPN & ERPN and write WS 1 */
657	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
658	tlbwe	r11,r13,1
659
660	/* And make up word 2 */
661	li	r10,0xf85			/* Mask to apply from PTE */
662	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
663	and	r11,r12,r10			/* Mask PTE bits to keep */
664	andi.	r10,r12,_PAGE_USER		/* User page ? */
665	beq	1f				/* nope, leave U bits empty */
666	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
6671:	tlbwe	r11,r13,2
668
669	/* Done...restore registers and get out of here.
670	*/
671	mfspr	r11, SPRN_SPRG_RSCRATCH4
672	mtcr	r11
673	mfspr	r13, SPRN_SPRG_RSCRATCH3
674	mfspr	r12, SPRN_SPRG_RSCRATCH2
675	mfspr	r11, SPRN_SPRG_RSCRATCH1
676	mfspr	r10, SPRN_SPRG_RSCRATCH0
677	rfi
678
679#endif /* CONFIG_PPC_47x */
680
681	/* Debug Interrupt */
682	/*
683	 * This statement needs to exist at the end of the IVPR
684	 * definition just in case you end up taking a debug
685	 * exception within another exception.
	 * (The DEBUG_CRIT_EXCEPTION macro is provided by "head_booke.h",
	 * included at the top of this file; it defines the DebugCrit label
	 * used by the SET_IVOR(15, ...) calls.)
686	 */
687	DEBUG_CRIT_EXCEPTION
688
689/*
690 * Global functions
691 */
692
693/*
694 * Adjust the machine check IVOR on 440A cores
 * Repoints IVOR1 from the default MachineCheck handler to the
 * MachineCheckA variant (installed by MCHECK_EXCEPTION above).
695 */
696_GLOBAL(__fixup_440A_mcheck)
697	li	r3,MachineCheckA@l	/* low 16 bits = offset from IVPR */
698	mtspr	SPRN_IVOR1,r3
699	sync
700	blr
701
702/*
703 * extern void giveup_altivec(struct task_struct *prev)
704 *
705 * The 44x core does not have an AltiVec unit.
706 */
707_GLOBAL(giveup_altivec)
708	blr			/* nothing to give up: no-op stub */
709
710/*
711 * extern void giveup_fpu(struct task_struct *prev)
712 *
713 * The 44x core does not have an FPU.
 * (Only built when CONFIG_PPC_FPU is off; with an FPU the real
 * giveup_fpu is provided elsewhere.)
714 */
715#ifndef CONFIG_PPC_FPU
716_GLOBAL(giveup_fpu)
717	blr
718#endif
719
/*
 * void set_context(id, pgd)
 *   r3 = new context id, written to SPRN_PID
 *   r4 = page directory pointer (only used for the Abatron BDI hook)
 */
720_GLOBAL(set_context)
721
722#ifdef CONFIG_BDI_SWITCH
723	/* Context switch the PTE pointer for the Abatron BDI2000.
724	 * The PGDIR is the second parameter.
725	 */
726	lis	r5, abatron_pteptrs@h
727	ori	r5, r5, abatron_pteptrs@l
728	stw	r4, 0x4(r5)
729#endif
730	mtspr	SPRN_PID,r3
731	isync			/* Force context change */
732	blr
733
734/*
735 * Init CPU state. This is called at boot time or for secondary CPUs
736 * to setup initial TLB entries, setup IVORs, etc...
737 *
 * Saves LR in r22; head_start_common adjusts it into the kernel virtual
 * mapping and returns through it.
738 */
739_GLOBAL(init_cpu_state)
740	mflr	r22
741#ifdef CONFIG_PPC_47x
742	/* We use the PVR to differentiate 44x cores from 476 */
743	mfspr	r3,SPRN_PVR
744	srwi	r3,r3,16
745	cmplwi	cr0,r3,PVR_476@h
746	beq	head_start_47x
747	cmplwi	cr0,r3,PVR_476_ISS@h
748	beq	head_start_47x
749#endif /* CONFIG_PPC_47x */
750
751/*
752 * In case the firmware didn't do it, we apply some workarounds
753 * that are good for all 440 core variants here
754 */
755	mfspr	r3,SPRN_CCR0
756	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
757	isync
758	mtspr	SPRN_CCR0,r3
759	isync
760	sync
761
762/*
763 * Set up the initial MMU state for 44x
764 *
765 * We are still executing code at the virtual address
766 * mappings set by the firmware for the base of RAM.
767 *
768 * We first invalidate all TLB entries but the one
769 * we are running from.  We then load the KERNELBASE
770 * mappings so we can begin to use kernel addresses
771 * natively and so the interrupt vector locations are
772 * permanently pinned (necessary since Book E
773 * implementations always have translation enabled).
774 *
775 * TODO: Use the known TLB entry we are running from to
776 *	 determine which physical region we are located
777 *	 in.  This can be used to determine where in RAM
778 *	 (on a shared CPU system) or PCI memory space
779 *	 (on a DRAMless system) we are located.
780 *       For now, we assume a perfect world which means
781 *	 we are located at the base of DRAM (physical 0).
782 */
783
784/*
785 * Search TLB for entry that we are currently using.
786 * Invalidate all entries but the one we are using.
787 */
788	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
789	mfspr	r3,SPRN_PID			/* Get PID */
790	mfmsr	r4				/* Get MSR */
791	andi.	r4,r4,MSR_IS@l			/* TS=1? */
792	beq	wmmucr				/* If not, leave STS=0 */
793	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
794wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
795	sync
796
797	bl	invstr				/* Find our address */
798invstr:	mflr	r5				/* Make it accessible */
799	tlbsx	r23,0,r5			/* Find entry we are in */
800	li	r4,0				/* Start at TLB entry 0 */
801	li	r3,0				/* Set PAGEID inval value */
8021:	cmpw	r23,r4				/* Is this our entry? */
803	beq	skpinv				/* If so, skip the inval */
804	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
805skpinv:	addi	r4,r4,1				/* Increment */
806	cmpwi	r4,64				/* Are we done? */
807	bne	1b				/* If not, repeat */
808	isync					/* If so, context change */
809
810/*
811 * Configure and load pinned entry into TLB slot 63.
812 */
813
814	lis	r3,PAGE_OFFSET@h
815	ori	r3,r3,PAGE_OFFSET@l
816
817	/* Kernel is at the base of RAM */
818	li r4, 0			/* Load the kernel physical address */
819
820	/* Load the kernel PID = 0 */
821	li	r0,0
822	mtspr	SPRN_PID,r0
823	sync
824
825	/* Initialize MMUCR */
826	li	r5,0
827	mtspr	SPRN_MMUCR,r5
828	sync
829
830	/* pageid fields */
831	clrrwi	r3,r3,10		/* Mask off the effective page number */
832	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
833
834	/* xlat fields */
835	clrrwi	r4,r4,10		/* Mask off the real page number */
836					/* ERPN is 0 for first 4GB page */
837
838	/* attrib fields */
839	/* Added guarded bit to protect against speculative loads/stores */
840	li	r5,0
841	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
842
843        li      r0,63                    /* TLB slot 63 */
844
845	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
846	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
847	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
848
	/* rfi into the new pinned mapping at label 3 with the current MSR */
849	/* Force context change */
850	mfmsr	r0
851	mtspr	SPRN_SRR1, r0
852	lis	r0,3f@h
853	ori	r0,r0,3f@l
854	mtspr	SPRN_SRR0,r0
855	sync
856	rfi
857
858	/* If necessary, invalidate original entry we used */
8593:	cmpwi	r23,63
860	beq	4f
861	li	r6,0
862	tlbwe   r6,r23,PPC44x_TLB_PAGEID
863	isync
864
8654:
866#ifdef CONFIG_PPC_EARLY_DEBUG_44x
867	/* Add UART mapping for early debug. */
868
869	/* pageid fields */
870	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
871	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
872
873	/* xlat fields */
874	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
875	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
876
877	/* attrib fields */
878	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
879        li      r0,62                    /* TLB slot 62 */
880
881	tlbwe	r3,r0,PPC44x_TLB_PAGEID
882	tlbwe	r4,r0,PPC44x_TLB_XLAT
883	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
884
885	/* Force context change */
886	isync
887#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
888
889	/* Establish the interrupt vector offsets */
890	SET_IVOR(0,  CriticalInput);
891	SET_IVOR(1,  MachineCheck);
892	SET_IVOR(2,  DataStorage);
893	SET_IVOR(3,  InstructionStorage);
894	SET_IVOR(4,  ExternalInput);
895	SET_IVOR(5,  Alignment);
896	SET_IVOR(6,  Program);
897	SET_IVOR(7,  FloatingPointUnavailable);
898	SET_IVOR(8,  SystemCall);
899	SET_IVOR(9,  AuxillaryProcessorUnavailable);
900	SET_IVOR(10, Decrementer);
901	SET_IVOR(11, FixedIntervalTimer);
902	SET_IVOR(12, WatchdogTimer);
903	SET_IVOR(13, DataTLBError44x);
904	SET_IVOR(14, InstructionTLBError44x);
905	SET_IVOR(15, DebugCrit);
906
907	b	head_start_common
908
909
910#ifdef CONFIG_PPC_47x
911
912#ifdef CONFIG_SMP
913
914/* Entry point for secondary 47x processors */
915_GLOBAL(start_secondary_47x)
916        mr      r24,r3          /* CPU number */
917
918	bl	init_cpu_state
919
920	/* Now we need to bolt the rest of kernel memory which
921	 * is done in C code. We must be careful because our task
922	 * struct or our stack can (and will probably) be out
923	 * of reach of the initial 256M TLB entry, so we use a
924	 * small temporary stack in .bss for that. This works
925	 * because only one CPU at a time can be in this code
926	 */
927	lis	r1,temp_boot_stack@h
928	ori	r1,r1,temp_boot_stack@l
929	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
930	li	r0,0
931	stw	r0,0(r1)	/* zero the back-chain word */
932	bl	mmu_init_secondary
933
934	/* Now we can get our task struct and real stack pointer */
935
936	/* Get current_thread_info and current */
937	lis	r1,secondary_ti@ha
938	lwz	r1,secondary_ti@l(r1)
939	lwz	r2,TI_TASK(r1)
940
941	/* Current stack pointer */
942	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
943	li	r0,0
944	stw	r0,0(r1)
945
946	/* Kernel stack for exception entry in SPRG3 */
947	addi	r4,r2,THREAD	/* this CPU's task THREAD (from secondary_ti) */
948	mtspr	SPRN_SPRG3,r4
949
950	b	start_secondary
951
952#endif /* CONFIG_SMP */
953
954/*
955 * Set up the initial MMU state for 47x
956 *
957 * We are still executing code at the virtual address
958 * mappings set by the firmware for the base of RAM.
959 */
960
961head_start_47x:
962	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
963	mfspr	r3,SPRN_PID			/* Get PID */
964	mfmsr	r4				/* Get MSR */
965	andi.	r4,r4,MSR_IS@l			/* TS=1? */
966	beq	1f				/* If not, leave STS=0 */
967	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
9681:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
969	sync
970
971	/* Find the entry we are running from */
972	bl	1f
9731:	mflr	r23
974	tlbsx	r23,0,r23
975	tlbre	r24,r23,0
976	tlbre	r25,r23,1
977	tlbre	r26,r23,2
978
979/*
980 * Cleanup time
 * r23 = index of the entry we are running from, r24-r26 = its three words,
 * all preserved across the UTLB wipe below.
981 */
982
983	/* Initialize MMUCR */
984	li	r5,0
985	mtspr	SPRN_MMUCR,r5
986	sync
987
988clear_all_utlb_entries:
989
990	#; Set initial values.
991
992	addis		r3,0,0x8000
993	addi		r4,0,0
994	addi		r5,0,0
995	b		clear_utlb_entry
996
997	#; Align the loop to speed things up.
998
999	.align		6
1000
1001clear_utlb_entry:
1002
	#; r3 walks the UTLB index space, r4 the word-0 value (kept 0/invalid).
1003	tlbwe		r4,r3,0
1004	tlbwe		r5,r3,1
1005	tlbwe		r5,r3,2
1006	addis		r3,r3,0x2000
1007	cmpwi		r3,0
1008	bne		clear_utlb_entry
1009	addis		r3,0,0x8000
1010	addis		r4,r4,0x100
1011	cmpwi		r4,0
1012	bne		clear_utlb_entry
1013
1014	#; Restore original entry.
1015
1016	oris	r23,r23,0x8000  /* specify the way */
1017	tlbwe		r24,r23,0
1018	tlbwe		r25,r23,1
1019	tlbwe		r26,r23,2
1020
1021/*
1022 * Configure and load pinned entry into TLB for the kernel core
1023 */
1024
1025	lis	r3,PAGE_OFFSET@h
1026	ori	r3,r3,PAGE_OFFSET@l
1027
1028	/* Load the kernel PID = 0 */
1029	li	r0,0
1030	mtspr	SPRN_PID,r0
1031	sync
1032
1033	/* Word 0 */
1034	clrrwi	r3,r3,12		/* Mask off the effective page number */
1035	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
1036
1037	/* Word 1 - use r25.  RPN is the same as the original entry */
1038
1039	/* Word 2 */
1040	li	r5,0
1041	ori	r5,r5,PPC47x_TLB2_S_RWX
1042#ifdef CONFIG_SMP
1043	ori	r5,r5,PPC47x_TLB2_M
1044#endif
1045
1046	/* We write to way 0 and bolted 0 */
1047	lis	r0,0x8800
1048	tlbwe	r3,r0,0
1049	tlbwe	r25,r0,1
1050	tlbwe	r5,r0,2
1051
1052/*
1053 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
1054 * them up later
1055 */
1056	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
1057	mtspr	SPRN_SSPCR,r3
1058	mtspr	SPRN_USPCR,r3
1059	LOAD_REG_IMMEDIATE(r3, 0x12345670)
1060	mtspr	SPRN_ISPCR,r3
1061
	/* rfi into the new pinned mapping at label 3 with the current MSR */
1062	/* Force context change */
1063	mfmsr	r0
1064	mtspr	SPRN_SRR1, r0
1065	lis	r0,3f@h
1066	ori	r0,r0,3f@l
1067	mtspr	SPRN_SRR0,r0
1068	sync
1069	rfi
1070
1071	/* Invalidate original entry we used */
10723:
1073	rlwinm	r24,r24,0,21,19 /* clear the "valid" bit */
1074	tlbwe	r24,r23,0
1075	addi	r24,0,0
1076	tlbwe	r24,r23,1
1077	tlbwe	r24,r23,2
1078	isync                   /* Clear out the shadow TLB entries */
1079
1080#ifdef CONFIG_PPC_EARLY_DEBUG_44x
1081	/* Add UART mapping for early debug. */
1082
1083	/* Word 0 */
1084	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
1085	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
1086
1087	/* Word 1 */
1088	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
1089	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
1090
1091	/* Word 2 */
1092	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
1093
1094	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
1095	 * congruence class as the kernel, we need to make sure of it at
1096	 * some point
1097	 */
1098        lis	r0,0x8d00
1099	tlbwe	r3,r0,0
1100	tlbwe	r4,r0,1
1101	tlbwe	r5,r0,2
1102
1103	/* Force context change */
1104	isync
1105#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
1106
1107	/* Establish the interrupt vector offsets */
1108	SET_IVOR(0,  CriticalInput);
1109	SET_IVOR(1,  MachineCheckA);
1110	SET_IVOR(2,  DataStorage);
1111	SET_IVOR(3,  InstructionStorage);
1112	SET_IVOR(4,  ExternalInput);
1113	SET_IVOR(5,  Alignment);
1114	SET_IVOR(6,  Program);
1115	SET_IVOR(7,  FloatingPointUnavailable);
1116	SET_IVOR(8,  SystemCall);
1117	SET_IVOR(9,  AuxillaryProcessorUnavailable);
1118	SET_IVOR(10, Decrementer);
1119	SET_IVOR(11, FixedIntervalTimer);
1120	SET_IVOR(12, WatchdogTimer);
1121	SET_IVOR(13, DataTLBError47x);
1122	SET_IVOR(14, InstructionTLBError47x);
1123	SET_IVOR(15, DebugCrit);
1124
1125	/* We configure icbi to invalidate 128 bytes at a time since the
1126	 * current 32-bit kernel code isn't too happy with icache != dcache
1127	 * block size
1128	 */
1129	mfspr	r3,SPRN_CCR0
1130	oris	r3,r3,0x0020
1131	mtspr	SPRN_CCR0,r3
1132	isync
1133
1134#endif /* CONFIG_PPC_47x */
1135
1136/*
1137 * Here we are back to code that is common between 44x and 47x
1138 *
1139 * We proceed to further kernel initialization and return to the
1140 * main kernel entry
 *
 * r22 holds the LR saved at the top of init_cpu_state; it is rebased
 * into the PAGE_OFFSET virtual mapping before returning through it.
1141 */
1142head_start_common:
1143	/* Establish the interrupt vector base */
1144	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
1145	mtspr	SPRN_IVPR,r4
1146
1147	/*
1148	 * If the kernel was loaded at a non-zero 256 MB page, we need to
1149	 * mask off the most significant 4 bits to get the relative address
1150	 * from the start of physical memory
1151	 */
1152	rlwinm	r22,r22,0,4,31
1153	addis	r22,r22,PAGE_OFFSET@h
1154	mtlr	r22
1155	isync
1156	blr
1157
1158/*
1159 * We put a few things here that have to be page-aligned. This stuff
1160 * goes at the beginning of the data segment, which is page-aligned.
1161 */
1162	.data
1163	.align	PAGE_SHIFT
1164	.globl	sdata
1165sdata:
1166	.globl	empty_zero_page
1167empty_zero_page:
1168	.space	PAGE_SIZE
1169
1170/*
1171 * To support >32-bit physical addresses, we use an 8KB pgdir.
1172 */
1173	.globl	swapper_pg_dir
1174swapper_pg_dir:
1175	.space	PGD_TABLE_SIZE
1176
1177/*
1178 * Room for two PTE pointers, usually the kernel and current user pointers
1179 * to their respective root page table.
 * (Word 0 is stored by the Abatron setup in _start; word 1 is updated by
 * set_context under CONFIG_BDI_SWITCH.)
1180 */
1181abatron_pteptrs:
1182	.space	8
1183
1184#ifdef CONFIG_SMP
	/* 1 KB temporary boot stack (4 KB aligned) used by
	 * start_secondary_47x before the real task stack is reachable. */
1185	.align	12
1186temp_boot_stack:
1187	.space	1024
1188#endif /* CONFIG_SMP */
1189