/* arch/parisc/kernel/head.S
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */
14
15#include <asm/asm-offsets.h>
16#include <asm/psw.h>
17#include <asm/pdc.h>
18
19#include <asm/assembly.h>
20
21#include <linux/linkage.h>
22#include <linux/init.h>
23#include <linux/pgtable.h>
24
25	.level	1.1
26
	__INITDATA
/* Storage for the four 32-bit arguments the boot loader passed in
 * %arg0-%arg3; filled in early in parisc_kernel_start below. */
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)
34
	__HEAD

	.align	4
	.import init_task,data
	.import init_stack,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
/*
 * Kernel entry point.  Entered from the boot loader in physical
 * (real) mode, with the four boot arguments in %arg0-%arg3.
 */
ENTRY(parisc_kernel_start)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data
	.import __end,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	cmpb,<<,n       %r3,%r4,$bss_loop	/* loop while %r3 < %r4 ... */
	stw,ma          %r0,4(%r3)		/* ... storing a zero word, %r3 += 4 */

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma          %arg0,4(%r1)
	stw,ma          %arg1,4(%r1)
	stw,ma          %arg2,4(%r1)
	stw,ma          %arg3,4(%r1)

#if defined(CONFIG_PA20)
	/* check for 64-bit capable CPU as required by current kernel */
	ldi		32,%r10		/* SAR (%cr11) is 6 bits on PA2.0 but only */
	mtctl		%r10,%cr11	/* 5 on PA1.x, so 32 reads back as 0 there */
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$cpu_ok

	/* Not 64-bit capable: print msg1 on the firmware console via the
	 * IODC ENTRY_IO_COUT call, using the console device information
	 * that firmware left in page zero, then idle forever. */
	load32		PA(msg1),%arg0
	ldi		msg1_end-msg1,%arg1
$iodc_panic:
	copy		%arg0, %r10		/* %r10 = text ptr, %r11 = len */
	copy		%arg1, %r11
	load32		PA(init_stack),%sp
#define MEM_CONS 0x3A0				/* console device struct in PAGE0 */
	ldw		MEM_CONS+32(%r0),%arg0	// HPA
	ldi		ENTRY_IO_COUT,%arg1
	ldw		MEM_CONS+36(%r0),%arg2	// SPA
	ldw		MEM_CONS+8(%r0),%arg3	// layers
	load32		PA(__bss_start),%r1
	stw		%r1,-52(%sp)		// arg4
	stw		%r0,-56(%sp)		// arg5
	stw		%r10,-60(%sp)		// arg6 = ptr to text
	stw		%r11,-64(%sp)		// arg7 = len
	stw		%r0,-68(%sp)		// arg8
	load32		PA(.iodc_panic_ret), %rp
	ldw		MEM_CONS+40(%r0),%r1	// ENTRY_IODC
	bv,n		(%r1)
.iodc_panic_ret:
	b .				/* wait endless with ... */
	or		%r10,%r10,%r10	/* qemu idle sleep */
msg1:	.ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif
111
	.level	PA_ASM_LEVEL

	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories.
	 * %r4 = first pmd/pgd entry to fill, %r3 = pte-dir value,
	 * %r1 = number of entries to write. */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3	/* next pte page */
	addib,>		-1,%r1,1b
	/* delay slot: advance %r4 to the next directory entry */
#if CONFIG_PGTABLE_LEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
147
148
	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(_end),%r1
	SHRREG		%r1,PAGE_SHIFT,%r1  /* %r1 is PFN count for _end symbol */
	/* clamp: %r11 = min(initial-order PFN count, PFN count up to _end);
	 * ,n nullifies the copy when the branch is taken */
	cmpb,<<,n	%r11,%r1,1f
	copy		%r1,%r11	/* %r1 PFN count smaller than %r11 */
1:	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)	/* store pte, %r1 advances */
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_task,%r6
	mtctl           %r6,%cr30

	/* And the stack pointer too */
	load32		init_stack,%sp
	tophys_r1	%sp
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
	std		%dp,0x18(%r10)
#endif

/* Page-zero locations of the two halves of the PDC entry point address */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
	ldw             MEM_PDC_LO(%r0),%r3
	ldw             MEM_PDC_HI(%r0),%r10
	depd            %r10, 31, 32, %r3        /* move to upper word */
#endif
195
196
#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

#ifdef CONFIG_HOTPLUG_CPU
	/* common_stext is far away in another section... jump there */
	load32		PA(common_stext), %rp
	bv,n		(%rp)

	/* common_stext and smp_slave_stext need to be in the text section */
	.text
#endif

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**    %r11 must contain RFI target address.
	**    %r25/%r26 args to pass to target function
	**    %r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**    %r3  PDCE_PROC address
	**    %r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
242
#ifdef CONFIG_64BIT
	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
	tophys_r1	%r6

	/* Save the rfi target address across the PDC call below */
	STREG		%r11,  TASK_PT_GR11(%r6)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0	/* delay slot: set PSW W (wide) bit */

        /* Set Wide mode as the "Default" (eg for traps)
        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:

	/* PDC_PSW / PDC_PSW_SET_DEFAULTS: make the W bit the default.
	 * %r3 holds the PDCE_PROC address fetched earlier. */
	ldo             PDC_PSW(%r0),%arg0              /* 21 */
	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
	load32          PA(stext_pdc_ret), %rp
	bv              (%r3)
	copy            %r0,%arg3	/* delay slot: arg3 = 0 */

stext_pdc_ret:
	LDREG		TASK_PT_GR11(%r6), %r11	/* recover rfi target address */
	tovirt_r1	%r6
	mtctl		%r6,%cr30		/* restore task thread info */
#endif

#ifndef CONFIG_64BIT
	/* clear all BTLBs via PDC_BLOCK_TLB / PDC_BTLB_PURGE_ALL */
	ldi		PDC_BLOCK_TLB,%arg0
	load32          PA(stext_pdc_btlb_ret), %rp
	ldw             MEM_PDC_LO(%r0),%r3
	bv              (%r3)
	ldi		PDC_BTLB_PURGE_ALL,%arg1	/* delay slot */
stext_pdc_btlb_ret:
#endif
286
	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	ldi		32,%r10		/* same SAR width probe as above: */
	mtctl		%r10,%cr11	/* reads back 0 on a PA1.x CPU */
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10	/* delay slot */

$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14	/* %cr14 = IVA */

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target (%r11) into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw	/* PSW to load on rfi */

	tovirt_r1	%sp

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend
357
#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
/* RFI *return* address for slaves: trap if smp_callin ever returns. */
smp_callin_rtn:
        .proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	   %r0,%sr4
	mtsp	   %r0,%sr5
	mtsp	   %r0,%sr6
	mtsp	   %r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 *  Enable Wide mode early, in case the task_struct for the idle
	 *  task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0	/* delay slot: set PSW W bit */
2:
#endif

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6	/* %r6 = idle task_struct pointer */
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG           TASK_STACK(%r6),%sp
	tophys_r1	%sp
	ldo		FRAME_SIZE(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy            %arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */
437
#ifndef CONFIG_64BIT
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	/* $global$ is the anchor word for the 32-bit global data pointer
	 * (imported as a forward declaration above and set up by the
	 * loadgp macro) — NOTE(review): presumed %dp/%r27 base; confirm
	 * against the loadgp definition in assembly.h. */
	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/
449