/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

	.level	PA_ASM_LEVEL

	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)

	__HEAD

	.align	4
	.import init_task,data
	.import init_stack,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
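	/* Clear one word per iteration; the store sits in the branch delay
	 * slot and is nullified once %r3 reaches __bss_stop. */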
$bss_loop:
	cmpb,<<,n       %r3,%r4,$bss_loop
	stw,ma          %r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma          %arg0,4(%r1)
	stw,ma          %arg1,4(%r1)
	stw,ma          %arg2,4(%r1)
	stw,ma          %arg3,4(%r1)

	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

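	/* Link the ASM_PT_INITIAL pte pages (starting at pg0) into the
	 * pmd/pgd: each iteration stores one entry and advances %r3 to
	 * the next pte page. */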
1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

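	/* One RWX pte per page frame, covering physical addresses 0 up to
	 * 1 << KERNEL_INITIAL_ORDER bytes (the 16/32 MB noted above). */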
$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_task,%r6
	mtctl           %r6,%cr30

	/* And the stack pointer too */
	load32		init_stack,%sp
	tophys_r1	%sp
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
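	/* gp lives at offset 0x18 of the 64-bit function descriptor */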
	std		%dp,0x18(%r10)
#endif

#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
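	/* Page Zero words holding the low and high halves of the PDC entry point */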
	ldw             MEM_PDC_LO(%r0),%r3
	ldw             MEM_PDC_HI(%r0),%r10
	depd            %r10, 31, 32, %r3        /* move to upper word */
#endif


#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**    %r11 must contain RFI target address.
	**    %r25/%r26 args to pass to target function
	**    %r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**    %r3  PDCE_PROC address
	**    %r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
	tophys_r1	%r6

	/* Save the rfi target address */
	STREG		%r11,  TASK_PT_GR11(%r6)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
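	/* Compute the branch target with the upper 32 address bits cleared;
	 * the ssm in the delay slot sets the PSW W-bit, so execution resumes
	 * at 2: already in wide mode. */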
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0

        /* Set Wide mode as the "Default" (e.g. for traps).
        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:

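	/* Ask firmware (PDC_PSW / PDC_PSW_SET_DEFAULTS) to make the W bit the
	 * default PSW state, calling through PDCE_PROC in %r3. */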
	ldo             PDC_PSW(%r0),%arg0              /* 21 */
	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
	load32          PA(stext_pdc_ret), %rp
	bv              (%r3)
	copy            %r0,%arg3

stext_pdc_ret:
	LDREG		TASK_PT_GR11(%r6), %r11
	tovirt_r1	%r6
	mtctl		%r6,%cr30		/* restore task thread info */
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
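	/* %cr8, %cr9, %cr12 and %cr13 hold the four protection IDs */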
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
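	/* The SAR (%cr11) is 5 bits wide on PA1.1 but 6 bits on PA2.0, so
	 * writing 32 and reading it back distinguishes the two. */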
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0	/* turn off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw
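	/* The rfi below reloads the PSW from %ipsw and resumes at the front
	 * of the IIA queues, i.e. at %r11 in virtual mode. */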

	tovirt_r1	%sp

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend

#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	   %r0,%sr4
	mtsp	   %r0,%sr5
	mtsp	   %r0,%sr6
	mtsp	   %r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 *  Enable Wide mode early, in case the task_struct for the idle
	 *  task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0
2:
#endif

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG           TASK_STACK(%r6),%sp
	tophys_r1	%sp
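	/* reserve space for this CPU's initial stack frame */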
	ldo		FRAME_SIZE(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy            %arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

ENDPROC(parisc_kernel_start)

#ifndef CONFIG_64BIT
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/
387