xref: /linux/arch/parisc/kernel/head.S (revision 23c48a124b469cee2eb0c75e6d22d366d1caa118)
/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

	.level	PA_ASM_LEVEL

	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)

	__HEAD

	.align	4
	.import init_task,data
	.import init_stack,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	cmpb,<<,n       %r3,%r4,$bss_loop
	stw,ma          %r0,4(%r3)
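	/* The two-instruction loop above uses the PA-RISC idiom of putting
	 * the store in the delay slot of the compare-and-branch: cmpb,<<
	 * branches back while %r3 is still below %r4, and the stw,ma in the
	 * delay slot zeroes one word and post-increments %r3 by 4.  The ,n
	 * completer nullifies that final store once %r3 reaches %r4, so
	 * nothing past __bss_stop is written.  A minimal C sketch of the
	 * same effect (illustrative only, running on physical addresses
	 * since the MMU is still off):
	 *
	 *	u32 *p = (u32 *)__pa(__bss_start);
	 *	while (p < (u32 *)__pa(__bss_stop))
	 *		*p++ = 0;
	 */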

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma          %arg0,4(%r1)
	stw,ma          %arg1,4(%r1)
	stw,ma          %arg2,4(%r1)
	stw,ma          %arg3,4(%r1)
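	/* The boot loader (e.g. palo) hands over up to four 32-bit values in
	 * %arg0-%arg3; each stw,ma above stores one word into boot_args[]
	 * and post-increments %r1 by 4, so the array ends up holding
	 * arg0..arg3 in order for later C code to pick up.
	 */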

	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
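	/* Either way, %r4 now points at the directory slot used for the
	 * kernel mapping: with three levels the pgd entry is made to point
	 * at pmd0 (its physical address shifted by PxD_VALUE_SHIFT and
	 * tagged present/valid) and %r4 is advanced into pmd0; with two
	 * levels the pgd itself plays the role of the pmd.
	 */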

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
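	/* The loop above writes ASM_PT_INITIAL consecutive pmd (or pgd)
	 * entries, each pointing at the next PAGE_SIZE-sized page-table page
	 * carved out of pg0; the ldo in the delay slot advances the entry
	 * pointer.  A rough C sketch of the same loop (variable names
	 * illustrative only):
	 *
	 *	pmdval = (__pa(pg0) >> PxD_VALUE_SHIFT)
	 *			| PxD_FLAG_PRESENT | PxD_FLAG_VALID;
	 *	for (i = 0; i < ASM_PT_INITIAL; i++) {
	 *		*entry++ = pmdval;
	 *		pmdval += PAGE_SIZE >> PxD_VALUE_SHIFT;
	 *	}
	 */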


	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop
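	/* This builds a linear mapping of the first 1 << KERNEL_INITIAL_ORDER
	 * bytes of physical memory (the 16/32 MB mentioned above), one PTE
	 * per page, all marked kernel RWX; permissions are tightened later
	 * once the real page tables take over.  Roughly, in C (illustrative
	 * sketch only):
	 *
	 *	pte = _PAGE_KERNEL_RWX;
	 *	for (i = 0; i < (1 << (KERNEL_INITIAL_ORDER - PAGE_SHIFT)); i++) {
	 *		pg0[i] = pte;
	 *		pte += 1UL << PFN_PTE_SHIFT;
	 *	}
	 */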

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_task,%r6
	mtctl           %r6,%cr30

	/* And the stack pointer too */
	load32		init_stack,%sp
	tophys_r1	%sp
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
	std		%dp,0x18(%r10)
#endif

#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
	ldw             MEM_PDC_LO(%r0),%r3
	ldw             MEM_PDC_HI(%r0),%r10
	depd            %r10, 31, 32, %r3        /* move to upper word */
#endif
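	/* On 64-bit kernels the firmware (PDC) entry point is read from Page
	 * Zero, where it is stored as two 32-bit words at fixed offsets
	 * (0x388 holds the low word, 0x35C the high word).  The depd merges
	 * them into one 64-bit address in %r3, i.e. roughly
	 * pdce_proc = ((u64)hi << 32) | lo; in C.
	 */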


#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config(), but it's just
	** way easier to deal with here, because of 64-bit function pointers
	** and because the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */
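	/* Slave CPUs sit in firmware until the monarch later pokes them;
	 * once released they start executing at the physical address stored
	 * in the MEM_RENDEZ word of Page Zero, which is why smp_slave_stext
	 * is advertised here.  MEM_RENDEZ_HI holds the upper 32 bits and is
	 * left zero on the assumption that the kernel sits below 4 GB.
	 */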

	/* FALLTHROUGH */
	.procend

#ifdef CONFIG_HOTPLUG_CPU
	/* common_stext is far away in another section... jump there */
	load32		PA(common_stext), %rp
	bv,n		(%rp)

	/* common_stext and smp_slave_stext need to be in the text section */
	.text
#endif

	/*
	** Code common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**    %r11 must contain RFI target address.
	**    %r25/%r26 args to pass to target function
	**    %r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**    %r3  PDCE_PROC address
	**    %r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
	tophys_r1	%r6

	/* Save the rfi target address */
	STREG		%r11,  TASK_PT_GR11(%r6)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0
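	/* The five instructions above are the usual PA-RISC trick for
	 * turning on the PSW W (wide/64-bit address) bit: compute the
	 * address of the local label 2f, truncate it to 32 bits with depdi
	 * so the upper half of the PC queue is known-zero, then branch to it
	 * with the ssm that sets PSW_SM_W sitting in the branch delay slot.
	 */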

        /* Set Wide mode as the "Default" (e.g. for traps)
        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:

	ldo             PDC_PSW(%r0),%arg0              /* 21 */
	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
	load32          PA(stext_pdc_ret), %rp
	bv              (%r3)
	copy            %r0,%arg3
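	/* This is a raw firmware call: %r3 still holds the PDCE_PROC entry
	 * point read from Page Zero earlier, %rp is pointed at stext_pdc_ret
	 * so that PDC returns there, and the arguments request that the wide
	 * (64-bit) PSW bit become the default, conceptually
	 * pdce_proc(PDC_PSW, PDC_PSW_SET_DEFAULTS, PDC_PSW_WIDE_BIT, 0).
	 * It is done by hand here since the normal PDC wrappers cannot be
	 * used this early.
	 */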

stext_pdc_ret:
	LDREG		TASK_PT_GR11(%r6), %r11
	tovirt_r1	%r6
	mtctl		%r6,%cr30		/* restore task thread info */
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
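	/* The probe below writes 32 into %cr11 (SAR, the shift amount
	 * register) and reads it back: on PA1.1 SAR is only 5 bits wide, so
	 * the value reads back as 0, while PA2.0 has a 6-bit SAR and returns
	 * 32.  A non-zero result therefore means PA2.0 and selects
	 * fault_vector_20; otherwise fault_vector_11 is installed.  On
	 * 64-bit kernels the probe is skipped since only fault_vector_20
	 * applies.
	 */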
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw
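	/* rfi reloads processor state from these queues: the writes to
	 * %cr17/%cr18 prime the IIASQ/IIAOQ head and tail entries (space 0
	 * for the kernel, offsets %r11 and %r11+4, i.e. the RFI target and
	 * its successor), and %ipsw becomes the new PSW.  Taking the rfi
	 * below therefore lands in start_parisc (or smp_callin for slaves)
	 * running in virtual mode with KERNEL_PSW.
	 */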

	tovirt_r1	%sp

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend

#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	   %r0,%sr4
	mtsp	   %r0,%sr5
	mtsp	   %r0,%sr6
	mtsp	   %r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 *  Enable Wide mode early, in case the task_struct for the idle
	 *  task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0
2:
#endif

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG           TASK_STACK(%r6),%sp
	tophys_r1	%sp
	ldo		FRAME_SIZE(%sp),%sp
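	/* The monarch has already stored a pointer to this CPU's idle
	 * task_struct in smp_init_current_idle_task, so the slave simply
	 * dereferences it, puts the task pointer in %cr30, and builds its
	 * stack pointer from the task's stack page plus one FRAME_SIZE,
	 * converted to a physical address until the rfi switches to virtual
	 * mode.
	 */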

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy            %arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

#ifndef CONFIG_64BIT
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
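	/* On 32-bit kernels this single word is what the loadgp macro points
	 * %dp (the global data pointer, %r27) at; it only needs to exist so
	 * that %dp-relative data references have a defined anchor, hence one
	 * zero word in read-only-after-init data.
	 */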
#endif /*!CONFIG_64BIT*/