xref: /freebsd/sys/arm/arm/locore.S (revision 38f0b757fd84d17d0fc24739a7cda160c4516d81)
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
/* What size should this really be ? It is only used by initarm() */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

/*
 * CPWAIT -- drain the CP15 write pipeline after a system-control
 * operation.  The read-back forces completion of the prior mcr, the
 * register move stalls until the read retires, and the branch-to-next
 * instruction (pc reads as current+8 on ARM, so pc-4 is the following
 * insn) flushes the prefetch pipeline.
 */
#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
54
/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
/* Exported kernel virtual base address (for tools such as kvm_mkdb). */
.globl kernbase
.set kernbase,KERNBASE
/* Exported physical load address of the kernel image. */
.globl physaddr
.set physaddr,PHYSADDR
65
/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
ENTRY_NP(btext)
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/*
	 * Stash the bootloader-provided registers in registers that are
	 * not clobbered by the calls below; they are written into the
	 * arm_boot_params struct at virt_done.
	 */
	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc     p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls 	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	/*
	 * Executing from flash: copy the kernel image into RAM and then
	 * jump to the relocated from_ram label inside the copy.
	 */
	ldr	r7, =KERNBASE
	adr	r1, _start		/* src = where we are running now */
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0		/* r2 = image size (_edata - _start) */
	sub	r0, r0, r7		/* linked VA -> offset from KERNBASE */
	add	r0, r0, r6		/* offset -> destination RAM address */
	mov	r4, r0			/* preserve dest across memcpy */
	bl	memcpy
	ldr	r0, Lram_offset		/* offset of from_ram within image */
	add	pc, r4, r0		/* continue in the RAM copy */
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	/*
	 * Compute the physical address of Lunmapped: strip the virtual
	 * high nibble and substitute the physical base, so we can keep
	 * executing once the MMU is turned off below.
	 */
	adr	r7, Lunmapped
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR


disable_mmu:
	/* Disable MMU for a while */
	mrc     p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr     p15, 0, r2, c1, c0, 0

	/* Drain the pipeline before jumping to the physical alias. */
	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
	/*
	 * Build page table from scratch.
	 */

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable		/* r0 = pa(Lpagetable), MMU is off */
	ldr	r1, [r0]		/* r1 = linked va(Lpagetable) (".") */
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2

	/*
	 * Map PA == VA
	 */
	/* Find the start kernels load address, 1MiB-section aligned */
	adr	r5, _start
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, r2
	mov	r1, r5			/* physical address */
	mov	r2, r5			/* virtual == physical (identity) */
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map (e.g. early SoC device/UART window) */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif

#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

mmu_done:
	nop
	/* Zero the BSS (_edata.._ebss) and pick up the initial stack. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done		/* absolute jump into kernel VA space */

virt_done:
	/* Build a struct arm_boot_params on the stack and call initarm. */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */

	/* mi_startup() must never return; panic if it does. */
	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(btext)
END(_start)
266
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#if defined(SMP)
	orr	r4, #(L1_SHARED)
#endif
	orr	r1, r4			/* r1 = first L1 section descriptor */

	/*
	 * Move the virtual address to the correct bit location:
	 * each 1MiB section (VA >> L1_S_SHIFT) indexes one 4-byte L1
	 * entry, so the combined shift is L1_S_SHIFT - 2.
	 */
	lsr	r2, #(L1_S_SHIFT - 2)

	mov	r4, r3
1:
	str	r1, [r0, r2]		/* store descriptor at table offset */
	add	r2, r2, #4		/* next L1 entry */
	add	r1, r1, #(L1_S_SIZE)	/* next 1MiB physical section */
	adds	r4, r4, #-1
	bhi	1b

	RET
297
/*
 * Literal pool.  Lpagetable pairs the link-time address of this spot
 * (".") with the link-time address of the page table; _start uses the
 * difference to convert between VA and PA before the MMU is on.
 */
Lpagetable:
	.word	.
	.word	pagetable

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata

#ifdef SMP
/* Page table used by secondary CPUs until they switch to the real one. */
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif

/* BSS bounds and initial stack top, loaded as a triple via ldmia. */
.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0
330	.align	0
331
	.bss
/* Initial SVC-mode stack, used until initarm() provides the real one. */
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE
344
	.text
	.align	0

/* Pointer to the cpufuncs switch table, used by cpu_halt below. */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
/* SRAM scratch address used by the (normally disabled) AP_DEBUG macro. */
Lsramaddr:
	.word	0xffff0080

#if 0
/*
 * AP_DEBUG -- write a marker value into per-CPU SRAM slots so secondary
 * CPU progress can be observed with a debugger.  Clobbers r0 and r1.
 */
#define	AP_DEBUG(tmp)			\
	mrc	p15, 0, r1, c0, c0, 5;	\
	ldr	r0, Lsramaddr;		\
	add	r0, r1, lsl #2;		\
	mov	r1, tmp;		\
	str	r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif
365


/*
 * mptramp -- first code executed by a secondary (AP) CPU.  Switches to
 * SVC mode, reads its CPU ID, then jumps to the per-CPU boot address
 * published in the PMU registers (mpentry, in practice).
 */
ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate I+D caches */

	AP_DEBUG(#1)

	/* Force SVC32 mode. */
	mrs	r3, cpsr
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
        msr	cpsr_fsxc, r3

	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU: Lpmureg + 0x100 * cpuid */
	mov	r1, #0x100
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov pc, r1

/* Base of the per-CPU boot-address registers (SoC-specific PMU). */
Lpmureg:
        .word   0xd0022124
END(mptramp)
393
/*
 * mpentry -- secondary CPU startup.  Mirrors the primary-CPU boot path:
 * disable interrupts and the MMU, load the temporary page table, enable
 * the MMU, set up a per-CPU stack carved out of svcstk, and call
 * init_secondary().
 */
ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7


	/* Physical address of Ltag, to continue at once the MMU is off. */
	adr     r7, Ltag
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	/* Pipeline drain after turning the MMU off. */
	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	/* Load the physical address of the secondary page table. */
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	/*
	 * Carve a private 2KiB stack for this CPU out of svcstk:
	 * sp = top - 2048 * cpuid.
	 */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, r0, #15		/* CPU ID */
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]		/* NOTE(review): stores 2048 at the
					 * stack top marker -- confirm intent */
	ldr	pc, .Lmpvirt_done	/* absolute jump into kernel VA space */

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	/* init_secondary() must never return; panic if it does. */
	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"main() returned"
	.align	0
END(mpentry)
#endif
480
/*
 * cpu_halt -- flush the caches, turn off the MMU (on CPUs that need it)
 * and jump to cpu_reset_address to complete a CPU reset.  Does not
 * return.
 */
ENTRY_NP(cpu_halt)
	/* Force SVC32 mode with IRQ/FIQ disabled. */
	mrs     r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr     r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr     cpsr_fsxc, r2

	/* Fetch the reset vector before the MMU goes away. */
	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	/* Write back and invalidate L1 and L2 via the cpufuncs table. */
	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0		/* condition flags consumed by mcrne below */
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr     15, 0, r0, c1, c0, 0
	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov     pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal instruction
	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
	 * instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)
533
534
/*
 * setjump + longjmp
 */
/*
 * int setjmp(jmp_buf env)
 * Save r4-r14 (callee-saved regs, sp and lr) into env; return 0.
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

/*
 * void longjmp(jmp_buf env)
 * Restore r4-r14 from env; execution resumes after the matching
 * setjmp call, which now appears to return 1.
 */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)
549
	.data
	/* esym -- end-of-symbols pointer, initialized to end of kernel. */
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

/* abort() -- deliberate infinite loop; should never be reached. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
557
/*
 * sigcode -- signal trampoline copied out to user space; it invokes
 * SYS_sigreturn on the ucontext the kernel placed on the stack, and
 * falls back to SYS_exit if that fails.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = &frame->sf_uc */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	/* Syscall numbers referenced PC-relative above. */
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	/* szsigcode -- size of the trampoline, used when copying it out. */
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
END(sigcode)
594/* End of locore.S */
595