/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Early kernel startup code for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>

#define SEGTABLE_ENTRIES #0x0e0

	__INIT
ENTRY(stext)
	/*
	 * VMM will already have set up true vector page, MMU, etc.
	 * To set up initial kernel identity map, we have to pass
	 * the VMM a pointer to some canonical page tables. In
	 * this implementation, we're assuming that we've got
	 * them precompiled. Generate value in R24, as we'll need
	 * it again shortly.
	 */
	r24.L = #LO(swapper_pg_dir)
	r24.H = #HI(swapper_pg_dir)

	/*
	 * The symbol is a kernel segment address, but we need
	 * the logical/physical address.
	 */
	r25 = pc;
	r2.h = #0xffc0;
	r2.l = #0x0000;
	r25 = and(r2,r25);	/*  R25 holds PHYS_OFFSET now  */
	r1.h = #HI(PAGE_OFFSET);
	r1.l = #LO(PAGE_OFFSET);
	r24 = sub(r24,r1);	/* swapper_pg_dir - PAGE_OFFSET */
	r24 = add(r24,r25);	/* + PHYS_OFFSET */

	r0 = r24;  /* aka __pa(swapper_pg_dir)  */
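	/*
	 * The address math above, as an illustrative C sketch (not part of
	 * the build; pa_of_pgdir is a hypothetical name).  It assumes the
	 * kernel was loaded at a 4MB-aligned physical address, which is
	 * what the masking of PC relies on:
	 *
	 *	static unsigned long pa_of_pgdir(unsigned long pgdir_va,
	 *					 unsigned long page_offset,
	 *					 unsigned long pc)
	 *	{
	 *		unsigned long phys_offset = pc & 0xffc00000UL;	// 4MB-aligned load addr
	 *		return pgdir_va - page_offset + phys_offset;	// == __pa(swapper_pg_dir)
	 *	}
	 */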

	/*
	 * Initialize the page dir so that the virtual and physical
	 * addresses where the kernel was loaded are identical.
	 * Done in 4MB chunks.
	 */
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X	\
		  | __HEXAGON_C_WB_L2 << 6			\
		  | __HVM_PDE_S_4MB)

	/*
	 * Get number of VA=PA entries; only really needed for jump
	 * to hyperspace; gets blown away immediately after
	 */

	{
		r1.l = #LO(_end);
		r2.l = #LO(stext);
		r3 = #1;
	}
	{
		r1.h = #HI(_end);
		r2.h = #HI(stext);
		r3 = asl(r3, #22);
	}
	{
		r1 = sub(r1, r2);
		r3 = add(r3, #-1);
	}  /* r1 =  _end - stext  */
	r1 = add(r1, r3);  /*  + (4M-1) */
	r26 = lsr(r1, #22); /*  / 4M = # of entries */
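	/*
	 * In C terms the three packets above amount to a round-up division
	 * by 4MB (illustrative sketch only; nr_idmap_entries is a
	 * hypothetical name):
	 *
	 *	static unsigned long nr_idmap_entries(unsigned long start,
	 *					      unsigned long end)
	 *	{
	 *		return (end - start + ((1UL << 22) - 1)) >> 22;	// DIV_ROUND_UP(size, 4MB)
	 *	}
	 */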

	r1 = r25;
	r2.h = #0xffc0;
	r2.l = #0x0000;		/* round back down to 4MB boundary  */
	r1 = and(r1,r2);
	r2 = lsr(r1, #22)	/* 4MB page number		*/
	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
	r0 = add(r0,r2)		/* r0 = address of correct PTE	*/
	r2 = #PTE_BITS
	r1 = add(r1,r2)		/* r1 = 4MB PTE for the first entry	*/
	r2.h = #0x0040
	r2.l = #0x0000		/* 4MB increments */
	loop0(1f,r26);
1:
	memw(r0 ++ #4) = r1
	{ r1 = add(r1, r2); } :endloop0
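	/*
	 * Illustrative C sketch of the identity-map fill loop above (not
	 * part of the build; fill_idmap is a hypothetical name, and each
	 * entry is treated as a plain 32-bit word):
	 *
	 *	static void fill_idmap(unsigned int *pgd_pa, unsigned long phys_base,
	 *			       unsigned long nentries)
	 *	{
	 *		unsigned long seg = phys_base & 0xffc00000UL;	// 4MB-aligned start
	 *		unsigned int *pte = pgd_pa + (seg >> 22);	// one word per 4MB segment
	 *		unsigned long i;
	 *
	 *		for (i = 0; i < nentries; i++, seg += 0x00400000UL)
	 *			*pte++ = seg | PTE_BITS;		// VA == PA, 4MB page
	 *	}
	 */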

	/*  Also need to overwrite the initial 0xc0000000 entries  */
	/*  PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift)  */
	R1.H = #HI(PAGE_OFFSET >> (22 - 2))
	R1.L = #LO(PAGE_OFFSET >> (22 - 2))

	r0 = add(r1, r24);	/* advance to 0xc0000000 entry */
	r1 = r25;
	r2.h = #0xffc0;
	r2.l = #0x0000;		/* round back down to 4MB boundary  */
	r1 = and(r1,r2);	/* for huge page */
	r2 = #PTE_BITS
	r1 = add(r1,r2);
	r2.h = #0x0040
	r2.l = #0x0000		/* 4MB increments */

	loop0(1f,SEGTABLE_ENTRIES);
1:
	memw(r0 ++ #4) = r1;
	{ r1 = add(r1,r2); } :endloop0
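	/*
	 * Likewise for the loop above, which maps the kernel segment at
	 * PAGE_OFFSET onto the same physical 4MB pages (sketch only;
	 * fill_kernel_map is a hypothetical name):
	 *
	 *	static void fill_kernel_map(unsigned int *pgd_pa, unsigned long page_offset,
	 *				    unsigned long phys_base)
	 *	{
	 *		unsigned int *pte = pgd_pa + (page_offset >> 22);	// first kernel PDE
	 *		unsigned long seg = phys_base & 0xffc00000UL;
	 *		unsigned long i;
	 *
	 *		for (i = 0; i < 0x0e0; i++, seg += 0x00400000UL)	// SEGTABLE_ENTRIES
	 *			*pte++ = seg | PTE_BITS;
	 *	}
	 */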

	r0 = r24;

	/*
	 * The subroutine wrapper around the virtual instruction touches
	 * no memory, so we should be able to use it even here.
	 * Note that in this version, R1 and R2 get "clobbered"; see
	 * vm_ops.S
	 */
	r1 = #VM_TRANS_TYPE_TABLE
	call	__vmnewmap;

	/*  Jump into virtual address range.  */

	r31.h = #hi(__head_s_vaddr_target)
	r31.l = #lo(__head_s_vaddr_target)
	jumpr r31

	/*  Insert trippy space effects.  */

__head_s_vaddr_target:
	/*
	 * Tear down VA=PA translation now that we are running
	 * in kernel virtual space.
	 */
	r0 = #__HVM_PDE_S_INVALID

	r1.h = #0xffc0;
	r1.l = #0x0000;
	r2 = r25;		/* phys_offset */
	r2 = and(r1,r2);

	r1.l = #lo(swapper_pg_dir)
	r1.h = #hi(swapper_pg_dir)
	r2 = lsr(r2, #22)	/* 4MB page number		*/
	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
	r1 = add(r1,r2);
	loop0(1f,r26)

1:
	{
		memw(R1 ++ #4) = R0
	}:endloop0
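	/*
	 * The teardown loop above, as an illustrative C sketch (not part of
	 * the build; clear_idmap is a hypothetical name).  Note that it now
	 * indexes swapper_pg_dir through its virtual address:
	 *
	 *	static void clear_idmap(unsigned int *pgd, unsigned long phys_base,
	 *				unsigned long nentries)
	 *	{
	 *		unsigned int *pte = pgd + ((phys_base & 0xffc00000UL) >> 22);
	 *		unsigned long i;
	 *
	 *		for (i = 0; i < nentries; i++)
	 *			*pte++ = __HVM_PDE_S_INVALID;	// drop the VA=PA entries
	 *	}
	 */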

	r0 = r24
	r1 = #VM_TRANS_TYPE_TABLE
	call __vmnewmap

	/*  Go ahead and install the trap0 return so angel calls work  */
	r0.h = #hi(_K_provisional_vec)
	r0.l = #lo(_K_provisional_vec)
	call __vmsetvec

	/*
	 * OK, at this point we should start to be much more careful;
	 * we're going to enter C code and start touching memory
	 * in all sorts of places.
	 * This means:
	 *      SGP needs to be OK
	 *	Need to lock shared resources
	 *	A bunch of other things that will cause
	 *	all kinds of painful bugs
	 */

	/*
	 * The stack pointer should point to the init task's
	 * thread stack, which should have been declared in arch/init_task.c.
	 * So uhhhhh...
	 * It's accessible via the init_thread_union, which is a union
	 * of a thread_info struct and a stack; of course, the top
	 * of the stack is not for you.  The end of the stack
	 * is simply init_thread_union + THREAD_SIZE.
	 */

	{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
	{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }

	/*  initialize the register used to point to current_thread_info */
	/*  Fixme:  THREADINFO_REG can't be R2 because of that memset thing. */
	{r29 = add(r29,r0); THREADINFO_REG = r29; }
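	/*
	 * In C terms (illustrative sketch only):
	 *
	 *	struct thread_info *ti = (struct thread_info *)&init_thread_union;
	 *	unsigned long sp = (unsigned long)&init_thread_union + THREAD_SIZE;
	 *
	 * Because register reads within a packet see the pre-packet values,
	 * THREADINFO_REG is loaded with &init_thread_union itself while
	 * r29 (the stack pointer) gets init_thread_union + THREAD_SIZE.
	 */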

	/*  Hack:  zero bss; */
	{ r0.L = #LO(__bss_start);  r1 = #0; r2.l = #LO(__bss_stop); }
	{ r0.H = #HI(__bss_start);           r2.h = #HI(__bss_stop); }

	r2 = sub(r2,r0);
	call memset;
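	/*
	 * i.e., roughly memset(__bss_start, 0, __bss_stop - __bss_start)
	 * in C (sketch; __bss_start/__bss_stop are the linker-provided
	 * section bounds).
	 */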

	/*  Set PHYS_OFFSET; should be in R25 */
#ifdef CONFIG_HEXAGON_PHYS_OFFSET
	r0.l = #LO(__phys_offset);
	r0.h = #HI(__phys_offset);
	memw(r0) = r25;
#endif
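	/*
	 * In C terms this is simply (sketch only):
	 *
	 *	__phys_offset = phys_offset;	// 4MB-aligned load address saved in r25
	 *
	 * so later physical/virtual address translation can use the runtime
	 * load address rather than a compile-time constant.
	 */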

	/* Time to make the doughnuts.   */
	call start_kernel

	/*
	 * Should not reach here.
	 */
1:
	jump 1b

.p2align PAGE_SHIFT
ENTRY(external_cmdline_buffer)
        .fill _PAGE_SIZE,1,0

.data
.p2align PAGE_SHIFT
ENTRY(empty_zero_page)
        .fill _PAGE_SIZE,1,0