xref: /freebsd/sys/arm/arm/machdep.c (revision 06e374b774cd2bccf3c433fc2aa23667b9303cde)
1  /*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/
2  
3  /*-
4   * SPDX-License-Identifier: BSD-4-Clause
5   *
6   * Copyright (c) 2004 Olivier Houchard
7   * Copyright (c) 1994-1998 Mark Brinicombe.
8   * Copyright (c) 1994 Brini.
9   * All rights reserved.
10   *
11   * This code is derived from software written for Brini by Mark Brinicombe
12   *
13   * Redistribution and use in source and binary forms, with or without
14   * modification, are permitted provided that the following conditions
15   * are met:
16   * 1. Redistributions of source code must retain the above copyright
17   *    notice, this list of conditions and the following disclaimer.
18   * 2. Redistributions in binary form must reproduce the above copyright
19   *    notice, this list of conditions and the following disclaimer in the
20   *    documentation and/or other materials provided with the distribution.
21   * 3. All advertising materials mentioning features or use of this software
22   *    must display the following acknowledgement:
23   *	This product includes software developed by Mark Brinicombe
24   *	for the NetBSD Project.
25   * 4. The name of the company nor the name of the author may be used to
26   *    endorse or promote products derived from this software without specific
27   *    prior written permission.
28   *
29   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
30   * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31   * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32   * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
33   * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
34   * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
35   * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36   * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37   * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39   * SUCH DAMAGE.
40   *
41   * Machine dependent functions for kernel setup
42   *
43   * Created      : 17/09/94
44   * Updated	: 18/04/01 updated for new wscons
45   */
46  
47  #include "opt_ddb.h"
48  #include "opt_kstack_pages.h"
49  #include "opt_platform.h"
50  #include "opt_sched.h"
51  
52  #include <sys/param.h>
53  #include <sys/buf.h>
54  #include <sys/bus.h>
55  #include <sys/cons.h>
56  #include <sys/cpu.h>
57  #include <sys/devmap.h>
58  #include <sys/efi.h>
59  #include <sys/efi_map.h>
60  #include <sys/imgact.h>
61  #include <sys/kdb.h>
62  #include <sys/kernel.h>
63  #include <sys/ktr.h>
64  #include <sys/linker.h>
65  #include <sys/msgbuf.h>
66  #include <sys/physmem.h>
67  #include <sys/reboot.h>
68  #include <sys/rwlock.h>
69  #include <sys/sched.h>
70  #include <sys/syscallsubr.h>
71  #include <sys/sysent.h>
72  #include <sys/sysproto.h>
73  #include <sys/vmmeter.h>
74  
75  #include <vm/vm_object.h>
76  #include <vm/vm_page.h>
77  #include <vm/vm_pager.h>
78  
79  #include <machine/asm.h>
80  #include <machine/debug_monitor.h>
81  #include <machine/machdep.h>
82  #include <machine/metadata.h>
83  #include <machine/pcb.h>
84  #include <machine/platform.h>
85  #include <machine/sysarch.h>
86  #include <machine/undefined.h>
87  #include <machine/vfp.h>
88  #include <machine/vmparam.h>
89  
90  #ifdef FDT
91  #include <dev/fdt/fdt_common.h>
92  #include <machine/ofw_machdep.h>
93  #endif
94  
95  #ifdef DEBUG
96  #define	debugf(fmt, args...) printf(fmt, ##args)
97  #else
98  #define	debugf(fmt, args...)
99  #endif
100  
101  #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
102      defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
103      defined(COMPAT_FREEBSD9)
104  #error FreeBSD/arm doesn't provide compatibility with releases prior to 10
105  #endif
106  
107  
108  #if __ARM_ARCH < 7
109  #error FreeBSD requires ARMv7 or later
110  #endif
111  
112  struct pcpu __pcpu[MAXCPU];
113  struct pcpu *pcpup = &__pcpu[0];
114  
115  static struct trapframe proc0_tf;
116  uint32_t cpu_reset_address = 0;
117  int cold = 1;
118  vm_offset_t vector_page;
119  
120  /* The address at which the kernel was loaded.  Set early in initarm(). */
121  vm_paddr_t arm_physmem_kernaddr;
122  
123  extern int *end;
124  
125  #ifdef FDT
126  vm_paddr_t pmap_pa;
127  vm_offset_t systempage;
128  vm_offset_t irqstack;
129  vm_offset_t undstack;
130  vm_offset_t abtstack;
131  #endif /* FDT */
132  
133  #ifdef PLATFORM
134  static delay_func *delay_impl;
135  static void *delay_arg;
136  #endif
137  
138  #if defined(SOCDEV_PA)
139  #if !defined(SOCDEV_VA)
140  #error SOCDEV_PA defined, but not SOCDEV_VA
141  #endif
142  uintptr_t socdev_va = SOCDEV_VA;
143  #endif
144  
145  
146  struct kva_md_info kmi;
147  /*
148   * arm32_vector_init:
149   *
150   *	Initialize the vector page, and select whether or not to
151   *	relocate the vectors.
152   *
153   *	NOTE: We expect the vector page to be mapped at its expected
154   *	destination.
155   */
156  
157  extern unsigned int page0[], page0_data[];
158  void
159  arm_vector_init(vm_offset_t va, int which)
160  {
161  	unsigned int *vectors = (int *) va;
162  	unsigned int *vectors_data = vectors + (page0_data - page0);
163  	int vec;
164  
165  	/*
166  	 * Loop through the vectors we're taking over, and copy the
167  	 * vector's insn and data word.
168  	 */
169  	for (vec = 0; vec < ARM_NVEC; vec++) {
170  		if ((which & (1 << vec)) == 0) {
171  			/* Don't want to take over this vector. */
172  			continue;
173  		}
174  		vectors[vec] = page0[vec];
175  		vectors_data[vec] = page0_data[vec];
176  	}
177  
178  	/* Now sync the vectors. */
179  	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));
180  
181  	vector_page = va;
182  }
183  
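/*
 * Run from the SI_SUB_CPU SYSINIT registered below: identify the CPU,
 * set up the kernel VM submaps, print the real/available memory sizes,
 * initialize the buffer cache and pager, and record the top of thread0's
 * kernel stack in its PCB.
 */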
184  static void
185  cpu_startup(void *dummy)
186  {
187  	struct pcb *pcb = thread0.td_pcb;
188  	const unsigned int mbyte = 1024 * 1024;
189  
190  	identify_arm_cpu();
191  
192  	vm_ksubmap_init(&kmi);
193  
194  	/*
195  	 * Display the RAM layout.
196  	 */
197  	printf("real memory  = %ju (%ju MB)\n",
198  	    (uintmax_t)arm32_ptob(realmem),
199  	    (uintmax_t)arm32_ptob(realmem) / mbyte);
200  	printf("avail memory = %ju (%ju MB)\n",
201  	    (uintmax_t)arm32_ptob(vm_free_count()),
202  	    (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
203  	if (bootverbose) {
204  		physmem_print_tables();
205  		devmap_print_table();
206  	}
207  
208  	bufinit();
209  	vm_pager_bufferinit();
210  	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
211  	    USPACE_SVC_STACK_TOP;
212  	pmap_set_pcb_pagedir(kernel_pmap, pcb);
213  }
214  
215  SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
216  
217  /*
218   * Flush the D-cache for non-DMA I/O so that the I-cache can
219   * be made coherent later.
220   */
221  void
222  cpu_flush_dcache(void *ptr, size_t len)
223  {
224  
225  	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
226  }
227  
228  /* Get current clock frequency for the given cpu id. */
229  int
230  cpu_est_clockrate(int cpu_id, uint64_t *rate)
231  {
232  	struct pcpu *pc;
233  
234  	pc = pcpu_find(cpu_id);
235  	if (pc == NULL || rate == NULL)
236  		return (EINVAL);
237  
238  	if (pc->pc_clock == 0)
239  		return (EOPNOTSUPP);
240  
241  	*rate = pc->pc_clock;
242  
243  	return (0);
244  }
245  
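/*
 * Idle loop body: within a spinlock section, stop the periodic clock
 * interrupt while idle (unless "busy") and wait for the next interrupt
 * via cpu_sleep() when the scheduler has nothing runnable.
 */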
246  void
247  cpu_idle(int busy)
248  {
249  
250  	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
251  	spinlock_enter();
252  	if (!busy)
253  		cpu_idleclock();
254  	if (!sched_runnable())
255  		cpu_sleep(0);
256  	if (!busy)
257  		cpu_activeclock();
258  	spinlock_exit();
259  	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
260  }
261  
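/*
 * No special mechanism is provided to wake an idle CPU; returning 0
 * leaves it to the caller to do so, typically with an IPI.
 */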
262  int
263  cpu_idle_wakeup(int cpu)
264  {
265  
266  	return (0);
267  }
268  
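/*
 * Event timer setup: the boot CPU runs the full BSP initialization,
 * application processors only perform their per-CPU part (SMP kernels).
 */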
269  void
270  cpu_initclocks(void)
271  {
272  
273  #ifdef SMP
274  	if (PCPU_GET(cpuid) == 0)
275  		cpu_initclocks_bsp();
276  	else
277  		cpu_initclocks_ap();
278  #else
279  	cpu_initclocks_bsp();
280  #endif
281  }
282  
283  #ifdef PLATFORM
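/*
 * Platform timer code registers the implementation backing DELAY() here;
 * DELAY() must not be used before a timer has done so.
 */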
284  void
285  arm_set_delay(delay_func *impl, void *arg)
286  {
287  
288  	KASSERT(impl != NULL, ("No DELAY implementation"));
289  	delay_impl = impl;
290  	delay_arg = arg;
291  }
292  
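/*
 * Busy-wait for roughly 'usec' microseconds using the delay routine
 * registered via arm_set_delay().
 */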
293  void
294  DELAY(int usec)
295  {
296  
297  	TSENTER();
298  	delay_impl(usec, delay_arg);
299  	TSEXIT();
300  }
301  #endif
302  
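/*
 * Machine-dependent per-CPU init: start with an invalid MPIDR value;
 * the real value is recorded once the CPU comes online.
 */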
303  void
304  cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
305  {
306  
307  	pcpu->pc_mpidr = 0xffffffff;
308  }
309  
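/*
 * spinlock_enter()/spinlock_exit(): on the outermost entry disable IRQs
 * and save the previous interrupt state in the thread, track nesting with
 * a per-thread count, and restore the saved state only when the count
 * drops back to zero.
 */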
310  void
311  spinlock_enter(void)
312  {
313  	struct thread *td;
314  	register_t cspr;
315  
316  	td = curthread;
317  	if (td->td_md.md_spinlock_count == 0) {
318  		cspr = disable_interrupts(PSR_I);
319  		td->td_md.md_spinlock_count = 1;
320  		td->td_md.md_saved_cspr = cspr;
321  		critical_enter();
322  	} else
323  		td->td_md.md_spinlock_count++;
324  }
325  
326  void
327  spinlock_exit(void)
328  {
329  	struct thread *td;
330  	register_t cspr;
331  
332  	td = curthread;
333  	cspr = td->td_md.md_saved_cspr;
334  	td->td_md.md_spinlock_count--;
335  	if (td->td_md.md_spinlock_count == 0) {
336  		critical_exit();
337  		restore_interrupts(cspr);
338  	}
339  }
340  
341  /*
342   * Construct a PCB from a trapframe. This is called from kdb_trap() where
343   * we want to start a backtrace from the function that caused us to enter
344   * the debugger. We have the context in the trapframe, but base the trace
345   * on the PCB. The PCB doesn't have to be perfect, as long as it contains
346   * enough for a backtrace.
347   */
348  void
349  makectx(struct trapframe *tf, struct pcb *pcb)
350  {
351  	pcb->pcb_regs.sf_r4 = tf->tf_r4;
352  	pcb->pcb_regs.sf_r5 = tf->tf_r5;
353  	pcb->pcb_regs.sf_r6 = tf->tf_r6;
354  	pcb->pcb_regs.sf_r7 = tf->tf_r7;
355  	pcb->pcb_regs.sf_r8 = tf->tf_r8;
356  	pcb->pcb_regs.sf_r9 = tf->tf_r9;
357  	pcb->pcb_regs.sf_r10 = tf->tf_r10;
358  	pcb->pcb_regs.sf_r11 = tf->tf_r11;
359  	pcb->pcb_regs.sf_r12 = tf->tf_r12;
360  	pcb->pcb_regs.sf_pc = tf->tf_pc;
361  	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
362  	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
363  }
364  
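/*
 * Bootstrap per-CPU data for the boot processor: point curthread at
 * thread0 and record this CPU's MPIDR affinity bits.
 */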
365  void
366  pcpu0_init(void)
367  {
368  	set_curthread(&thread0);
369  	pcpu_init(pcpup, 0, sizeof(struct pcpu));
370  	pcpup->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
371  	PCPU_SET(curthread, &thread0);
372  }
373  
374  /*
375   * Initialize proc0
376   */
377  void
378  init_proc0(vm_offset_t kstack)
379  {
380  	proc_linkup0(&proc0, &thread0);
381  	thread0.td_kstack = kstack;
382  	thread0.td_kstack_pages = kstack_pages;
383  	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
384  	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
385  	thread0.td_pcb->pcb_flags = 0;
386  	thread0.td_pcb->pcb_fpflags = 0;
387  	thread0.td_pcb->pcb_vfpcpu = -1;
388  	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
389  	thread0.td_pcb->pcb_vfpsaved = &thread0.td_pcb->pcb_vfpstate;
390  	thread0.td_frame = &proc0_tf;
391  	pcpup->pc_curpcb = thread0.td_pcb;
392  }
393  
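/*
 * Point the banked stack pointers of the IRQ, abort and undefined CPU
 * modes at the given CPU's exception stacks; "(cpu + 1)" selects the top
 * of each stack since ARM stacks grow downwards.
 */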
394  void
395  set_stackptrs(int cpu)
396  {
397  
398  	set_stackptr(PSR_IRQ32_MODE,
399  	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
400  	set_stackptr(PSR_ABT32_MODE,
401  	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
402  	set_stackptr(PSR_UND32_MODE,
403  	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
404  }
405  
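/*
 * Initialize the kernel debugger and enter it at boot if the RB_KDB
 * boot flag was given.
 */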
406  static void
407  arm_kdb_init(void)
408  {
409  
410  	kdb_init();
411  #ifdef KDB
412  	if (boothowto & RB_KDB)
413  		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
414  #endif
415  }
416  
417  #ifdef FDT
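/*
 * Callbacks used below to feed device-tree memory information into the
 * physmem layer: register each physical memory region and exclude the
 * reserved regions from allocation and crash dumps.
 */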
418  static void
419  fdt_physmem_hardware_region_cb(const struct mem_region *mr, void *arg __unused)
420  {
421  	physmem_hardware_region(mr->mr_start, mr->mr_size);
422  }
423  
424  static void
425  fdt_physmem_exclude_region_cb(const struct mem_region *mr, void *arg __unused)
426  {
427  	physmem_exclude_region(mr->mr_start, mr->mr_size,
428  	    EXFLAG_NODUMP | EXFLAG_NOALLOC);
429  }
430  
431  void *
432  initarm(struct arm_boot_params *abp)
433  {
434  	vm_paddr_t lastaddr;
435  	vm_offset_t dtbp, kernelstack, dpcpu;
436  	char *env;
437  	int err_devmap;
438  	phandle_t root;
439  	char dts_version[255];
440  #ifdef EFI
441  	struct efi_map_header *efihdr;
442  #endif
443  
444  	/* get last allocated physical address */
445  	arm_physmem_kernaddr = abp->abp_physaddr;
446  	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;
447  
448  	set_cpufuncs();
449  	cpuinfo_init();
450  
451  	/*
452  	 * Find the dtb passed in by the boot loader.
453  	 */
454  	dtbp = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, vm_offset_t);
455  #if defined(FDT_DTB_STATIC)
456  	/*
457  	 * In case the device tree blob was not retrieved (from metadata) try
458  	 * to use the statically embedded one.
459  	 */
460  	if (dtbp == (vm_offset_t)NULL)
461  		dtbp = (vm_offset_t)&fdt_static_dtb;
462  #endif
463  
464  	if (OF_install(OFW_FDT, 0) == FALSE)
465  		panic("Cannot install FDT");
466  
467  	if (OF_init((void *)dtbp) != 0)
468  		panic("OF_init failed with the found device tree");
469  
470  #if defined(LINUX_BOOT_ABI)
471  	arm_parse_fdt_bootargs();
472  #endif
473  
474  #ifdef EFI
475  	efihdr = (struct efi_map_header *)preload_search_info(preload_kmdp,
476  	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
477  	if (efihdr != NULL) {
478  		efi_map_add_entries(efihdr);
479  		efi_map_exclude_entries(efihdr);
480  	} else
481  #endif
482  	{
483  		/* Grab physical memory regions information from device tree. */
484  		if (fdt_foreach_mem_region(fdt_physmem_hardware_region_cb,
485  		    NULL) != 0)
486  			panic("Cannot get physical memory regions");
487  
488  		/* Grab reserved memory regions information from device tree. */
489  		fdt_foreach_reserved_region(fdt_physmem_exclude_region_cb,
490  		    NULL);
491  	}
492  
493  	/*
494  	 * Set TEX remapping registers.
495  	 * Setup kernel page tables and switch to kernel L1 page table.
496  	 */
497  	pmap_set_tex();
498  	pmap_bootstrap_prepare(lastaddr);
499  
500  	/*
501  	 * If EARLY_PRINTF support is enabled, we need to re-establish the
502  	 * mapping after pmap_bootstrap_prepare() switches to new page tables.
503  	 * Note that we can only do the remapping if the VA is outside the
504  	 * kernel, now that we have real virtual (not VA=PA) mappings in effect.
505  	 * Early printf does not work between the time pmap_set_tex() does
506  	 * cp15_prrr_set() and this code remaps the VA.
507  	 */
508  #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
509  	pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024,
510  	    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
511  #endif
512  
513  	/*
514  	 * Now that proper page tables are installed, call cpu_setup() to enable
515  	 * instruction and data caches and other chip-specific features.
516  	 */
517  	cpu_setup();
518  
519  	/* Platform-specific initialisation */
520  	platform_probe_and_attach();
521  	pcpu0_init();
522  
523  	/* Do basic tuning, hz etc */
524  	init_param1();
525  
526  	/*
527  	 * Allocate a page for the system page mapped to 0xffff0000
528  	 * This page will just contain the system vectors and can be
529  	 * shared by all processes.
530  	 */
531  	systempage = pmap_preboot_get_pages(1);
532  
533  	/* Map the vector page. */
534  	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH,  1);
535  	if (virtual_end >= ARM_VECTORS_HIGH)
536  		virtual_end = ARM_VECTORS_HIGH - 1;
537  
538  	/* Allocate dynamic per-cpu area. */
539  	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
540  	dpcpu_init((void *)dpcpu, 0);
541  
542  	/* Allocate stacks for all modes */
543  	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
544  	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
545  	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU );
546  	kernelstack = pmap_preboot_get_vpages(kstack_pages);
547  
548  	/* Allocate message buffer. */
549  	msgbufp = (void *)pmap_preboot_get_vpages(
550  	    round_page(msgbufsize) / PAGE_SIZE);
551  
552  	/*
553  	 * Pages were allocated during the secondary bootstrap for the
554  	 * stacks for different CPU modes.
555  	 * We must now set the r13 registers in the different CPU modes to
556  	 * point to these stacks.
557  	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
558  	 * of the stack memory.
559  	 */
560  	set_stackptrs(0);
561  	mutex_init();
562  
563  	/* Establish static device mappings. */
564  	err_devmap = platform_devmap_init();
565  	devmap_bootstrap();
566  	vm_max_kernel_address = platform_lastaddr();
567  
568  	/*
569  	 * Only after the SOC registers block is mapped we can perform device
570  	 * tree fixups, as they may attempt to read parameters from hardware.
571  	 */
572  	OF_interpret("perform-fixup", 0);
573  	platform_gpio_init();
574  	cninit();
575  
576  	/*
577  	 * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(),
578  	 * undo it now that the normal console printf works.
579  	 */
580  #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
581  	pmap_kremove(SOCDEV_VA);
582  #endif
583  
584  	debugf("initarm: console initialized\n");
585  	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)preload_kmdp);
586  	debugf(" boothowto = 0x%08x\n", boothowto);
587  	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
588  	debugf(" lastaddr1: 0x%08x\n", lastaddr);
589  	arm_print_kenv();
590  
591  	env = kern_getenv("kernelname");
592  	if (env != NULL)
593  		strlcpy(kernelname, env, sizeof(kernelname));
594  
595  	if (err_devmap != 0)
596  		printf("WARNING: could not fully configure devmap, error=%d\n",
597  		    err_devmap);
598  
599  	platform_late_init();
600  
601  	root = OF_finddevice("/");
602  	if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
603  		if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
604  			printf("WARNING: DTB version is %s while kernel expects %s, "
605  			    "please update the DTB in the ESP\n",
606  			    dts_version,
607  			    LINUX_DTS_VERSION);
608  	} else {
609  		printf("WARNING: Cannot find freebsd,dts-version property, "
610  		    "cannot check DTB compliance\n");
611  	}
612  
613  	/*
614  	 * We must now clean the cache again....
615  	 * Cleaning may be done by reading new data to displace any
616  	 * dirty data in the cache. This will have happened in cpu_setttb()
617  	 * but since we are boot strapping the addresses used for the read
618  	 * may have just been remapped and thus the cache could be out
619  	 * of sync. A re-clean after the switch will cure this.
620  	 * After booting there are no gross relocations of the kernel thus
621  	 * this problem will not occur after initarm().
622  	 */
623  	/* Set stack for exception handlers */
624  	undefined_init();
625  	init_proc0(kernelstack);
626  	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
627  	enable_interrupts(PSR_A);
628  	pmap_bootstrap(0);
629  
630  	/* Exclude the kernel (and all the things we allocated which immediately
631  	 * follow the kernel) from the VM allocation pool but not from crash
632  	 * dumps.  virtual_avail is a global variable which tracks the kva we've
633  	 * "allocated" while setting up pmaps.
634  	 *
635  	 * Prepare the list of physical memory available to the vm subsystem.
636  	 */
637  	physmem_exclude_region(abp->abp_physaddr,
638  		pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
639  	physmem_init_kernel_globals();
640  
641  	init_param2(physmem);
642  	/* Init message buffer. */
643  	msgbufinit(msgbufp, msgbufsize);
644  	dbg_monitor_init();
645  	arm_kdb_init();
646  	/* Apply possible BP hardening. */
647  	cpuinfo_init_bp_hardening();
648  
649  #ifdef EFI
650  	if (boothowto & RB_VERBOSE) {
651  		if (efihdr != NULL)
652  			efi_map_print_entries(efihdr);
653  	}
654  #endif
655  
656  	return ((void *)STACKALIGN(thread0.td_pcb));
657  
658  }
659  #endif /* FDT */
660