/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/boot.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/efi_map.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/physmem.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/tslog.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/cpu.h>
#include <machine/fpe.h>
#include <machine/intr.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/riscvreg.h>
#include <machine/sbi.h>
#include <machine/trap.h>
#include <machine/vmparam.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef FDT
#include <contrib/libfdt/libfdt.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

int early_boot = 1;
int cold = 1;

#define	DTB_SIZE_MAX	(1024 * 1024)

struct kva_md_info kmi;

#define BOOT_HART_INVALID	0xffffffff
uint32_t boot_hart = BOOT_HART_INVALID;	/* The hart we booted on. */

cpuset_t all_harts;

extern int *end;

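/* Fallback static environment, used when the loader does not provide one. */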
static char static_kenv[PAGE_SIZE];

static void
cpu_startup(void *dummy)
{

	sbi_print_version();
	printcpuinfo(0);

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / (1024 * 1024));

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / (1024 * 1024));

	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

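/*
 * There is no way to wake a hart from the wfi in cpu_idle() other than an
 * interrupt, so report failure and let the caller send an IPI.
 */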
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
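	/*
	 * If nothing is runnable, make prior memory accesses visible and
	 * wait for an interrupt.
	 */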
	if (!sched_runnable())
		__asm __volatile(
		    "fence \n"
		    "wfi   \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/*
	 * Try to power down using the HSM SBI extension and fall back to a
	 * simple wfi loop.
	 */
	intr_disable();
	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0)
		sbi_hsm_hart_stop();
	for (;;)
		__asm __volatile("wfi");
	/* NOTREACHED */
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);

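	/* A zero pc_clock means no frequency was reported for this CPU. */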
	if (pc->pc_clock == 0)
		return (EOPNOTSUPP);

	*rate = pc->pc_clock;

	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
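	/* Nothing machine-dependent to initialize here. */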
}

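/*
 * The outermost spinlock_enter() disables interrupts and records the previous
 * sstatus interrupt-enable state in the thread; nested calls only bump the
 * count.  spinlock_exit() restores the saved state once the count reaches
 * zero again.
 */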
void
spinlock_enter(void)
{
	struct thread *td;
	register_t reg;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		reg = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_sstatus_ie = reg;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t sstatus_ie;

	td = curthread;
	sstatus_ie = td->td_md.md_saved_sstatus_ie;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(sstatus_ie);
	}
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s));

	pcb->pcb_ra = tf->tf_sepc;
	pcb->pcb_sp = tf->tf_sp;
	pcb->pcb_gp = tf->tf_gp;
	pcb->pcb_tp = tf->tf_tp;
}

static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

#ifdef FDT
static void
try_load_dtb(void)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, vm_offset_t);

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (!OF_install(OFW_FDT, 0))
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

/*
 * Fake up a boot descriptor table.
 */
static void
fake_preload_metadata(struct riscv_bootparams *rvbp)
{
	static uint32_t fake_preload[48];
	vm_offset_t lastaddr;
	size_t fake_size, dtb_size;

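/*
 * Helpers that append (type, size, payload) records to the fake module
 * metadata, mimicking the layout produced by loader(8).
 */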
#define PRELOAD_PUSH_VALUE(type, value) do {			\
	*(type *)((char *)fake_preload + fake_size) = (value);	\
	fake_size += sizeof(type);				\
} while (0)

#define PRELOAD_PUSH_STRING(str) do {				\
	uint32_t ssize;						\
	ssize = strlen(str) + 1;				\
	PRELOAD_PUSH_VALUE(uint32_t, ssize);			\
	strcpy(((char *)fake_preload + fake_size), str);	\
	fake_size += ssize;					\
	fake_size = roundup(fake_size, sizeof(u_long));		\
} while (0)

	fake_size = 0;
	lastaddr = (vm_offset_t)&end;

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_NAME);
	PRELOAD_PUSH_STRING("kernel");
	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_TYPE);
	PRELOAD_PUSH_STRING(preload_kerntype);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_ADDR);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(uint64_t, KERNBASE);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_SIZE);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
	PRELOAD_PUSH_VALUE(uint64_t, (size_t)((vm_offset_t)&end - KERNBASE));

	/*
	 * Copy the DTB to KVA space. We are able to dereference the physical
	 * address due to the identity map created in locore.
	 */
	lastaddr = roundup(lastaddr, sizeof(int));
	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);
	dtb_size = fdt_totalsize(rvbp->dtbp_phys);
	memmove((void *)lastaddr, (const void *)rvbp->dtbp_phys, dtb_size);
	lastaddr = roundup(lastaddr + dtb_size, sizeof(int));

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_KERNEND);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_HOWTO);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(int));
	PRELOAD_PUSH_VALUE(int, RB_VERBOSE);

	/* End marker */
	PRELOAD_PUSH_VALUE(uint32_t, 0);
	PRELOAD_PUSH_VALUE(uint32_t, 0);
	preload_metadata = (caddr_t)fake_preload;

	/* Check if the bootloader clobbered part of the kernel with the DTB. */
	KASSERT(rvbp->dtbp_phys + dtb_size <= rvbp->kern_phys ||
		rvbp->dtbp_phys >= rvbp->kern_phys + (lastaddr - KERNBASE),
	    ("FDT (%lx-%lx) and kernel (%lx-%lx) overlap", rvbp->dtbp_phys,
		rvbp->dtbp_phys + dtb_size, rvbp->kern_phys,
		rvbp->kern_phys + (lastaddr - KERNBASE)));
	KASSERT(fake_size < sizeof(fake_preload),
	    ("Too many fake_preload items"));

	if (boothowto & RB_VERBOSE)
		printf("FDT phys (%lx-%lx), kernel phys (%lx-%lx)\n",
		    rvbp->dtbp_phys, rvbp->dtbp_phys + dtb_size,
		    rvbp->kern_phys, rvbp->kern_phys + (lastaddr - KERNBASE));
}

/* Support for FDT configurations only. */
CTASSERT(FDT);

static void
parse_boot_hartid(void)
{
	uint64_t *mdp;
#ifdef FDT
	phandle_t chosen;
	uint32_t hart;
#endif

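	/* Prefer a hart ID supplied in the boot metadata. */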
	mdp = (uint64_t *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_BOOT_HARTID);
	if (mdp != NULL && *mdp < UINT32_MAX) {
		boot_hart = (uint32_t)*mdp;
		goto out;
	}

#ifdef FDT
	/*
	 * Deprecated:
	 *
	 * Look for the boot hart ID. This was either passed in directly from
	 * the SBI firmware and handled by locore, or was stored in the device
	 * tree by an earlier boot stage.
	 */
	chosen = OF_finddevice("/chosen");
	if (OF_getencprop(chosen, "boot-hartid", &hart, sizeof(hart)) != -1) {
		boot_hart = hart;
	}
#endif

	/* We failed... */
	if (boot_hart == BOOT_HART_INVALID) {
		panic("Boot hart ID was not properly set");
	}

out:
	PCPU_SET(hart, boot_hart);
}

#ifdef FDT
static void
parse_fdt_bootargs(void)
{
	char bootargs[512];

	bootargs[sizeof(bootargs) - 1] = '\0';
	if (fdt_get_chosen_bootargs(bootargs, sizeof(bootargs) - 1) == 0) {
		boothowto |= boot_parse_cmdline(bootargs);
	}
}
#endif

static vm_offset_t
parse_metadata(void)
{
	vm_offset_t lastaddr;
#ifdef DDB
	vm_offset_t ksym_start, ksym_end;
#endif
	char *kern_envp;

	/* Initialize preload_kmdp */
	preload_initkmdp(true);

	/* Read the boot metadata */
	boothowto = MD_FETCH(preload_kmdp, MODINFOMD_HOWTO, int);
	lastaddr = MD_FETCH(preload_kmdp, MODINFOMD_KERNEND, vm_offset_t);
	kern_envp = MD_FETCH(preload_kmdp, MODINFOMD_ENVP, char *);
	if (kern_envp != NULL)
		init_static_kenv(kern_envp, 0);
	else
		init_static_kenv(static_kenv, sizeof(static_kenv));
#ifdef DDB
	ksym_start = MD_FETCH(preload_kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(preload_kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end, 0);
#endif
#ifdef FDT
	try_load_dtb();
	if (kern_envp == NULL)
		parse_fdt_bootargs();
#endif
	parse_boot_hartid();

	return (lastaddr);
}

#ifdef FDT
static void
fdt_physmem_hardware_region_cb(const struct mem_region *mr, void *arg)
{
	bool *first = arg;

	physmem_hardware_region(mr->mr_start, mr->mr_size);

	if (*first) {
		/*
		 * XXX: Unconditionally exclude the lowest 2MB of physical
		 * memory, as this area is assumed to contain the SBI firmware,
		 * and this is not properly reserved in all cases (e.g. in
		 * older firmware like BBL).
		 *
		 * This is a little fragile, but it is consistent with the
		 * platforms we support so far.
		 *
		 * TODO: remove this when all regular boot methods properly
		 * report their reserved memory in the device tree.
		 */
		physmem_exclude_region(mr->mr_start, L2_SIZE,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
		*first = false;
	}
}

static void
fdt_physmem_exclude_region_cb(const struct mem_region *mr, void *arg __unused)
{
	physmem_exclude_region(mr->mr_start, mr->mr_size,
	    EXFLAG_NODUMP | EXFLAG_NOALLOC);
}
#endif

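/*
 * Callback for efi_map_foreach_entry(): if the first map entry is
 * BootServicesData, exclude up to the first L2_SIZE of it from allocation,
 * as it may be covered by OpenSBI's protective PMP entry (see the comment
 * in initriscv() below).
 */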
static void
efi_exclude_sbi_pmp_cb(struct efi_md *p, void *argp)
{
	bool *first = (bool *)argp;

	if (!*first)
		return;

	*first = false;
	if (p->md_type == EFI_MD_TYPE_BS_DATA) {
		physmem_exclude_region(p->md_phys,
		    min(p->md_pages * EFI_PAGE_SIZE, L2_SIZE),
		    EXFLAG_NOALLOC);
	}
}

void
initriscv(struct riscv_bootparams *rvbp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
	vm_offset_t lastaddr;
	vm_size_t kernlen;
	bool first;
	char *env;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Set the pcpu data; this is needed by pmap_bootstrap. */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/* Set the pcpu pointer */
	__asm __volatile("mv tp, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Initialize SBI interface. */
	sbi_init();

	/* Parse the boot metadata. */
	if (rvbp->modulep != 0) {
		preload_metadata = (caddr_t)rvbp->modulep;
	} else {
		fake_preload_metadata(rvbp);
	}
	lastaddr = parse_metadata();

	efihdr = (struct efi_map_header *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		efi_map_add_entries(efihdr);
		efi_map_exclude_entries(efihdr);

		/*
		 * OpenSBI uses the first PMP entry to prevent buggy supervisor
		 * software from overwriting the firmware. However, this
		 * region may not be properly marked as reserved, leading
		 * to an access violation exception whenever the kernel
		 * attempts to write to a page from that region.
		 *
		 * Fix this by excluding the first EFI memory map entry
		 * if it is marked as "BootServicesData".
		 */
		first = true;
		efi_map_foreach_entry(efihdr, efi_exclude_sbi_pmp_cb, &first);
	}
#ifdef FDT
	else {
		/* Exclude reserved memory specified by the device tree. */
		fdt_foreach_reserved_mem(fdt_physmem_exclude_region_cb, NULL);

		/* Grab physical memory region information from the device tree. */
		first = true;
		if (fdt_foreach_mem_region(fdt_physmem_hardware_region_cb,
		    &first) != 0)
			panic("Cannot get physical memory regions");

	}
#endif

	/*
	 * Identify CPU/ISA features.
	 */
	identify_cpu(0);

	/* Do basic tuning, hz etc */
	init_param1();

	/* Bootstrap enough of pmap to enter the kernel proper */
	kernlen = (lastaddr - KERNBASE);
	pmap_bootstrap(rvbp->kern_phys, kernlen);

	physmem_init_kernel_globals();

	cninit();

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(rvbp->kern_stack);

	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);
	kdb_init();
#ifdef KDB
	if ((boothowto & RB_KDB) != 0)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			efi_map_print_entries(efihdr);
		physmem_print_tables();
	}

	early_boot = 0;

	if (bootverbose && kstack_pages != KSTACK_PAGES)
		printf("kern.kstack_pages = %d ignored for thread0\n",
		    kstack_pages);

	TSEXIT();
}