/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/machparam.h>
#include <sys/x86_archext.h>
#include <sys/systm.h>
#include <sys/mach_mmu.h>

#include <sys/multiboot.h>

extern multiboot_header_t mb_header;
extern int have_cpuid(void);
extern uint32_t get_cpuid_edx(uint32_t *eax);

#include <sys/inttypes.h>
#include <sys/bootinfo.h>
#include <sys/boot_console.h>

#include "dboot_printf.h"
#include "dboot_xboot.h"
#include "dboot_elfload.h"
/*
 * This file contains code that runs to transition us from either a multiboot
 * compliant loader (32 bit non-paging) or Xen domain loader to regular kernel
 * execution. Its task is to set up the kernel memory image and page tables.
 *
 * The code executes as:
 *	- 32 bits under GRUB (for 32 or 64 bit Solaris)
 *	- 32 bit program for Xen 32 bit
 *	- 64 bit program for Xen 64 bit (at least that's my assumption for now)
 *
 * Under Xen, we must create mappings for any memory beyond the initial
 * start of day allocation (such as the kernel itself).
 *
 * When not under Xen, the mapping between maddr_t and paddr_t is 1:1.
 * Since we are running without paging, all such memory is accessible.
 */

/*
 * Standard bits used in PTE (page level) and PTP (internal levels)
 */
x86pte_t ptp_bits = PT_VALID | PT_REF | PT_USER | PT_WRITABLE;
x86pte_t pte_bits = PT_VALID | PT_REF | PT_MOD | PT_NOCONSIST | PT_WRITABLE;
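
/*
 * For reference, the hardware-defined low bits of an x86 pte are:
 * bit 0 Present, bit 1 Writable, bit 2 User, bit 3 Write-through,
 * bit 4 Cache disable, bit 5 Accessed, bit 6 Dirty, bit 7 Page size
 * (in non-leaf entries), bit 8 Global. PT_NOCONSIST is a Solaris
 * software-defined bit, not a hardware flag.
 */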

/*
 * This is the target (physical) address at which the kernel text and data
 * nucleus pages will be unpacked. On Xen this is actually a virtual address.
 */
paddr_t ktext_phys;
uint32_t ksize = 2 * FOUR_MEG;	/* kernel nucleus is 8Meg */

static uint64_t target_kernel_text;	/* value to use for KERNEL_TEXT */

/*
 * The stack is set up in assembler before entering startup_kernel().
 */
char stack_space[STACK_SIZE];

/*
 * Used to track physical memory allocation
 */
static paddr_t next_avail_addr = 0;

multiboot_info_t *mb_info;

/*
 * This contains information passed to the kernel
 */
struct xboot_info boot_info[2];	/* extra space to fix alignment for amd64 */
struct xboot_info *bi;

/*
 * Page table and memory stuff.
 */
static uint64_t max_mem;			/* maximum memory address */

/*
 * Information about processor MMU
 */
int amd64_support = 0;
int largepage_support = 0;
int pae_support = 0;
int pge_support = 0;
int NX_support = 0;

/*
 * Low 32 bits of kernel entry address passed back to assembler.
 * When running a 64 bit kernel, the high 32 bits are 0xffffffff.
 */
uint32_t entry_addr_low;

/*
 * Memlists for the kernel. We shouldn't need a lot of these.
 */
#define	MAX_MEMLIST (10)
struct boot_memlist memlists[MAX_MEMLIST];
uint_t memlists_used = 0;

#define	MAX_MODULES (10)
struct boot_modules modules[MAX_MODULES];
uint_t modules_used = 0;

/*
 * Debugging flags
 */
uint_t prom_debug = 0;
uint_t map_debug = 0;

/*
 * The Xen/Grub specific code builds the initial memlists. This code does
 * the sort/merge/link needed for final use.
 */
static void
sort_physinstall(void)
{
	int i;
	int j;
	struct boot_memlist tmp;

	/*
	 * Now sort the memlists, in case they weren't in order.
	 * Yeah, this is a bubble sort; small, simple and easy to get right.
	 */
	DBG_MSG("Sorting phys-installed list\n");
	for (j = memlists_used - 1; j > 0; --j) {
		for (i = 0; i < j; ++i) {
			if (memlists[i].addr < memlists[i + 1].addr)
				continue;
			tmp = memlists[i];
			memlists[i] = memlists[i + 1];
			memlists[i + 1] = tmp;
		}
	}

	/*
	 * Merge any memlists that don't have holes between them.
	 */
	for (i = 0; i < memlists_used - 1; ++i) {
		if (memlists[i].addr + memlists[i].size != memlists[i + 1].addr)
			continue;

		if (prom_debug)
			dboot_printf(
			    "merging mem segs %" PRIx64 "...%" PRIx64
			    " w/ %" PRIx64 "...%" PRIx64 "\n",
			    memlists[i].addr,
			    memlists[i].addr + memlists[i].size,
			    memlists[i + 1].addr,
			    memlists[i + 1].addr + memlists[i + 1].size);

		memlists[i].size += memlists[i + 1].size;
		for (j = i + 1; j < memlists_used - 1; ++j)
			memlists[j] = memlists[j + 1];
		--memlists_used;
		DBG(memlists_used);
		--i;	/* after merging we need to reexamine, so do this */
	}

	if (prom_debug) {
		dboot_printf("\nFinal memlists:\n");
		for (i = 0; i < memlists_used; ++i) {
			dboot_printf("\t%d: addr=%" PRIx64 " size=%"
			    PRIx64 "\n", i, memlists[i].addr, memlists[i].size);
		}
	}

	/*
	 * link together the memlists with native size pointers
	 */
	memlists[0].next = 0;
	memlists[0].prev = 0;
	for (i = 1; i < memlists_used; ++i) {
		memlists[i].prev = (native_ptr_t)(uintptr_t)(memlists + i - 1);
		memlists[i].next = 0;
		memlists[i - 1].next = (native_ptr_t)(uintptr_t)(memlists + i);
	}
	bi->bi_phys_install = (native_ptr_t)(uintptr_t)memlists;
	DBG(bi->bi_phys_install);
}
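
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * A minimal host-side sketch of the sort/merge technique used above,
 * on a hypothetical range type: bubble sort by base address, then
 * coalesce any entry whose end equals the next entry's base.
 */
struct range {
	uint64_t addr;
	uint64_t size;
};

static void
sort_and_merge(struct range *r, int *used)
{
	struct range tmp;
	int i, j;

	for (j = *used - 1; j > 0; --j) {	/* bubble sort by addr */
		for (i = 0; i < j; ++i) {
			if (r[i].addr < r[i + 1].addr)
				continue;
			tmp = r[i];
			r[i] = r[i + 1];
			r[i + 1] = tmp;
		}
	}

	for (i = 0; i < *used - 1; ++i) {	/* merge adjacent ranges */
		if (r[i].addr + r[i].size != r[i + 1].addr)
			continue;
		r[i].size += r[i + 1].size;
		for (j = i + 1; j < *used - 1; ++j)
			r[j] = r[j + 1];
		--*used;
		--i;	/* reexamine this slot after the merge */
	}
}
#endif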

x86pte_t
get_pteval(paddr_t table, uint_t index)
{
	if (pae_support)
		return (((x86pte_t *)(uintptr_t)table)[index]);
	return (((x86pte32_t *)(uintptr_t)table)[index]);
}

/*ARGSUSED*/
void
set_pteval(paddr_t table, uint_t index, uint_t level, x86pte_t pteval)
{
	uintptr_t tab_addr = (uintptr_t)table;

	if (pae_support)
		((x86pte_t *)tab_addr)[index] = pteval;
	else
		((x86pte32_t *)tab_addr)[index] = (x86pte32_t)pteval;
	if (level == top_level && level == 2)
		reload_cr3();
}

paddr_t
make_ptable(x86pte_t *pteval, uint_t level)
{
	paddr_t new_table = (paddr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);

	if (level == top_level && level == 2)
		*pteval = pa_to_ma((uintptr_t)new_table) | PT_VALID;
	else
		*pteval = pa_to_ma((uintptr_t)new_table) | ptp_bits;

	if (map_debug)
		dboot_printf("new page table lvl=%d paddr=0x%lx ptp=0x%"
		    PRIx64 "\n", level, (ulong_t)new_table, *pteval);
	return (new_table);
}

x86pte_t *
map_pte(paddr_t table, uint_t index)
{
	return ((x86pte_t *)(uintptr_t)(table + index * pte_size));
}
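
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * map_pte() takes a table and an index; the index itself is derived
 * from the virtual address. A minimal sketch of that derivation,
 * assuming the shift_amt/ptes_per_table globals configured in
 * startup_kernel(): each level's index is the VA shifted right by
 * that level's shift amount, masked to one table's worth of entries.
 */
static uint_t
va_to_index(uint64_t va, uint_t level)
{
	return ((va >> shift_amt[level]) & (ptes_per_table - 1));
}
#endif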

#if 0	/* useful if debugging */
/*
 * dump out the contents of page tables...
 */
static void
dump_tables(void)
{
	uint_t save_index[4];	/* for recursion */
	char *save_table[4];	/* for recursion */
	uint_t	l;
	uint64_t va;
	uint64_t pgsize;
	int index;
	int i;
	x86pte_t pteval;
	char *table;
	static char *tablist = "\t\t\t";
	char *tabs = tablist + 3 - top_level;
	uint64_t pa, pa1;

	dboot_printf("Finished pagetables:\n");
	table = (char *)top_page_table;
	l = top_level;
	va = 0;
	for (index = 0; index < ptes_per_table; ++index) {
		pgsize = 1ull << shift_amt[l];
		if (pae_support)
			pteval = ((x86pte_t *)table)[index];
		else
			pteval = ((x86pte32_t *)table)[index];
		if (pteval == 0)
			goto next_entry;

		dboot_printf("%s %lx[0x%x] = %" PRIx64 ", va=%" PRIx64,
		    tabs + l, (ulong_t)table, index, (uint64_t)pteval, va);
		pa = ma_to_pa(pteval & MMU_PAGEMASK);
		dboot_printf(" physaddr=%" PRIx64 "\n", pa);

		/*
		 * Don't try to walk hypervisor private pagetables
		 */
		if ((l > 1 || (l == 1 && (pteval & PT_PAGESIZE) == 0))) {
			save_table[l] = table;
			save_index[l] = index;
			--l;
			index = -1;
			table = (char *)(uintptr_t)
			    ma_to_pa(pteval & MMU_PAGEMASK);
			goto recursion;
		}

		/*
		 * shorten dump for consecutive mappings
		 */
		for (i = 1; index + i < ptes_per_table; ++i) {
			if (pae_support)
				pteval = ((x86pte_t *)table)[index + i];
			else
				pteval = ((x86pte32_t *)table)[index + i];
			if (pteval == 0)
				break;
			pa1 = ma_to_pa(pteval & MMU_PAGEMASK);
			if (pa1 != pa + i * pgsize)
				break;
		}
		if (i > 2) {
			dboot_printf("%s...\n", tabs + l);
			va += pgsize * (i - 2);
			index += i - 2;
		}
next_entry:
		va += pgsize;
		if (l == 3 && index == 256)	/* VA hole */
			va = 0xffff800000000000ull;
recursion:
		;
	}
	if (l < top_level) {
		++l;
		index = save_index[l];
		table = save_table[l];
		goto recursion;
	}
}
#endif

/*
 * Add a mapping for the physical page at the given virtual address.
 */
static void
map_pa_at_va(paddr_t pa, native_ptr_t va, uint_t level)
{
	x86pte_t *ptep;
	x86pte_t pteval;

	pteval = pa_to_ma(pa) | pte_bits;
	if (level > 0)
		pteval |= PT_PAGESIZE;
	if (va >= target_kernel_text && pge_support)
		pteval |= PT_GLOBAL;

	if (map_debug && pa != va)
		dboot_printf("mapping pa=0x%" PRIx64 " va=0x%" PRIx64
		    " pte=0x%" PRIx64 " l=%d\n",
		    (uint64_t)pa, (uint64_t)va, pteval, level);

	/*
	 * Find the pte that will map this address. This creates any
	 * missing intermediate level page tables
	 */
	ptep = find_pte(va, NULL, level, 0);

	/*
	 * On Xen we must use hypervisor calls to modify the PTE, since
	 * paging is active. On real hardware we just write to the pagetables
	 * which aren't in use yet.
	 */
	if (va < 1024 * 1024)
		pteval |= PT_NOCACHE;		/* for video RAM */
	if (pae_support)
		*ptep = pteval;
	else
		*((x86pte32_t *)ptep) = (x86pte32_t)pteval;
}

/*
 * During memory allocation, find the highest address not used yet.
 */
static void
check_higher(paddr_t a)
{
	if (a < next_avail_addr)
		return;
	next_avail_addr = RNDUP(a + 1, MMU_PAGESIZE);
	DBG(next_avail_addr);
}
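
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * RNDUP() rounds up to a power of two boundary, e.g. with 4K pages
 * round_up(0x1001, 0x1000) == 0x2000; the same mask trick is used to
 * 16-byte align the boot_info pointer in startup_kernel(). A sketch
 * of the idiom, assuming RNDUP is the usual add-and-mask macro:
 */
static uint64_t
round_up(uint64_t val, uint64_t align)	/* align must be a power of 2 */
{
	return ((val + align - 1) & ~(align - 1));
}
#endif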

/*
 * Walk through the module information finding the last used address.
 * The first available address will become the top level page table.
 *
 * We then build the phys_install memlist from the multiboot information.
 */
static void
init_mem_alloc(void)
{
	mb_memory_map_t *mmap;
	mb_module_t *mod;
	uint64_t start;
	uint64_t end;
	uint64_t page_offset = MMU_PAGEOFFSET;	/* needs to be 64 bits */
	extern char _end[];
	int i;

	DBG_MSG("Entered init_mem_alloc()\n");
	DBG((uintptr_t)mb_info);

	/*
	 * search the modules to find the last used address
	 * we'll build the module list while we're walking through here
	 */
	DBG_MSG("\nFinding Modules\n");
	check_higher((paddr_t)&_end);
	for (mod = (mb_module_t *)(mb_info->mods_addr), i = 0;
	    i < mb_info->mods_count;
	    ++mod, ++i) {
		if (prom_debug) {
			dboot_printf("\tmodule #%d: %s at: 0x%lx, end 0x%lx\n",
			    i, (char *)(mod->mod_name),
			    (ulong_t)mod->mod_start, (ulong_t)mod->mod_end);
		}
		modules[i].bm_addr = mod->mod_start;
		modules[i].bm_size = mod->mod_end - mod->mod_start;

		check_higher(mod->mod_end);
	}
	bi->bi_modules = (native_ptr_t)modules;
	DBG(bi->bi_modules);
	bi->bi_module_cnt = mb_info->mods_count;
	DBG(bi->bi_module_cnt);

	/*
	 * Walk through the memory map from multiboot and build our memlist
	 * structures. Note these will have native format pointers.
	 */
	DBG_MSG("\nFinding Memory Map\n");
	DBG(mb_info->flags);
	max_mem = 0;
	if (mb_info->flags & 0x40) {
		DBG(mb_info->mmap_addr);
		DBG(mb_info->mmap_length);
		check_higher(mb_info->mmap_addr + mb_info->mmap_length);

		for (mmap = (mb_memory_map_t *)mb_info->mmap_addr;
		    (uint32_t)mmap < mb_info->mmap_addr + mb_info->mmap_length;
		    mmap = (mb_memory_map_t *)((uint32_t)mmap + mmap->size
		    + sizeof (mmap->size))) {

			start = ((uint64_t)mmap->base_addr_high << 32) +
			    mmap->base_addr_low;
			end = start + ((uint64_t)mmap->length_high << 32) +
			    mmap->length_low;

			if (prom_debug) {
				dboot_printf("\ttype: %d %" PRIx64 "..%"
				    PRIx64 "\n", mmap->type, start, end);
			}

			/*
			 * only type 1 is usable RAM
			 */
			if (mmap->type != 1)
				continue;

			/*
			 * page align start and end
			 */
			start = (start + page_offset) & ~page_offset;
			end &= ~page_offset;
			if (end <= start)
				continue;

			if (end > max_mem)
				max_mem = end;

			memlists[memlists_used].addr = start;
			memlists[memlists_used].size = end - start;
			++memlists_used;	/* no overflow check */
		}
	} else if (mb_info->flags & 0x01) {
		DBG(mb_info->mem_lower);
		memlists[memlists_used].addr = 0;
		memlists[memlists_used].size = mb_info->mem_lower * 1024;
		++memlists_used;
		DBG(mb_info->mem_upper);
		memlists[memlists_used].addr = 1024 * 1024;
		memlists[memlists_used].size = mb_info->mem_upper * 1024;
		++memlists_used;
	} else {
		dboot_panic("No memory info from boot loader!!!\n");
	}

	check_higher(bi->bi_cmdline);

	/*
	 * finish processing the physinstall list
	 */
	sort_physinstall();
}
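
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * The multiboot memory map is a packed buffer of variable length
 * records, and each record's size field does not count the size field
 * itself, hence the "+ sizeof (mmap->size)" stride above. A minimal
 * walker showing just the traversal and the 64 bit address assembly:
 */
static void
walk_mmap(uint32_t mmap_addr, uint32_t mmap_length)
{
	mb_memory_map_t *mmap;
	uint64_t base;
	uint64_t len;

	for (mmap = (mb_memory_map_t *)mmap_addr;
	    (uint32_t)mmap < mmap_addr + mmap_length;
	    mmap = (mb_memory_map_t *)((uint32_t)mmap + mmap->size
	    + sizeof (mmap->size))) {
		base = ((uint64_t)mmap->base_addr_high << 32) +
		    mmap->base_addr_low;
		len = ((uint64_t)mmap->length_high << 32) +
		    mmap->length_low;
		/* type 1 is usable RAM; everything else is reserved */
		dboot_printf("type %d: %" PRIx64 " len %" PRIx64 "\n",
		    mmap->type, base, len);
	}
}
#endif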

/*
 * Simple memory allocator, allocates aligned physical memory.
 * Note that startup_kernel() only allocates memory, never frees.
 * Memory usage just grows in an upward direction.
 */
static void *
do_mem_alloc(uint32_t size, uint32_t align)
{
	uint_t i;
	uint64_t best;
	uint64_t start;
	uint64_t end;

	/*
	 * make sure size is a multiple of pagesize
	 */
	size = RNDUP(size, MMU_PAGESIZE);
	next_avail_addr = RNDUP(next_avail_addr, align);

	/*
	 * a really large bootarchive that causes you to run out of memory
	 * may cause this to blow up
	 */
	/* LINTED E_UNEXPECTED_UINT_PROMOTION */
	best = (uint64_t)-size;
	for (i = 0; i < memlists_used; ++i) {
		start = memlists[i].addr;
		end = start + memlists[i].size;

		/*
		 * did we find the desired address?
		 */
		if (start <= next_avail_addr && next_avail_addr + size <= end) {
			best = next_avail_addr;
			goto done;
		}

		/*
		 * if not, is this address the best so far?
		 */
		if (start > next_avail_addr && start < best &&
		    RNDUP(start, align) + size <= end)
			best = RNDUP(start, align);
	}

	/*
	 * We didn't find exactly the address we wanted, due to going off the
	 * end of a memory region. Return the best found memory address.
	 */
done:
	next_avail_addr = best + size;
	(void) memset((void *)(uintptr_t)best, 0, size);
	return ((void *)(uintptr_t)best);
}

void *
mem_alloc(uint32_t size)
{
	return (do_mem_alloc(size, MMU_PAGESIZE));
}
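
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * mem_alloc() is a bump allocator: each call advances next_avail_addr
 * and nothing is ever freed, so allocations pack upwards through the
 * memlists. For example, two page sized allocations normally land in
 * consecutive pages (unless a memlist hole intervenes):
 */
static void
mem_alloc_example(void)
{
	void *a = mem_alloc(MMU_PAGESIZE);	/* some page P */
	void *b = mem_alloc(MMU_PAGESIZE);	/* normally P + 4K */

	dboot_printf("a=0x%lx b=0x%lx\n",
	    (ulong_t)(uintptr_t)a, (ulong_t)(uintptr_t)b);
}
#endif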


/*
 * Build page tables to map all of memory used so far as well as the kernel.
 */
static void
build_page_tables(void)
{
	uint32_t psize;
	uint32_t level;
	uint32_t off;
	uint32_t i;
	uint64_t start;
	uint64_t end;
	uint64_t next_mapping;

	/*
	 * If we're not using Xen, we need to create the top level pagetable.
	 */
	top_page_table = (paddr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);
	DBG((uintptr_t)top_page_table);

	/*
	 * Determine if we'll use large mappings for kernel, then map it.
	 */
	if (largepage_support) {
		psize = lpagesize;
		level = 1;
	} else {
		psize = MMU_PAGESIZE;
		level = 0;
	}

	DBG_MSG("Mapping kernel\n");
	DBG(ktext_phys);
	DBG(target_kernel_text);
	DBG(ksize);
	DBG(psize);
	for (off = 0; off < ksize; off += psize)
		map_pa_at_va(ktext_phys + off, target_kernel_text + off, level);

	/*
	 * The kernel will need a 1 page window to work with page tables
	 */
	bi->bi_pt_window = (uintptr_t)mem_alloc(MMU_PAGESIZE);
	DBG(bi->bi_pt_window);
	bi->bi_pte_to_pt_window =
	    (uintptr_t)find_pte(bi->bi_pt_window, NULL, 0, 0);
	DBG(bi->bi_pte_to_pt_window);

	/*
	 * Under multiboot we need 1:1 mappings for all of low memory, which
	 * includes our pagetables. The following code works because our
	 * simple memory allocator only grows usage in an upwards direction.
	 *
	 * We map *all* possible addresses below 1 Meg, since things like
	 * the video RAM are down there.
	 *
	 * Skip memory between 1M and _start; this acts as a reserve
	 * of memory usable for DMA.
	 */
	next_mapping = (uintptr_t)_start & MMU_PAGEMASK;
	if (map_debug)
		dboot_printf("1:1 map pa=0..1Meg\n");
	for (start = 0; start < 1024 * 1024; start += MMU_PAGESIZE)
		map_pa_at_va(start, start, 0);

	for (i = 0; i < memlists_used; ++i) {
		start = memlists[i].addr;
		if (start < next_mapping)
			start = next_mapping;

		end = start + memlists[i].size;

		if (map_debug)
			dboot_printf("1:1 map pa=%" PRIx64 "..%" PRIx64 "\n",
			    start, end);
		while (start < end && start < next_avail_addr) {
			map_pa_at_va(start, start, 0);
			start += MMU_PAGESIZE;
		}
	}

	DBG_MSG("\nPage tables constructed\n");
}
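
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * A sketch of what the large page choice above buys: mapping the
 * 8 Meg nucleus takes ksize / psize entries, i.e. 4 entries with
 * 2 Meg PAE large pages, 2 with 4 Meg non-PAE large pages, or 2048
 * with 4K pages.
 */
static uint32_t
nucleus_map_entries(void)
{
	uint32_t psize = largepage_support ? lpagesize : MMU_PAGESIZE;

	return (ksize / psize);
}
#endif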

#define	NO_MULTIBOOT	\
"multiboot is no longer used to boot the Solaris Operating System.\n\
The grub entry should be changed to:\n\
kernel$ /platform/i86pc/kernel/$ISADIR/unix\n\
module$ /platform/i86pc/$ISADIR/boot_archive\n\
See http://www.sun.com/msg/SUNOS-8000-AK for details.\n"

/*
 * startup_kernel() has a pretty simple job. It builds pagetables which
 * reflect 1:1 mappings for all memory in use. It then also adds mappings
 * for the kernel nucleus at the virtual address of target_kernel_text,
 * using large page mappings. The page table pages are also accessible at
 * their 1:1 mapped virtual addresses.
 */
/*ARGSUSED*/
void
startup_kernel(void)
{
	char *cmdline;
	uintptr_t addr;

	/*
	 * At this point we are executing in 32 bit protected mode with
	 * paging disabled.
	 */
	cmdline = (char *)mb_info->cmdline;
	prom_debug = (strstr(cmdline, "prom_debug") != NULL);
	map_debug = (strstr(cmdline, "map_debug") != NULL);
	bcons_init(cmdline);
	DBG_MSG("\n\nSolaris prekernel set: ");
	DBG_MSG(cmdline);
	DBG_MSG("\n");

	if (strstr(cmdline, "multiboot") != NULL) {
		dboot_panic(NO_MULTIBOOT);
	}

	/*
	 * boot info must be 16 byte aligned for 64 bit kernel ABI
	 */
	addr = (uintptr_t)boot_info;
	addr = (addr + 0xf) & ~0xf;
	bi = (struct xboot_info *)addr;
	DBG((uintptr_t)bi);
	bi->bi_cmdline = (native_ptr_t)(uintptr_t)cmdline;

	/*
	 * Need correct target_kernel_text value
	 */
#if defined(_BOOT_TARGET_amd64)
	target_kernel_text = KERNEL_TEXT_amd64;
#else
	target_kernel_text = KERNEL_TEXT_i386;
#endif
	DBG(target_kernel_text);

	/*
	 * use cpuid to detect MMU features
	 */
	if (have_cpuid()) {
		uint32_t eax, edx;

		eax = 1;
		edx = get_cpuid_edx(&eax);
		if (edx & CPUID_INTC_EDX_PSE)
			largepage_support = 1;
		if (edx & CPUID_INTC_EDX_PGE)
			pge_support = 1;
		if (edx & CPUID_INTC_EDX_PAE)
			pae_support = 1;

		eax = 0x80000000;
		edx = get_cpuid_edx(&eax);
		if (eax >= 0x80000001) {
			eax = 0x80000001;
			edx = get_cpuid_edx(&eax);
			if (edx & CPUID_AMD_EDX_LM)
				amd64_support = 1;
			if (edx & CPUID_AMD_EDX_NX)
				NX_support = 1;
		}
	} else {
		dboot_printf("cpuid not supported\n");
	}
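
#if 0	/* illustrative sketch -- not part of dboot */
/*
 * get_cpuid_edx() is implemented in the dboot assembly; this is a
 * minimal sketch of the equivalent in GNU inline assembly. It loads
 * %eax with the requested leaf, executes cpuid, passes back %eax (so
 * the caller can check the maximum supported leaf) and returns %edx.
 * The bits tested above are architectural: PSE is bit 3, PAE bit 6
 * and PGE bit 13 of leaf 1 %edx; NX is bit 20 and long mode bit 29
 * of extended leaf 0x80000001 %edx.
 */
static uint32_t
cpuid_edx(uint32_t *eax)
{
	uint32_t ebx, ecx, edx;

	__asm__ __volatile__("cpuid"
	    : "+a" (*eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
	return (edx);
}
#endif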

#if defined(_BOOT_TARGET_amd64)
	if (amd64_support == 0)
		dboot_panic("long mode not supported, rebooting\n");
	else if (pae_support == 0)
		dboot_panic("long mode, but no PAE; rebooting\n");
#endif

	/*
	 * initialize our memory allocator
	 */
	init_mem_alloc();

	/*
	 * configure mmu information
	 */
#if !defined(_BOOT_TARGET_amd64)
	if (pae_support && (max_mem > FOUR_GIG || NX_support)) {
#endif
		shift_amt = shift_amt_pae;
		ptes_per_table = 512;
		pte_size = 8;
		lpagesize = TWO_MEG;
#if defined(_BOOT_TARGET_amd64)
		top_level = 3;
#else
		top_level = 2;
#endif
#if !defined(_BOOT_TARGET_amd64)
	} else {
		pae_support = 0;
		NX_support = 0;
		shift_amt = shift_amt_nopae;
		ptes_per_table = 1024;
		pte_size = 4;
		lpagesize = FOUR_MEG;
		top_level = 1;
	}
#endif
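
	/*
	 * The resulting paging geometries, for reference:
	 *
	 *	mode	levels	entries/table	entry size	large page
	 *	non-PAE	2	1024		4 bytes		4 Meg
	 *	PAE 32	3	512		8 bytes		2 Meg
	 *	amd64	4	512		8 bytes		2 Meg
	 */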

	DBG(pge_support);
	DBG(NX_support);
	DBG(largepage_support);
	DBG(amd64_support);
	DBG(top_level);
	DBG(pte_size);
	DBG(ptes_per_table);
	DBG(lpagesize);

	ktext_phys = FOUR_MEG;		/* from UNIX Mapfile */

#if defined(_BOOT_TARGET_amd64)
	/*
	 * For grub, copy kernel bits from the ELF64 file to final place.
	 */
	DBG_MSG("\nAllocating nucleus pages.\n");
	ktext_phys = (uintptr_t)do_mem_alloc(ksize, FOUR_MEG);
	if (ktext_phys == 0)
		dboot_panic("failed to allocate aligned kernel memory\n");
	if (dboot_elfload64(mb_header.load_addr) != 0)
		dboot_panic("failed to parse kernel ELF image, rebooting\n");

#endif
	DBG(ktext_phys);

	/*
	 * Allocate page tables.
	 */
	build_page_tables();

	/*
	 * return to assembly code to switch to running kernel
	 */
	entry_addr_low = (uint32_t)target_kernel_text;
	DBG(entry_addr_low);
	bi->bi_use_largepage = largepage_support;
	bi->bi_use_pae = pae_support;
	bi->bi_use_pge = pge_support;
	bi->bi_use_nx = NX_support;
	bi->bi_next_paddr = next_avail_addr;
	DBG(bi->bi_next_paddr);
	bi->bi_next_vaddr = (uintptr_t)next_avail_addr;
	DBG(bi->bi_next_vaddr);
	bi->bi_mb_info = (uintptr_t)mb_info;
	bi->bi_top_page_table = (uintptr_t)top_page_table;

	bi->bi_kseg_size = FOUR_MEG;
	DBG(bi->bi_kseg_size);

#if 0		/* useful if debugging initial page tables */
	if (prom_debug)
		dump_tables();
#endif

	DBG_MSG("\n\n*** DBOOT DONE -- back to asm to jump to kernel\n\n");
}