xref: /titanic_51/usr/src/uts/i86pc/os/startup.c (revision bf1d7e28fd966a3f7e92b40aa301efdedc81ef7b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/t_lock.h>
30 #include <sys/param.h>
31 #include <sys/sysmacros.h>
32 #include <sys/signal.h>
33 #include <sys/systm.h>
34 #include <sys/user.h>
35 #include <sys/mman.h>
36 #include <sys/vm.h>
37 #include <sys/conf.h>
38 #include <sys/avintr.h>
39 #include <sys/autoconf.h>
40 #include <sys/disp.h>
41 #include <sys/class.h>
42 #include <sys/bitmap.h>
43 
44 #include <sys/privregs.h>
45 
46 #include <sys/proc.h>
47 #include <sys/buf.h>
48 #include <sys/kmem.h>
49 #include <sys/mem.h>
50 #include <sys/kstat.h>
51 
52 #include <sys/reboot.h>
53 
54 #include <sys/cred.h>
55 #include <sys/vnode.h>
56 #include <sys/file.h>
57 
58 #include <sys/procfs.h>
59 
60 #include <sys/vfs.h>
61 #include <sys/cmn_err.h>
62 #include <sys/utsname.h>
63 #include <sys/debug.h>
64 #include <sys/kdi.h>
65 
66 #include <sys/dumphdr.h>
67 #include <sys/bootconf.h>
68 #include <sys/varargs.h>
69 #include <sys/promif.h>
70 #include <sys/modctl.h>		/* for "procfs" hack */
71 
72 #include <sys/sunddi.h>
73 #include <sys/sunndi.h>
74 #include <sys/ndi_impldefs.h>
75 #include <sys/ddidmareq.h>
76 #include <sys/psw.h>
77 #include <sys/regset.h>
78 #include <sys/clock.h>
79 #include <sys/pte.h>
80 #include <sys/tss.h>
81 #include <sys/stack.h>
82 #include <sys/trap.h>
83 #include <sys/fp.h>
84 #include <vm/anon.h>
85 #include <vm/as.h>
86 #include <vm/page.h>
87 #include <vm/seg.h>
88 #include <vm/seg_dev.h>
89 #include <vm/seg_kmem.h>
90 #include <vm/seg_kpm.h>
91 #include <vm/seg_map.h>
92 #include <vm/seg_vn.h>
93 #include <vm/seg_kp.h>
94 #include <sys/memnode.h>
95 #include <vm/vm_dep.h>
96 #include <sys/thread.h>
97 #include <sys/sysconf.h>
98 #include <sys/vm_machparam.h>
99 #include <sys/archsystm.h>
100 #include <sys/machsystm.h>
101 #include <vm/hat.h>
102 #include <vm/hat_i86.h>
103 #include <sys/pmem.h>
104 #include <sys/smp_impldefs.h>
105 #include <sys/x86_archext.h>
106 #include <sys/segments.h>
107 #include <sys/clconf.h>
108 #include <sys/kobj.h>
109 #include <sys/kobj_lex.h>
110 #include <sys/cpc_impl.h>
111 #include <sys/pg.h>
112 #include <sys/x86_archext.h>
113 #include <sys/cpu_module.h>
114 #include <sys/smbios.h>
115 #include <sys/debug_info.h>
116 
117 
118 #include <sys/bootinfo.h>
119 #include <vm/kboot_mmu.h>
120 
121 extern void progressbar_init(void);
122 extern void progressbar_start(void);
123 extern void brand_init(void);
124 
125 /*
126  * XXX make declaration below "static" when drivers no longer use this
127  * interface.
128  */
129 extern caddr_t p0_va;	/* Virtual address for accessing physical page 0 */
130 
131 /*
132  * segkp
133  */
134 extern int segkp_fromheap;
135 
136 static void kvm_init(void);
137 static void startup_init(void);
138 static void startup_memlist(void);
139 static void startup_kmem(void);
140 static void startup_modules(void);
141 static void startup_vm(void);
142 static void startup_end(void);
143 
144 /*
145  * Declare these as initialized data so we can patch them.
146  */
147 #ifdef __i386
148 /*
149  * Due to virtual address space limitations running in 32 bit mode, restrict
150  * the amount of physical memory configured to a max of PHYSMEM32 pages (16g).
151  *
152  * If the physical max memory size of 64g were allowed to be configured, the
153  * size of user virtual address space would be less than 1g. A limited user
154  * address space greatly reduces the range of applications that can run.
155  *
156  * If more physical memory than PHYSMEM32 is required, users should preferably
157  * run in 64 bit mode which has no virtual address space limitation issues.
158  *
159  * If 64 bit mode is not available (as in IA32) and/or more physical memory
160  * than PHYSMEM32 is required in 32 bit mode, physmem can be set to the desired
161  * value or to 0 (to configure all available memory) via eeprom(1M). kernelbase
162  * should also be carefully tuned to balance the needs of user
163  * applications against the risk of kernel heap exhaustion from
164  * kernelbase being set too high.
165  */
166 #define	PHYSMEM32	0x400000
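/*
 * 0x400000 pages at the 4KB base page size works out to the 16g limit
 * cited above.
 */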
167 
168 pgcnt_t physmem = PHYSMEM32;
169 #else
170 pgcnt_t physmem = 0;	/* memory size in pages, patch if you want less */
171 #endif
172 pgcnt_t obp_pages;	/* Memory used by PROM for its text and data */
173 
174 char *kobj_file_buf;
175 int kobj_file_bufsize;	/* set in /etc/system */
176 
177 /* Global variables for MP support. Used in mp_startup */
178 caddr_t	rm_platter_va;
179 uint32_t rm_platter_pa;
180 
181 int	auto_lpg_disable = 1;
182 
183 /*
184  * Some CPUs have holes in the middle of the 64-bit virtual address range.
185  */
186 uintptr_t hole_start, hole_end;
187 
188 /*
189  * kpm mapping window
190  */
191 caddr_t kpm_vbase;
192 size_t  kpm_size;
193 static int kpm_desired = 0;		/* Do we want to try to use segkpm? */
194 
195 /*
196  * Configuration parameters set at boot time.
197  */
198 
199 caddr_t econtig;		/* end of first block of contiguous kernel */
200 
201 struct bootops		*bootops = 0;	/* passed in from boot */
202 struct bootops		**bootopsp;
203 struct boot_syscalls	*sysp;		/* passed in from boot */
204 
205 char bootblock_fstype[16];
206 
207 char kern_bootargs[OBP_MAXPATHLEN];
208 
209 /*
210  * ZFS zio segment.  This allows us to exclude large portions of ZFS data that
211  * gets cached in kmem caches on the heap.  If this is set to zero, we allocate
212  * zio buffers from their own segment, otherwise they are allocated from the
213  * heap.  The optimization of allocating zio buffers from their own segment is
214  * only valid on 64-bit kernels.
215  */
216 #if defined(__amd64)
217 int segzio_fromheap = 0;
218 #else
219 int segzio_fromheap = 1;
220 #endif
221 
222 /*
223  * New memory fragments are possible in startup() due to BOP_ALLOCs. How many
224  * depends on the number of BOP_ALLOC calls made, the sizes requested, the
225  * memory size, and whether boot.bin memory needs to be freed.
226  */
227 #define	POSS_NEW_FRAGMENTS	12
228 
229 /*
230  * VM data structures
231  */
232 long page_hashsz;		/* Size of page hash table (power of two) */
233 struct page *pp_base;		/* Base of initial system page struct array */
234 struct page **page_hash;	/* Page hash table */
235 struct seg ktextseg;		/* Segment used for kernel executable image */
236 struct seg kvalloc;		/* Segment used for "valloc" mapping */
237 struct seg kpseg;		/* Segment used for pageable kernel virt mem */
238 struct seg kmapseg;		/* Segment used for generic kernel mappings */
239 struct seg kdebugseg;		/* Segment used for the kernel debugger */
240 
241 struct seg *segkmap = &kmapseg;	/* Kernel generic mapping segment */
242 static struct seg *segmap = &kmapseg;	/* easier to use name for in here */
243 
244 struct seg *segkp = &kpseg;	/* Pageable kernel virtual memory segment */
245 
246 #if defined(__amd64)
247 struct seg kvseg_core;		/* Segment used for the core heap */
248 struct seg kpmseg;		/* Segment used for physical mapping */
249 struct seg *segkpm = &kpmseg;	/* 64bit kernel physical mapping segment */
250 #else
251 struct seg *segkpm = NULL;	/* Unused on IA32 */
252 #endif
253 
254 caddr_t segkp_base;		/* Base address of segkp */
255 caddr_t segzio_base;		/* Base address of segzio */
256 #if defined(__amd64)
257 pgcnt_t segkpsize = btop(SEGKPDEFSIZE);	/* size of segkp segment in pages */
258 #else
259 pgcnt_t segkpsize = 0;
260 #endif
261 pgcnt_t segziosize = 0;		/* size of zio segment in pages */
262 
263 /*
264  * VA range available to the debugger
265  */
266 const caddr_t kdi_segdebugbase = (const caddr_t)SEGDEBUGBASE;
267 const size_t kdi_segdebugsize = SEGDEBUGSIZE;
268 
269 struct memseg *memseg_base;
270 struct vnode unused_pages_vp;
271 
272 #define	FOURGB	0x100000000LL
273 
274 struct memlist *memlist;
275 
276 caddr_t s_text;		/* start of kernel text segment */
277 caddr_t e_text;		/* end of kernel text segment */
278 caddr_t s_data;		/* start of kernel data segment */
279 caddr_t e_data;		/* end of kernel data segment */
280 caddr_t modtext;	/* start of loadable module text reserved */
281 caddr_t e_modtext;	/* end of loadable module text reserved */
282 caddr_t moddata;	/* start of loadable module data reserved */
283 caddr_t e_moddata;	/* end of loadable module data reserved */
284 
285 struct memlist *phys_install;	/* Total installed physical memory */
286 struct memlist *phys_avail;	/* Total available physical memory */
287 
288 /*
289  * kphysm_init returns the number of pages that were processed
290  */
291 static pgcnt_t kphysm_init(page_t *, pgcnt_t);
292 
293 #define	IO_PROP_SIZE	64	/* device property size */
294 
295 /*
296  * a couple useful roundup macros
297  */
298 #define	ROUND_UP_PAGE(x)	\
299 	((uintptr_t)P2ROUNDUP((uintptr_t)(x), (uintptr_t)MMU_PAGESIZE))
300 #define	ROUND_UP_LPAGE(x)	\
301 	((uintptr_t)P2ROUNDUP((uintptr_t)(x), mmu.level_size[1]))
302 #define	ROUND_UP_4MEG(x)	\
303 	((uintptr_t)P2ROUNDUP((uintptr_t)(x), (uintptr_t)FOUR_MEG))
304 #define	ROUND_UP_TOPLEVEL(x)	\
305 	((uintptr_t)P2ROUNDUP((uintptr_t)(x), mmu.level_size[mmu.max_level]))
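/*
 * For example, ROUND_UP_PAGE(0x1234) yields 0x2000 with the 4KB base page
 * size, while ROUND_UP_LPAGE() rounds to mmu.level_size[1], typically 2MB
 * (PAE and 64-bit) or 4MB (non-PAE).
 */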
306 
307 /*
308  *	32-bit Kernel's Virtual memory layout.
309  *		+-----------------------+
310  *		|			|
311  * 0xFFC00000  -|-----------------------|- ARGSBASE
312  *		|	debugger	|
313  * 0xFF800000  -|-----------------------|- SEGDEBUGBASE
314  *		|      Kernel Data	|
315  * 0xFEC00000  -|-----------------------|
316  *              |      Kernel Text	|
317  * 0xFE800000  -|-----------------------|- KERNEL_TEXT
318  *		|---       GDT       ---|- GDT page (GDT_VA)
319  *		|---    debug info   ---|- debug info (DEBUG_INFO_VA)
320  *		|			|
321  * 		|   page_t structures	|
322  * 		|   memsegs, memlists, 	|
323  * 		|   page hash, etc.	|
324  * ---	       -|-----------------------|- ekernelheap, valloc_base (floating)
325  *		|			|  (segkp is just an arena in the heap)
326  *		|			|
327  *		|	kvseg		|
328  *		|			|
329  *		|			|
330  * ---         -|-----------------------|- kernelheap (floating)
331  * 		|        Segkmap	|
332  * 0xC3002000  -|-----------------------|- segmap_start (floating)
333  *		|	Red Zone	|
334  * 0xC3000000  -|-----------------------|- kernelbase / userlimit (floating)
335  *		|			|			||
336  *		|     Shared objects	|			\/
337  *		|			|
338  *		:			:
339  *		|	user data	|
340  *		|-----------------------|
341  *		|	user text	|
342  * 0x08048000  -|-----------------------|
343  *		|	user stack	|
344  *		:			:
345  *		|	invalid		|
346  * 0x00000000	+-----------------------+
347  *
348  *
349  *		64-bit Kernel's Virtual memory layout. (assuming 64 bit app)
350  *			+-----------------------+
351  *			|			|
352  * 0xFFFFFFFF.FFC00000  |-----------------------|- ARGSBASE
353  *			|	debugger (?)	|
354  * 0xFFFFFFFF.FF800000  |-----------------------|- SEGDEBUGBASE
355  *			|      unused    	|
356  *			+-----------------------+
357  *			|      Kernel Data	|
358  * 0xFFFFFFFF.FBC00000  |-----------------------|
359  *			|      Kernel Text	|
360  * 0xFFFFFFFF.FB800000  |-----------------------|- KERNEL_TEXT
361  *			|---       GDT       ---|- GDT page (GDT_VA)
362  *			|---    debug info   ---|- debug info (DEBUG_INFO_VA)
363  *			|			|
364  * 			|      Core heap	| (used for loadable modules)
365  * 0xFFFFFFFF.C0000000  |-----------------------|- core_base / ekernelheap
366  *			|	 Kernel		|
367  *			|	  heap		|
368  * 0xFFFFFXXX.XXX00000  |-----------------------|- kernelheap (floating)
369  *			|	 segmap		|
370  * 0xFFFFFXXX.XXX00000  |-----------------------|- segmap_start (floating)
371  *			|    device mappings	|
372  * 0xFFFFFXXX.XXX00000  |-----------------------|- toxic_addr (floating)
373  *			|	  segzio	|
374  * 0xFFFFFXXX.XXX00000  |-----------------------|- segzio_base (floating)
375  *			|	  segkp		|
376  * ---                  |-----------------------|- segkp_base (floating)
377  * 			|   page_t structures	|  valloc_base + valloc_sz
378  * 			|   memsegs, memlists, 	|
379  * 			|   page hash, etc.	|
380  * 0xFFFFFF00.00000000  |-----------------------|- valloc_base
381  *			|	 segkpm		|
382  * 0xFFFFFE00.00000000  |-----------------------|
383  *			|	Red Zone	|
384  * 0xFFFFFD80.00000000  |-----------------------|- KERNELBASE
385  *			|     User stack	|- User space memory
386  * 			|			|
387  * 			| shared objects, etc	|	(grows downwards)
388  *			:			:
389  * 			|			|
390  * 0xFFFF8000.00000000  |-----------------------|
391  * 			|			|
392  * 			| VA Hole / unused	|
393  * 			|			|
394  * 0x00008000.00000000  |-----------------------|
395  *			|			|
396  *			|			|
397  *			:			:
398  *			|	user heap	|	(grows upwards)
399  *			|			|
400  *			|	user data	|
401  *			|-----------------------|
402  *			|	user text	|
403  * 0x00000000.04000000  |-----------------------|
404  *			|	invalid		|
405  * 0x00000000.00000000	+-----------------------+
406  *
407  * A 32 bit app on the 64 bit kernel sees the same layout as on the 32 bit
408  * kernel, except that userlimit is raised to 0xfe000000.
409  *
410  * Floating values:
411  *
412  * valloc_base: start of the kernel's memory management/tracking data
413  * structures.  This region contains page_t structures for
414  * physical memory, memsegs, memlists, and the page hash.
415  *
416  * core_base: start of the kernel's "core" heap area on 64-bit systems.
417  * This area is intended to be used for global data as well as for module
418  * text/data that does not fit into the nucleus pages.  The core heap is
419  * restricted to a 2GB range, allowing every address within it to be
420  * accessed using rip-relative addressing.
421  *
422  * ekernelheap: end of kernelheap and start of segmap.
423  *
424  * kernelheap: start of kernel heap.  On 32-bit systems, this starts right
425  * above a red zone that separates the user's address space from the
426  * kernel's.  On 64-bit systems, it sits above segkp and segkpm.
427  *
428  * segmap_start: start of segmap. The length of segmap can be modified
429  * by changing segmapsize in /etc/system (preferred) or eeprom (deprecated).
430  * The default length is 16MB on 32-bit systems and 64MB on 64-bit systems.
431  *
432  * kernelbase: On a 32-bit kernel the default value of 0xd4000000 will be
433  * decreased by 2X the size required for page_t.  This allows the kernel
434  * heap to grow in size with physical memory.  With sizeof(page_t) == 80
435  * bytes, the following shows the values of kernelbase and kernel heap
436  * sizes for different memory configurations (assuming default segmap and
437  * segkp sizes).
438  *
439  *	mem	size for	kernelbase	kernel heap
440  *	size	page_t's			size
441  *	----	---------	----------	-----------
442  *	1gb	0x01400000	0xd1800000	684MB
443  *	2gb	0x02800000	0xcf000000	704MB
444  *	4gb	0x05000000	0xca000000	744MB
445  *	6gb	0x07800000	0xc5000000	784MB
446  *	8gb	0x0a000000	0xc0000000	824MB
447  *	16gb	0x14000000	0xac000000	984MB
448  *	32gb	0x28000000	0x84000000	1304MB
449  *	64gb	0x50000000	0x34000000	1944MB (*)
450  *
451  * kernelbase is less than the abi minimum of 0xc0000000 for memory
452  * configurations above 8gb.
453  *
454  * (*) support for memory configurations above 32gb will require manual tuning
455  * of kernelbase to balance out the need of user applications.
456  */
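/*
 * As a worked example of the table above (using its assumptions): a 4gb
 * system has 0x100000 4KB pages, so page_t's need 0x100000 * 80 bytes =
 * 0x05000000, giving kernelbase = 0xd4000000 - 2 * 0x05000000 = 0xca000000.
 */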
457 
458 /* real-time-clock initialization parameters */
459 extern time_t process_rtc_config_file(void);
460 
461 char		*final_kernelheap;
462 char		*boot_kernelheap;
463 uintptr_t	kernelbase;
464 uintptr_t	postbootkernelbase;	/* not set till boot loader is gone */
465 uintptr_t	eprom_kernelbase;
466 size_t		segmapsize;
467 static uintptr_t segmap_reserved;
468 uintptr_t	segmap_start;
469 int		segmapfreelists;
470 pgcnt_t		npages;
471 pgcnt_t		orig_npages;
472 size_t		core_size;		/* size of "core" heap */
473 uintptr_t	core_base;		/* base address of "core" heap */
474 
475 /*
476  * List of bootstrap pages. We mark these as allocated in startup.
477  * release_bootstrap() will free them when we're completely done with
478  * the bootstrap.
479  */
480 static page_t *bootpages;
481 
482 /*
483  * boot time pages that have a vnode from the ramdisk will keep that forever.
484  */
485 static page_t *rd_pages;
486 
487 struct system_hardware system_hardware;
488 
489 /*
490  * Enable some debugging messages concerning memory usage...
491  */
492 static void
493 print_memlist(char *title, struct memlist *mp)
494 {
495 	prom_printf("MEMLIST: %s:\n", title);
496 	while (mp != NULL)  {
497 		prom_printf("\tAddress 0x%" PRIx64 ", size 0x%" PRIx64 "\n",
498 		    mp->address, mp->size);
499 		mp = mp->next;
500 	}
501 }
502 
503 /*
504  * XX64 need a comment here.. are these just default values, surely
505  * we read the "cpuid" type information to figure this out.
506  */
507 int	l2cache_sz = 0x80000;
508 int	l2cache_linesz = 0x40;
509 int	l2cache_assoc = 1;
510 
511 /*
512  * On 64 bit we use a predefined VA range for mapping devices in the kernel.
513  * On 32 bit the mappings are intermixed in the heap, so we use a bit map.
514  */
515 #ifdef __amd64
516 
517 vmem_t		*device_arena;
518 uintptr_t	toxic_addr = (uintptr_t)NULL;
519 size_t		toxic_size = 1024 * 1024 * 1024; /* Sparc uses 1 gig too */
520 
521 #else	/* __i386 */
522 
523 ulong_t		*toxic_bit_map;	/* one bit for each 4k of VA in heap_arena */
524 size_t		toxic_bit_map_len = 0;	/* in bits */
525 
526 #endif	/* __i386 */
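/*
 * On 64 bit, device VA is carved out of device_arena (e.g. via
 * vmem_alloc()); on 32 bit, heap ranges handed out for device mappings are
 * flagged by setting the corresponding bits in toxic_bit_map.
 */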
527 
528 /*
529  * Simple boot time debug facilities
530  */
531 static char *prm_dbg_str[] = {
532 	"%s:%d: '%s' is 0x%x\n",
533 	"%s:%d: '%s' is 0x%llx\n"
534 };
535 
536 int prom_debug;
537 
538 #define	PRM_DEBUG(q)	if (prom_debug) 	\
539 	prom_printf(prm_dbg_str[sizeof (q) >> 3], "startup.c", __LINE__, #q, q);
540 #define	PRM_POINT(q)	if (prom_debug) 	\
541 	prom_printf("%s:%d: %s\n", "startup.c", __LINE__, q);
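/*
 * Note PRM_DEBUG() picks its format string by argument size:
 * sizeof (q) >> 3 is 0 for 32-bit quantities (printed with %x) and 1 for
 * 64-bit quantities (printed with %llx).
 */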
542 
543 /*
544  * This structure is used to keep track of the initial allocations
545  * done in startup_memlist(). The value of NUM_ALLOCATIONS needs to
546  * be >= the number of ADD_TO_ALLOCATIONS() executed in the code.
547  */
548 #define	NUM_ALLOCATIONS 7
549 int num_allocations = 0;
550 struct {
551 	void **al_ptr;
552 	size_t al_size;
553 } allocations[NUM_ALLOCATIONS];
554 size_t valloc_sz = 0;
555 uintptr_t valloc_base;
556 
557 #define	ADD_TO_ALLOCATIONS(ptr, size) {					\
558 		size = ROUND_UP_PAGE(size);		 		\
559 		if (num_allocations == NUM_ALLOCATIONS)			\
560 			panic("too many ADD_TO_ALLOCATIONS()");		\
561 		allocations[num_allocations].al_ptr = (void**)&ptr;	\
562 		allocations[num_allocations].al_size = size;		\
563 		valloc_sz += size;					\
564 		++num_allocations;				 	\
565 	}
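/*
 * Typical use, as in startup_memlist() below: compute a size, call
 * ADD_TO_ALLOCATIONS(ptr, size) to record where the carved-out address
 * should be stored, then let perform_allocations() do a single BOP_ALLOC()
 * of valloc_sz bytes and parcel it out to each recorded pointer.
 */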
566 
567 /*
568  * Allocate all the initial memory needed by the page allocator.
569  */
570 static void
571 perform_allocations(void)
572 {
573 	caddr_t mem;
574 	int i;
575 	int valloc_align;
576 
577 	PRM_DEBUG(valloc_base);
578 	PRM_DEBUG(valloc_sz);
579 	valloc_align = mmu.level_size[mmu.max_page_level > 0];
580 	mem = BOP_ALLOC(bootops, (caddr_t)valloc_base, valloc_sz, valloc_align);
581 	if (mem != (caddr_t)valloc_base)
582 		panic("BOP_ALLOC() failed");
583 	bzero(mem, valloc_sz);
584 	for (i = 0; i < num_allocations; ++i) {
585 		*allocations[i].al_ptr = (void *)mem;
586 		mem += allocations[i].al_size;
587 	}
588 }
589 
590 /*
591  * Our world looks like this at startup time.
592  *
593  * In a 32-bit OS, boot loads the kernel text at 0xfe800000 and kernel data
594  * at 0xfec00000.  On a 64-bit OS, kernel text and data are loaded at
595  * 0xffffffff.fe800000 and 0xffffffff.fec00000 respectively.  Those
596  * addresses are fixed in the binary at link time.
597  *
598  * On the text page:
599  * unix/genunix/krtld/module text loads.
600  *
601  * On the data page:
602  * unix/genunix/krtld/module data loads.
603  *
604  * Machine-dependent startup code
605  */
606 void
607 startup(void)
608 {
609 	extern void startup_bios_disk(void);
610 	extern void startup_pci_bios(void);
611 	/*
612 	 * Make sure that nobody tries to use segkpm until we have
613 	 * initialized it properly.
614 	 */
615 #if defined(__amd64)
616 	kpm_desired = kpm_enable;
617 #endif
618 	kpm_enable = 0;
619 
620 	progressbar_init();
621 	startup_init();
622 	startup_memlist();
623 	startup_kmem();
624 	startup_pci_bios();
625 	startup_modules();
626 	startup_bios_disk();
627 	startup_vm();
628 	startup_end();
629 	progressbar_start();
630 }
631 
632 static void
633 startup_init()
634 {
635 	PRM_POINT("startup_init() starting...");
636 
637 	/*
638 	 * Complete the extraction of cpuid data
639 	 */
640 	cpuid_pass2(CPU);
641 
642 	(void) check_boot_version(BOP_GETVERSION(bootops));
643 
644 	/*
645 	 * Check for prom_debug in boot environment
646 	 */
647 	if (BOP_GETPROPLEN(bootops, "prom_debug") >= 0) {
648 		++prom_debug;
649 		PRM_POINT("prom_debug found in boot environment");
650 	}
651 
652 	/*
653 	 * Collect node, cpu and memory configuration information.
654 	 */
655 	get_system_configuration();
656 
657 	/*
658 	 * Halt if this is an unsupported processor.
659 	 */
660 	if (x86_type == X86_TYPE_486 || x86_type == X86_TYPE_CYRIX_486) {
661 		printf("\n486 processor (\"%s\") detected.\n",
662 		    CPU->cpu_brandstr);
663 		halt("This processor is not supported by this release "
664 		    "of Solaris.");
665 	}
666 
667 	PRM_POINT("startup_init() done");
668 }
669 
670 /*
671  * Callback for copy_memlist_filter() to filter nucleus and kadb/kmdb pages
672  * (i.e. everything mapped above KERNEL_TEXT) out of phys_avail. Note it
673  * also filters out physical page zero.  There is some reliance on the
674  * boot loader allocating only a few contiguous physical memory chunks.
675  */
676 static void
677 avail_filter(uint64_t *addr, uint64_t *size)
678 {
679 	uintptr_t va;
680 	uintptr_t next_va;
681 	pfn_t pfn;
682 	uint64_t pfn_addr;
683 	uint64_t pfn_eaddr;
684 	uint_t prot;
685 	size_t len;
686 	uint_t change;
687 
688 	if (prom_debug)
689 		prom_printf("\tFilter: in: a=%" PRIx64 ", s=%" PRIx64 "\n",
690 		    *addr, *size);
691 
692 	/*
693 	 * page zero is required for BIOS.. never make it available
694 	 */
695 	if (*addr == 0) {
696 		*addr += MMU_PAGESIZE;
697 		*size -= MMU_PAGESIZE;
698 	}
699 
700 	/*
701 	 * First we trim from the front of the range. Since kbm_probe()
702 	 * walks ranges in virtual order, but addr/size are physical, we need
703 	 * to walk the list until no changes are seen.  This deals with the case
704 	 * where page "p" is mapped at v, page "p + PAGESIZE" is mapped at w
705 	 * but w < v.
706 	 */
707 	do {
708 		change = 0;
709 		for (va = KERNEL_TEXT;
710 		    *size > 0 && kbm_probe(&va, &len, &pfn, &prot) != 0;
711 		    va = next_va) {
712 
713 			next_va = va + len;
714 			pfn_addr = pfn_to_pa(pfn);
715 			pfn_eaddr = pfn_addr + len;
716 
717 			if (pfn_addr <= *addr && pfn_eaddr > *addr) {
718 				change = 1;
719 				while (*size > 0 && len > 0) {
720 					*addr += MMU_PAGESIZE;
721 					*size -= MMU_PAGESIZE;
722 					len -= MMU_PAGESIZE;
723 				}
724 			}
725 		}
726 		if (change && prom_debug)
727 			prom_printf("\t\ttrim: a=%" PRIx64 ", s=%" PRIx64 "\n",
728 			    *addr, *size);
729 	} while (change);
730 
731 	/*
732 	 * Trim pages from the end of the range.
733 	 */
734 	for (va = KERNEL_TEXT;
735 	    *size > 0 && kbm_probe(&va, &len, &pfn, &prot) != 0;
736 	    va = next_va) {
737 
738 		next_va = va + len;
739 		pfn_addr = pfn_to_pa(pfn);
740 
741 		if (pfn_addr >= *addr && pfn_addr < *addr + *size)
742 			*size = pfn_addr - *addr;
743 	}
744 
745 	if (prom_debug)
746 		prom_printf("\tFilter out: a=%" PRIx64 ", s=%" PRIx64 "\n",
747 		    *addr, *size);
748 }
749 
750 static void
751 kpm_init()
752 {
753 	struct segkpm_crargs b;
754 
755 	/*
756 	 * These variables were all designed for sfmmu in which segkpm is
757 	 * mapped using a single pagesize - either 8KB or 4MB.  On x86, we
758 	 * might use 2+ page sizes on a single machine, so none of these
759 	 * variables have a single correct value.  They are set up as if we
760 	 * always use a 4KB pagesize, which should do no harm.  In the long
761 	 * run, we should get rid of KPM's assumption that only a single
762 	 * pagesize is used.
763 	 */
764 	kpm_pgshft = MMU_PAGESHIFT;
765 	kpm_pgsz =  MMU_PAGESIZE;
766 	kpm_pgoff = MMU_PAGEOFFSET;
767 	kpmp2pshft = 0;
768 	kpmpnpgs = 1;
769 	ASSERT(((uintptr_t)kpm_vbase & (kpm_pgsz - 1)) == 0);
770 
771 	PRM_POINT("about to create segkpm");
772 	rw_enter(&kas.a_lock, RW_WRITER);
773 
774 	if (seg_attach(&kas, kpm_vbase, kpm_size, segkpm) < 0)
775 		panic("cannot attach segkpm");
776 
777 	b.prot = PROT_READ | PROT_WRITE;
778 	b.nvcolors = 1;
779 
780 	if (segkpm_create(segkpm, (caddr_t)&b) != 0)
781 		panic("segkpm_create segkpm");
782 
783 	rw_exit(&kas.a_lock);
784 }
785 
786 /*
787  * The debug info page provides enough information to allow external
788  * inspectors (e.g. when running under a hypervisor) to bootstrap
789  * themselves into allowing full-blown kernel debugging.
790  */
791 static void
792 init_debug_info(void)
793 {
794 	caddr_t mem;
795 	debug_info_t *di;
796 
797 #ifndef __lint
798 	ASSERT(sizeof (debug_info_t) < MMU_PAGESIZE);
799 #endif
800 
801 	mem = BOP_ALLOC(bootops, (caddr_t)DEBUG_INFO_VA, MMU_PAGESIZE,
802 	    MMU_PAGESIZE);
803 
804 	if (mem != (caddr_t)DEBUG_INFO_VA)
805 		panic("BOP_ALLOC() failed");
806 	bzero(mem, MMU_PAGESIZE);
807 
808 	di = (debug_info_t *)mem;
809 
810 	di->di_magic = DEBUG_INFO_MAGIC;
811 	di->di_version = DEBUG_INFO_VERSION;
812 }
813 
814 /*
815  * Build the memlists and other kernel essential memory system data structures.
816  * This is everything at valloc_base.
817  */
818 static void
819 startup_memlist(void)
820 {
821 	size_t memlist_sz;
822 	size_t memseg_sz;
823 	size_t pagehash_sz;
824 	size_t pp_sz;
825 	uintptr_t va;
826 	size_t len;
827 	uint_t prot;
828 	pfn_t pfn;
829 	int memblocks;
830 	caddr_t pagecolor_mem;
831 	size_t pagecolor_memsz;
832 	caddr_t page_ctrs_mem;
833 	size_t page_ctrs_size;
834 	struct memlist *current;
835 	extern void startup_build_mem_nodes(struct memlist *);
836 
837 	/* XX64 fix these - they should be in include files */
838 	extern size_t page_coloring_init(uint_t, int, int);
839 	extern void page_coloring_setup(caddr_t);
840 
841 	PRM_POINT("startup_memlist() starting...");
842 
843 	/*
844 	 * Use leftover large page nucleus text/data space for loadable modules.
845 	 * Use at most MODTEXT/MODDATA.
846 	 */
847 	len = kbm_nucleus_size;
848 	ASSERT(len > MMU_PAGESIZE);
849 
850 	moddata = (caddr_t)ROUND_UP_PAGE(e_data);
851 	e_moddata = (caddr_t)P2ROUNDUP((uintptr_t)e_data, (uintptr_t)len);
852 	if (e_moddata - moddata > MODDATA)
853 		e_moddata = moddata + MODDATA;
854 
855 	modtext = (caddr_t)ROUND_UP_PAGE(e_text);
856 	e_modtext = (caddr_t)P2ROUNDUP((uintptr_t)e_text, (uintptr_t)len);
857 	if (e_modtext - modtext > MODTEXT)
858 		e_modtext = modtext + MODTEXT;
859 
860 	econtig = e_moddata;
861 
862 	PRM_DEBUG(modtext);
863 	PRM_DEBUG(e_modtext);
864 	PRM_DEBUG(moddata);
865 	PRM_DEBUG(e_moddata);
866 	PRM_DEBUG(econtig);
867 
868 	/*
869 	 * Examine the boot loader physical memory map to find out:
870 	 * - total memory in system - physinstalled
871 	 * - the max physical address - physmax
872 	 * - the number of discontiguous segments of memory.
873 	 */
874 	if (prom_debug)
875 		print_memlist("boot physinstalled",
876 		    bootops->boot_mem->physinstalled);
877 	installed_top_size(bootops->boot_mem->physinstalled, &physmax,
878 	    &physinstalled, &memblocks);
879 	PRM_DEBUG(physmax);
880 	PRM_DEBUG(physinstalled);
881 	PRM_DEBUG(memblocks);
882 
883 	/*
884 	 * Initialize hat's mmu parameters.
885 	 * Check for enforce-prot-exec in boot environment. It's used to
886 	 * enable/disable support for the page table entry NX bit.
887 	 * The default is to enforce PROT_EXEC on processors that support NX.
888 	 * Boot seems to round up the "len", but 8 seems to be big enough.
889 	 */
890 	mmu_init();
891 
892 #ifdef	__i386
893 	/*
894 	 * physmax is lowered if there is more memory than can be
895 	 * physically addressed in 32 bit (PAE/non-PAE) modes.
896 	 */
897 	if (mmu.pae_hat) {
898 		if (PFN_ABOVE64G(physmax)) {
899 			physinstalled -= (physmax - (PFN_64G - 1));
900 			physmax = PFN_64G - 1;
901 		}
902 	} else {
903 		if (PFN_ABOVE4G(physmax)) {
904 			physinstalled -= (physmax - (PFN_4G - 1));
905 			physmax = PFN_4G - 1;
906 		}
907 	}
908 #endif
909 
910 	startup_build_mem_nodes(bootops->boot_mem->physinstalled);
911 
912 	if (BOP_GETPROPLEN(bootops, "enforce-prot-exec") >= 0) {
913 		int len = BOP_GETPROPLEN(bootops, "enforce-prot-exec");
914 		char value[8];
915 
916 		if (len < 8)
917 			(void) BOP_GETPROP(bootops, "enforce-prot-exec", value);
918 		else
919 			(void) strcpy(value, "");
920 		if (strcmp(value, "off") == 0)
921 			mmu.pt_nx = 0;
922 	}
923 	PRM_DEBUG(mmu.pt_nx);
924 
925 	/*
926 	 * We will need page_t's for every page in the system, except for
927 	 * memory mapped at or above the start of the kernel text segment.
928 	 *
929 	 * pages above e_modtext are attributed to kernel debugger (obp_pages)
930 	 */
931 	npages = physinstalled - 1; /* avail_filter() skips page 0, so "- 1" */
932 	obp_pages = 0;
933 	va = KERNEL_TEXT;
934 	while (kbm_probe(&va, &len, &pfn, &prot) != 0) {
935 		npages -= len >> MMU_PAGESHIFT;
936 		if (va >= (uintptr_t)e_moddata)
937 			obp_pages += len >> MMU_PAGESHIFT;
938 		va += len;
939 	}
940 	PRM_DEBUG(npages);
941 	PRM_DEBUG(obp_pages);
942 
943 	/*
944 	 * If physmem is patched to be non-zero, use it instead of
945 	 * the computed value unless it is larger than the real
946 	 * amount of memory on hand.
947 	 */
948 	if (physmem == 0 || physmem > npages) {
949 		physmem = npages;
950 	} else if (physmem < npages) {
951 		orig_npages = npages;
952 		npages = physmem;
953 	}
954 	PRM_DEBUG(physmem);
955 
956 	/*
957 	 * We now compute the sizes of all the initial allocations for
958 	 * structures the kernel needs in order to do kmem_alloc(). These
959 	 * include:
960 	 *	memsegs
961 	 *	memlists
962 	 *	page hash table
963 	 *	page_t's
964 	 *	page coloring data structs
965 	 */
966 	memseg_sz = sizeof (struct memseg) * (memblocks + POSS_NEW_FRAGMENTS);
967 	ADD_TO_ALLOCATIONS(memseg_base, memseg_sz);
968 	PRM_DEBUG(memseg_sz);
969 
970 	/*
971 	 * Reserve space for memlists. There's no real good way to know exactly
972 	 * how much room we'll need, but this should be a good upper bound.
973 	 */
974 	memlist_sz = ROUND_UP_PAGE(2 * sizeof (struct memlist) *
975 	    (memblocks + POSS_NEW_FRAGMENTS));
976 	ADD_TO_ALLOCATIONS(memlist, memlist_sz);
977 	PRM_DEBUG(memlist_sz);
978 
979 	/*
980 	 * The page structure hash table size is a power of 2
981 	 * such that the average hash chain length is PAGE_HASHAVELEN.
982 	 */
983 	page_hashsz = npages / PAGE_HASHAVELEN;
984 	page_hashsz = 1 << highbit(page_hashsz);
985 	pagehash_sz = sizeof (struct page *) * page_hashsz;
986 	ADD_TO_ALLOCATIONS(page_hash, pagehash_sz);
987 	PRM_DEBUG(pagehash_sz);
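	/*
	 * For example, assuming PAGE_HASHAVELEN is 4, a 4gb system with
	 * 0x100000 pages gets page_hashsz = 1 << highbit(0x40000) = 0x80000
	 * buckets, i.e. 4MB of page hash with 8-byte pointers.
	 */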
988 
989 	/*
990 	 * Set aside room for the page structures themselves.
991 	 */
992 	PRM_DEBUG(npages);
993 	pp_sz = sizeof (struct page) * npages;
994 	ADD_TO_ALLOCATIONS(pp_base, pp_sz);
995 	PRM_DEBUG(pp_sz);
996 
997 	/*
998 	 * determine l2 cache info and memory size for page coloring
999 	 */
1000 	(void) getl2cacheinfo(CPU,
1001 	    &l2cache_sz, &l2cache_linesz, &l2cache_assoc);
1002 	pagecolor_memsz =
1003 	    page_coloring_init(l2cache_sz, l2cache_linesz, l2cache_assoc);
1004 	ADD_TO_ALLOCATIONS(pagecolor_mem, pagecolor_memsz);
1005 	PRM_DEBUG(pagecolor_memsz);
1006 
1007 	page_ctrs_size = page_ctrs_sz();
1008 	ADD_TO_ALLOCATIONS(page_ctrs_mem, page_ctrs_size);
1009 	PRM_DEBUG(page_ctrs_size);
1010 
1011 #if defined(__amd64)
1012 	valloc_sz = ROUND_UP_LPAGE(valloc_sz);
1013 	valloc_base = VALLOC_BASE;
1014 #else	/* __i386 */
1015 	valloc_base = (uintptr_t)(MISC_VA_BASE - valloc_sz);
1016 	valloc_base = P2ALIGN(valloc_base, mmu.level_size[1]);
1017 #endif	/* __i386 */
1018 	PRM_DEBUG(valloc_base);
1019 
1020 	/*
1021 	 * do all the initial allocations
1022 	 */
1023 	perform_allocations();
1024 
1025 	/*
1026 	 * Build phys_install and phys_avail in kernel memspace.
1027 	 * - phys_install should be all memory in the system.
1028 	 * - phys_avail is phys_install minus any memory mapped before this
1029 	 *    point above KERNEL_TEXT.
1030 	 */
1031 	current = phys_install = memlist;
1032 	copy_memlist_filter(bootops->boot_mem->physinstalled, &current, NULL);
1033 	if ((caddr_t)current > (caddr_t)memlist + memlist_sz)
1034 		panic("physinstalled was too big!");
1035 	if (prom_debug)
1036 		print_memlist("phys_install", phys_install);
1037 
1038 	phys_avail = current;
1039 	PRM_POINT("Building phys_avail:\n");
1040 	copy_memlist_filter(bootops->boot_mem->physinstalled, &current,
1041 	    avail_filter);
1042 	if ((caddr_t)current > (caddr_t)memlist + memlist_sz)
1043 		panic("physavail was too big!");
1044 	if (prom_debug)
1045 		print_memlist("phys_avail", phys_avail);
1046 
1047 	/*
1048 	 * setup page coloring
1049 	 */
1050 	page_coloring_setup(pagecolor_mem);
1051 	page_lock_init();	/* currently a no-op */
1052 
1053 	/*
1054 	 * free page list counters
1055 	 */
1056 	(void) page_ctrs_alloc(page_ctrs_mem);
1057 
1058 	/*
1059 	 * Initialize the page structures from the memory lists.
1060 	 */
1061 	availrmem_initial = availrmem = freemem = 0;
1062 	PRM_POINT("Calling kphysm_init()...");
1063 	npages = kphysm_init(pp_base, npages);
1064 	PRM_POINT("kphysm_init() done");
1065 	PRM_DEBUG(npages);
1066 
1067 	init_debug_info();
1068 
1069 	/*
1070 	 * Now that page_t's have been initialized, remove all the
1071 	 * initial allocation pages from the kernel free page lists.
1072 	 */
1073 	boot_mapin((caddr_t)valloc_base, valloc_sz);
1074 	boot_mapin((caddr_t)GDT_VA, MMU_PAGESIZE);
1075 	boot_mapin((caddr_t)DEBUG_INFO_VA, MMU_PAGESIZE);
1076 	PRM_POINT("startup_memlist() done");
1077 
1078 	PRM_DEBUG(valloc_sz);
1079 }
1080 
1081 /*
1082  * Layout the kernel's part of address space and initialize kmem allocator.
1083  */
1084 static void
1085 startup_kmem(void)
1086 {
1087 	PRM_POINT("startup_kmem() starting...");
1088 
1089 #if defined(__amd64)
1090 	if (eprom_kernelbase && eprom_kernelbase != KERNELBASE)
1091 		cmn_err(CE_NOTE, "!kernelbase cannot be changed on 64-bit "
1092 		    "systems.");
1093 	kernelbase = (uintptr_t)KERNELBASE;
1094 	core_base = (uintptr_t)COREHEAP_BASE;
1095 	core_size = (size_t)MISC_VA_BASE - COREHEAP_BASE;
1096 #else	/* __i386 */
1097 	/*
1098 	 * We configure kernelbase based on:
1099 	 *
1100 	 * 1. User-specified kernelbase via the eeprom command. The value cannot
1101 	 *    exceed KERNELBASE_MAX; we large-page align eprom_kernelbase.
1102 	 *
1103 	 * 2. Default to KERNELBASE, lowered by 2X the space needed for page_t's.
1104 	 *    On large memory systems we must lower kernelbase to allow
1105 	 *    enough room for page_t's for all of memory.
1106 	 *
1107 	 * The value set here might be changed a little later.
1108 	 */
1109 	if (eprom_kernelbase) {
1110 		kernelbase = eprom_kernelbase & mmu.level_mask[1];
1111 		if (kernelbase > KERNELBASE_MAX)
1112 			kernelbase = KERNELBASE_MAX;
1113 	} else {
1114 		kernelbase = (uintptr_t)KERNELBASE;
1115 		kernelbase -= ROUND_UP_4MEG(2 * valloc_sz);
1116 	}
1117 	ASSERT((kernelbase & mmu.level_offset[1]) == 0);
1118 	core_base = valloc_base;
1119 	core_size = 0;
1120 #endif	/* __i386 */
1121 
1122 	PRM_DEBUG(core_base);
1123 	PRM_DEBUG(core_size);
1124 	PRM_DEBUG(kernelbase);
1125 
1126 	/*
1127 	 * At this point, we can only use a portion of the kernelheap that
1128 	 * will be available after we boot.  32-bit systems have this
1129 	 * limitation.
1130 	 *
1131 	 * On 32-bit systems we have to leave room to place segmap below
1132 	 * the heap.  We don't yet know how large segmap will be, so we
1133 	 * have to be very conservative.
1134 	 *
1135 	 * On 64 bit systems there should be LOTS of room so just use
1136 	 * the next 4Gig below core_base.
1137 	 */
1138 #if defined(__amd64)
1139 
1140 	boot_kernelheap = (caddr_t)core_base  - FOURGB;
1141 	segmap_reserved = 0;
1142 
1143 #else	/* __i386 */
1144 
1145 	segkp_fromheap = 1;
1146 	segmap_reserved = ROUND_UP_LPAGE(MAX(segmapsize, SEGMAPMAX));
1147 	boot_kernelheap =
1148 	    (caddr_t)ROUND_UP_LPAGE(kernelbase) + segmap_reserved;
1149 
1150 #endif	/* __i386 */
1151 	PRM_DEBUG(boot_kernelheap);
1152 	ekernelheap = (char *)core_base;
1153 	PRM_DEBUG(ekernelheap);
1154 	kernelheap = boot_kernelheap;
1155 
1156 	/*
1157 	 * If segmap is too large we can push the bottom of the kernel heap
1158 	 * higher than the base.  Or worse, it could exceed the top of the
1159 	 * VA space entirely, causing it to wrap around.
1160 	 */
1161 	if (kernelheap >= ekernelheap || (uintptr_t)kernelheap < kernelbase)
1162 		panic("too little memory available for kernelheap,"
1163 			    " use a different kernelbase");
1164 
1165 	/*
1166 	 * Now that we know the real value of kernelbase,
1167 	 * update variables that were initialized with a value of
1168 	 * KERNELBASE (in common/conf/param.c).
1169 	 *
1170 	 * XXX	The problem with this sort of hackery is that the
1171 	 *	compiler just may feel like putting the const declarations
1172 	 *	(in param.c) into the .text section.  Perhaps they should
1173 	 *	just be declared as variables there?
1174 	 */
1175 
1176 #if defined(__amd64)
1177 	ASSERT(_kernelbase == KERNELBASE);
1178 	ASSERT(_userlimit == USERLIMIT);
1179 #else
1180 	*(uintptr_t *)&_kernelbase = kernelbase;
1181 	*(uintptr_t *)&_userlimit = kernelbase;
1182 	*(uintptr_t *)&_userlimit32 = _userlimit;
1183 #endif
1184 	PRM_DEBUG(_kernelbase);
1185 	PRM_DEBUG(_userlimit);
1186 	PRM_DEBUG(_userlimit32);
1187 
1188 	/*
1189 	 * Initialize the kernel heap. Note 3rd argument must be > 1st.
1190 	 */
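	/*
	 * The arguments are the heap bounds, a first-available address one
	 * page above the heap start (hence the note that the 3rd argument
	 * must exceed the 1st), and the bounds of the core heap (core_size
	 * is 0 on 32-bit, so the core range is empty there).
	 */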
1191 	kernelheap_init(boot_kernelheap, ekernelheap,
1192 	    boot_kernelheap + MMU_PAGESIZE,
1193 	    (void *)core_base, (void *)(core_base + core_size));
1194 
1195 	/*
1196 	 * Initialize kernel memory allocator.
1197 	 */
1198 	kmem_init();
1199 
1200 	/*
1201 	 * print this out early so that we know what's going on
1202 	 */
1203 	cmn_err(CE_CONT, "?features: %b\n", x86_feature, FMT_X86_FEATURE);
1204 
1205 	/*
1206 	 * Initialize bp_mapin().
1207 	 */
1208 	bp_init(MMU_PAGESIZE, HAT_STORECACHING_OK);
1209 
1210 	/*
1211 	 * orig_npages is non-zero if physmem has been configured for less
1212 	 * than the available memory.
1213 	 */
1214 	if (orig_npages) {
1215 #ifdef __i386
1216 		/*
1217 		 * use npages for physmem in case it has been temporarily
1218 		 * modified via /etc/system in kmem_init/mod_read_system_file.
1219 		 */
1220 		if (npages == PHYSMEM32) {
1221 			cmn_err(CE_WARN, "!Due to 32-bit virtual"
1222 			    " address space limitations, limiting"
1223 			    " physmem to 0x%lx of 0x%lx available pages",
1224 			    npages, orig_npages);
1225 		} else {
1226 			cmn_err(CE_WARN, "!limiting physmem to 0x%lx of"
1227 			    " 0x%lx available pages", npages, orig_npages);
1228 		}
1229 #else
1230 		cmn_err(CE_WARN, "!limiting physmem to 0x%lx of"
1231 		    " 0x%lx available pages", npages, orig_npages);
1232 #endif
1233 	}
1234 #if defined(__i386)
1235 	if (eprom_kernelbase && (eprom_kernelbase != kernelbase))
1236 		cmn_err(CE_WARN, "kernelbase value, User specified 0x%lx, "
1237 		    "System using 0x%lx",
1238 		    (uintptr_t)eprom_kernelbase, (uintptr_t)kernelbase);
1239 #endif
1240 
1241 #ifdef	KERNELBASE_ABI_MIN
1242 	if (kernelbase < (uintptr_t)KERNELBASE_ABI_MIN) {
1243 		cmn_err(CE_NOTE, "!kernelbase set to 0x%lx, system is not "
1244 		    "i386 ABI compliant.", (uintptr_t)kernelbase);
1245 	}
1246 #endif
1247 
1248 	PRM_POINT("startup_kmem() done");
1249 }
1250 
1251 static void
1252 startup_modules(void)
1253 {
1254 	unsigned int i;
1255 	extern void prom_setup(void);
1256 
1257 	PRM_POINT("startup_modules() starting...");
1258 	/*
1259 	 * Initialize the ten-microsecond timer so that drivers will
1260 	 * not get shortchanged in their init phase. This was
1261 	 * not getting called until clkinit, which on fast cpus
1262 	 * caused drv_usecwait to be way too short.
1263 	 */
1264 	microfind();
1265 
1266 	/*
1267 	 * Read the GMT lag from /etc/rtc_config.
1268 	 */
1269 	sgmtl(process_rtc_config_file());
1270 
1271 	/*
1272 	 * Calculate default settings of system parameters based upon
1273 	 * maxusers, yet allow to be overridden via the /etc/system file.
1274 	 */
1275 	param_calc(0);
1276 
1277 	mod_setup();
1278 
1279 	/*
1280 	 * Initialize system parameters.
1281 	 */
1282 	param_init();
1283 
1284 	/*
1285 	 * Initialize the default brands
1286 	 */
1287 	brand_init();
1288 
1289 	/*
1290 	 * maxmem is the amount of physical memory we're playing with.
1291 	 */
1292 	maxmem = physmem;
1293 
1294 	/*
1295 	 * Initialize the hat layer.
1296 	 */
1297 	hat_init();
1298 
1299 	/*
1300 	 * Initialize segment management stuff.
1301 	 */
1302 	seg_init();
1303 
1304 	if (modload("fs", "specfs") == -1)
1305 		halt("Can't load specfs");
1306 
1307 	if (modload("fs", "devfs") == -1)
1308 		halt("Can't load devfs");
1309 
1310 	if (modload("fs", "dev") == -1)
1311 		halt("Can't load dev");
1312 
1313 	(void) modloadonly("sys", "lbl_edition");
1314 
1315 	dispinit();
1316 
1317 	/*
1318 	 * This is needed here to initialize hw_serial[] for cluster booting.
1319 	 */
1320 	if ((i = modload("misc", "sysinit")) != (unsigned int)-1)
1321 		(void) modunload(i);
1322 	else
1323 		cmn_err(CE_CONT, "sysinit load failed");
1324 
1325 	/* Read cluster configuration data. */
1326 	clconf_init();
1327 
1328 	/*
1329 	 * Create a kernel device tree. First, create rootnex and
1330 	 * then invoke bus specific code to probe devices.
1331 	 */
1332 	setup_ddi();
1333 
1334 	/*
1335 	 * Set up the CPU module subsystem.  Modifies the device tree, so it
1336 	 * must be done after setup_ddi().
1337 	 */
1338 	cmi_init();
1339 
1340 	/*
1341 	 * Initialize the MCA handlers
1342 	 */
1343 	if (x86_feature & X86_MCA)
1344 		cmi_mca_init();
1345 
1346 	/*
1347 	 * Fake a prom tree such that /dev/openprom continues to work
1348 	 */
1349 	PRM_POINT("startup_modules: calling prom_setup...");
1350 	prom_setup();
1351 	PRM_POINT("startup_modules: done");
1352 
1353 	/*
1354 	 * Load all platform specific modules
1355 	 */
1356 	PRM_POINT("startup_modules: calling psm_modload...");
1357 	psm_modload();
1358 
1359 	PRM_POINT("startup_modules() done");
1360 }
1361 
1362 /*
1363  * claim a "setaside" boot page for use in the kernel
1364  */
1365 page_t *
1366 boot_claim_page(pfn_t pfn)
1367 {
1368 	page_t *pp;
1369 
1370 	pp = page_numtopp_nolock(pfn);
1371 	ASSERT(pp != NULL);
1372 
1373 	if (PP_ISBOOTPAGES(pp)) {
1374 		if (pp->p_next != NULL)
1375 			pp->p_next->p_prev = pp->p_prev;
1376 		if (pp->p_prev == NULL)
1377 			bootpages = pp->p_next;
1378 		else
1379 			pp->p_prev->p_next = pp->p_next;
1380 	} else {
1381 		/*
1382 		 * htable_attach() expects a base pagesize page
1383 		 */
1384 		if (pp->p_szc != 0)
1385 			page_boot_demote(pp);
1386 		pp = page_numtopp(pfn, SE_EXCL);
1387 	}
1388 	return (pp);
1389 }
1390 
1391 /*
1392  * Walk through the pagetables looking for pages mapped in by boot.  If the
1393  * setaside flag is set the pages are expected to be returned to the
1394  * kernel later in boot, so we add them to the bootpages list.
1395  */
1396 static void
1397 protect_boot_range(uintptr_t low, uintptr_t high, int setaside)
1398 {
1399 	uintptr_t va = low;
1400 	size_t len;
1401 	uint_t prot;
1402 	pfn_t pfn;
1403 	page_t *pp;
1404 	pgcnt_t boot_protect_cnt = 0;
1405 
1406 	while (kbm_probe(&va, &len, &pfn, &prot) != 0 && va < high) {
1407 		if (va + len >= high)
1408 			panic("0x%lx byte mapping at 0x%p exceeds boot's "
1409 			    "legal range.", len, (void *)va);
1410 
1411 		while (len > 0) {
1412 			pp = page_numtopp_alloc(pfn);
1413 			if (pp != NULL) {
1414 				if (setaside == 0)
1415 					panic("Unexpected mapping by boot.  "
1416 					    "addr=%p pfn=%lx\n",
1417 					    (void *)va, pfn);
1418 
1419 				pp->p_next = bootpages;
1420 				pp->p_prev = NULL;
1421 				PP_SETBOOTPAGES(pp);
1422 				if (bootpages != NULL) {
1423 					bootpages->p_prev = pp;
1424 				}
1425 				bootpages = pp;
1426 				++boot_protect_cnt;
1427 			}
1428 
1429 			++pfn;
1430 			len -= MMU_PAGESIZE;
1431 			va += MMU_PAGESIZE;
1432 		}
1433 	}
1434 	PRM_DEBUG(boot_protect_cnt);
1435 }
1436 
1437 /*
1438  * Finish initializing the VM system, now that we are no longer
1439  * relying on the boot time memory allocators.
1440  */
1441 static void
1442 startup_vm(void)
1443 {
1444 	struct segmap_crargs a;
1445 
1446 	extern int use_brk_lpg, use_stk_lpg;
1447 
1448 	PRM_POINT("startup_vm() starting...");
1449 
1450 	/*
1451 	 * Establish the final size of the kernel's heap, size of segmap,
1452 	 * segkp, etc.
1453 	 */
1454 
1455 #if defined(__amd64)
1456 
1457 	/*
1458 	 * Check if there is enough virtual address space in KPM region to
1459 	 * map physmax.
1460 	 */
1461 	kpm_vbase = (caddr_t)(uintptr_t)SEGKPM_BASE;
1462 	kpm_size = 0;
1463 	if (kpm_desired) {
1464 		kpm_size = ROUND_UP_LPAGE(mmu_ptob(physmax + 1));
1465 		if ((uintptr_t)kpm_vbase + kpm_size > (uintptr_t)VALLOC_BASE) {
1466 			kpm_size = 0;
1467 			kpm_desired = 0;
1468 		}
1469 	}
1470 
1471 	PRM_DEBUG(kpm_size);
1472 	PRM_DEBUG(kpm_vbase);
1473 
1474 	/*
1475 	 * By default we create a seg_kp in 64 bit kernels; it's a little
1476 	 * faster to access than embedding it in the heap.
1477 	 */
1478 	segkp_base = (caddr_t)valloc_base + valloc_sz;
1479 	if (!segkp_fromheap) {
1480 		size_t sz = mmu_ptob(segkpsize);
1481 
1482 		/*
1483 		 * determine size of segkp
1484 		 */
1485 		if (sz < SEGKPMINSIZE || sz > SEGKPMAXSIZE) {
1486 			sz = SEGKPDEFSIZE;
1487 			cmn_err(CE_WARN, "!Illegal value for segkpsize. "
1488 			    "segkpsize has been reset to %ld pages",
1489 			    mmu_btop(sz));
1490 		}
1491 		sz = MIN(sz, MAX(SEGKPMINSIZE, mmu_ptob(physmem)));
1492 
1493 		segkpsize = mmu_btop(ROUND_UP_LPAGE(sz));
1494 	}
1495 	PRM_DEBUG(segkp_base);
1496 	PRM_DEBUG(segkpsize);
1497 
1498 	segzio_base = segkp_base + mmu_ptob(segkpsize);
1499 	if (segzio_fromheap) {
1500 		segziosize = 0;
1501 	} else {
1502 		size_t size;
1503 		size_t physmem_b = mmu_ptob(physmem);
1504 
1505 		/* size is in bytes, segziosize is in pages */
1506 		if (segziosize == 0) {
1507 			size = physmem_b;
1508 		} else {
1509 			size = mmu_ptob(segziosize);
1510 		}
1511 
1512 		if (size < SEGZIOMINSIZE) {
1513 			size = SEGZIOMINSIZE;
1514 		} else if (size > SEGZIOMAXSIZE) {
1515 			size = SEGZIOMAXSIZE;
1516 			/*
1517 			 * SEGZIOMAXSIZE is capped at 512gb so that segzio
1518 			 * doesn't consume all of KVA.  However, if we have a
1519 			 * system that has more than 512gb of physical memory,
1520 			 * we grow segzio by about half of the amount by
1521 			 * which physical memory exceeds 512gb, rather than
1522 			 * capping it there.
1523 			 */
1524 			if (physmem_b > SEGZIOMAXSIZE) {
1525 				size += (physmem_b - SEGZIOMAXSIZE) / 2;
1526 			}
1527 		}
1528 		segziosize = mmu_btop(ROUND_UP_LPAGE(size));
1529 	}
1530 	PRM_DEBUG(segziosize);
1531 	PRM_DEBUG(segzio_base);
1532 
1533 	/*
1534 	 * Put the range of VA for device mappings next, kmdb knows to not
1535 	 * grep in this range of addresses.
1536 	 */
1537 	toxic_addr =
1538 	    ROUND_UP_LPAGE((uintptr_t)segzio_base + mmu_ptob(segziosize));
1539 	PRM_DEBUG(toxic_addr);
1540 	segmap_start = ROUND_UP_LPAGE(toxic_addr + toxic_size);
1541 #else /* __i386 */
1542 	segmap_start = ROUND_UP_LPAGE(kernelbase);
1543 #endif /* __i386 */
1544 	PRM_DEBUG(segmap_start);
1545 	ASSERT((caddr_t)segmap_start < boot_kernelheap);
1546 
1547 	/*
1548 	 * Users can change segmapsize through eeprom or /etc/system.
1549 	 * If the variable is tuned through eeprom, there is no upper
1550 	 * bound on the size of segmap.  If it is tuned through
1551 	 * /etc/system on 32-bit systems, it must be no larger than we
1552 	 * planned for in startup_memlist().
1553 	 */
1554 	segmapsize = MAX(ROUND_UP_LPAGE(segmapsize), SEGMAPDEFAULT);
1555 
1556 #if defined(__i386)
1557 	if (segmapsize > segmap_reserved) {
1558 		cmn_err(CE_NOTE, "!segmapsize may not be set > 0x%lx in "
1559 		    "/etc/system.  Use eeprom.", (long)SEGMAPMAX);
1560 		segmapsize = segmap_reserved;
1561 	}
1562 	/*
1563 	 * 32-bit systems don't have segkpm or segkp, so segmap appears at
1564 	 * the bottom of the kernel's address range.  Set aside space for a
1565 	 * small red zone just below the start of segmap.
1566 	 */
1567 	segmap_start += KERNEL_REDZONE_SIZE;
1568 	segmapsize -= KERNEL_REDZONE_SIZE;
1569 #endif
1570 
1571 	PRM_DEBUG(segmap_start);
1572 	PRM_DEBUG(segmapsize);
1573 	final_kernelheap = (caddr_t)ROUND_UP_LPAGE(segmap_start + segmapsize);
1574 	PRM_DEBUG(final_kernelheap);
1575 
1576 	/*
1577 	 * Do final allocations of HAT data structures that need to
1578 	 * be allocated before quiescing the boot loader.
1579 	 */
1580 	PRM_POINT("Calling hat_kern_alloc()...");
1581 	hat_kern_alloc((caddr_t)segmap_start, segmapsize, ekernelheap);
1582 	PRM_POINT("hat_kern_alloc() done");
1583 
1584 	/*
1585 	 * Setup MTRR (Memory type range registers)
1586 	 */
1587 	setup_mtrr();
1588 
1589 	/*
1590 	 * The next two loops are done in distinct steps in order
1591 	 * to be sure that any page that is doubly mapped (both above
1592 	 * KERNEL_TEXT and below kernelbase) is dealt with correctly.
1593 	 * Note this may never happen, but it might someday.
1594 	 */
1595 	bootpages = NULL;
1596 	PRM_POINT("Protecting boot pages");
1597 
1598 	/*
1599 	 * Protect any pages mapped above KERNEL_TEXT that somehow have
1600 	 * page_t's. This can only happen if something weird allocated
1601 	 * in this range (like kadb/kmdb).
1602 	 */
1603 	protect_boot_range(KERNEL_TEXT, (uintptr_t)-1, 0);
1604 
1605 	/*
1606 	 * Before we can take over memory allocation/mapping from the boot
1607 	 * loader we must remove from our free page lists any boot allocated
1608 	 * pages that stay mapped until release_bootstrap().
1609 	 */
1610 	protect_boot_range(0, kernelbase, 1);
1611 
1612 	/*
1613 	 * Switch to running on regular HAT (not boot_mmu)
1614 	 */
1615 	PRM_POINT("Calling hat_kern_setup()...");
1616 	hat_kern_setup();
1617 
1618 	/*
1619 	 * It is no longer safe to call BOP_ALLOC(), so make sure we don't.
1620 	 */
1621 	bop_no_more_mem();
1622 
1623 	PRM_POINT("hat_kern_setup() done");
1624 
1625 	hat_cpu_online(CPU);
1626 
1627 	/*
1628 	 * Initialize VM system
1629 	 */
1630 	PRM_POINT("Calling kvm_init()...");
1631 	kvm_init();
1632 	PRM_POINT("kvm_init() done");
1633 
1634 	/*
1635 	 * Tell kmdb that the VM system is now working
1636 	 */
1637 	if (boothowto & RB_DEBUG)
1638 		kdi_dvec_vmready();
1639 
1640 	/*
1641 	 * Mangle the brand string etc.
1642 	 */
1643 	cpuid_pass3(CPU);
1644 
1645 	/*
1646 	 * Now that we can use memory outside the top 4GB (on 64-bit
1647 	 * systems) and we know the size of segmap, we can set the final
1648 	 * size of the kernel's heap.
1649 	 */
1650 	if (final_kernelheap < boot_kernelheap) {
1651 		PRM_POINT("kernelheap_extend()");
1652 		PRM_DEBUG(boot_kernelheap);
1653 		PRM_DEBUG(final_kernelheap);
1654 		kernelheap_extend(final_kernelheap, boot_kernelheap);
1655 	}
1656 
1657 #if defined(__amd64)
1658 
1659 	/*
1660 	 * Create the device arena for toxic (to dtrace/kmdb) mappings.
1661 	 */
1662 	device_arena = vmem_create("device", (void *)toxic_addr,
1663 	    toxic_size, MMU_PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
1664 
1665 #else	/* __i386 */
1666 
1667 	/*
1668 	 * allocate the bit map that tracks toxic pages
1669 	 */
1670 	toxic_bit_map_len = btop((ulong_t)(valloc_base - kernelbase));
1671 	PRM_DEBUG(toxic_bit_map_len);
1672 	toxic_bit_map =
1673 	    kmem_zalloc(BT_SIZEOFMAP(toxic_bit_map_len), KM_NOSLEEP);
1674 	ASSERT(toxic_bit_map != NULL);
1675 	PRM_DEBUG(toxic_bit_map);
1676 
1677 #endif	/* __i386 */
1678 
1679 
1680 	/*
1681 	 * Now that we've got more VA, as well as the ability to allocate from
1682 	 * it, tell the debugger.
1683 	 */
1684 	if (boothowto & RB_DEBUG)
1685 		kdi_dvec_memavail();
1686 
1687 	/*
1688 	 * The following code installs a special page fault handler (#pf)
1689 	 * to work around a pentium bug.
1690 	 */
1691 #if !defined(__amd64)
1692 	if (x86_type == X86_TYPE_P5) {
1693 		desctbr_t idtr;
1694 		gate_desc_t *newidt;
1695 		struct machcpu *mcpu = &CPU->cpu_m;
1696 
1697 		if ((newidt = kmem_zalloc(MMU_PAGESIZE, KM_NOSLEEP)) == NULL)
1698 			panic("failed to install pentium_pftrap");
1699 
1700 		bcopy(idt0, newidt, sizeof (idt0));
1701 		set_gatesegd(&newidt[T_PGFLT], &pentium_pftrap,
1702 		    KCS_SEL, SDT_SYSIGT, SEL_KPL);
1703 
1704 		(void) as_setprot(&kas, (caddr_t)newidt, MMU_PAGESIZE,
1705 		    PROT_READ|PROT_EXEC);
1706 
1707 		mcpu->mcpu_idt = newidt;
1708 		idtr.dtr_base = (uintptr_t)mcpu->mcpu_idt;
1709 		idtr.dtr_limit = sizeof (idt0) - 1;
1710 		wr_idtr(&idtr);
1711 	}
1712 #endif	/* !__amd64 */
1713 
1714 	/*
1715 	 * Map page pfn=0 for drivers, such as kd, that need to pick up
1716 	 * parameters left there by controllers/BIOS.
1717 	 */
1718 	PRM_POINT("setting up p0_va");
1719 	p0_va = i86devmap(0, 1, PROT_READ);
1720 	PRM_DEBUG(p0_va);
1721 
1722 	cmn_err(CE_CONT, "?mem = %luK (0x%lx)\n",
1723 	    physinstalled << (MMU_PAGESHIFT - 10), ptob(physinstalled));
1724 
1725 	/*
1726 	 * disable automatic large pages for small memory systems or
1727 	 * when the disable flag is set.
1728 	 */
1729 	if (!auto_lpg_disable && mmu.max_page_level > 0) {
1730 		max_uheap_lpsize = LEVEL_SIZE(1);
1731 		max_ustack_lpsize = LEVEL_SIZE(1);
1732 		max_privmap_lpsize = LEVEL_SIZE(1);
1733 		max_uidata_lpsize = LEVEL_SIZE(1);
1734 		max_utext_lpsize = LEVEL_SIZE(1);
1735 		max_shm_lpsize = LEVEL_SIZE(1);
1736 	}
1737 	if (physmem < privm_lpg_min_physmem || mmu.max_page_level == 0 ||
1738 	    auto_lpg_disable) {
1739 		use_brk_lpg = 0;
1740 		use_stk_lpg = 0;
1741 	}
1742 	if (mmu.max_page_level > 0) {
1743 		mcntl0_lpsize = LEVEL_SIZE(1);
1744 	}
1745 
1746 	PRM_POINT("Calling hat_init_finish()...");
1747 	hat_init_finish();
1748 	PRM_POINT("hat_init_finish() done");
1749 
1750 	/*
1751 	 * Initialize the segkp segment type.
1752 	 */
1753 	rw_enter(&kas.a_lock, RW_WRITER);
1754 	PRM_POINT("Attaching segkp");
1755 	if (segkp_fromheap) {
1756 		segkp->s_as = &kas;
1757 	} else if (seg_attach(&kas, (caddr_t)segkp_base, mmu_ptob(segkpsize),
1758 	    segkp) < 0) {
1759 		panic("startup: cannot attach segkp");
1760 		/*NOTREACHED*/
1761 	}
1762 	PRM_POINT("Doing segkp_create()");
1763 	if (segkp_create(segkp) != 0) {
1764 		panic("startup: segkp_create failed");
1765 		/*NOTREACHED*/
1766 	}
1767 	PRM_DEBUG(segkp);
1768 	rw_exit(&kas.a_lock);
1769 
1770 	/*
1771 	 * kpm segment
1772 	 */
1773 	segmap_kpm = 0;
1774 	if (kpm_desired) {
1775 		kpm_init();
1776 		kpm_enable = 1;
1777 		vpm_enable = 1;
1778 	}
1779 
1780 	/*
1781 	 * Now create segmap segment.
1782 	 */
1783 	rw_enter(&kas.a_lock, RW_WRITER);
1784 	if (seg_attach(&kas, (caddr_t)segmap_start, segmapsize, segmap) < 0) {
1785 		panic("cannot attach segmap");
1786 		/*NOTREACHED*/
1787 	}
1788 	PRM_DEBUG(segmap);
1789 
1790 	a.prot = PROT_READ | PROT_WRITE;
1791 	a.shmsize = 0;
1792 	a.nfreelist = segmapfreelists;
1793 
1794 	if (segmap_create(segmap, (caddr_t)&a) != 0)
1795 		panic("segmap_create segmap");
1796 	rw_exit(&kas.a_lock);
1797 
1798 	setup_vaddr_for_ppcopy(CPU);
1799 
1800 	segdev_init();
1801 	pmem_init();
1802 
1803 	PRM_POINT("startup_vm() done");
1804 }
1805 
1806 /*
1807  * Load a tod module for the non-standard tod part found on this system.
1808  */
1809 static void
1810 load_tod_module(char *todmod)
1811 {
1812 	if (modload("tod", todmod) == -1)
1813 		halt("Can't load TOD module");
1814 }
1815 
1816 static void
1817 startup_end(void)
1818 {
1819 	extern void setx86isalist(void);
1820 
1821 	PRM_POINT("startup_end() starting...");
1822 
1823 	/*
1824 	 * Perform tasks that get done after most of the VM
1825 	 * initialization has been done but before the clock
1826 	 * and other devices get started.
1827 	 */
1828 	kern_setup1();
1829 
1830 	/*
1831 	 * Perform CPC initialization for this CPU.
1832 	 */
1833 	kcpc_hw_init(CPU);
1834 
1835 #if defined(OPTERON_WORKAROUND_6323525)
1836 	if (opteron_workaround_6323525)
1837 		patch_workaround_6323525();
1838 #endif
1839 	/*
1840 	 * If needed, load TOD module now so that ddi_get_time(9F) etc. work
1841 	 * (For now, "needed" means tod_module_name is set in /etc/system.)
1842 	 */
1843 	if (tod_module_name != NULL) {
1844 		PRM_POINT("load_tod_module()");
1845 		load_tod_module(tod_module_name);
1846 	}
1847 
1848 	/*
1849 	 * Configure the system.
1850 	 */
1851 	PRM_POINT("Calling configure()...");
1852 	configure();		/* set up devices */
1853 	PRM_POINT("configure() done");
1854 
1855 	/*
1856 	 * Set the isa_list string to the defined instruction sets we
1857 	 * support.
1858 	 */
1859 	setx86isalist();
1860 	cpu_intr_alloc(CPU, NINTR_THREADS);
1861 	psm_install();
1862 
1863 	/*
1864 	 * We're done with bootops.  We don't unmap the bootstrap yet because
1865 	 * we're still using bootsvcs.
1866 	 */
1867 	PRM_POINT("NULLing out bootops");
1868 	*bootopsp = (struct bootops *)NULL;
1869 	bootops = (struct bootops *)NULL;
1870 
1871 	PRM_POINT("Enabling interrupts");
1872 	(*picinitf)();
1873 	sti();
1874 
1875 	(void) add_avsoftintr((void *)&softlevel1_hdl, 1, softlevel1,
1876 		"softlevel1", NULL, NULL); /* XXX to be moved later */
1877 
1878 	PRM_POINT("startup_end() done");
1879 }
1880 
1881 extern char hw_serial[];
1882 char *_hs1107 = hw_serial;
1883 ulong_t  _bdhs34;
1884 
1885 void
1886 post_startup(void)
1887 {
1888 	/*
1889 	 * Set the system-wide, processor-specific flags to be passed
1890 	 * to userland via the aux vector for performance hints and
1891 	 * instruction set extensions.
1892 	 */
1893 	bind_hwcap();
1894 
1895 	/*
1896 	 * Load the System Management BIOS into the global ksmbios
1897 	 * handle, if an SMBIOS is present on this system.
1898 	 */
1899 	ksmbios = smbios_open(NULL, SMB_VERSION, ksmbios_flags, NULL);
1900 
1901 	/*
1902 	 * Startup the memory scrubber.
1903 	 */
1904 	memscrub_init();
1905 
1906 	/*
1907 	 * Complete CPU module initialization
1908 	 */
1909 	cmi_post_init();
1910 
1911 	/*
1912 	 * Perform forceloading tasks for /etc/system.
1913 	 */
1914 	(void) mod_sysctl(SYS_FORCELOAD, NULL);
1915 
1916 	/*
1917 	 * ON4.0: Force the /proc module in until clock interrupt handling is fixed.
1918 	 * ON4.0: This must be fixed or restated in /etc/system.
1919 	 */
1920 	(void) modload("fs", "procfs");
1921 
1922 #if defined(__i386)
1923 	/*
1924 	 * Check for required working floating point hardware, unless
1925 	 * FP hardware has been explicitly disabled.
1926 	 */
1927 	if (fpu_exists && (fpu_pentium_fdivbug || fp_kind == FP_NO))
1928 		halt("No working FP hardware found");
1929 #endif
1930 
1931 	maxmem = freemem;
1932 
1933 	add_cpunode2devtree(CPU->cpu_id, CPU->cpu_m.mcpu_cpi);
1934 }
1935 
1936 static int
1937 pp_in_ramdisk(page_t *pp)
1938 {
1939 	extern uint64_t ramdisk_start, ramdisk_end;
1940 
1941 	return ((pp->p_pagenum >= btop(ramdisk_start)) &&
1942 	    (pp->p_pagenum < btopr(ramdisk_end)));
1943 }
1944 
1945 void
1946 release_bootstrap(void)
1947 {
1948 	int root_is_ramdisk;
1949 	page_t *pp;
1950 	extern void kobj_boot_unmountroot(void);
1951 	extern dev_t rootdev;
1952 
1953 	/* unmount boot ramdisk and release kmem usage */
1954 	kobj_boot_unmountroot();
1955 
1956 	/*
1957 	 * We're finished using the boot loader so free its pages.
1958 	 */
1959 	PRM_POINT("Unmapping lower boot pages");
1960 	clear_boot_mappings(0, _userlimit);
1961 	postbootkernelbase = kernelbase;
1962 
1963 	/*
1964 	 * If root isn't on the ramdisk, destroy the hardcoded
1965 	 * ramdisk node now and release the memory. Otherwise, the
1966 	 * ramdisk memory is kept in rd_pages.
1967 	 */
1968 	root_is_ramdisk = (getmajor(rootdev) == ddi_name_to_major("ramdisk"));
1969 	if (!root_is_ramdisk) {
1970 		dev_info_t *dip = ddi_find_devinfo("ramdisk", -1, 0);
1971 		ASSERT(dip && ddi_get_parent(dip) == ddi_root_node());
1972 		ndi_rele_devi(dip);	/* held from ddi_find_devinfo */
1973 		(void) ddi_remove_child(dip, 0);
1974 	}
1975 
1976 	PRM_POINT("Releasing boot pages");
1977 	while (bootpages) {
1978 		pp = bootpages;
1979 		bootpages = pp->p_next;
1980 		if (root_is_ramdisk && pp_in_ramdisk(pp)) {
1981 			pp->p_next = rd_pages;
1982 			rd_pages = pp;
1983 			continue;
1984 		}
1985 		pp->p_next = (struct page *)0;
1986 		pp->p_prev = (struct page *)0;
1987 		PP_CLRBOOTPAGES(pp);
1988 		page_free(pp, 1);
1989 	}
1990 	PRM_POINT("Boot pages released");
1991 
1992 	/*
1993 	 * Find 1 page below 1 MB so that other processors can boot up.
1994 	 * Make sure it has a kernel VA as well as a 1:1 mapping.
1995 	 * We should have just freed one up.
1996 	 */
1997 	if (use_mp) {
1998 		pfn_t pfn;
1999 
2000 		for (pfn = 1; pfn < btop(1*1024*1024); pfn++) {
2001 			if (page_numtopp_alloc(pfn) == NULL)
2002 				continue;
2003 			rm_platter_va = i86devmap(pfn, 1,
2004 			    PROT_READ | PROT_WRITE | PROT_EXEC);
2005 			rm_platter_pa = ptob(pfn);
2006 			hat_devload(kas.a_hat,
2007 			    (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
2008 			    pfn, PROT_READ | PROT_WRITE | PROT_EXEC,
2009 			    HAT_LOAD_NOCONSIST);
2010 			break;
2011 		}
2012 		if (pfn == btop(1*1024*1024))
2013 			panic("No page available for starting "
2014 			    "other processors");
2015 	}
2016 
2017 }
2018 
2019 /*
2020  * Initialize the platform-specific parts of a page_t.
2021  */
2022 void
2023 add_physmem_cb(page_t *pp, pfn_t pnum)
2024 {
2025 	pp->p_pagenum = pnum;
2026 	pp->p_mapping = NULL;
2027 	pp->p_embed = 0;
2028 	pp->p_share = 0;
2029 	pp->p_mlentry = 0;
2030 }
2031 
2032 /*
2033  * kphysm_init() initializes physical memory.
2034  */
2035 static pgcnt_t
2036 kphysm_init(
2037 	page_t *pp,
2038 	pgcnt_t npages)
2039 {
2040 	struct memlist	*pmem;
2041 	struct memseg	*cur_memseg;
2042 	pfn_t		base_pfn;
2043 	pgcnt_t		num;
2044 	pgcnt_t		pages_done = 0;
2045 	uint64_t	addr;
2046 	uint64_t	size;
2047 	extern pfn_t	ddiphysmin;
2048 
2049 	ASSERT(page_hash != NULL && page_hashsz != 0);
2050 
2051 	cur_memseg = memseg_base;
2052 	for (pmem = phys_avail; pmem && npages; pmem = pmem->next) {
2053 		/*
2054 		 * In a 32-bit kernel we can't use higher memory if we're
2055 		 * not booting in PAE mode. This check takes care of that.
2056 		 */
2057 		addr = pmem->address;
2058 		size = pmem->size;
2059 		if (btop(addr) > physmax)
2060 			continue;
2061 
2062 		/*
2063 		 * align addr and size - they may not be at page boundaries
2064 		 */
2065 		if ((addr & MMU_PAGEOFFSET) != 0) {
2066 			addr += MMU_PAGEOFFSET;
2067 			addr &= ~(uint64_t)MMU_PAGEOFFSET;
2068 			size -= addr - pmem->address;
2069 		}
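		/*
		 * Illustrative example (hypothetical values): if
		 * pmem->address were 0x1234 with 4K pages, addr would round
		 * up to 0x2000 and size would shrink by 0xdcc bytes.
		 */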
2070 
2071 		/* only process pages below or equal to physmax */
2072 		if ((btop(addr + size) - 1) > physmax)
2073 			size = ptob(physmax - btop(addr) + 1);
2074 
2075 		num = btop(size);
2076 		if (num == 0)
2077 			continue;
2078 
2079 		if (num > npages)
2080 			num = npages;
2081 
2082 		npages -= num;
2083 		pages_done += num;
2084 		base_pfn = btop(addr);
2085 
2086 		if (prom_debug)
2087 			prom_printf("MEMSEG addr=0x%" PRIx64
2088 			    " pgs=0x%lx pfn 0x%lx-0x%lx\n",
2089 			    addr, num, base_pfn, base_pfn + num);
2090 
2091 		/*
2092 		 * Ignore pages below ddiphysmin to simplify ddi memory
2093 		 * allocation with non-zero addr_lo requests.
2094 		 */
2095 		if (base_pfn < ddiphysmin) {
2096 			if (base_pfn + num <= ddiphysmin)
2097 				continue;
2098 			pp += (ddiphysmin - base_pfn);
2099 			num -= (ddiphysmin - base_pfn);
2100 			base_pfn = ddiphysmin;
2101 		}
2102 
2103 		/*
2104 		 * Build the memsegs entry
2105 		 */
2106 		cur_memseg->pages = pp;
2107 		cur_memseg->epages = pp + num;
2108 		cur_memseg->pages_base = base_pfn;
2109 		cur_memseg->pages_end = base_pfn + num;
2110 
2111 		/*
2112 		 * Insert into the memseg list in decreasing pfn range order.
2113 		 * Low memory is typically more fragmented, so this ordering
2114 		 * keeps the larger ranges at the front of the list for code
2115 		 * that searches the memsegs.
2116 		 * This ASSERTs that the memsegs coming in from boot are in
2117 		 * increasing physical address order and do not overlap.
2118 		 */
2119 		if (memsegs != NULL) {
2120 			ASSERT(cur_memseg->pages_base >= memsegs->pages_end);
2121 			cur_memseg->next = memsegs;
2122 		}
2123 		memsegs = cur_memseg;
2124 
2125 		/*
2126 		 * add_physmem() initializes the PSM part of the page
2127 		 * struct by calling the PSM back with add_physmem_cb().
2128 		 * In addition it coalesces pages into larger pages as
2129 		 * it initializes them.
2130 		 */
2131 		add_physmem(pp, num, base_pfn);
2132 		cur_memseg++;
2133 		availrmem_initial += num;
2134 		availrmem += num;
2135 
2136 		pp += num;
2137 	}
2138 
2139 	PRM_DEBUG(availrmem_initial);
2140 	PRM_DEBUG(availrmem);
2141 	PRM_DEBUG(freemem);
2142 	build_pfn_hash();
2143 	return (pages_done);
2144 }
2145 
2146 /*
2147  * Kernel VM initialization.
2148  */
2149 static void
2150 kvm_init(void)
2151 {
2152 	ASSERT((((uintptr_t)s_text) & MMU_PAGEOFFSET) == 0);
2153 
2154 	/*
2155 	 * Put the kernel segments in kernel address space.
2156 	 */
2157 	rw_enter(&kas.a_lock, RW_WRITER);
2158 	as_avlinit(&kas);
2159 
2160 	(void) seg_attach(&kas, s_text, e_moddata - s_text, &ktextseg);
2161 	(void) segkmem_create(&ktextseg);
2162 
2163 	(void) seg_attach(&kas, (caddr_t)valloc_base, valloc_sz, &kvalloc);
2164 	(void) segkmem_create(&kvalloc);
2165 
2166 	/*
2167 	 * We're about to map out /boot.  This is the beginning of the
2168 	 * system resource management transition. We can no longer
2169 	 * call into /boot for I/O or memory allocations.
2170 	 */
2171 	(void) seg_attach(&kas, final_kernelheap,
2172 	    ekernelheap - final_kernelheap, &kvseg);
2173 	(void) segkmem_create(&kvseg);
2174 
2175 	if (core_size > 0) {
2176 		PRM_POINT("attaching kvseg_core");
2177 		(void) seg_attach(&kas, (caddr_t)core_base, core_size,
2178 		    &kvseg_core);
2179 		(void) segkmem_create(&kvseg_core);
2180 	}
2181 
2182 	if (segziosize > 0) {
2183 		PRM_POINT("attaching segzio");
2184 		(void) seg_attach(&kas, segzio_base, mmu_ptob(segziosize),
2185 		    &kzioseg);
2186 		(void) segkmem_zio_create(&kzioseg);
2187 
2188 		/* create zio area covering new segment */
2189 		segkmem_zio_init(segzio_base, mmu_ptob(segziosize));
2190 	}
2191 
2192 	(void) seg_attach(&kas, kdi_segdebugbase, kdi_segdebugsize, &kdebugseg);
2193 	(void) segkmem_create(&kdebugseg);
2194 
2195 	rw_exit(&kas.a_lock);
2196 
2197 	/*
2198 	 * Ensure that the red zone at kernelbase is never accessible.
2199 	 */
2200 	PRM_POINT("protecting redzone");
2201 	(void) as_setprot(&kas, (caddr_t)kernelbase, KERNEL_REDZONE_SIZE, 0);
2202 
2203 	/*
2204 	 * Make the text writable so that it can be hot patched by DTrace.
2205 	 */
2206 	(void) as_setprot(&kas, s_text, e_modtext - s_text,
2207 	    PROT_READ | PROT_WRITE | PROT_EXEC);
2208 
2209 	/*
2210 	 * Make kernel data writable through the end of module data.
2211 	 */
2212 	(void) as_setprot(&kas, s_data, e_moddata - s_data,
2213 	    PROT_READ | PROT_WRITE | PROT_EXEC);
2214 }
2215 
2216 /*
2217  * These are MTRR registers supported by P6
2218  */
2219 static struct	mtrrvar	mtrrphys_arr[MAX_MTRRVAR];
2220 static uint64_t mtrr64k, mtrr16k1, mtrr16k2;
2221 static uint64_t mtrr4k1, mtrr4k2, mtrr4k3;
2222 static uint64_t mtrr4k4, mtrr4k5, mtrr4k6;
2223 static uint64_t mtrr4k7, mtrr4k8, mtrrcap;
2224 uint64_t mtrrdef, pat_attr_reg;
2225 
2226 /*
2227  * Disable reprogramming of MTRRs by default.
2228  */
2229 int	enable_relaxed_mtrr = 0;
2230 
2231 void
2232 setup_mtrr(void)
2233 {
2234 	int i, ecx;
2235 	int vcnt;
2236 	struct	mtrrvar	*mtrrphys;
2237 
2238 	if (!(x86_feature & X86_MTRR))
2239 		return;
2240 
2241 	mtrrcap = rdmsr(REG_MTRRCAP);
2242 	mtrrdef = rdmsr(REG_MTRRDEF);
2243 	if (mtrrcap & MTRRCAP_FIX) {
2244 		mtrr64k = rdmsr(REG_MTRR64K);
2245 		mtrr16k1 = rdmsr(REG_MTRR16K1);
2246 		mtrr16k2 = rdmsr(REG_MTRR16K2);
2247 		mtrr4k1 = rdmsr(REG_MTRR4K1);
2248 		mtrr4k2 = rdmsr(REG_MTRR4K2);
2249 		mtrr4k3 = rdmsr(REG_MTRR4K3);
2250 		mtrr4k4 = rdmsr(REG_MTRR4K4);
2251 		mtrr4k5 = rdmsr(REG_MTRR4K5);
2252 		mtrr4k6 = rdmsr(REG_MTRR4K6);
2253 		mtrr4k7 = rdmsr(REG_MTRR4K7);
2254 		mtrr4k8 = rdmsr(REG_MTRR4K8);
2255 	}
2256 	if ((vcnt = (mtrrcap & MTRRCAP_VCNTMASK)) > MAX_MTRRVAR)
2257 		vcnt = MAX_MTRRVAR;
2258 
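	/*
	 * Each variable range MTRR is an MSR pair (PHYSBASEn, PHYSMASKn) at
	 * consecutive addresses, hence ecx advances by 2 per iteration.
	 */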
2259 	for (i = 0, ecx = REG_MTRRPHYSBASE0, mtrrphys = mtrrphys_arr;
2260 	    i < vcnt - 1; i++, ecx += 2, mtrrphys++) {
2261 		mtrrphys->mtrrphys_base = rdmsr(ecx);
2262 		mtrrphys->mtrrphys_mask = rdmsr(ecx + 1);
2263 		if ((x86_feature & X86_PAT) && enable_relaxed_mtrr) {
2264 			mtrrphys->mtrrphys_mask &= ~MTRRPHYSMASK_V;
2265 		}
2266 	}
2267 	if (x86_feature & X86_PAT) {
2268 		if (enable_relaxed_mtrr)
2269 			mtrrdef = MTRR_TYPE_WB|MTRRDEF_FE|MTRRDEF_E;
2270 		pat_attr_reg = PAT_DEFAULT_ATTRIBUTE;
2271 	}
2272 
2273 	mtrr_sync();
2274 }
2275 
2276 /*
2277  * Sync the current CPU's MTRRs with the in-core copy of the MTRRs.
2278  * This function has to be invoked with interrupts disabled.
2279  * Currently we do not capture other CPUs. This is invoked on cpu0
2280  * just after reading /etc/system.
2281  * On other CPUs it is invoked from mp_startup().
2282  */
2283 void
2284 mtrr_sync(void)
2285 {
2286 	uint_t	crvalue, cr0_orig;
2287 	int	vcnt, i, ecx;
2288 	struct	mtrrvar	*mtrrphys;
2289 
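	/*
	 * Follow the documented MTRR update sequence: disable caching
	 * (CR0.CD = 1, CR0.NW = 0), flush the caches and the TLB, write the
	 * PAT and MTRR MSRs, then flush again and restore CR0 when done.
	 */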
2290 	cr0_orig = crvalue = getcr0();
2291 	crvalue |= CR0_CD;
2292 	crvalue &= ~CR0_NW;
2293 	setcr0(crvalue);
2294 	invalidate_cache();
2295 
2296 	reload_cr3();
2297 	if (x86_feature & X86_PAT)
2298 		wrmsr(REG_MTRRPAT, pat_attr_reg);
2299 
2300 	wrmsr(REG_MTRRDEF, rdmsr(REG_MTRRDEF) &
2301 	    ~((uint64_t)(uintptr_t)MTRRDEF_E));
2302 
2303 	if (mtrrcap & MTRRCAP_FIX) {
2304 		wrmsr(REG_MTRR64K, mtrr64k);
2305 		wrmsr(REG_MTRR16K1, mtrr16k1);
2306 		wrmsr(REG_MTRR16K2, mtrr16k2);
2307 		wrmsr(REG_MTRR4K1, mtrr4k1);
2308 		wrmsr(REG_MTRR4K2, mtrr4k2);
2309 		wrmsr(REG_MTRR4K3, mtrr4k3);
2310 		wrmsr(REG_MTRR4K4, mtrr4k4);
2311 		wrmsr(REG_MTRR4K5, mtrr4k5);
2312 		wrmsr(REG_MTRR4K6, mtrr4k6);
2313 		wrmsr(REG_MTRR4K7, mtrr4k7);
2314 		wrmsr(REG_MTRR4K8, mtrr4k8);
2315 	}
2316 	if ((vcnt = (mtrrcap & MTRRCAP_VCNTMASK)) > MAX_MTRRVAR)
2317 		vcnt = MAX_MTRRVAR;
2318 	for (i = 0, ecx = REG_MTRRPHYSBASE0, mtrrphys = mtrrphys_arr;
2319 	    i < vcnt - 1; i++, ecx += 2, mtrrphys++) {
2320 		wrmsr(ecx, mtrrphys->mtrrphys_base);
2321 		wrmsr(ecx + 1, mtrrphys->mtrrphys_mask);
2322 	}
2323 	wrmsr(REG_MTRRDEF, mtrrdef);
2324 
2325 	reload_cr3();
2326 	invalidate_cache();
2327 	setcr0(cr0_orig);
2328 }
2329 
2330 /*
2331  * Resync the MTRRs so that the BIOS is happy. Called from mdboot().
2332  */
2333 void
2334 mtrr_resync(void)
2335 {
2336 	if ((x86_feature & X86_PAT) && enable_relaxed_mtrr) {
2337 		/*
2338 		 * We could have changed the default MTRR definition.
2339 		 * Put it back to uncached, which is what it is at power-on.
2340 		 */
2341 		mtrrdef = MTRR_TYPE_UC|MTRRDEF_FE|MTRRDEF_E;
2342 		mtrr_sync();
2343 	}
2344 }
2345 
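/*
 * Read boot properties describing the hardware configuration and VM
 * tunables.  As a hypothetical example, a value such as "segmapsize" could
 * be supplied from the boot environment (e.g. a loader -B segmapsize=...
 * option) instead of using the compiled-in default.
 */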
2346 void
2347 get_system_configuration(void)
2348 {
2349 	char	prop[32];
2350 	u_longlong_t nodes_ll, cpus_pernode_ll, lvalue;
2351 
2352 	if (((BOP_GETPROPLEN(bootops, "nodes") > sizeof (prop)) ||
2353 		(BOP_GETPROP(bootops, "nodes", prop) < 0) 	||
2354 		(kobj_getvalue(prop, &nodes_ll) == -1) ||
2355 		(nodes_ll > MAXNODES))			   ||
2356 	    ((BOP_GETPROPLEN(bootops, "cpus_pernode") > sizeof (prop)) ||
2357 		(BOP_GETPROP(bootops, "cpus_pernode", prop) < 0) ||
2358 		(kobj_getvalue(prop, &cpus_pernode_ll) == -1))) {
2359 
2360 		system_hardware.hd_nodes = 1;
2361 		system_hardware.hd_cpus_per_node = 0;
2362 	} else {
2363 		system_hardware.hd_nodes = (int)nodes_ll;
2364 		system_hardware.hd_cpus_per_node = (int)cpus_pernode_ll;
2365 	}
2366 	if ((BOP_GETPROPLEN(bootops, "kernelbase") > sizeof (prop)) ||
2367 		(BOP_GETPROP(bootops, "kernelbase", prop) < 0) 	||
2368 		(kobj_getvalue(prop, &lvalue) == -1))
2369 			eprom_kernelbase = 0;
2370 	else
2371 			eprom_kernelbase = (uintptr_t)lvalue;
2372 
2373 	if ((BOP_GETPROPLEN(bootops, "segmapsize") > sizeof (prop)) ||
2374 	    (BOP_GETPROP(bootops, "segmapsize", prop) < 0) ||
2375 	    (kobj_getvalue(prop, &lvalue) == -1)) {
2376 		segmapsize = SEGMAPDEFAULT;
2377 	} else {
2378 		segmapsize = (uintptr_t)lvalue;
2379 	}
2380 
2381 	if ((BOP_GETPROPLEN(bootops, "segmapfreelists") > sizeof (prop)) ||
2382 	    (BOP_GETPROP(bootops, "segmapfreelists", prop) < 0) ||
2383 	    (kobj_getvalue(prop, &lvalue) == -1)) {
2384 		segmapfreelists = 0;	/* use segmap driver default */
2385 	} else {
2386 		segmapfreelists = (int)lvalue;
2387 	}
2388 
2389 	if ((BOP_GETPROPLEN(bootops, "physmem") <= sizeof (prop)) &&
2390 	    (BOP_GETPROP(bootops, "physmem", prop) >= 0) &&
2391 	    (kobj_getvalue(prop, &lvalue) != -1)) {
2392 		physmem = (uintptr_t)lvalue;
2393 	}
2394 }
2395 
2396 /*
2397  * Add to a memory list.
2398  * start = start of new memory segment
2399  * len = length of new memory segment in bytes
2400  * new = pointer to a new struct memlist
2401  * memlistp = memory list to which to add segment.
2402  */
2403 void
2404 memlist_add(
2405 	uint64_t start,
2406 	uint64_t len,
2407 	struct memlist *new,
2408 	struct memlist **memlistp)
2409 {
2410 	struct memlist *cur;
2411 	uint64_t end = start + len;
2412 
2413 	new->address = start;
2414 	new->size = len;
2415 
2416 	cur = *memlistp;
2417 
2418 	while (cur) {
2419 		if (cur->address >= end) {
2420 			new->next = cur;
2421 			*memlistp = new;
2422 			new->prev = cur->prev;
2423 			cur->prev = new;
2424 			return;
2425 		}
2426 		ASSERT(cur->address + cur->size <= start);
2427 		if (cur->next == NULL) {
2428 			cur->next = new;
2429 			new->prev = cur;
2430 			new->next = NULL;
2431 			return;
2432 		}
2433 		memlistp = &cur->next;
2434 		cur = cur->next;
2435 	}
2436 }
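/*
 * Illustrative (hypothetical) usage: to add a 2MB segment starting at 1MB,
 *
 *	static struct memlist new_seg;
 *	memlist_add(0x100000, 0x200000, &new_seg, &phys_avail);
 *
 * which links new_seg into phys_avail, keeping the list sorted by address.
 */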
2437 
2438 void
2439 kobj_vmem_init(vmem_t **text_arena, vmem_t **data_arena)
2440 {
2441 	size_t tsize = e_modtext - modtext;
2442 	size_t dsize = e_moddata - moddata;
2443 
2444 	*text_arena = vmem_create("module_text", tsize ? modtext : NULL, tsize,
2445 	    1, segkmem_alloc, segkmem_free, heaptext_arena, 0, VM_SLEEP);
2446 	*data_arena = vmem_create("module_data", dsize ? moddata : NULL, dsize,
2447 	    1, segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
2448 }
2449 
2450 caddr_t
2451 kobj_text_alloc(vmem_t *arena, size_t size)
2452 {
2453 	return (vmem_alloc(arena, size, VM_SLEEP | VM_BESTFIT));
2454 }
2455 
2456 /*ARGSUSED*/
2457 caddr_t
2458 kobj_texthole_alloc(caddr_t addr, size_t size)
2459 {
2460 	panic("unexpected call to kobj_texthole_alloc()");
2461 	/*NOTREACHED*/
2462 	return (0);
2463 }
2464 
2465 /*ARGSUSED*/
2466 void
2467 kobj_texthole_free(caddr_t addr, size_t size)
2468 {
2469 	panic("unexpected call to kobj_texthole_free()");
2470 }
2471 
2472 /*
2473  * This is called just after configure() in startup_end().
2474  *
2475  * The ISALIST concept is a bit hopeless on Intel, because
2476  * there's no guarantee of an ever-more-capable processor
2477  * given that various parts of the instruction set may appear
2478  * and disappear between different implementations.
2479  *
2480  * While it would be possible to correct it and even enhance
2481  * it somewhat, the explicit hardware capability bitmask allows
2482  * more flexibility.
2483  *
2484  * So, we just leave this alone.
2485  */
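/*
 * For example, on a 64-bit Intel CPU with CMOV and MMX, the resulting
 * isa_list reads roughly:
 *	"amd64 pentium_pro+mmx pentium_pro pentium+mmx pentium i486 i386 i86"
 */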
2486 void
2487 setx86isalist(void)
2488 {
2489 	char *tp;
2490 	size_t len;
2491 	extern char *isa_list;
2492 
2493 #define	TBUFSIZE	1024
2494 
2495 	tp = kmem_alloc(TBUFSIZE, KM_SLEEP);
2496 	*tp = '\0';
2497 
2498 #if defined(__amd64)
2499 	(void) strcpy(tp, "amd64 ");
2500 #endif
2501 
2502 	switch (x86_vendor) {
2503 	case X86_VENDOR_Intel:
2504 	case X86_VENDOR_AMD:
2505 	case X86_VENDOR_TM:
2506 		if (x86_feature & X86_CMOV) {
2507 			/*
2508 			 * Pentium Pro or later
2509 			 */
2510 			(void) strcat(tp, "pentium_pro");
2511 			(void) strcat(tp, x86_feature & X86_MMX ?
2512 			    "+mmx pentium_pro " : " ");
2513 		}
2514 		/*FALLTHROUGH*/
2515 	case X86_VENDOR_Cyrix:
2516 		/*
2517 		 * The Cyrix 6x86 does not have any Pentium features
2518 		 * accessible while not at privilege level 0.
2519 		 */
2520 		if (x86_feature & X86_CPUID) {
2521 			(void) strcat(tp, "pentium");
2522 			(void) strcat(tp, x86_feature & X86_MMX ?
2523 			    "+mmx pentium " : " ");
2524 		}
2525 		break;
2526 	default:
2527 		break;
2528 	}
2529 	(void) strcat(tp, "i486 i386 i86");
2530 	len = strlen(tp) + 1;   /* account for NUL at end of string */
2531 	isa_list = strcpy(kmem_alloc(len, KM_SLEEP), tp);
2532 	kmem_free(tp, TBUFSIZE);
2533 
2534 #undef TBUFSIZE
2535 }
2536 
2537 
2538 #ifdef __amd64
2539 
2540 void *
2541 device_arena_alloc(size_t size, int vm_flag)
2542 {
2543 	return (vmem_alloc(device_arena, size, vm_flag));
2544 }
2545 
2546 void
2547 device_arena_free(void *vaddr, size_t size)
2548 {
2549 	vmem_free(device_arena, vaddr, size);
2550 }
2551 
2552 #else /* __i386 */
2553 
2554 void *
2555 device_arena_alloc(size_t size, int vm_flag)
2556 {
2557 	caddr_t	vaddr;
2558 	uintptr_t v;
2559 	size_t	start;
2560 	size_t	end;
2561 
2562 	vaddr = vmem_alloc(heap_arena, size, vm_flag);
2563 	if (vaddr == NULL)
2564 		return (NULL);
2565 
2566 	v = (uintptr_t)vaddr;
2567 	ASSERT(v >= kernelbase);
2568 	ASSERT(v + size <= valloc_base);
2569 
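	/*
	 * The toxic bit map has one bit per page in [kernelbase, valloc_base);
	 * mark every page spanned by this allocation as device (toxic) space.
	 */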
2570 	start = btop(v - kernelbase);
2571 	end = btop(v + size - 1 - kernelbase);
2572 	ASSERT(start < toxic_bit_map_len);
2573 	ASSERT(end < toxic_bit_map_len);
2574 
2575 	while (start <= end) {
2576 		BT_ATOMIC_SET(toxic_bit_map, start);
2577 		++start;
2578 	}
2579 	return (vaddr);
2580 }
2581 
2582 void
2583 device_arena_free(void *vaddr, size_t size)
2584 {
2585 	uintptr_t v = (uintptr_t)vaddr;
2586 	size_t	start;
2587 	size_t	end;
2588 
2589 	ASSERT(v >= kernelbase);
2590 	ASSERT(v + size <= valloc_base);
2591 
2592 	start = btop(v - kernelbase);
2593 	end = btop(v + size - 1 - kernelbase);
2594 	ASSERT(start < toxic_bit_map_len);
2595 	ASSERT(end < toxic_bit_map_len);
2596 
2597 	while (start <= end) {
2598 		ASSERT(BT_TEST(toxic_bit_map, start) != 0);
2599 		BT_ATOMIC_CLEAR(toxic_bit_map, start);
2600 		++start;
2601 	}
2602 	vmem_free(heap_arena, vaddr, size);
2603 }
2604 
2605 /*
2606  * Returns the first address in the range that is in the device arena,
2607  * or NULL. If len is not NULL, it is set to the length of the toxic range.
2608  */
2609 void *
2610 device_arena_contains(void *vaddr, size_t size, size_t *len)
2611 {
2612 	uintptr_t v = (uintptr_t)vaddr;
2613 	uintptr_t eaddr = v + size;
2614 	size_t start;
2615 	size_t end;
2616 
2617 	/*
2618 	 * if called very early by kmdb, just return NULL
2619 	 */
2620 	if (toxic_bit_map == NULL)
2621 		return (NULL);
2622 
2623 	/*
2624 	 * First check if we're completely outside the bitmap range.
2625 	 */
2626 	if (v >= valloc_base || eaddr < kernelbase)
2627 		return (NULL);
2628 
2629 	/*
2630 	 * Trim ends of search to look at only what the bitmap covers.
2631 	 */
2632 	if (v < kernelbase)
2633 		v = kernelbase;
2634 	start = btop(v - kernelbase);
2635 	end = btop(eaddr - kernelbase);
2636 	if (end >= toxic_bit_map_len)
2637 		end = toxic_bit_map_len;
2638 
2639 	if (bt_range(toxic_bit_map, &start, &end, end) == 0)
2640 		return (NULL);
2641 
2642 	v = kernelbase + ptob(start);
2643 	if (len != NULL)
2644 		*len = ptob(end - start);
2645 	return ((void *)v);
2646 }
2647 
2648 #endif	/* __i386 */
2649