xref: /illumos-gate/usr/src/uts/i86pc/os/mlsetup.c (revision d5dbd18d69de8954ab5ceb588e99d43fc9b21d46)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/disp.h>
#include <sys/promif.h>
#include <sys/clock.h>
#include <sys/cpuvar.h>
#include <sys/stack.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <sys/reboot.h>
#include <sys/avintr.h>
#include <sys/vtrace.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/copyops.h>
#include <sys/chip.h>
#include <sys/debug.h>
#include <sys/sunddi.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/bootconf.h>
#include <sys/kdi.h>
#include <sys/archsystm.h>
#include <sys/kobj.h>
#include <sys/kobj_lex.h>
#if defined(__amd64)
#include <sys/bootsvcs.h>

/*
 * XX64	This stuff deals with switching stacks in case a trapping
 *	thread wants to call back into boot -after- boot has lost track
 *	of the mappings but before the kernel owns the console.
 *
 *	(A better way to hide this would be to add a 'this' pointer to
 *	every boot syscall so that vmx could get at the resulting save
 *	area.)
 */

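/*
 * _vmx_sysp preserves the boot program's original boot_syscalls vector;
 * __kbootsvcs is the kernel's private copy, installed by mlsetup() with
 * bsvc_putchar redirected to the stack-switching wrapper below.
 */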
struct boot_syscalls *_vmx_sysp;
static struct boot_syscalls __kbootsvcs;
extern struct boot_syscalls *sysp;
extern void _stack_safe_putchar(int c);
#endif

/*
 * Some globals for patching the result of cpuid, to work around
 * problems with creative cpu vendors.
 */

extern uint32_t cpuid_feature_ecx_include;
extern uint32_t cpuid_feature_ecx_exclude;
extern uint32_t cpuid_feature_edx_include;
extern uint32_t cpuid_feature_edx_exclude;

/*
 * External Routines:
 */

extern void init_tables(void);

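/*
 * Fetch the named boot property and interpret it as a numeric value.
 * Returns 0 if the property is missing, too long, or unparseable.
 */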
static uint32_t
cpuid_getval(char *name)
{
	char prop[32];
	u_longlong_t ll;
	extern struct bootops *bootops;

	if ((BOP_GETPROPLEN(bootops, name) > sizeof (prop)) ||
	    (BOP_GETPROP(bootops, name, prop) < 0) ||
	    (kobj_getvalue(prop, &ll) == -1))
		return (0);
	return ((uint32_t)ll);
}

/*
 * Setup routine called right before main(). Interposing this function
 * before main() allows us to call it in a machine-independent fashion.
 */
void
mlsetup(struct regs *rp)
{
	extern struct classfuncs sys_classfuncs;
	extern struct chip cpu0_chip;
	extern disp_t cpu0_disp;
	extern char t0stack[];

	ASSERT_STACK_ALIGNED();

#if defined(__amd64)

#if (BS_VERSION > 4)
	/*
	 * When new boot_syscalls are added to the vector, this routine
	 * must be modified to copy them into the kernel's copy of the
	 * vector.
	 */
#error mlsetup() must be updated for amd64 to support new boot_syscalls
#endif	/* (BS_VERSION > 4) */

	/*
	 * XX64	This remaps vmx's putchar to use the kernel's version
	 *	that switches stacks before diving into vmx.
	 *	See explanation/complaints in commentary above.
	 */
	_vmx_sysp = sysp;
	sysp = &__kbootsvcs;

	sysp->bsvc_getchar = _vmx_sysp->bsvc_getchar;
	sysp->bsvc_putchar = _stack_safe_putchar;
	sysp->bsvc_ischar = _vmx_sysp->bsvc_ischar;
#endif
	/*
	 * initialize cpu_self
	 */
	cpu[0]->cpu_self = cpu[0];

	/*
	 * Check whether we have special bits to set or clear when
	 * interpreting the cpuid feature words.
	 */

	cpuid_feature_ecx_include =
	    cpuid_getval("cpuid_feature_ecx_include");
	cpuid_feature_ecx_exclude =
	    cpuid_getval("cpuid_feature_ecx_exclude");
	cpuid_feature_edx_include =
	    cpuid_getval("cpuid_feature_edx_include");
	cpuid_feature_edx_exclude =
	    cpuid_getval("cpuid_feature_edx_exclude");
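
	/*
	 * A property that isn't set comes back from cpuid_getval() as 0,
	 * i.e. no patching of the corresponding feature word.
	 */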

	/*
	 * The first lightweight pass (pass0) through the cpuid data
	 * was done in locore before mlsetup was called.  Do the next
	 * pass in C code.
	 *
	 * The x86_feature bits are set here on the basis of the capabilities
	 * of the boot CPU.  Note that if we choose to support CPUs that have
	 * different feature sets (at which point we would almost certainly
	 * want to set the feature bits to correspond to the feature
	 * minimum) this value may be altered.
	 */

	x86_feature = cpuid_pass1(cpu[0]);

	/*
	 * Initialize idt0, gdt0, ldt0_default, ktss0 and dftss.
	 */
	init_tables();

#if defined(__amd64)
	/*CSTYLED*/
	{
		/*
		 * Set up %gs for the kernel.
		 */
		uint64_t addr64 = (uint64_t)&cpus[0];
		wrmsr(MSR_AMD_GSBASE, &addr64);
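		/*
		 * With GSBASE pointing at cpus[0], %gs-relative per-CPU
		 * accesses (the CPU macro, and curthread once cpu_thread
		 * is set below) resolve to the boot CPU's cpu_t.
		 */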
		/*
		 * XX64 We should never dereference off "other gsbase" or
		 * "fsbase".  So, we should arrange to point FSBASE and
		 * KGSBASE somewhere truly awful, e.g. at the last valid
		 * address below the hole, so that any attempt to index
		 * off them causes an exception.
		 *
		 * For now, point them at 8G -- at least that should be
		 * unmapped until some 64-bit processes run.
		 */
		addr64 = 0x200000000ul;
		wrmsr(MSR_AMD_FSBASE, &addr64);
		wrmsr(MSR_AMD_KGSBASE, &addr64);
	}

#elif defined(__i386)
	/*
	 * enable large page support right here.
	 */
	if (x86_feature & X86_LARGEPAGE) {
		cr4_value |= CR4_PSE;
		if (x86_feature & X86_PGE)
			cr4_value |= CR4_PGE;
		setup_121_andcall(enable_big_page_support, cr4_value);
	}

	/*
	 * Some i386 processors do not implement the rdtsc instruction,
	 * or at least they do not implement it correctly.
	 *
	 * For those that do, patch in the rdtsc instructions in
	 * various parts of the kernel right now while the text is
	 * still writable.
	 */
	if (x86_feature & X86_TSC)
		patch_tsc();
#endif

	/*
	 * initialize t0
	 */
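	/*
	 * t0 runs on the statically allocated t0stack[]; its initial
	 * stack pointer is placed just below the register frame (rp)
	 * that our caller built there.
	 */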
	t0.t_stk = (caddr_t)rp - MINFRAME;
	t0.t_stkbase = t0stack;
	t0.t_pri = maxclsyspri - 3;
	t0.t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t0.t_procp = &p0;
	t0.t_plockp = &p0lock.pl_lock;
	t0.t_lwp = &lwp0;
	t0.t_forw = &t0;
	t0.t_back = &t0;
	t0.t_next = &t0;
	t0.t_prev = &t0;
	t0.t_cpu = cpu[0];
	t0.t_disp_queue = &cpu0_disp;
	t0.t_bind_cpu = PBIND_NONE;
	t0.t_bind_pset = PS_NONE;
	t0.t_cpupart = &cp_default;
	t0.t_clfuncs = &sys_classfuncs.thread;
	t0.t_copyops = NULL;
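	/*
	 * Mark t0 as the thread currently running (TS_ONPROC) on the
	 * boot CPU.
	 */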
	THREAD_ONPROC(&t0, CPU);

	lwp0.lwp_thread = &t0;
	lwp0.lwp_regs = (void *)rp;
	lwp0.lwp_procp = &p0;
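	/*
	 * p0 starts out with a single LWP, lwp0/t0, carrying thread and
	 * LWP id 1.
	 */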
	t0.t_tid = p0.p_lwpcnt = p0.p_lwprcnt = p0.p_lwpid = 1;

	p0.p_exec = NULL;
	p0.p_stat = SRUN;
	p0.p_flag = SSYS;
	p0.p_tlist = &t0;
	p0.p_stksize = 2*PAGESIZE;
	p0.p_stkpageszc = 0;
	p0.p_as = &kas;
	p0.p_lockp = &p0lock;
	p0.p_brkpageszc = 0;
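	/*
	 * Signals whose default disposition is to be ignored start out
	 * ignored in p0.
	 */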
	sigorset(&p0.p_ignore, &ignoredefault);

	CPU->cpu_thread = &t0;
	bzero(&cpu0_disp, sizeof (disp_t));
	CPU->cpu_disp = &cpu0_disp;
	CPU->cpu_disp->disp_cpu = CPU;
	CPU->cpu_dispthread = &t0;
	CPU->cpu_idle_thread = &t0;
	CPU->cpu_flags = CPU_READY | CPU_RUNNING | CPU_EXISTS | CPU_ENABLE;
	CPU->cpu_dispatch_pri = t0.t_pri;

	CPU->cpu_mask = 1;
	CPU->cpu_id = 0;

	CPU->cpu_tss = &ktss0;

	CPU->cpu_pri = 12;		/* initial PIL for the boot CPU */

	CPU->cpu_ldt = ldt0_default;	/* default LDT */
	CPU->cpu_gdt = gdt0;

	/*
	 * This must be done _after_ init_tables(), called above, has set up
	 * ldt0_default_desc.
	 */
#if defined(__amd64)
	/*
	 * ldt0_default64 contains only invalid entries. We use it as p0's
	 * LDT because p0 should never have any reason to use the LDT. This
	 * will catch things early if such a scenario should ever occur.
	 */
	p0.p_ldt_desc = ldt0_default64_desc;
#else
	p0.p_ldt_desc = ldt0_default_desc;
#endif /* __amd64 */

	/*
	 * Kernel IDT.
	 */
	CPU->cpu_idt = idt0;

	/*
	 * Initialize thread/cpu microstate accounting here
	 */
	init_mstate(&t0, LMS_SYSTEM);
	init_cpu_mstate(CPU, CMS_SYSTEM);

	/*
	 * Initialize lists of available and active CPUs.
	 */
	cpu_list_init(CPU);

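	/*
	 * Allocate and initialize the boot CPU's VM-related per-CPU data.
	 */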
	cpu_vm_data_init(CPU);

	/*
	 * Initialize the lgrp framework
	 */
	lgrp_init();

	/*
	 * The lgroup code needs to at least know about a CPU's
	 * chip association, but it's too early to fully initialize
	 * cpu0_chip, since the device node for the boot CPU doesn't
	 * exist yet. Initialize enough of it to get by until formal
	 * initialization.
	 */
	CPU->cpu_rechoose = rechoose_interval;
	CPU->cpu_chip = &cpu0_chip;

	rp->r_fp = 0;	/* terminate kernel stack traces! */

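	/*
	 * Attach the kernel to the PROM interface layer (emulated on x86)
	 * so the prom_*() calls below have something to talk to.
	 */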
	prom_init("kernel", (void *)NULL);

	if (boothowto & RB_HALT) {
		prom_printf("unix: kernel halted by -h flag\n");
		prom_enter_mon();
	}

	ASSERT_STACK_ALIGNED();

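	/*
	 * Check for (and where possible apply) workarounds for known CPU
	 * errata; a missing critical workaround on the boot CPU is fatal.
	 */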
	if (workaround_errata(CPU) != 0)
		panic("critical workaround(s) missing for boot cpu");
}