/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/disp.h>
#include <sys/promif.h>
#include <sys/clock.h>
#include <sys/cpuvar.h>
#include <sys/stack.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <sys/reboot.h>
#include <sys/avintr.h>
#include <sys/vtrace.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/copyops.h>
#include <sys/chip.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/sunddi.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/bootconf.h>
#include <sys/kdi.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/bootconf.h>
#include <sys/kobj.h>
#include <sys/kobj_lex.h>
#include <sys/pci_cfgspace.h>
#if defined(__amd64)
#include <sys/bootsvcs.h>

/*
 * XX64	This stuff deals with switching stacks in case a trapping
 *	thread wants to call back into boot -after- boot has lost track
 *	of the mappings but before the kernel owns the console.
 *
 *	(A better way to hide this would be to add a 'this' pointer to
 *	every boot syscall so that vmx could get at the resulting save
 *	area.)
 */

struct boot_syscalls *_vmx_sysp;
static struct boot_syscalls __kbootsvcs;
extern struct boot_syscalls *sysp;
extern void _stack_safe_putchar(int c);
#endif

/*
 * some globals for patching the result of cpuid
 * to solve problems w/ creative cpu vendors
 */

extern uint32_t cpuid_feature_ecx_include;
extern uint32_t cpuid_feature_ecx_exclude;
extern uint32_t cpuid_feature_edx_include;
extern uint32_t cpuid_feature_edx_exclude;

/*
 * Dummy spl priority masks
 */
static unsigned char dummy_cpu_pri[MAXIPL + 1] = {
	0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf,
	0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf
};

/*
 * External Routines:
 */

extern void init_tables(void);


static uint32_t
cpuid_getval(char *name)
{
	char prop[32];
	u_longlong_t ll;
	extern struct bootops *bootops;
	if ((BOP_GETPROPLEN(bootops, name) > sizeof (prop)) ||
	    (BOP_GETPROP(bootops, name, prop) < 0) ||
	    (kobj_getvalue(prop, &ll) == -1))
		return (0);
	return ((uint32_t)ll);
}
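
/*
 * cpuid_getval() fetches a numeric boot property by name through
 * BOP_GETPROP(), so the cpuid_feature_*_include/_exclude overrides read
 * by mlsetup() below can be supplied from the boot environment.  Purely
 * as an illustration (hypothetical property value, not a recommendation),
 * such an override would typically be set via eeprom(1M), i.e. a line in
 * /boot/solaris/bootenv.rc of the form:
 *
 *	setprop cpuid_feature_edx_exclude 0x10
 */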

/*
 * Setup routine called right before main(). Interposing this function
 * before main() allows us to call it in a machine-independent fashion.
 */
void
mlsetup(struct regs *rp)
{
	extern struct classfuncs sys_classfuncs;
	extern struct chip cpu0_chip;
	extern disp_t cpu0_disp;
	extern char t0stack[];

	ASSERT_STACK_ALIGNED();

#if defined(__amd64)

#if (BS_VERSION > 4)
	/*
	 * When new boot_syscalls are added to the vector, this routine
	 * must be modified to copy them into the kernel's copy of the
	 * vector.
	 */
#error mlsetup() must be updated for amd64 to support new boot_syscalls
#endif	/* (BS_VERSION > 4) */

	/*
	 * XX64	This remaps vmx's putchar to use the kernel's version
	 *	that switches stacks before diving into vmx
	 *	See explanation/complaints in commentary above.
	 */
	_vmx_sysp = sysp;
	sysp = &__kbootsvcs;

	sysp->bsvc_getchar = _vmx_sysp->bsvc_getchar;
	sysp->bsvc_putchar = _stack_safe_putchar;
	sysp->bsvc_ischar = _vmx_sysp->bsvc_ischar;
#endif
	/*
	 * initialize cpu_self
	 */
	cpu[0]->cpu_self = cpu[0];

	/*
	 * Set up dummy cpu_pri_data values till psm spl code is
	 * installed.  This allows splx() to work on amd64.
	 */

	cpu[0]->cpu_pri_data = dummy_cpu_pri;

	/*
	 * check if we've got special bits to clear or set
	 * when checking cpu features
	 */

	cpuid_feature_ecx_include =
	    cpuid_getval("cpuid_feature_ecx_include");
	cpuid_feature_ecx_exclude =
	    cpuid_getval("cpuid_feature_ecx_exclude");
	cpuid_feature_edx_include =
	    cpuid_getval("cpuid_feature_edx_include");
	cpuid_feature_edx_exclude =
	    cpuid_getval("cpuid_feature_edx_exclude");

	/*
	 * The first lightweight pass (pass0) through the cpuid data
	 * was done in locore before mlsetup was called.  Do the next
	 * pass in C code.
	 *
	 * The x86_feature bits are set here on the basis of the capabilities
	 * of the boot CPU.  Note that if we choose to support CPUs that have
	 * different feature sets (at which point we would almost certainly
	 * want to set the feature bits to correspond to the feature
	 * minimum) this value may be altered.
	 */

	x86_feature = cpuid_pass1(cpu[0]);

	/*
	 * Initialize idt0, gdt0, ldt0_default, ktss0 and dftss.
	 */
	init_tables();

#if defined(__amd64)
	/*CSTYLED*/
	{
		/*
		 * setup %gs for the kernel
		 */
		wrmsr(MSR_AMD_GSBASE, (uint64_t)&cpus[0]);
		/*
		 * XX64	We should never dereference off "other gsbase" or
		 *	"fsbase".  So, we should arrange to point FSBASE and
		 *	KGSBASE somewhere truly awful e.g. point it at the last
		 *	valid address below the hole so that any attempts to
		 *	index off them cause an exception.
		 *
		 *	For now, point it at 8G -- at least it should be
		 *	unmapped until some 64-bit processes run.
		 */
		wrmsr(MSR_AMD_FSBASE, 0x200000000UL);
		wrmsr(MSR_AMD_KGSBASE, 0x200000000UL);
	}

#elif defined(__i386)
	/*
	 * enable large page support right here.
	 */
	if (x86_feature & X86_LARGEPAGE) {
		cr4_value |= CR4_PSE;
		if (x86_feature & X86_PGE)
			cr4_value |= CR4_PGE;
		setup_121_andcall(enable_big_page_support, cr4_value);
	}

	/*
	 * Some i386 processors do not implement the rdtsc instruction,
	 * or at least they do not implement it correctly.
	 *
	 * For those that do, patch in the rdtsc instructions in
	 * various parts of the kernel right now while the text is
	 * still writable.
	 */
	if (x86_feature & X86_TSC)
		patch_tsc();
#endif
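
	/*
	 * The remainder of this routine hand-builds the boot-time execution
	 * environment from statically allocated objects: t0 (thread 0),
	 * lwp0 (its LWP), p0 (proc 0, the "sched" system process) and
	 * cpu[0] are cross-linked below so that a minimally consistent
	 * thread/LWP/proc/CPU state exists for the rest of early startup.
	 */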

	/*
	 * initialize t0
	 */
	t0.t_stk = (caddr_t)rp - MINFRAME;
	t0.t_stkbase = t0stack;
	t0.t_pri = maxclsyspri - 3;
	t0.t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t0.t_procp = &p0;
	t0.t_plockp = &p0lock.pl_lock;
	t0.t_lwp = &lwp0;
	t0.t_forw = &t0;
	t0.t_back = &t0;
	t0.t_next = &t0;
	t0.t_prev = &t0;
	t0.t_cpu = cpu[0];
	t0.t_disp_queue = &cpu0_disp;
	t0.t_bind_cpu = PBIND_NONE;
	t0.t_bind_pset = PS_NONE;
	t0.t_cpupart = &cp_default;
	t0.t_clfuncs = &sys_classfuncs.thread;
	t0.t_copyops = NULL;
	THREAD_ONPROC(&t0, CPU);

	lwp0.lwp_thread = &t0;
	lwp0.lwp_regs = (void *)rp;
	lwp0.lwp_procp = &p0;
	t0.t_tid = p0.p_lwpcnt = p0.p_lwprcnt = p0.p_lwpid = 1;

	p0.p_exec = NULL;
	p0.p_stat = SRUN;
	p0.p_flag = SSYS;
	p0.p_tlist = &t0;
	p0.p_stksize = 2*PAGESIZE;
	p0.p_stkpageszc = 0;
	p0.p_as = &kas;
	p0.p_lockp = &p0lock;
	p0.p_brkpageszc = 0;
	sigorset(&p0.p_ignore, &ignoredefault);

	CPU->cpu_thread = &t0;
	bzero(&cpu0_disp, sizeof (disp_t));
	CPU->cpu_disp = &cpu0_disp;
	CPU->cpu_disp->disp_cpu = CPU;
	CPU->cpu_dispthread = &t0;
	CPU->cpu_idle_thread = &t0;
	CPU->cpu_flags = CPU_READY | CPU_RUNNING | CPU_EXISTS | CPU_ENABLE;
	CPU->cpu_dispatch_pri = t0.t_pri;

	CPU->cpu_mask = 1;
	CPU->cpu_id = 0;

	CPU->cpu_tss = &ktss0;

	CPU->cpu_pri = 12;	/* initial PIL for the boot CPU */

	CPU->cpu_gdt = gdt0;

	/*
	 * The kernel doesn't use LDTs unless a process explicitly requests
	 * one.
	 */
	p0.p_ldt_desc = zero_sdesc;

	/*
	 * Kernel IDT.
	 */
	CPU->cpu_idt = idt0;

	/*
	 * Initialize thread/cpu microstate accounting here
	 */
	init_mstate(&t0, LMS_SYSTEM);
	init_cpu_mstate(CPU, CMS_SYSTEM);

	/*
	 * Initialize lists of available and active CPUs.
	 */
	cpu_list_init(CPU);

	cpu_vm_data_init(CPU);

	/* lgrp_init() needs PCI config space access */
	pci_cfgspace_init();

	/*
	 * Initialize the lgrp framework
	 */
	lgrp_init();

	/*
	 * The lgroup code needs to at least know about a CPU's
	 * chip association, but it's too early to fully initialize
	 * cpu0_chip, since the device node for the boot CPU doesn't
	 * exist yet.  Initialize enough of it to get by until formal
	 * initialization.
	 */
	CPU->cpu_rechoose = rechoose_interval;
	CPU->cpu_chip = &cpu0_chip;

	rp->r_fp = 0;	/* terminate kernel stack traces! */

	prom_init("kernel", (void *)NULL);

	if (boothowto & RB_HALT) {
		prom_printf("unix: kernel halted by -h flag\n");
		prom_enter_mon();
	}

	ASSERT_STACK_ALIGNED();

	if (workaround_errata(CPU) != 0)
		panic("critical workaround(s) missing for boot cpu");
}