/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2018 The FreeBSD Foundation
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_apic.h"
#include "opt_atpic.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_perfmon.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <dev/smbios/smbios.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/trap.h>
#include <x86/ucode.h>
#include <machine/vm86.h>
#include <x86/init.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_APIC
#include <x86/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
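
/*
 * The inline __curthread() reads curthread with a single load through
 * %fs (effectively "movl %fs:0, %eax"), so pc_curthread must stay the
 * first member of struct pcpu.
 */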

register_t init386(int first);
void dblfault_handler(void);
void identify_cpu(void);

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)
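
/*
 * ICH_PMBASE is hard-coded rather than probed from the chipset; bit 3
 * (0x8) of SMI_EN is the LEGACY_USB_EN bit that cpu_startup() clears
 * on the MacBook models below, roughly:
 *
 *	outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
 */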

int	_udatasel, _ucodesel;
u_int	basemem;
static int above4g_allow = 1;
static int above24g_allow = 0;

int cold = 1;

long Maxmem = 0;
long realmem = 0;
int late_console = 1;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

static void i386_clock_source_init(void);

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

extern char start_exceptions[], end_exceptions[];

extern struct sysentvec elf32_freebsd_sysvec;

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.early_clock_source_init =	i386_clock_source_init,
	.early_delay =			i8254_delay,
};
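
/*
 * Very early code reaches the hardware through these hooks instead of
 * hard-wiring the i8254; e.g. a DELAY() issued before timecounters
 * are running reduces, roughly, to:
 *
 *	init_ops.early_delay(usec);	(i8254_delay() on stock i386)
 */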

static void
i386_clock_source_init(void)
{
	i8254_init();
}

static void
cpu_startup(void *dummy)
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI#, because that can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by clearing a bit in the SMI_EN (SMI Control and
	 * Enable) register of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)vm_free_count()))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
	cpu_setregs();
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

struct mtx dt_lock;			/* lock for GDT and LDT */

union descriptor gdt0[NGDT];	/* initial global descriptor table */
union descriptor *gdt = gdt0;	/* global descriptor table */

union descriptor *ldt;		/* local descriptor table */

static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static struct i386tss *dblfault_tss;
static char *dblfault_stack;

static struct i386tss common_tss0;

vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = 0,
	.ssd_limit = sizeof(union descriptor) * NLDT - 1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = 0,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = 0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
};
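
/*
 * With .ssd_gran = 1 the 20-bit limit is counted in 4K pages, so a
 * flat descriptor above with .ssd_limit = 0xfffff spans
 * (0xfffff + 1) * 4096 = 4GB; actual protection is then enforced by
 * paging, not segmentation.
 */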

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
};

size_t setidt_disp;

void
setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
{
	uintptr_t off;

	off = func != NULL ? (uintptr_t)func + setidt_disp : 0;
	setidt_nodisp(idx, off, typ, dpl, selec);
}
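
/*
 * Typical use, as in i386_setidt1() below: install the divide-fault
 * handler as a kernel-only interrupt gate on the kernel code segment:
 *
 *	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386IGT, SEL_KPL,
 *	    GSEL(GCODE_SEL, SEL_KPL));
 */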

void
setidt_nodisp(int idx, uintptr_t off, int typ, int dpl, int selec)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = off;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((u_int)off) >> 16;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND_FLAGS(idt, db_show_idt, DB_CMD_MEMSAFE)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func, func_trm;
	bool trm;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		if (ip->gd_type == SDT_SYSTASKGT) {
			db_printf("%3d\t<TASK>\n", idx);
		} else {
			func = (ip->gd_hioffset << 16 | ip->gd_looffset);
			if (func >= PMAP_TRM_MIN_ADDRESS) {
				func_trm = func;
				func -= setidt_disp;
				trm = true;
			} else
				trm = false;
			if (func != (uintptr_t)&IDTVEC(rsvd)) {
				db_printf("%3d\t", idx);
				db_printsym(func, DB_STGY_PROC);
				if (trm)
					db_printf(" (trampoline %#x)",
					    func_trm);
				db_printf("\n");
			}
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND_FLAGS(sysregs, db_show_sysregs, DB_CMD_MEMSAFE)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
	if (rcr4() & CR4_XSAVE)
		db_printf("xcr0\t0x%016llx\n", rxcr(0));
	if (amd_feature & (AMDID_NX | AMDID_LM))
		db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
		db_printf("FEATURES_CTL\t0x%016llx\n",
		    rdmsr(MSR_IA32_FEATURE_CONTROL));
	if (((cpu_vendor_id == CPU_VENDOR_INTEL ||
	    cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6) ||
	    cpu_vendor_id == CPU_VENDOR_HYGON)
		db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
	if (cpu_feature & CPUID_PAT)
		db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
}

DB_SHOW_COMMAND_FLAGS(dbregs, db_show_dbregs, DB_CMD_MEMSAFE)
{

	db_printf("dr0\t0x%08x\n", rdr0());
	db_printf("dr1\t0x%08x\n", rdr1());
	db_printf("dr2\t0x%08x\n", rdr2());
	db_printf("dr3\t0x%08x\n", rdr3());
	db_printf("dr6\t0x%08x\n", rdr6());
	db_printf("dr7\t0x%08x\n", rdr7());
}

DB_SHOW_COMMAND(frame, db_show_frame)
{
	struct trapframe *frame;

	frame = have_addr ? (struct trapframe *)addr : curthread->td_frame;
	printf("ss %#x esp %#x efl %#x cs %#x eip %#x\n",
	    frame->tf_ss, frame->tf_esp, frame->tf_eflags, frame->tf_cs,
	    frame->tf_eip);
	printf("err %#x trapno %d\n", frame->tf_err, frame->tf_trapno);
	printf("ds %#x es %#x fs %#x\n",
	    frame->tf_ds, frame->tf_es, frame->tf_fs);
	printf("eax %#x ecx %#x edx %#x ebx %#x\n",
	    frame->tf_eax, frame->tf_ecx, frame->tf_edx, frame->tf_ebx);
	printf("ebp %#x esi %#x edi %#x\n",
	    frame->tf_ebp, frame->tf_esi, frame->tf_edi);
}
#endif

void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{
	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}
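
/*
 * sdtossd() is the inverse of ssdtosd(): it reassembles the base and
 * limit fields that the hardware descriptor keeps split (hibase<<24 |
 * lobase, hilimit<<16 | lolimit) back into the flat software form.
 */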

static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	uint64_t lim, ign;
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	lim = 0x100000000;					/*  4G */
	if (pae_mode && above4g_allow)
		lim = above24g_allow ? -1ULL : 0x600000000;	/* 24G */
	if (base >= lim) {
		printf("%uK of memory above %uGB ignored, pae %d "
		    "above4g_allow %d above24g_allow %d\n",
		    (u_int)(length / 1024), (u_int)(lim >> 30), pae_mode,
		    above4g_allow, above24g_allow);
		return (1);
	}
	if (base + length >= lim) {
		ign = base + length - lim;
		length -= ign;
		printf("%uK of memory above %uGB ignored, pae %d "
		    "above4g_allow %d above24g_allow %d\n",
		    (u_int)(ign / 1024), (u_int)(lim >> 30), pae_mode,
		    above4g_allow, above24g_allow);
	}

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYS_AVAIL_ENTRIES) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}
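
/*
 * physmap[] is a sorted, flat array of {base, end} pairs, with
 * physmap_idx naming the base slot of the last pair.  The prepend and
 * append cases above merge abutting ranges, so e.g. adjacent SMAP
 * entries [0x100000, 0x200000) and [0x200000, 0x300000) collapse into
 * the single pair {0x100000, 0x300000}.
 */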

static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	return (add_physmap_entry(smap->base, smap->length, physmap,
	    physmap_idxp));
}

static void
add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	struct bios_smap *smap, *smapend;
	u_int32_t smapsize;

	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes SMAP.
	 */
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++)
		if (!add_smap_entry(smap, physmap, physmap_idxp))
			break;
}

static void
basemem_setup(void)
{

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	pmap_basemem_setup(basemem);
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int has_smap, off, physmap_idx, pa_indx, da_indx;
	u_long memtest;
	vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
	quad_t dcons_addr, dcons_size, physmem_tunable;
	int hasbrokenint12, i, res __diagused;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa;
	struct bios_smap *smap, *smapbase;

	has_smap = 0;
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Tell the physical memory allocator about pages used to store
	 * the kernel and preloaded data.  See kmem_bootstrap_free().
	 */
	vm_phys_early_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));

	TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow);
	TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow);

	/*
	 * Check if the loader supplied an SMAP memory map.  If so,
	 * use that and do not make any VM86 calls.
	 */
	physmap_idx = 0;
	smapbase = (struct bios_smap *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		add_smap_entries(smapbase, physmap, &physmap_idx);
		has_smap = 1;
		goto have_smap;
	}

	/*
	 * Some newer BIOSes have a broken INT 12H implementation
	 * which causes a kernel panic immediately.  In this case, we
	 * need to use the SMAP to determine the base memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12 == 0) {
		/* Use INT12 to determine base memory size. */
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
		basemem_setup();
	}

	/*
	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
	 * the kernel page table so we can use it as a buffer.  The
	 * kernel will unmap this page later.
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, PMAP_MAP_LOW + ptoa(1));
	res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
	KASSERT(res != 0, ("vm86_getptr() failed: address not found"));

	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		has_smap = 1;
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;
	} while (vmf.vmf_ebx != 0);

have_smap:
	/*
	 * If we didn't fetch the "base memory" size from INT12,
	 * figure it out from the SMAP (or just guess).
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/* XXX: If we couldn't find basemem from SMAP, just guess. */
		if (basemem == 0)
			basemem = 640;
		basemem_setup();
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed to find an SMAP, figure out the extended
	 * memory size.  We will then build a simple memory map with
	 * two segments, one for "base memory" and the second for
	 * "extended memory".  Note that "extended memory" starts at a
	 * physical address of 1MB and that both basemem and extmem
	 * are in units of 1KB.
	 *
	 * First, try to fetch the extended memory size via INT 15:E801.
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
		/*
		 * If INT15:E801 fails, this is our last ditch effort
		 * to determine the extended memory size.  Currently
		 * we prefer the RTC value over INT15:88.
		 */
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
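
	/*
	 * For example, 639K of base memory and 64M of extended memory
	 * would yield the two-segment fallback map (illustrative
	 * values only):
	 *
	 *	physmap[0] = 0x00000000	physmap[1] = 0x0009fc00
	 *	physmap[2] = 0x00100000	physmap[3] = 0x04100000
	 */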

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	alloc_ap_trampoline(physmap, &physmap_idx);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 *
	 * This is especially confusing when it is much larger than the
	 * memory size and is displayed as "realmem".
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	/*
	 * The boot memory test is disabled by default, as it takes a
	 * significant amount of time on large-memory systems, and is
	 * unfriendly to virtual machines as it unnecessarily touches all
	 * pages.
	 *
	 * A general name is used as the code may be extended to support
	 * additional tests beyond the current "page present" test.
	 */
	memtest = 0;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int *ptr;
			int tmp;
			bool full, page_bad;

			full = false;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = false;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * map page into kernel: valid, read/write,
			 * non-cacheable
			 */
			ptr = (int *)pmap_cmap3(pa, PG_V | PG_RW | PG_N);

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = true;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = true;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = true;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = true;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == true)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one page past the last
			 * valid page, making the range >= start and < end.
			 * If we're also doing a speculative memory test
			 * and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ENTRIES) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = true;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == PHYS_AVAIL_ENTRIES) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	pmap_cmap3(0, 0);

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}

static void
i386_kdb_init(void)
{
#ifdef DDB
	db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab, 0);
#endif
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

static void
fixup_idt(void)
{
	struct gate_descriptor *ip;
	uintptr_t off;
	int x;

	for (x = 0; x < NIDT; x++) {
		ip = &idt[x];
		if (ip->gd_type != SDT_SYS386IGT &&
		    ip->gd_type != SDT_SYS386TGT)
			continue;
		off = ip->gd_looffset + (((u_int)ip->gd_hioffset) << 16);
		KASSERT(off >= (uintptr_t)start_exceptions &&
		    off < (uintptr_t)end_exceptions,
		    ("IDT[%d] type %d off %#x", x, ip->gd_type, off));
		off += setidt_disp;
		MPASS(off >= PMAP_TRM_MIN_ADDRESS &&
		    off < PMAP_TRM_MAX_ADDRESS);
		ip->gd_looffset = off;
		ip->gd_hioffset = off >> 16;
	}
}
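
/*
 * After machdep_init_trampoline() copies the low-level handlers into
 * the trampoline region, every gate must point at the copy, i.e.
 *
 *	new offset = linked handler address + setidt_disp
 *
 * which is the adjustment applied above and what setidt() folds in
 * for entries installed later.
 */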

static void
i386_setidt1(void)
{
	int x;

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386IGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL,
	    SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386IGT,
	    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall),
	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret),
	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
#endif
#ifdef XENHVM
	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif
}

static void
i386_setidt2(void)
{

	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
}

#if defined(DEV_ISA) && !defined(DEV_ATPIC)
static void
i386_setidt3(void)
{

	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
}
#endif

register_t
init386(int first)
{
	struct region_descriptor r_gdt, r_idt;	/* table descriptors */
	int gsel_tss, metadata_missing, x, pa;
	struct pcpu *pc;
	struct xstate_hdr *xhdr;
	vm_offset_t addend;
	size_t ucode_len;

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = TD0_KSTACK_PAGES;

	/*
	 * This may be done better later if it gets more high level
	 * components in it.  If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	if (bootinfo.bi_modulep) {
		metadata_missing = 0;
		addend = (vm_paddr_t)bootinfo.bi_modulep < KERNBASE ?
		    PMAP_MAP_LOW : 0;
		preload_metadata = (caddr_t)bootinfo.bi_modulep + addend;
		preload_bootstrap_relocate(addend);
	} else {
		metadata_missing = 1;
	}

	if (bootinfo.bi_envp != 0) {
		addend = (vm_paddr_t)bootinfo.bi_envp < KERNBASE ?
		    PMAP_MAP_LOW : 0;
		init_static_kenv((char *)bootinfo.bi_envp + addend, 0);
	} else {
		init_static_kenv(NULL, 0);
	}

	/*
	 * Re-evaluate CPU features if we loaded a microcode update.
	 */
	ucode_len = ucode_load_bsp(first);
	if (ucode_len != 0) {
		identify_cpu();
		first = roundup2(first + ucode_len, PAGE_SIZE);
	}

	identify_hypervisor();
	identify_hypervisor_smbios();

	/* Init basic tunables, hz etc */
	init_param1();

	/* Set bootmethod to BIOS: it's the only one supported on i386. */
	strlcpy(bootmethod, "BIOS", sizeof(bootmethod));

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)&common_tss0;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt0[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt0[0]) - 1;
	r_gdt.rd_base = (int)gdt0;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa, pa);
	dpcpu_init((void *)first, 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	/* Non-late cninit() and printf() can be moved up to here. */

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS |
	    MTX_NOPROFILE);

	i386_setidt1();

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	finishidentcpu();	/* Final stage of CPU initialization */

	/*
	 * Initialize the clock before the console so that console
	 * initialization can use DELAY().
	 */
	clock_init();

	i386_setidt2();
	pmap_set_nx();
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/* Initialize the tss (except for the final esp0) early for vm86. */
	common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages *
	    PAGE_SIZE - VM86_STACK_SPACE;
	common_tss0.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tss0.tss_ioopt = sizeof(struct i386tss) << 16;
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	/* Initialize the PIC early for vm86 calls. */
#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	i386_setidt3();
#endif
#endif

	/*
	 * The console and kdb should be initialized even earlier than here,
	 * but some console drivers don't work until after getmemsize().
	 * Default to late console initialization to support these drivers.
	 * This loses mainly printf()s in getmemsize() and early debugging.
	 */
	TUNABLE_INT_FETCH("debug.late_console", &late_console);
	if (!late_console) {
		cninit();
		i386_kdb_init();
	}

	if (cpu_fxsr && (cpu_feature2 & CPUID2_XSAVE) != 0) {
		use_xsave = 1;
		TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
	}

	/* Initialize preload_kmdp */
	preload_initkmdp(!metadata_missing);
	link_elf_ireloc();

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	if (late_console)
		cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	if (late_console)
		i386_kdb_init();

	msgbufinit(msgbufp, msgbufsize);
	npxinit(true);

	/*
	 * Set up thread0 pcb after npxinit calculated pcb + fpu save
	 * area size.  Zero out the extended state header in fpu save
	 * area.
	 */
	thread0.td_pcb = get_pcb_td(&thread0);
	thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
		    1);
		xhdr->xstate_bv = xsave_mask;
	}
	PCPU_SET(curpcb, thread0.td_pcb);
	/* Move esp0 in the tss to its final place. */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	common_tss0.tss_esp0 = (vm_offset_t)thread0.td_pcb - VM86_STACK_SPACE;
	PCPU_SET(kesp0, common_tss0.tss_esp0);
	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;	/* clear busy bit */
	ltr(gsel_tss);

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = pmap_get_kcr3();
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;

#ifdef FDT
	x86_init_fdt();
#endif

	/* Location of kernel stack for locore */
	return ((register_t)thread0.td_pcb);
}

static void
machdep_init_trampoline(void)
{
	struct region_descriptor r_gdt, r_idt;
	struct i386tss *tss;
	char *copyout_buf, *trampoline, *tramp_stack_base;
	int x;

	gdt = pmap_trm_alloc(sizeof(union descriptor) * NGDT * mp_ncpus,
	    M_NOWAIT | M_ZERO);
	bcopy(gdt0, gdt, sizeof(union descriptor) * NGDT);
	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int)gdt;
	lgdt(&r_gdt);

	tss = pmap_trm_alloc(sizeof(struct i386tss) * mp_ncpus,
	    M_NOWAIT | M_ZERO);
	bcopy(&common_tss0, tss, sizeof(struct i386tss));
	gdt[GPROC0_SEL].sd.sd_lobase = (int)tss;
	gdt[GPROC0_SEL].sd.sd_hibase = (u_int)tss >> 24;
	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;

	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tssp, tss);
	ltr(GSEL(GPROC0_SEL, SEL_KPL));

	trampoline = pmap_trm_alloc(end_exceptions - start_exceptions,
	    M_NOWAIT);
	bcopy(start_exceptions, trampoline, end_exceptions - start_exceptions);
	tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
	PCPU_SET(trampstk, (uintptr_t)tramp_stack_base + TRAMP_STACK_SZ -
	    VM86_STACK_SPACE);
	tss[0].tss_esp0 = PCPU_GET(trampstk);

	idt = pmap_trm_alloc(sizeof(idt0), M_NOWAIT | M_ZERO);
	bcopy(idt0, idt, sizeof(idt0));

	/* Re-initialize new IDT since the handlers were relocated */
	setidt_disp = trampoline - start_exceptions;
	if (bootverbose)
		printf("Trampoline disposition %#zx\n", setidt_disp);
	fixup_idt();

	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	/* dblfault TSS */
	dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss),
	    M_NOWAIT | M_ZERO);
	dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT);
	dblfault_tss->tss_esp = dblfault_tss->tss_esp0 =
	    dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 =
	    (int)dblfault_stack + PAGE_SIZE;
	dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 =
	    dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss->tss_cr3 = pmap_get_kcr3();
	dblfault_tss->tss_eip = (int)dblfault_handler;
	dblfault_tss->tss_eflags = PSL_KERNEL;
	dblfault_tss->tss_ds = dblfault_tss->tss_es =
	    dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
	gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss;
	gdt[GPANIC_SEL].sd.sd_hibase = (u_int)dblfault_tss >> 24;

	/* make ldt memory segments */
	ldt = pmap_trm_alloc(sizeof(union descriptor) * NLDT,
	    M_NOWAIT | M_ZERO);
	gdt[GLDT_SEL].sd.sd_lobase = (int)ldt;
	gdt[GLDT_SEL].sd.sd_hibase = (u_int)ldt >> 24;
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < nitems(ldt_segs); x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);
	PCPU_SET(copyout_buf, copyout_buf);
	copyout_init_tramp();
}
SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_SECOND, machdep_init_trampoline, NULL);

#ifdef COMPAT_43
static void
i386_setup_lcall_gate(void)
{
	struct sysentvec *sv;
	struct user_segment_descriptor desc;
	u_int lcall_addr;

	sv = &elf32_freebsd_sysvec;
	lcall_addr = (uintptr_t)sv->sv_psstrings - sz_lcall_tramp;

	bzero(&desc, sizeof(desc));
	desc.sd_type = SDT_MEMERA;
	desc.sd_dpl = SEL_UPL;
	desc.sd_p = 1;
	desc.sd_def32 = 1;
	desc.sd_gran = 1;
	desc.sd_lolimit = 0xffff;
	desc.sd_hilimit = 0xf;
	desc.sd_lobase = lcall_addr;
	desc.sd_hibase = lcall_addr >> 24;
	bcopy(&desc, &ldt[LSYS5CALLS_SEL], sizeof(desc));
}
SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY, i386_setup_lcall_gate, NULL);
#endif

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

static int
smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct bios_smap *smapbase;
	struct bios_smap_xattr smap;
	uint32_t *smapattr;
	int count, error, i;

	/* Retrieve the system memory map from the loader. */
	smapbase = (struct bios_smap *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		return (0);
	smapattr = (uint32_t *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
	count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
	error = 0;
	for (i = 0; i < count; i++) {
		smap.base = smapbase[i].base;
		smap.length = smapbase[i].length;
		smap.type = smapbase[i].type;
		if (smapattr != NULL)
			smap.xattr = smapattr[i];
		else
			smap.xattr = 0;
		error = SYSCTL_OUT(req, &smap, sizeof(smap));
	}
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, smap,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    smap_sysctl_handler, "S,bios_smap_xattr",
    "Raw BIOS SMAP data");

void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
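
/*
 * spinlock_enter()/spinlock_exit() nest: interrupts are disabled and
 * the flags saved only by the outermost enter, and restored only when
 * the matching outermost exit brings md_spinlock_count back to zero.
 */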

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(flags);
	}
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct region_descriptor r_idt;
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = (vm_offset_t)pmap_trm_alloc(PAGE_SIZE * 3, M_NOWAIT | M_ZERO);
	if (tmp == 0)
		panic("pmap_trm_alloc() returned 0");
	tmp = round_page(tmp);

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor *)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	r_idt.rd_limit = sizeof(idt0) - 1;
	lidt(&r_idt);
	/* SMP machines do not need the F00F hack. */
	idt = new_idt;
	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
	pcb->pcb_gs = rgs();
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return inb(port);
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */