/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
 * $FreeBSD$
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/ipl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include "isa.h"
#include "npx.h"

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));
extern void ast __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG            28
static char *trap_msg[] = {
        "",                                     /*  0 unused */
        "privileged instruction fault",         /*  1 T_PRIVINFLT */
        "",                                     /*  2 unused */
        "breakpoint instruction fault",         /*  3 T_BPTFLT */
        "",                                     /*  4 unused */
        "",                                     /*  5 unused */
        "arithmetic trap",                      /*  6 T_ARITHTRAP */
        "system forced exception",              /*  7 T_ASTFLT */
        "",                                     /*  8 unused */
        "general protection fault",             /*  9 T_PROTFLT */
        "trace trap",                           /* 10 T_TRCTRAP */
        "",                                     /* 11 unused */
        "page fault",                           /* 12 T_PAGEFLT */
        "",                                     /* 13 unused */
        "alignment fault",                      /* 14 T_ALIGNFLT */
        "",                                     /* 15 unused */
        "",                                     /* 16 unused */
        "",                                     /* 17 unused */
        "integer divide fault",                 /* 18 T_DIVIDE */
        "non-maskable interrupt trap",          /* 19 T_NMI */
        "overflow trap",                        /* 20 T_OFLOW */
        "FPU bounds check fault",               /* 21 T_BOUND */
        "FPU device not available",             /* 22 T_DNA */
        "double fault",                         /* 23 T_DOUBLEFLT */
        "FPU operand fetch fault",              /* 24 T_FPOPFLT */
        "invalid TSS fault",                    /* 25 T_TSSFLT */
        "segment not present fault",            /* 26 T_SEGNPFLT */
        "stack fault",                          /* 27 T_STKFLT */
        "machine check trap",                   /* 28 T_MCHK */
};

static __inline int userret __P((struct proc *p, struct trapframe *frame,
                                 u_quad_t oticks, int have_giant));

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
        &ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
        &panic_on_nmi, 0, "Panic on NMI");
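
/*
 * userret() is the common return-to-user-mode path: it delivers any
 * pending signals, honors a requested reschedule, and charges profiling
 * time.  It may need Giant for signal delivery and profiling, so it
 * takes the caller's current Giant-ownership flag and returns the
 * (possibly updated) flag for the caller to release.
 */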
static __inline int
userret(p, frame, oticks, have_giant)
        struct proc *p;
        struct trapframe *frame;
        u_quad_t oticks;
        int have_giant;
{
        int sig, s;

        while ((sig = CURSIG(p)) != 0) {
                if (have_giant == 0) {
                        mtx_enter(&Giant, MTX_DEF);
                        have_giant = 1;
                }
                postsig(sig);
        }

        p->p_priority = p->p_usrpri;
        if (resched_wanted()) {
                /*
                 * Since we are curproc, clock will normally just change
                 * our priority without moving us from one queue to another
                 * (since the running process is not on a queue.)
                 * If that happened after we setrunqueue ourselves but
                 * before we mi_switch()'ed, we might not be on the queue
                 * indicated by our priority.
                 */
                s = splhigh();
                mtx_enter(&sched_lock, MTX_SPIN);
                DROP_GIANT_NOSWITCH();
                setrunqueue(p);
                p->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                mtx_exit(&sched_lock, MTX_SPIN);
                PICKUP_GIANT();
                splx(s);
                while ((sig = CURSIG(p)) != 0) {
                        if (have_giant == 0) {
                                mtx_enter(&Giant, MTX_DEF);
                                have_giant = 1;
                        }
                        postsig(sig);
                }
        }
        /*
         * Charge system time if profiling.
         */
        if (p->p_flag & P_PROFIL) {
                if (have_giant == 0) {
                        mtx_enter(&Giant, MTX_DEF);
                        have_giant = 1;
                }
                addupc_task(p, frame->tf_eip,
                            (u_int)(p->p_sticks - oticks) * psratio);
        }
        curpriority = p->p_priority;
        return(have_giant);
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
        struct trapframe frame;
{
        struct proc *p = curproc;
        u_quad_t sticks = 0;
        int i = 0, ucode = 0, type, code;
        vm_offset_t eva;
#ifdef POWERFAIL_NMI
        static int lastalert = 0;
#endif

        atomic_add_int(&cnt.v_trap, 1);

        if ((frame.tf_eflags & PSL_I) == 0) {
                /*
                 * Buggy application or kernel code has disabled
                 * interrupts and then trapped.  Enabling interrupts
                 * now is wrong, but it is better than running with
                 * interrupts disabled until they are accidentally
                 * enabled later.  XXX Consider whether this is still
                 * correct.
                 */
                type = frame.tf_trapno;
                if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
                        printf(
                            "pid %ld (%s): trap %d with interrupts disabled\n",
                            (long)curproc->p_pid, curproc->p_comm, type);
                else if (type != T_BPTFLT && type != T_TRCTRAP)
                        /*
                         * XXX not quite right, since this may be for a
                         * multiple fault in user mode.
                         */
                        printf("kernel trap %d with interrupts disabled\n",
                            type);
                enable_intr();
        }

        eva = 0;
        if (frame.tf_trapno == T_PAGEFLT) {
                /*
                 * For some Cyrix CPUs, %cr2 is clobbered by
                 * interrupts.  This problem is worked around by using
                 * an interrupt gate for the page fault handler.  We
                 * are finally ready to read %cr2 and then must
                 * reenable interrupts.
                 */
                eva = rcr2();
                enable_intr();
        }
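
        /*
         * Trap handling proper is not yet MP-safe, so Giant is taken
         * unconditionally here and released at "out" below; contrast
         * syscall2(), which acquires it only on the paths that need it.
         */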
        mtx_enter(&Giant, MTX_DEF);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif

        type = frame.tf_trapno;
        code = frame.tf_err;

        if ((ISPL(frame.tf_cs) == SEL_UPL) ||
            ((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
                /* user trap */

                sticks = p->p_sticks;
                p->p_md.md_regs = &frame;

                switch (type) {
                case T_PRIVINFLT:       /* privileged instruction fault */
                        ucode = type;
                        i = SIGILL;
                        break;

                case T_BPTFLT:          /* bpt instruction fault */
                case T_TRCTRAP:         /* trace trap */
                        frame.tf_eflags &= ~PSL_T;
                        i = SIGTRAP;
                        break;

                case T_ARITHTRAP:       /* arithmetic trap */
                        ucode = code;
                        i = SIGFPE;
                        break;

                        /*
                         * The following two traps can happen in
                         * vm86 mode, and, if so, we want to handle
                         * them specially.
                         */
                case T_PROTFLT:         /* general protection fault */
                case T_STKFLT:          /* stack fault */
                        if (frame.tf_eflags & PSL_VM) {
                                i = vm86_emulate((struct vm86frame *)&frame);
                                if (i == 0)
                                        goto user;
                                break;
                        }
                        /* FALL THROUGH */

                case T_SEGNPFLT:        /* segment not present fault */
                case T_TSSFLT:          /* invalid TSS fault */
                case T_DOUBLEFLT:       /* double fault */
                default:
                        ucode = code + BUS_SEGM_FAULT;
                        i = SIGBUS;
                        break;

                case T_PAGEFLT:         /* page fault */
                        i = trap_pfault(&frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
                        if (i == -2) {
                                /*
                                 * f00f hack workaround has triggered, treat
                                 * as illegal instruction not page fault.
                                 */
                                frame.tf_trapno = T_PRIVINFLT;
                                goto restart;
                        }
#endif
                        if (i == -1)
                                goto out;
                        if (i == 0)
                                goto user;

                        ucode = T_PAGEFLT;
                        break;

                case T_DIVIDE:          /* integer divide fault */
                        ucode = FPE_INTDIV;
                        i = SIGFPE;
                        break;

#if NISA > 0
                case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#define TIMER_FREQ      1193182
#endif
                        if (time_second - lastalert > 10) {
                                log(LOG_WARNING, "NMI: power fail\n");
                                sysbeep(TIMER_FREQ/880, hz);
                                lastalert = time_second;
                        }
                        goto out;
#else /* !POWERFAIL_NMI */
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef DDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (ddb_on_nmi) {
                                        printf ("NMI ... going to debugger\n");
                                        kdb_trap (type, 0, &frame);
                                }
#endif /* DDB */
                                goto out;
                        } else if (panic_on_nmi)
                                panic("NMI indicates hardware failure");
                        break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

                case T_OFLOW:           /* integer overflow fault */
                        ucode = FPE_INTOVF;
                        i = SIGFPE;
                        break;

                case T_BOUND:           /* bounds check fault */
                        ucode = FPE_FLTSUB;
                        i = SIGFPE;
                        break;

                case T_DNA:
#if NNPX > 0
                        /* transparent fault (due to context switch "late") */
                        if (npxdna())
                                goto out;
#endif
                        if (!pmath_emulate) {
                                i = SIGFPE;
                                ucode = FPE_FPU_NP_TRAP;
                                break;
                        }
                        i = (*pmath_emulate)(&frame);
                        if (i == 0) {
                                if (!(frame.tf_eflags & PSL_T))
                                        goto out;
                                frame.tf_eflags &= ~PSL_T;
                                i = SIGTRAP;
                        }
                        /* else ucode = emulator_only_knows() XXX */
                        break;

                case T_FPOPFLT:         /* FPU operand fetch fault */
                        ucode = T_FPOPFLT;
                        i = SIGILL;
                        break;
                }
        } else {
                /* kernel trap */

                switch (type) {
                case T_PAGEFLT:                 /* page fault */
                        (void) trap_pfault(&frame, FALSE, eva);
                        goto out;

                case T_DNA:
#if NNPX > 0
                        /*
                         * The kernel is apparently using npx for copying.
                         * XXX this should be fatal unless the kernel has
                         * registered such use.
                         */
                        if (npxdna())
                                goto out;
#endif
                        break;

                        /*
                         * The following two traps can happen in
                         * vm86 mode, and, if so, we want to handle
                         * them specially.
                         */
                case T_PROTFLT:         /* general protection fault */
                case T_STKFLT:          /* stack fault */
                        if (frame.tf_eflags & PSL_VM) {
                                i = vm86_emulate((struct vm86frame *)&frame);
                                if (i != 0)
                                        /*
                                         * returns to original process
                                         */
                                        vm86_trap((struct vm86frame *)&frame);
                                goto out;
                        }
                        if (type == T_STKFLT)
                                break;

                        /* FALL THROUGH */

                case T_SEGNPFLT:        /* segment not present fault */
                        if (in_vm86call)
                                break;

                        if (intr_nesting_level != 0)
                                break;

                        /*
                         * Invalid %fs's and %gs's can be created using
                         * procfs or PT_SETREGS or by invalidating the
                         * underlying LDT entry.  This causes a fault
                         * in kernel mode when the kernel attempts to
                         * switch contexts.  Lose the bad context
                         * (XXX) so that we can continue, and generate
                         * a signal.
                         */
                        if (frame.tf_eip == (int)cpu_switch_load_gs) {
                                curpcb->pcb_gs = 0;
                                psignal(p, SIGBUS);
                                goto out;
                        }

                        /*
                         * Invalid segment selectors and out of bounds
                         * %eip's and %esp's can be set up in user mode.
                         * This causes a fault in kernel mode when the
                         * kernel tries to return to user mode.  We want
                         * to get this fault so that we can fix the
                         * problem here and not have to check all the
                         * selectors and pointers when the user changes
                         * them.
                         */
                        if (frame.tf_eip == (int)doreti_iret) {
                                frame.tf_eip = (int)doreti_iret_fault;
                                goto out;
                        }
                        if (frame.tf_eip == (int)doreti_popl_ds) {
                                frame.tf_eip = (int)doreti_popl_ds_fault;
                                goto out;
                        }
                        if (frame.tf_eip == (int)doreti_popl_es) {
                                frame.tf_eip = (int)doreti_popl_es_fault;
                                goto out;
                        }
                        if (frame.tf_eip == (int)doreti_popl_fs) {
                                frame.tf_eip = (int)doreti_popl_fs_fault;
                                goto out;
                        }
                        if (curpcb && curpcb->pcb_onfault) {
                                frame.tf_eip = (int)curpcb->pcb_onfault;
                                goto out;
                        }
                        break;

                case T_TSSFLT:
                        /*
                         * PSL_NT can be set in user mode and isn't cleared
                         * automatically when the kernel is entered.  This
                         * causes a TSS fault when the kernel attempts to
                         * `iret' because the TSS link is uninitialized.  We
                         * want to get this fault so that we can fix the
                         * problem here and not every time the kernel is
                         * entered.
                         */
                        if (frame.tf_eflags & PSL_NT) {
                                frame.tf_eflags &= ~PSL_NT;
                                goto out;
                        }
                        break;

                case T_TRCTRAP:         /* trace trap */
                        if (frame.tf_eip == (int)IDTVEC(syscall)) {
                                /*
                                 * We've just entered system mode via the
                                 * syscall lcall.  Continue single stepping
                                 * silently until the syscall handler has
                                 * saved the flags.
                                 */
                                goto out;
                        }
                        if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
                                /*
                                 * The syscall handler has now saved the
                                 * flags.  Stop single stepping it.
                                 */
                                frame.tf_eflags &= ~PSL_T;
                                goto out;
                        }
                        /*
                         * Ignore debug register trace traps due to
                         * accesses in the user's address space, which
                         * can happen under several conditions such as
                         * if a user sets a watchpoint on a buffer and
                         * then passes that buffer to a system call.
                         * We still want to get TRCTRAPS for addresses
                         * in kernel space because that is useful when
                         * debugging the kernel.
                         */
                        if (user_dbreg_trap() && !in_vm86call) {
                                /*
                                 * Reset breakpoint bits because the
                                 * processor doesn't clear them itself.
                                 */
                                load_dr6(rdr6() & 0xfffffff0);
                                goto out;
                        }
                        /*
                         * Fall through (TRCTRAP kernel mode, kernel address)
                         */
                case T_BPTFLT:
                        /*
                         * If DDB is enabled, let it handle the debugger trap.
                         * Otherwise, debugger traps "can't happen".
                         */
#ifdef DDB
                        if (kdb_trap (type, 0, &frame))
                                goto out;
#endif
                        break;

#if NISA > 0
                case T_NMI:
#ifdef POWERFAIL_NMI
                        if (time_second - lastalert > 10) {
                                log(LOG_WARNING, "NMI: power fail\n");
                                sysbeep(TIMER_FREQ/880, hz);
                                lastalert = time_second;
                        }
                        goto out;
#else /* !POWERFAIL_NMI */
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef DDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (ddb_on_nmi) {
                                        printf ("NMI ... going to debugger\n");
                                        kdb_trap (type, 0, &frame);
                                }
#endif /* DDB */
                                goto out;
                        } else if (panic_on_nmi == 0)
                                goto out;
                        /* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
                }

                trap_fatal(&frame, eva);
                goto out;
        }

        /* Translate fault for emulators (e.g. Linux) */
        if (*p->p_sysent->sv_transtrap)
                i = (*p->p_sysent->sv_transtrap)(i, type);

        trapsignal(p, i, ucode);

#ifdef DEBUG
        if (type <= MAX_TRAP_MSG) {
                uprintf("fatal process exception: %s",
                        trap_msg[type]);
                if ((type == T_PAGEFLT) || (type == T_PROTFLT))
                        uprintf(", fault VA = 0x%lx", (u_long)eva);
                uprintf("\n");
        }
#endif

user:
        userret(p, &frame, sticks, 1);
out:
        mtx_exit(&Giant, MTX_DEF);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
        int usermode;
        vm_offset_t eva;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        struct proc *p = curproc;

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        va = trunc_page(eva);
        if (va < VM_MIN_KERNEL_ADDRESS) {
                vm_offset_t v;
                vm_page_t mpte;

                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
                     (intr_nesting_level != 0 || curpcb == NULL ||
                      curpcb->pcb_onfault == NULL))) {
                        trap_fatal(frame, eva);
                        return (-1);
                }

                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                vm = p->p_vmspace;
                if (vm == NULL)
                        goto nogo;

                map = &vm->vm_map;

                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                ++p->p_lock;

                /*
                 * Grow the stack if necessary.
                 * grow_stack returns false only if va falls into
                 * a growable stack region and the stack growth
                 * fails.  It returns true if va was not within
                 * a growable stack region, or if the stack
                 * growth succeeded.
                 */
                if (!grow_stack (p, va)) {
                        rv = KERN_FAILURE;
                        --p->p_lock;
                        goto nogo;
                }

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype,
                              (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
                                                      : VM_FAULT_NORMAL);

                --p->p_lock;
        } else {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                /*
                 * Since we know that kernel virtual addresses always
                 * have pte pages mapped, we just have to fault
                 * the page.
                 */
                rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
                        frame->tf_eip = (int)curpcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif
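
/*
 * Handle a page fault taken at trap time.  Return values, as consumed
 * by trap(): 0 if the fault was resolved (or fixed up via pcb_onfault),
 * -1 if it was fatal and trap_fatal() has already been called, -2 if
 * the Pentium f00f workaround fired, and otherwise the signal (SIGBUS
 * or SIGSEGV) to deliver to the faulting process.
 */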
int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
        int usermode;
        vm_offset_t eva;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        struct proc *p = curproc;

        va = trunc_page(eva);
        if (va >= KERNBASE) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 * An exception: if the faulting address is the invalid
                 * instruction entry in the IDT, then the Intel Pentium
                 * F00F bug workaround was triggered, and we need to
                 * treat it as an illegal instruction, and not a page
                 * fault.
                 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
                if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
                        return -2;
#endif
                if (usermode)
                        goto nogo;

                map = kernel_map;
        } else {
                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                if (p != NULL)
                        vm = p->p_vmspace;

                if (vm == NULL)
                        goto nogo;

                map = &vm->vm_map;
        }

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        if (map != kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                ++p->p_lock;

                /*
                 * Grow the stack if necessary.
                 * grow_stack returns false only if va falls into
                 * a growable stack region and the stack growth
                 * fails.  It returns true if va was not within
                 * a growable stack region, or if the stack
                 * growth succeeded.
                 */
                if (!grow_stack (p, va)) {
                        rv = KERN_FAILURE;
                        --p->p_lock;
                        goto nogo;
                }

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype,
                              (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
                                                      : VM_FAULT_NORMAL);

                --p->p_lock;
        } else {
                /*
                 * Don't have to worry about process locking or stacks
                 * in the kernel.
                 */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
                        frame->tf_eip = (int)curpcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
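
/*
 * Dump as much machine state as we can for an unrecoverable trap and
 * then panic.  With DDB compiled in, the debugger gets a chance to
 * take over before the panic.
 */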
"protection violation" : "page not present"); 893 } 894 printf("instruction pointer = 0x%x:0x%x\n", 895 frame->tf_cs & 0xffff, frame->tf_eip); 896 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { 897 ss = frame->tf_ss & 0xffff; 898 esp = frame->tf_esp; 899 } else { 900 ss = GSEL(GDATA_SEL, SEL_KPL); 901 esp = (int)&frame->tf_esp; 902 } 903 printf("stack pointer = 0x%x:0x%x\n", ss, esp); 904 printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp); 905 printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n", 906 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 907 printf(" = DPL %d, pres %d, def32 %d, gran %d\n", 908 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32, 909 softseg.ssd_gran); 910 printf("processor eflags = "); 911 if (frame->tf_eflags & PSL_T) 912 printf("trace trap, "); 913 if (frame->tf_eflags & PSL_I) 914 printf("interrupt enabled, "); 915 if (frame->tf_eflags & PSL_NT) 916 printf("nested task, "); 917 if (frame->tf_eflags & PSL_RF) 918 printf("resume, "); 919 if (frame->tf_eflags & PSL_VM) 920 printf("vm86, "); 921 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12); 922 printf("current process = "); 923 if (curproc) { 924 printf("%lu (%s)\n", 925 (u_long)curproc->p_pid, curproc->p_comm ? 926 curproc->p_comm : ""); 927 } else { 928 printf("Idle\n"); 929 } 930 931 #ifdef KDB 932 if (kdb_trap(&psl)) 933 return; 934 #endif 935 #ifdef DDB 936 if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame)) 937 return; 938 #endif 939 printf("trap number = %d\n", type); 940 if (type <= MAX_TRAP_MSG) 941 panic(trap_msg[type]); 942 else 943 panic("unknown/reserved trap"); 944 } 945 946 /* 947 * Double fault handler. Called when a fault occurs while writing 948 * a frame for a trap/exception onto the stack. This usually occurs 949 * when the stack overflows (such is the case with infinite recursion, 950 * for example). 951 * 952 * XXX Note that the current PTD gets replaced by IdlePTD when the 953 * task switch occurs. This means that the stack that was active at 954 * the time of the double fault is not available at <kstack> unless 955 * the machine was idle when the double fault occurred. The downside 956 * of this is that "trace <ebp>" in ddb won't work. 957 */ 958 void 959 dblfault_handler() 960 { 961 printf("\nFatal double fault:\n"); 962 printf("eip = 0x%x\n", common_tss.tss_eip); 963 printf("esp = 0x%x\n", common_tss.tss_esp); 964 printf("ebp = 0x%x\n", common_tss.tss_ebp); 965 #ifdef SMP 966 /* two seperate prints in case of a trap on an unmapped page */ 967 printf("cpuid = %d; ", cpuid); 968 printf("lapic.id = %08x\n", lapic.id); 969 #endif 970 panic("double fault"); 971 } 972 973 /* 974 * Compensate for 386 brain damage (missing URKR). 975 * This is a little simpler than the pagefault handler in trap() because 976 * it the page tables have already been faulted in and high addresses 977 * are thrown out early for other reasons. 978 */ 979 int trapwrite(addr) 980 unsigned addr; 981 { 982 struct proc *p; 983 vm_offset_t va; 984 struct vmspace *vm; 985 int rv; 986 987 va = trunc_page((vm_offset_t)addr); 988 /* 989 * XXX - MAX is END. Changed > to >= for temp. fix. 
/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the page fault handler in trap()
 * because the page tables have already been faulted in and high
 * addresses are thrown out early for other reasons.
 */
int trapwrite(addr)
        unsigned addr;
{
        struct proc *p;
        vm_offset_t va;
        struct vmspace *vm;
        int rv;

        va = trunc_page((vm_offset_t)addr);
        /*
         * XXX - MAX is END.  Changed > to >= for temp. fix.
         */
        if (va >= VM_MAXUSER_ADDRESS)
                return (1);

        p = curproc;
        vm = p->p_vmspace;

        ++p->p_lock;

        if (!grow_stack (p, va)) {
                --p->p_lock;
                return (1);
        }

        /*
         * fault the data page
         */
        rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

        --p->p_lock;

        if (rv != KERN_SUCCESS)
                return 1;

        return (0);
}

/*
 * syscall2 -   MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
void
syscall2(frame)
        struct trapframe frame;
{
        caddr_t params;
        int i;
        struct sysent *callp;
        struct proc *p = curproc;
        u_quad_t sticks;
        int error;
        int narg;
        int args[8];
        int have_giant = 0;
        u_int code;

        atomic_add_int(&cnt.v_syscall, 1);

#ifdef DIAGNOSTIC
        if (ISPL(frame.tf_cs) != SEL_UPL) {
                mtx_enter(&Giant, MTX_DEF);
                panic("syscall");
                /* NOT REACHED */
        }
#endif

        /*
         * Handle atomicity by looping, since interrupts are enabled
         * and the MP lock is not held.
         */
        sticks = ((volatile struct proc *)p)->p_sticks;
        while (sticks != ((volatile struct proc *)p)->p_sticks)
                sticks = ((volatile struct proc *)p)->p_sticks;

        p->p_md.md_regs = &frame;
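        /*
         * The syscall number arrives in %eax; its arguments sit on the
         * user stack just above the return address pushed by the
         * lcall/int $0x80 entry stub, which is where params points.
         */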
        params = (caddr_t)frame.tf_esp + sizeof(int);
        code = frame.tf_eax;

        if (p->p_sysent->sv_prepsyscall) {
                /*
                 * The prep code is not MP aware.
                 */
                mtx_enter(&Giant, MTX_DEF);
                (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
                mtx_exit(&Giant, MTX_DEF);
        } else {
                /*
                 * Need to check if this is a 32 bit or 64 bit syscall.
                 * fuword is MP aware.
                 */
                if (code == SYS_syscall) {
                        /*
                         * Code is first argument, followed by actual args.
                         */
                        code = fuword(params);
                        params += sizeof(int);
                } else if (code == SYS___syscall) {
                        /*
                         * Like syscall, but code is a quad, so as to maintain
                         * quad alignment for the rest of the arguments.
                         */
                        code = fuword(params);
                        params += sizeof(quad_t);
                }
        }

        if (p->p_sysent->sv_mask)
                code &= p->p_sysent->sv_mask;

        if (code >= p->p_sysent->sv_size)
                callp = &p->p_sysent->sv_table[0];
        else
                callp = &p->p_sysent->sv_table[code];

        narg = callp->sy_narg & SYF_ARGMASK;

        /*
         * copyin is MP aware, but the tracing code is not.
         */
        if (params && (i = narg * sizeof(int)) &&
            (error = copyin(params, (caddr_t)args, (u_int)i))) {
                mtx_enter(&Giant, MTX_DEF);
                have_giant = 1;
#ifdef KTRACE
                if (KTRPOINT(p, KTR_SYSCALL))
                        ktrsyscall(p->p_tracep, code, narg, args);
#endif
                goto bad;
        }

        /*
         * Try to run the syscall without the MP lock if the syscall
         * is MP safe.  We have to obtain the MP lock no matter what
         * if we are ktracing.
         */
        if ((callp->sy_narg & SYF_MPSAFE) == 0) {
                mtx_enter(&Giant, MTX_DEF);
                have_giant = 1;
        }

#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSCALL)) {
                if (have_giant == 0) {
                        mtx_enter(&Giant, MTX_DEF);
                        have_giant = 1;
                }
                ktrsyscall(p->p_tracep, code, narg, args);
        }
#endif
        p->p_retval[0] = 0;
        p->p_retval[1] = frame.tf_edx;

        STOPEVENT(p, S_SCE, narg);      /* MP aware */

        error = (*callp->sy_call)(p, args);

        /*
         * MP SAFE (we may or may not have the MP lock at this point)
         */
        switch (error) {
        case 0:
                /*
                 * Reinitialize proc pointer `p' as it may be different
                 * if this is a child returning from fork syscall.
                 */
                p = curproc;
                frame.tf_eax = p->p_retval[0];
                frame.tf_edx = p->p_retval[1];
                frame.tf_eflags &= ~PSL_C;
                break;

        case ERESTART:
                /*
                 * Reconstruct pc, assuming lcall $X,y is 7 bytes and
                 * int 0x80 is 2 bytes.  We saved this in tf_err.
                 */
                frame.tf_eip -= frame.tf_err;
                break;

        case EJUSTRETURN:
                break;

        default:
bad:
                if (p->p_sysent->sv_errsize) {
                        if (error >= p->p_sysent->sv_errsize)
                                error = -1;     /* XXX */
                        else
                                error = p->p_sysent->sv_errtbl[error];
                }
                frame.tf_eax = error;
                frame.tf_eflags |= PSL_C;
                break;
        }

        /*
         * Traced syscall.  trapsignal() is not MP aware.
         */
        if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
                if (have_giant == 0) {
                        mtx_enter(&Giant, MTX_DEF);
                        have_giant = 1;
                }
                frame.tf_eflags &= ~PSL_T;
                trapsignal(p, SIGTRAP, 0);
        }

        /*
         * Handle reschedule and other end-of-syscall issues.
         */
        have_giant = userret(p, &frame, sticks, have_giant);

#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSRET)) {
                if (have_giant == 0) {
                        mtx_enter(&Giant, MTX_DEF);
                        have_giant = 1;
                }
                ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
        }
#endif

        /*
         * This works because errno is findable through the
         * register set.  If we ever support an emulation where this
         * is not the case, this code will need to be revisited.
         */
        STOPEVENT(p, S_SCX, code);

        /*
         * Release the MP lock if we had to get it.
         */
        if (have_giant)
                mtx_exit(&Giant, MTX_DEF);

        mtx_assert(&sched_lock, MA_NOTOWNED);
        mtx_assert(&Giant, MA_NOTOWNED);
}
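
/*
 * ast() - handle an asynchronous system trap.  The AST flag was set on
 * the way back to user mode, so finish the deferred work here (delayed
 * profiling charges, pending SIGVTALRM/SIGPROF) and run userret()
 * before the return to user mode completes.
 */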
void
ast(frame)
        struct trapframe frame;
{
        struct proc *p = CURPROC;
        u_quad_t sticks;

        /*
         * Handle atomicity by looping, since interrupts are enabled
         * and the MP lock is not held.
         */
        sticks = ((volatile struct proc *)p)->p_sticks;
        while (sticks != ((volatile struct proc *)p)->p_sticks)
                sticks = ((volatile struct proc *)p)->p_sticks;

        astoff();
        atomic_add_int(&cnt.v_soft, 1);
        if (p->p_flag & P_OWEUPC) {
                mtx_enter(&Giant, MTX_DEF);
                p->p_flag &= ~P_OWEUPC;
                addupc_task(p, p->p_stats->p_prof.pr_addr,
                            p->p_stats->p_prof.pr_ticks);
        }
        if (p->p_flag & P_ALRMPEND) {
                if (!mtx_owned(&Giant))
                        mtx_enter(&Giant, MTX_DEF);
                p->p_flag &= ~P_ALRMPEND;
                psignal(p, SIGVTALRM);
        }
        if (p->p_flag & P_PROFPEND) {
                if (!mtx_owned(&Giant))
                        mtx_enter(&Giant, MTX_DEF);
                p->p_flag &= ~P_PROFPEND;
                psignal(p, SIGPROF);
        }
        if (userret(p, &frame, sticks, mtx_owned(&Giant)) != 0)
                mtx_exit(&Giant, MTX_DEF);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.
 */
void
fork_return(p, frame)
        struct proc *p;
        struct trapframe frame;
{
        int have_giant;

        frame.tf_eax = 0;               /* Child returns zero */
        frame.tf_eflags &= ~PSL_C;      /* success */
        frame.tf_edx = 1;

        have_giant = userret(p, &frame, 0, mtx_owned(&Giant));
#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSRET)) {
                if (have_giant == 0) {
                        mtx_enter(&Giant, MTX_DEF);
                        have_giant = 1;
                }
                ktrsysret(p->p_tracep, SYS_fork, 0, 0);
        }
#endif
        if (have_giant)
                mtx_exit(&Giant, MTX_DEF);
}