1 /*- 2 * Copyright (C) 1994, David Greenman 3 * Copyright (c) 1990, 1993 4 * The Regents of the University of California. All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the University of Utah, and William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 38 * $FreeBSD$ 39 */ 40 41 /* 42 * 386 Trap and System call handling 43 */ 44 45 #include "opt_clock.h" 46 #include "opt_cpu.h" 47 #include "opt_ddb.h" 48 #include "opt_ktrace.h" 49 #include "opt_npx.h" 50 #include "opt_trap.h" 51 52 #include <sys/param.h> 53 #include <sys/bus.h> 54 #include <sys/systm.h> 55 #include <sys/proc.h> 56 #include <sys/pioctl.h> 57 #include <sys/ipl.h> 58 #include <sys/kernel.h> 59 #include <sys/ktr.h> 60 #include <sys/mutex.h> 61 #include <sys/resourcevar.h> 62 #include <sys/signalvar.h> 63 #include <sys/syscall.h> 64 #include <sys/sysctl.h> 65 #include <sys/sysent.h> 66 #include <sys/uio.h> 67 #include <sys/vmmeter.h> 68 #ifdef KTRACE 69 #include <sys/ktrace.h> 70 #endif 71 72 #include <vm/vm.h> 73 #include <vm/vm_param.h> 74 #include <sys/lock.h> 75 #include <vm/pmap.h> 76 #include <vm/vm_kern.h> 77 #include <vm/vm_map.h> 78 #include <vm/vm_page.h> 79 #include <vm/vm_extern.h> 80 81 #include <machine/cpu.h> 82 #include <machine/md_var.h> 83 #include <machine/pcb.h> 84 #ifdef SMP 85 #include <machine/smp.h> 86 #endif 87 #include <machine/tss.h> 88 89 #include <i386/isa/icu.h> 90 #include <i386/isa/intr_machdep.h> 91 92 #ifdef POWERFAIL_NMI 93 #include <sys/syslog.h> 94 #include <machine/clock.h> 95 #endif 96 97 #include <machine/vm86.h> 98 99 #include <ddb/ddb.h> 100 101 #include "isa.h" 102 103 #include <sys/sysctl.h> 104 105 int (*pmath_emulate) __P((struct trapframe *)); 106 107 extern void trap __P((struct trapframe frame)); 108 extern int trapwrite __P((unsigned addr)); 109 extern void syscall2 __P((struct trapframe frame)); 110 extern void ast __P((struct trapframe frame)); 111 112 static int trap_pfault __P((struct trapframe *, int, vm_offset_t)); 113 static void trap_fatal __P((struct trapframe *, vm_offset_t)); 114 void dblfault_handler __P((void)); 115 116 extern inthand_t IDTVEC(syscall); 117 118 #define MAX_TRAP_MSG 28 119 static char *trap_msg[] = { 120 "", /* 0 unused */ 121 "privileged instruction fault", /* 1 T_PRIVINFLT */ 122 "", /* 2 unused */ 123 "breakpoint instruction fault", /* 3 T_BPTFLT */ 124 "", /* 4 unused */ 125 "", /* 5 unused */ 126 "arithmetic trap", /* 6 T_ARITHTRAP */ 127 "system forced exception", /* 7 T_ASTFLT */ 128 "", /* 8 unused */ 129 "general protection fault", /* 9 T_PROTFLT */ 130 "trace trap", /* 10 T_TRCTRAP */ 131 "", /* 11 unused */ 132 "page fault", /* 12 T_PAGEFLT */ 133 "", /* 13 unused */ 134 "alignment fault", /* 14 T_ALIGNFLT */ 135 "", /* 15 unused */ 136 "", /* 16 unused */ 137 "", /* 17 unused */ 138 "integer divide fault", /* 18 T_DIVIDE */ 139 "non-maskable interrupt trap", /* 19 T_NMI */ 140 "overflow trap", /* 20 T_OFLOW */ 141 "FPU bounds check fault", /* 21 T_BOUND */ 142 "FPU device not available", /* 22 T_DNA */ 143 "double fault", /* 23 T_DOUBLEFLT */ 144 "FPU operand fetch fault", /* 24 T_FPOPFLT */ 145 "invalid TSS fault", /* 25 T_TSSFLT */ 146 "segment not present fault", /* 26 T_SEGNPFLT */ 147 "stack fault", /* 27 T_STKFLT */ 148 "machine check trap", /* 28 T_MCHK */ 149 }; 150 151 static __inline int userret __P((struct proc *p, struct trapframe *frame, 152 u_quad_t oticks, int have_giant)); 153 154 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 155 extern int has_f00f_bug; 156 #endif 157 158 #ifdef DDB 159 static int ddb_on_nmi = 1; 160 SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW, 161 &ddb_on_nmi, 0, "Go to DDB on NMI"); 162 #endif 163 static int panic_on_nmi = 1; 164 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW, 165 &panic_on_nmi, 0, "Panic on NMI"); 166 167 #ifdef WITNESS 168 extern char *syscallnames[]; 169 #endif 170 171 static __inline int 172 userret(p, frame, oticks, have_giant) 173 struct proc *p; 174 struct trapframe *frame; 175 u_quad_t oticks; 176 int have_giant; 177 { 178 int sig, s; 179 180 while ((sig = CURSIG(p)) != 0) { 181 if (have_giant == 0) { 182 mtx_enter(&Giant, MTX_DEF); 183 have_giant = 1; 184 } 185 postsig(sig); 186 } 187 188 p->p_priority = p->p_usrpri; 189 if (resched_wanted()) { 190 /* 191 * Since we are curproc, clock will normally just change 192 * our priority without moving us from one queue to another 193 * (since the running process is not on a queue.) 194 * If that happened after we setrunqueue ourselves but before we 195 * mi_switch()'ed, we might not be on the queue indicated by 196 * our priority. 197 */ 198 s = splhigh(); 199 mtx_enter(&sched_lock, MTX_SPIN); 200 DROP_GIANT_NOSWITCH(); 201 setrunqueue(p); 202 p->p_stats->p_ru.ru_nivcsw++; 203 mi_switch(); 204 mtx_exit(&sched_lock, MTX_SPIN); 205 PICKUP_GIANT(); 206 splx(s); 207 while ((sig = CURSIG(p)) != 0) { 208 if (have_giant == 0) { 209 mtx_enter(&Giant, MTX_DEF); 210 have_giant = 1; 211 } 212 postsig(sig); 213 } 214 } 215 /* 216 * Charge system time if profiling. 217 */ 218 if (p->p_flag & P_PROFIL) { 219 if (have_giant == 0) { 220 mtx_enter(&Giant, MTX_DEF); 221 have_giant = 1; 222 } 223 addupc_task(p, frame->tf_eip, 224 (u_int)(p->p_sticks - oticks) * psratio); 225 } 226 curpriority = p->p_priority; 227 return(have_giant); 228 } 229 230 /* 231 * Exception, fault, and trap interface to the FreeBSD kernel. 232 * This common code is called from assembly language IDT gate entry 233 * routines that prepare a suitable stack frame, and restore this 234 * frame after the exception has been processed. 235 */ 236 237 void 238 trap(frame) 239 struct trapframe frame; 240 { 241 struct proc *p = curproc; 242 u_quad_t sticks = 0; 243 int i = 0, ucode = 0, type, code; 244 vm_offset_t eva; 245 #ifdef POWERFAIL_NMI 246 static int lastalert = 0; 247 #endif 248 249 atomic_add_int(&cnt.v_trap, 1); 250 251 if ((frame.tf_eflags & PSL_I) == 0) { 252 /* 253 * Buggy application or kernel code has disabled 254 * interrupts and then trapped. Enabling interrupts 255 * now is wrong, but it is better than running with 256 * interrupts disabled until they are accidentally 257 * enabled later. XXX Consider whether is this still 258 * correct. 259 */ 260 type = frame.tf_trapno; 261 if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM)) 262 printf( 263 "pid %ld (%s): trap %d with interrupts disabled\n", 264 (long)curproc->p_pid, curproc->p_comm, type); 265 else if (type != T_BPTFLT && type != T_TRCTRAP) 266 /* 267 * XXX not quite right, since this may be for a 268 * multiple fault in user mode. 269 */ 270 printf("kernel trap %d with interrupts disabled\n", 271 type); 272 enable_intr(); 273 } 274 275 eva = 0; 276 if (frame.tf_trapno == T_PAGEFLT) { 277 /* 278 * For some Cyrix CPUs, %cr2 is clobbered by 279 * interrupts. This problem is worked around by using 280 * an interrupt gate for the pagefault handler. We 281 * are finally ready to read %cr2 and then must 282 * reenable interrupts. 283 */ 284 eva = rcr2(); 285 enable_intr(); 286 } 287 288 mtx_enter(&Giant, MTX_DEF); 289 290 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 291 restart: 292 #endif 293 294 type = frame.tf_trapno; 295 code = frame.tf_err; 296 297 if ((ISPL(frame.tf_cs) == SEL_UPL) || 298 ((frame.tf_eflags & PSL_VM) && !in_vm86call)) { 299 /* user trap */ 300 301 sticks = p->p_sticks; 302 p->p_md.md_regs = &frame; 303 304 switch (type) { 305 case T_PRIVINFLT: /* privileged instruction fault */ 306 ucode = type; 307 i = SIGILL; 308 break; 309 310 case T_BPTFLT: /* bpt instruction fault */ 311 case T_TRCTRAP: /* trace trap */ 312 frame.tf_eflags &= ~PSL_T; 313 i = SIGTRAP; 314 break; 315 316 case T_ARITHTRAP: /* arithmetic trap */ 317 ucode = code; 318 i = SIGFPE; 319 break; 320 321 /* 322 * The following two traps can happen in 323 * vm86 mode, and, if so, we want to handle 324 * them specially. 325 */ 326 case T_PROTFLT: /* general protection fault */ 327 case T_STKFLT: /* stack fault */ 328 if (frame.tf_eflags & PSL_VM) { 329 i = vm86_emulate((struct vm86frame *)&frame); 330 if (i == 0) 331 goto user; 332 break; 333 } 334 /* FALL THROUGH */ 335 336 case T_SEGNPFLT: /* segment not present fault */ 337 case T_TSSFLT: /* invalid TSS fault */ 338 case T_DOUBLEFLT: /* double fault */ 339 default: 340 ucode = code + BUS_SEGM_FAULT ; 341 i = SIGBUS; 342 break; 343 344 case T_PAGEFLT: /* page fault */ 345 i = trap_pfault(&frame, TRUE, eva); 346 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 347 if (i == -2) { 348 /* 349 * f00f hack workaround has triggered, treat 350 * as illegal instruction not page fault. 351 */ 352 frame.tf_trapno = T_PRIVINFLT; 353 goto restart; 354 } 355 #endif 356 if (i == -1) 357 goto out; 358 if (i == 0) 359 goto user; 360 361 ucode = T_PAGEFLT; 362 break; 363 364 case T_DIVIDE: /* integer divide fault */ 365 ucode = FPE_INTDIV; 366 i = SIGFPE; 367 break; 368 369 #if NISA > 0 370 case T_NMI: 371 #ifdef POWERFAIL_NMI 372 #ifndef TIMER_FREQ 373 # define TIMER_FREQ 1193182 374 #endif 375 if (time_second - lastalert > 10) { 376 log(LOG_WARNING, "NMI: power fail\n"); 377 sysbeep(TIMER_FREQ/880, hz); 378 lastalert = time_second; 379 } 380 goto out; 381 #else /* !POWERFAIL_NMI */ 382 /* machine/parity/power fail/"kitchen sink" faults */ 383 if (isa_nmi(code) == 0) { 384 #ifdef DDB 385 /* 386 * NMI can be hooked up to a pushbutton 387 * for debugging. 388 */ 389 if (ddb_on_nmi) { 390 printf ("NMI ... going to debugger\n"); 391 kdb_trap (type, 0, &frame); 392 } 393 #endif /* DDB */ 394 goto out; 395 } else if (panic_on_nmi) 396 panic("NMI indicates hardware failure"); 397 break; 398 #endif /* POWERFAIL_NMI */ 399 #endif /* NISA > 0 */ 400 401 case T_OFLOW: /* integer overflow fault */ 402 ucode = FPE_INTOVF; 403 i = SIGFPE; 404 break; 405 406 case T_BOUND: /* bounds check fault */ 407 ucode = FPE_FLTSUB; 408 i = SIGFPE; 409 break; 410 411 case T_DNA: 412 #ifdef DEV_NPX 413 /* transparent fault (due to context switch "late") */ 414 if (npxdna()) 415 goto out; 416 #endif 417 if (!pmath_emulate) { 418 i = SIGFPE; 419 ucode = FPE_FPU_NP_TRAP; 420 break; 421 } 422 i = (*pmath_emulate)(&frame); 423 if (i == 0) { 424 if (!(frame.tf_eflags & PSL_T)) 425 goto out; 426 frame.tf_eflags &= ~PSL_T; 427 i = SIGTRAP; 428 } 429 /* else ucode = emulator_only_knows() XXX */ 430 break; 431 432 case T_FPOPFLT: /* FPU operand fetch fault */ 433 ucode = T_FPOPFLT; 434 i = SIGILL; 435 break; 436 } 437 } else { 438 /* kernel trap */ 439 440 switch (type) { 441 case T_PAGEFLT: /* page fault */ 442 (void) trap_pfault(&frame, FALSE, eva); 443 goto out; 444 445 case T_DNA: 446 #ifdef DEV_NPX 447 /* 448 * The kernel is apparently using npx for copying. 449 * XXX this should be fatal unless the kernel has 450 * registered such use. 451 */ 452 if (npxdna()) 453 goto out; 454 #endif 455 break; 456 457 /* 458 * The following two traps can happen in 459 * vm86 mode, and, if so, we want to handle 460 * them specially. 461 */ 462 case T_PROTFLT: /* general protection fault */ 463 case T_STKFLT: /* stack fault */ 464 if (frame.tf_eflags & PSL_VM) { 465 i = vm86_emulate((struct vm86frame *)&frame); 466 if (i != 0) 467 /* 468 * returns to original process 469 */ 470 mtx_exit(&Giant, MTX_DEF); 471 vm86_trap((struct vm86frame *)&frame); 472 goto out; 473 } 474 if (type == T_STKFLT) 475 break; 476 477 /* FALL THROUGH */ 478 479 case T_SEGNPFLT: /* segment not present fault */ 480 if (in_vm86call) 481 break; 482 483 if (PCPU_GET(intr_nesting_level) != 0) 484 break; 485 486 /* 487 * Invalid %fs's and %gs's can be created using 488 * procfs or PT_SETREGS or by invalidating the 489 * underlying LDT entry. This causes a fault 490 * in kernel mode when the kernel attempts to 491 * switch contexts. Lose the bad context 492 * (XXX) so that we can continue, and generate 493 * a signal. 494 */ 495 if (frame.tf_eip == (int)cpu_switch_load_gs) { 496 PCPU_GET(curpcb)->pcb_gs = 0; 497 psignal(p, SIGBUS); 498 goto out; 499 } 500 501 /* 502 * Invalid segment selectors and out of bounds 503 * %eip's and %esp's can be set up in user mode. 504 * This causes a fault in kernel mode when the 505 * kernel tries to return to user mode. We want 506 * to get this fault so that we can fix the 507 * problem here and not have to check all the 508 * selectors and pointers when the user changes 509 * them. 510 */ 511 if (frame.tf_eip == (int)doreti_iret) { 512 frame.tf_eip = (int)doreti_iret_fault; 513 goto out; 514 } 515 if (frame.tf_eip == (int)doreti_popl_ds) { 516 frame.tf_eip = (int)doreti_popl_ds_fault; 517 goto out; 518 } 519 if (frame.tf_eip == (int)doreti_popl_es) { 520 frame.tf_eip = (int)doreti_popl_es_fault; 521 goto out; 522 } 523 if (frame.tf_eip == (int)doreti_popl_fs) { 524 frame.tf_eip = (int)doreti_popl_fs_fault; 525 goto out; 526 } 527 if (PCPU_GET(curpcb) != NULL && 528 PCPU_GET(curpcb)->pcb_onfault != NULL) { 529 frame.tf_eip = 530 (int)PCPU_GET(curpcb)->pcb_onfault; 531 goto out; 532 } 533 break; 534 535 case T_TSSFLT: 536 /* 537 * PSL_NT can be set in user mode and isn't cleared 538 * automatically when the kernel is entered. This 539 * causes a TSS fault when the kernel attempts to 540 * `iret' because the TSS link is uninitialized. We 541 * want to get this fault so that we can fix the 542 * problem here and not every time the kernel is 543 * entered. 544 */ 545 if (frame.tf_eflags & PSL_NT) { 546 frame.tf_eflags &= ~PSL_NT; 547 goto out; 548 } 549 break; 550 551 case T_TRCTRAP: /* trace trap */ 552 if (frame.tf_eip == (int)IDTVEC(syscall)) { 553 /* 554 * We've just entered system mode via the 555 * syscall lcall. Continue single stepping 556 * silently until the syscall handler has 557 * saved the flags. 558 */ 559 goto out; 560 } 561 if (frame.tf_eip == (int)IDTVEC(syscall) + 1) { 562 /* 563 * The syscall handler has now saved the 564 * flags. Stop single stepping it. 565 */ 566 frame.tf_eflags &= ~PSL_T; 567 goto out; 568 } 569 /* 570 * Ignore debug register trace traps due to 571 * accesses in the user's address space, which 572 * can happen under several conditions such as 573 * if a user sets a watchpoint on a buffer and 574 * then passes that buffer to a system call. 575 * We still want to get TRCTRAPS for addresses 576 * in kernel space because that is useful when 577 * debugging the kernel. 578 */ 579 if (user_dbreg_trap() && !in_vm86call) { 580 /* 581 * Reset breakpoint bits because the 582 * processor doesn't 583 */ 584 load_dr6(rdr6() & 0xfffffff0); 585 goto out; 586 } 587 /* 588 * Fall through (TRCTRAP kernel mode, kernel address) 589 */ 590 case T_BPTFLT: 591 /* 592 * If DDB is enabled, let it handle the debugger trap. 593 * Otherwise, debugger traps "can't happen". 594 */ 595 #ifdef DDB 596 if (kdb_trap (type, 0, &frame)) 597 goto out; 598 #endif 599 break; 600 601 #if NISA > 0 602 case T_NMI: 603 #ifdef POWERFAIL_NMI 604 if (time_second - lastalert > 10) { 605 log(LOG_WARNING, "NMI: power fail\n"); 606 sysbeep(TIMER_FREQ/880, hz); 607 lastalert = time_second; 608 } 609 goto out; 610 #else /* !POWERFAIL_NMI */ 611 /* machine/parity/power fail/"kitchen sink" faults */ 612 if (isa_nmi(code) == 0) { 613 #ifdef DDB 614 /* 615 * NMI can be hooked up to a pushbutton 616 * for debugging. 617 */ 618 if (ddb_on_nmi) { 619 printf ("NMI ... going to debugger\n"); 620 kdb_trap (type, 0, &frame); 621 } 622 #endif /* DDB */ 623 goto out; 624 } else if (panic_on_nmi == 0) 625 goto out; 626 /* FALL THROUGH */ 627 #endif /* POWERFAIL_NMI */ 628 #endif /* NISA > 0 */ 629 } 630 631 trap_fatal(&frame, eva); 632 goto out; 633 } 634 635 /* Translate fault for emulators (e.g. Linux) */ 636 if (*p->p_sysent->sv_transtrap) 637 i = (*p->p_sysent->sv_transtrap)(i, type); 638 639 trapsignal(p, i, ucode); 640 641 #ifdef DEBUG 642 if (type <= MAX_TRAP_MSG) { 643 uprintf("fatal process exception: %s", 644 trap_msg[type]); 645 if ((type == T_PAGEFLT) || (type == T_PROTFLT)) 646 uprintf(", fault VA = 0x%lx", (u_long)eva); 647 uprintf("\n"); 648 } 649 #endif 650 651 user: 652 userret(p, &frame, sticks, 1); 653 out: 654 mtx_exit(&Giant, MTX_DEF); 655 } 656 657 #ifdef notyet 658 /* 659 * This version doesn't allow a page fault to user space while 660 * in the kernel. The rest of the kernel needs to be made "safe" 661 * before this can be used. I think the only things remaining 662 * to be made safe are the iBCS2 code and the process tracing/ 663 * debugging code. 664 */ 665 static int 666 trap_pfault(frame, usermode, eva) 667 struct trapframe *frame; 668 int usermode; 669 vm_offset_t eva; 670 { 671 vm_offset_t va; 672 struct vmspace *vm = NULL; 673 vm_map_t map = 0; 674 int rv = 0; 675 vm_prot_t ftype; 676 struct proc *p = curproc; 677 678 if (frame->tf_err & PGEX_W) 679 ftype = VM_PROT_WRITE; 680 else 681 ftype = VM_PROT_READ; 682 683 va = trunc_page(eva); 684 if (va < VM_MIN_KERNEL_ADDRESS) { 685 vm_offset_t v; 686 vm_page_t mpte; 687 688 if (p == NULL || 689 (!usermode && va < VM_MAXUSER_ADDRESS && 690 (PCPU_GET(intr_nesting_level) != 0 || 691 PCPU_GET(curpcb) == NULL || 692 PCPU_GET(curpcb)->pcb_onfault == NULL))) { 693 trap_fatal(frame, eva); 694 return (-1); 695 } 696 697 /* 698 * This is a fault on non-kernel virtual memory. 699 * vm is initialized above to NULL. If curproc is NULL 700 * or curproc->p_vmspace is NULL the fault is fatal. 701 */ 702 vm = p->p_vmspace; 703 if (vm == NULL) 704 goto nogo; 705 706 map = &vm->vm_map; 707 708 /* 709 * Keep swapout from messing with us during this 710 * critical time. 711 */ 712 ++p->p_lock; 713 714 /* 715 * Grow the stack if necessary 716 */ 717 /* grow_stack returns false only if va falls into 718 * a growable stack region and the stack growth 719 * fails. It returns true if va was not within 720 * a growable stack region, or if the stack 721 * growth succeeded. 722 */ 723 if (!grow_stack (p, va)) { 724 rv = KERN_FAILURE; 725 --p->p_lock; 726 goto nogo; 727 } 728 729 /* Fault in the user page: */ 730 rv = vm_fault(map, va, ftype, 731 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY 732 : VM_FAULT_NORMAL); 733 734 --p->p_lock; 735 } else { 736 /* 737 * Don't allow user-mode faults in kernel address space. 738 */ 739 if (usermode) 740 goto nogo; 741 742 /* 743 * Since we know that kernel virtual address addresses 744 * always have pte pages mapped, we just have to fault 745 * the page. 746 */ 747 rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL); 748 } 749 750 if (rv == KERN_SUCCESS) 751 return (0); 752 nogo: 753 if (!usermode) { 754 if (PCPU_GET(intr_nesting_level) == 0 && 755 PCPU_GET(curpcb) != NULL && 756 PCPU_GET(curpcb)->pcb_onfault != NULL) { 757 frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault; 758 return (0); 759 } 760 trap_fatal(frame, eva); 761 return (-1); 762 } 763 764 /* kludge to pass faulting virtual address to sendsig */ 765 frame->tf_err = eva; 766 767 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV); 768 } 769 #endif 770 771 int 772 trap_pfault(frame, usermode, eva) 773 struct trapframe *frame; 774 int usermode; 775 vm_offset_t eva; 776 { 777 vm_offset_t va; 778 struct vmspace *vm = NULL; 779 vm_map_t map = 0; 780 int rv = 0; 781 vm_prot_t ftype; 782 struct proc *p = curproc; 783 784 va = trunc_page(eva); 785 if (va >= KERNBASE) { 786 /* 787 * Don't allow user-mode faults in kernel address space. 788 * An exception: if the faulting address is the invalid 789 * instruction entry in the IDT, then the Intel Pentium 790 * F00F bug workaround was triggered, and we need to 791 * treat it is as an illegal instruction, and not a page 792 * fault. 793 */ 794 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 795 if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) 796 return -2; 797 #endif 798 if (usermode) 799 goto nogo; 800 801 map = kernel_map; 802 } else { 803 /* 804 * This is a fault on non-kernel virtual memory. 805 * vm is initialized above to NULL. If curproc is NULL 806 * or curproc->p_vmspace is NULL the fault is fatal. 807 */ 808 if (p != NULL) 809 vm = p->p_vmspace; 810 811 if (vm == NULL) 812 goto nogo; 813 814 map = &vm->vm_map; 815 } 816 817 if (frame->tf_err & PGEX_W) 818 ftype = VM_PROT_WRITE; 819 else 820 ftype = VM_PROT_READ; 821 822 if (map != kernel_map) { 823 /* 824 * Keep swapout from messing with us during this 825 * critical time. 826 */ 827 ++p->p_lock; 828 829 /* 830 * Grow the stack if necessary 831 */ 832 /* grow_stack returns false only if va falls into 833 * a growable stack region and the stack growth 834 * fails. It returns true if va was not within 835 * a growable stack region, or if the stack 836 * growth succeeded. 837 */ 838 if (!grow_stack (p, va)) { 839 rv = KERN_FAILURE; 840 --p->p_lock; 841 goto nogo; 842 } 843 844 /* Fault in the user page: */ 845 rv = vm_fault(map, va, ftype, 846 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY 847 : VM_FAULT_NORMAL); 848 849 --p->p_lock; 850 } else { 851 /* 852 * Don't have to worry about process locking or stacks in the kernel. 853 */ 854 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); 855 } 856 857 if (rv == KERN_SUCCESS) 858 return (0); 859 nogo: 860 if (!usermode) { 861 if (PCPU_GET(intr_nesting_level) == 0 && 862 PCPU_GET(curpcb) != NULL && 863 PCPU_GET(curpcb)->pcb_onfault != NULL) { 864 frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault; 865 return (0); 866 } 867 trap_fatal(frame, eva); 868 return (-1); 869 } 870 871 /* kludge to pass faulting virtual address to sendsig */ 872 frame->tf_err = eva; 873 874 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV); 875 } 876 877 static void 878 trap_fatal(frame, eva) 879 struct trapframe *frame; 880 vm_offset_t eva; 881 { 882 int code, type, ss, esp; 883 struct soft_segment_descriptor softseg; 884 885 code = frame->tf_err; 886 type = frame->tf_trapno; 887 sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg); 888 889 if (type <= MAX_TRAP_MSG) 890 printf("\n\nFatal trap %d: %s while in %s mode\n", 891 type, trap_msg[type], 892 frame->tf_eflags & PSL_VM ? "vm86" : 893 ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); 894 #ifdef SMP 895 /* two seperate prints in case of a trap on an unmapped page */ 896 printf("cpuid = %d; ", PCPU_GET(cpuid)); 897 printf("lapic.id = %08x\n", lapic.id); 898 #endif 899 if (type == T_PAGEFLT) { 900 printf("fault virtual address = 0x%x\n", eva); 901 printf("fault code = %s %s, %s\n", 902 code & PGEX_U ? "user" : "supervisor", 903 code & PGEX_W ? "write" : "read", 904 code & PGEX_P ? "protection violation" : "page not present"); 905 } 906 printf("instruction pointer = 0x%x:0x%x\n", 907 frame->tf_cs & 0xffff, frame->tf_eip); 908 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { 909 ss = frame->tf_ss & 0xffff; 910 esp = frame->tf_esp; 911 } else { 912 ss = GSEL(GDATA_SEL, SEL_KPL); 913 esp = (int)&frame->tf_esp; 914 } 915 printf("stack pointer = 0x%x:0x%x\n", ss, esp); 916 printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp); 917 printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n", 918 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 919 printf(" = DPL %d, pres %d, def32 %d, gran %d\n", 920 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32, 921 softseg.ssd_gran); 922 printf("processor eflags = "); 923 if (frame->tf_eflags & PSL_T) 924 printf("trace trap, "); 925 if (frame->tf_eflags & PSL_I) 926 printf("interrupt enabled, "); 927 if (frame->tf_eflags & PSL_NT) 928 printf("nested task, "); 929 if (frame->tf_eflags & PSL_RF) 930 printf("resume, "); 931 if (frame->tf_eflags & PSL_VM) 932 printf("vm86, "); 933 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12); 934 printf("current process = "); 935 if (curproc) { 936 printf("%lu (%s)\n", 937 (u_long)curproc->p_pid, curproc->p_comm ? 938 curproc->p_comm : ""); 939 } else { 940 printf("Idle\n"); 941 } 942 943 #ifdef KDB 944 if (kdb_trap(&psl)) 945 return; 946 #endif 947 #ifdef DDB 948 if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame)) 949 return; 950 #endif 951 printf("trap number = %d\n", type); 952 if (type <= MAX_TRAP_MSG) 953 panic(trap_msg[type]); 954 else 955 panic("unknown/reserved trap"); 956 } 957 958 /* 959 * Double fault handler. Called when a fault occurs while writing 960 * a frame for a trap/exception onto the stack. This usually occurs 961 * when the stack overflows (such is the case with infinite recursion, 962 * for example). 963 * 964 * XXX Note that the current PTD gets replaced by IdlePTD when the 965 * task switch occurs. This means that the stack that was active at 966 * the time of the double fault is not available at <kstack> unless 967 * the machine was idle when the double fault occurred. The downside 968 * of this is that "trace <ebp>" in ddb won't work. 969 */ 970 void 971 dblfault_handler() 972 { 973 printf("\nFatal double fault:\n"); 974 printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip)); 975 printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp)); 976 printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp)); 977 #ifdef SMP 978 /* two seperate prints in case of a trap on an unmapped page */ 979 printf("cpuid = %d; ", PCPU_GET(cpuid)); 980 printf("lapic.id = %08x\n", lapic.id); 981 #endif 982 panic("double fault"); 983 } 984 985 /* 986 * Compensate for 386 brain damage (missing URKR). 987 * This is a little simpler than the pagefault handler in trap() because 988 * it the page tables have already been faulted in and high addresses 989 * are thrown out early for other reasons. 990 */ 991 int trapwrite(addr) 992 unsigned addr; 993 { 994 struct proc *p; 995 vm_offset_t va; 996 struct vmspace *vm; 997 int rv; 998 999 va = trunc_page((vm_offset_t)addr); 1000 /* 1001 * XXX - MAX is END. Changed > to >= for temp. fix. 1002 */ 1003 if (va >= VM_MAXUSER_ADDRESS) 1004 return (1); 1005 1006 p = curproc; 1007 vm = p->p_vmspace; 1008 1009 ++p->p_lock; 1010 1011 if (!grow_stack (p, va)) { 1012 --p->p_lock; 1013 return (1); 1014 } 1015 1016 /* 1017 * fault the data page 1018 */ 1019 rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY); 1020 1021 --p->p_lock; 1022 1023 if (rv != KERN_SUCCESS) 1024 return 1; 1025 1026 return (0); 1027 } 1028 1029 /* 1030 * syscall2 - MP aware system call request C handler 1031 * 1032 * A system call is essentially treated as a trap except that the 1033 * MP lock is not held on entry or return. We are responsible for 1034 * obtaining the MP lock if necessary and for handling ASTs 1035 * (e.g. a task switch) prior to return. 1036 * 1037 * In general, only simple access and manipulation of curproc and 1038 * the current stack is allowed without having to hold MP lock. 1039 */ 1040 void 1041 syscall2(frame) 1042 struct trapframe frame; 1043 { 1044 caddr_t params; 1045 int i; 1046 struct sysent *callp; 1047 struct proc *p = curproc; 1048 u_quad_t sticks; 1049 int error; 1050 int narg; 1051 int args[8]; 1052 int have_giant = 0; 1053 u_int code; 1054 1055 atomic_add_int(&cnt.v_syscall, 1); 1056 1057 #ifdef DIAGNOSTIC 1058 if (ISPL(frame.tf_cs) != SEL_UPL) { 1059 mtx_enter(&Giant, MTX_DEF); 1060 panic("syscall"); 1061 /* NOT REACHED */ 1062 } 1063 #endif 1064 1065 /* 1066 * handle atomicy by looping since interrupts are enabled and the 1067 * MP lock is not held. 1068 */ 1069 sticks = ((volatile struct proc *)p)->p_sticks; 1070 while (sticks != ((volatile struct proc *)p)->p_sticks) 1071 sticks = ((volatile struct proc *)p)->p_sticks; 1072 1073 p->p_md.md_regs = &frame; 1074 params = (caddr_t)frame.tf_esp + sizeof(int); 1075 code = frame.tf_eax; 1076 1077 if (p->p_sysent->sv_prepsyscall) { 1078 /* 1079 * The prep code is not MP aware. 1080 */ 1081 mtx_enter(&Giant, MTX_DEF); 1082 (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, ¶ms); 1083 mtx_exit(&Giant, MTX_DEF); 1084 } else { 1085 /* 1086 * Need to check if this is a 32 bit or 64 bit syscall. 1087 * fuword is MP aware. 1088 */ 1089 if (code == SYS_syscall) { 1090 /* 1091 * Code is first argument, followed by actual args. 1092 */ 1093 code = fuword(params); 1094 params += sizeof(int); 1095 } else if (code == SYS___syscall) { 1096 /* 1097 * Like syscall, but code is a quad, so as to maintain 1098 * quad alignment for the rest of the arguments. 1099 */ 1100 code = fuword(params); 1101 params += sizeof(quad_t); 1102 } 1103 } 1104 1105 if (p->p_sysent->sv_mask) 1106 code &= p->p_sysent->sv_mask; 1107 1108 if (code >= p->p_sysent->sv_size) 1109 callp = &p->p_sysent->sv_table[0]; 1110 else 1111 callp = &p->p_sysent->sv_table[code]; 1112 1113 narg = callp->sy_narg & SYF_ARGMASK; 1114 1115 /* 1116 * copyin is MP aware, but the tracing code is not 1117 */ 1118 if (params && (i = narg * sizeof(int)) && 1119 (error = copyin(params, (caddr_t)args, (u_int)i))) { 1120 mtx_enter(&Giant, MTX_DEF); 1121 have_giant = 1; 1122 #ifdef KTRACE 1123 if (KTRPOINT(p, KTR_SYSCALL)) 1124 ktrsyscall(p->p_tracep, code, narg, args); 1125 #endif 1126 goto bad; 1127 } 1128 1129 /* 1130 * Try to run the syscall without the MP lock if the syscall 1131 * is MP safe. We have to obtain the MP lock no matter what if 1132 * we are ktracing 1133 */ 1134 if ((callp->sy_narg & SYF_MPSAFE) == 0) { 1135 mtx_enter(&Giant, MTX_DEF); 1136 have_giant = 1; 1137 } 1138 1139 #ifdef KTRACE 1140 if (KTRPOINT(p, KTR_SYSCALL)) { 1141 if (have_giant == 0) { 1142 mtx_enter(&Giant, MTX_DEF); 1143 have_giant = 1; 1144 } 1145 ktrsyscall(p->p_tracep, code, narg, args); 1146 } 1147 #endif 1148 p->p_retval[0] = 0; 1149 p->p_retval[1] = frame.tf_edx; 1150 1151 STOPEVENT(p, S_SCE, narg); /* MP aware */ 1152 1153 error = (*callp->sy_call)(p, args); 1154 1155 /* 1156 * MP SAFE (we may or may not have the MP lock at this point) 1157 */ 1158 switch (error) { 1159 case 0: 1160 /* 1161 * Reinitialize proc pointer `p' as it may be different 1162 * if this is a child returning from fork syscall. 1163 */ 1164 p = curproc; 1165 frame.tf_eax = p->p_retval[0]; 1166 frame.tf_edx = p->p_retval[1]; 1167 frame.tf_eflags &= ~PSL_C; 1168 break; 1169 1170 case ERESTART: 1171 /* 1172 * Reconstruct pc, assuming lcall $X,y is 7 bytes, 1173 * int 0x80 is 2 bytes. We saved this in tf_err. 1174 */ 1175 frame.tf_eip -= frame.tf_err; 1176 break; 1177 1178 case EJUSTRETURN: 1179 break; 1180 1181 default: 1182 bad: 1183 if (p->p_sysent->sv_errsize) { 1184 if (error >= p->p_sysent->sv_errsize) 1185 error = -1; /* XXX */ 1186 else 1187 error = p->p_sysent->sv_errtbl[error]; 1188 } 1189 frame.tf_eax = error; 1190 frame.tf_eflags |= PSL_C; 1191 break; 1192 } 1193 1194 /* 1195 * Traced syscall. trapsignal() is not MP aware. 1196 */ 1197 if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) { 1198 if (have_giant == 0) { 1199 mtx_enter(&Giant, MTX_DEF); 1200 have_giant = 1; 1201 } 1202 frame.tf_eflags &= ~PSL_T; 1203 trapsignal(p, SIGTRAP, 0); 1204 } 1205 1206 /* 1207 * Handle reschedule and other end-of-syscall issues 1208 */ 1209 have_giant = userret(p, &frame, sticks, have_giant); 1210 1211 #ifdef KTRACE 1212 if (KTRPOINT(p, KTR_SYSRET)) { 1213 if (have_giant == 0) { 1214 mtx_enter(&Giant, MTX_DEF); 1215 have_giant = 1; 1216 } 1217 ktrsysret(p->p_tracep, code, error, p->p_retval[0]); 1218 } 1219 #endif 1220 1221 /* 1222 * This works because errno is findable through the 1223 * register set. If we ever support an emulation where this 1224 * is not the case, this code will need to be revisited. 1225 */ 1226 STOPEVENT(p, S_SCX, code); 1227 1228 /* 1229 * Release the MP lock if we had to get it 1230 */ 1231 if (have_giant) 1232 mtx_exit(&Giant, MTX_DEF); 1233 1234 mtx_assert(&sched_lock, MA_NOTOWNED); 1235 mtx_assert(&Giant, MA_NOTOWNED); 1236 #ifdef WITNESS 1237 if (witness_list(p)) { 1238 panic("system call %s returning with mutex(s) held\n", 1239 syscallnames[code]); 1240 } 1241 #endif 1242 } 1243 1244 void 1245 ast(frame) 1246 struct trapframe frame; 1247 { 1248 struct proc *p = CURPROC; 1249 u_quad_t sticks; 1250 1251 /* 1252 * handle atomicy by looping since interrupts are enabled and the 1253 * MP lock is not held. 1254 */ 1255 sticks = ((volatile struct proc *)p)->p_sticks; 1256 while (sticks != ((volatile struct proc *)p)->p_sticks) 1257 sticks = ((volatile struct proc *)p)->p_sticks; 1258 1259 astoff(); 1260 atomic_add_int(&cnt.v_soft, 1); 1261 if (p->p_flag & P_OWEUPC) { 1262 mtx_enter(&Giant, MTX_DEF); 1263 p->p_flag &= ~P_OWEUPC; 1264 addupc_task(p, p->p_stats->p_prof.pr_addr, 1265 p->p_stats->p_prof.pr_ticks); 1266 } 1267 if (p->p_flag & P_ALRMPEND) { 1268 if (!mtx_owned(&Giant)) 1269 mtx_enter(&Giant, MTX_DEF); 1270 p->p_flag &= ~P_ALRMPEND; 1271 psignal(p, SIGVTALRM); 1272 } 1273 if (p->p_flag & P_PROFPEND) { 1274 if (!mtx_owned(&Giant)) 1275 mtx_enter(&Giant, MTX_DEF); 1276 p->p_flag &= ~P_PROFPEND; 1277 psignal(p, SIGPROF); 1278 } 1279 if (userret(p, &frame, sticks, mtx_owned(&Giant)) != 0) 1280 mtx_exit(&Giant, MTX_DEF); 1281 } 1282 1283 /* 1284 * Simplified back end of syscall(), used when returning from fork() 1285 * directly into user mode. Giant is not held on entry, and must not 1286 * be held on return. 1287 */ 1288 void 1289 fork_return(p, frame) 1290 struct proc *p; 1291 struct trapframe frame; 1292 { 1293 int have_giant; 1294 1295 frame.tf_eax = 0; /* Child returns zero */ 1296 frame.tf_eflags &= ~PSL_C; /* success */ 1297 frame.tf_edx = 1; 1298 1299 have_giant = userret(p, &frame, 0, mtx_owned(&Giant)); 1300 #ifdef KTRACE 1301 if (KTRPOINT(p, KTR_SYSRET)) { 1302 if (have_giant == 0) { 1303 mtx_enter(&Giant, MTX_DEF); 1304 have_giant = 1; 1305 } 1306 ktrsysret(p->p_tracep, SYS_fork, 0, 0); 1307 } 1308 #endif 1309 if (have_giant) 1310 mtx_exit(&Giant, MTX_DEF); 1311 } 1312