/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD$
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#ifdef DDB
extern int in_Debugger, debugger_on_panic;
#endif

#include "isa.h"
#include "npx.h"

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

static __inline void userret __P((struct proc *p, struct trapframe *frame,
				  u_quad_t oticks));

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static __inline void
userret(p, frame, oticks)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
{
	int sig, s;

	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

#if 0
	if (!want_resched &&
	    (p->p_priority <= p->p_usrpri) &&
	    (p->p_rtprio.type == RTP_PRIO_NORMAL)) {
		int newpriority;
		p->p_estcpu += 1;
		newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
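		/* keep the recomputed priority within the scheduler's range */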
		newpriority = min(newpriority, MAXPRI);
		p->p_usrpri = newpriority;
	}
#endif

	p->p_priority = p->p_usrpri;
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before
		 * we mi_switch()'ed, we might not be on the queue indicated
		 * by our priority.
		 */
		s = splhigh();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}
	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, frame->tf_eip,
			    (u_int)(p->p_sticks - oticks) * psratio);

	curpriority = p->p_priority;
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;

	if (!(frame.tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the page fault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		enable_intr();
	}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame.tf_trapno;
	code = frame.tf_err;

	if (in_vm86call) {
		if (frame.tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			i = vm86_emulate((struct vm86frame *)&frame);
			if (i != 0)
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)&frame);
			return;
		}
		switch (type) {
			/*
			 * these traps want either a process context or
			 * assume a normal userspace trap.
279 */ 280 case T_PROTFLT: 281 case T_SEGNPFLT: 282 trap_fatal(&frame, eva); 283 return; 284 case T_TRCTRAP: 285 type = T_BPTFLT; /* kernel breakpoint */ 286 /* FALL THROUGH */ 287 } 288 goto kernel_trap; /* normal kernel trap handling */ 289 } 290 291 if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) { 292 /* user trap */ 293 294 sticks = p->p_sticks; 295 p->p_md.md_regs = &frame; 296 297 switch (type) { 298 case T_PRIVINFLT: /* privileged instruction fault */ 299 ucode = type; 300 i = SIGILL; 301 break; 302 303 case T_BPTFLT: /* bpt instruction fault */ 304 case T_TRCTRAP: /* trace trap */ 305 frame.tf_eflags &= ~PSL_T; 306 i = SIGTRAP; 307 break; 308 309 case T_ARITHTRAP: /* arithmetic trap */ 310 ucode = code; 311 i = SIGFPE; 312 break; 313 314 case T_ASTFLT: /* Allow process switch */ 315 astoff(); 316 cnt.v_soft++; 317 if (p->p_flag & P_OWEUPC) { 318 p->p_flag &= ~P_OWEUPC; 319 addupc_task(p, p->p_stats->p_prof.pr_addr, 320 p->p_stats->p_prof.pr_ticks); 321 } 322 goto out; 323 324 /* 325 * The following two traps can happen in 326 * vm86 mode, and, if so, we want to handle 327 * them specially. 328 */ 329 case T_PROTFLT: /* general protection fault */ 330 case T_STKFLT: /* stack fault */ 331 if (frame.tf_eflags & PSL_VM) { 332 i = vm86_emulate((struct vm86frame *)&frame); 333 if (i == 0) 334 goto out; 335 break; 336 } 337 /* FALL THROUGH */ 338 339 case T_SEGNPFLT: /* segment not present fault */ 340 case T_TSSFLT: /* invalid TSS fault */ 341 case T_DOUBLEFLT: /* double fault */ 342 default: 343 ucode = code + BUS_SEGM_FAULT ; 344 i = SIGBUS; 345 break; 346 347 case T_PAGEFLT: /* page fault */ 348 i = trap_pfault(&frame, TRUE, eva); 349 if (i == -1) 350 return; 351 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 352 if (i == -2) 353 goto restart; 354 #endif 355 if (i == 0) 356 goto out; 357 358 ucode = T_PAGEFLT; 359 break; 360 361 case T_DIVIDE: /* integer divide fault */ 362 ucode = FPE_INTDIV; 363 i = SIGFPE; 364 break; 365 366 #if NISA > 0 367 case T_NMI: 368 #ifdef POWERFAIL_NMI 369 goto handle_powerfail; 370 #else /* !POWERFAIL_NMI */ 371 #ifdef DDB 372 /* NMI can be hooked up to a pushbutton for debugging */ 373 printf ("NMI ... going to debugger\n"); 374 if (kdb_trap (type, 0, &frame)) 375 return; 376 #endif /* DDB */ 377 /* machine/parity/power fail/"kitchen sink" faults */ 378 if (isa_nmi(code) == 0) return; 379 panic("NMI indicates hardware failure"); 380 #endif /* POWERFAIL_NMI */ 381 #endif /* NISA > 0 */ 382 383 case T_OFLOW: /* integer overflow fault */ 384 ucode = FPE_INTOVF; 385 i = SIGFPE; 386 break; 387 388 case T_BOUND: /* bounds check fault */ 389 ucode = FPE_FLTSUB; 390 i = SIGFPE; 391 break; 392 393 case T_DNA: 394 #if NNPX > 0 395 /* if a transparent fault (due to context switch "late") */ 396 if (npxdna()) 397 return; 398 #endif 399 if (!pmath_emulate) { 400 i = SIGFPE; 401 ucode = FPE_FPU_NP_TRAP; 402 break; 403 } 404 i = (*pmath_emulate)(&frame); 405 if (i == 0) { 406 if (!(frame.tf_eflags & PSL_T)) 407 return; 408 frame.tf_eflags &= ~PSL_T; 409 i = SIGTRAP; 410 } 411 /* else ucode = emulator_only_knows() XXX */ 412 break; 413 414 case T_FPOPFLT: /* FPU operand fetch fault */ 415 ucode = T_FPOPFLT; 416 i = SIGILL; 417 break; 418 } 419 } else { 420 kernel_trap: 421 /* kernel trap */ 422 423 switch (type) { 424 case T_PAGEFLT: /* page fault */ 425 (void) trap_pfault(&frame, FALSE, eva); 426 return; 427 428 case T_DNA: 429 #if NNPX > 0 430 /* 431 * The kernel is apparently using npx for copying. 
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				return;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			return;						\
		}							\
	} while (0)

			if (intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				if (frame.tf_eip == (int)cpu_switch_load_gs) {
					curpcb->pcb_gs = 0;
					psignal(p, SIGBUS);
					return;
				}
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				if (curpcb && curpcb->pcb_onfault) {
					frame.tf_eip = (int)curpcb->pcb_onfault;
					return;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				return;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				return;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				return;
			}
			/*
			 * Fall through.
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap(type, 0, &frame))
				return;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#define TIMER_FREQ	1193182
#endif
handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			return;
		}
#else /* !POWERFAIL_NMI */
#ifdef DDB
			/* NMI can be hooked up to a pushbutton for debugging */
			printf("NMI ... going to debugger\n");
			if (kdb_trap(type, 0, &frame))
				return;
#endif /* DDB */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0)
				return;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		return;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	userret(p, &frame, sticks);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (intr_nesting_level != 0 || curpcb == NULL ||
		      curpcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.
		 * grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack(p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses
		 * always have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ?
	    SIGBUS : SIGSEGV);
}
#endif

int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) {
			frame->tf_trapno = T_PRIVINFLT;
			return (-2);
		}
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.
		 * grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack(p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
		    type, trap_msg[type],
		    frame->tf_eflags & PSL_VM ? "vm86" :
		    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address	= 0x%x\n", eva);
		printf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ?
			    "protection violation" : "page not present");
	}
	printf("instruction pointer	= 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer		= 0x%x:0x%x\n", ss, esp);
	printf("frame pointer		= 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf("			= DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process		= ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}
	printf("interrupt mask		= ");
	if ((cpl & net_imask) == net_imask)
		printf("net ");
	if ((cpl & tty_imask) == tty_imask)
		printf("tty ");
	if ((cpl & bio_imask) == bio_imask)
		printf("bio ");
	if ((cpl & cam_imask) == cam_imask)
		printf("cam ");
	if (cpl == 0)
		printf("none");
#ifdef SMP
	/**
	 * XXX FIXME:
	 *	we probably SHOULD have stopped the other CPUs before now!
	 *	another CPU COULD have been touching cpl at this moment...
	 */
	printf(" <- SMP: XXX");
#endif
	printf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || in_Debugger) && kdb_trap(type, 0, frame))
		return;
#endif
	printf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic(trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", common_tss.tss_eip);
	printf("esp = 0x%x\n", common_tss.tss_esp);
	printf("ebp = 0x%x\n", common_tss.tss_ebp);
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	printf("mp_lock = %08x; ", mp_lock);
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the page fault handler in trap()
 * because the page tables have already been faulted in and high
 * addresses are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack(p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return (1);

	return (0);
}

/*
 * System call request from POSIX system call gate interface to kernel.
 * Like trap(), argument is call by reference.
 */
void
syscall(frame)
	struct trapframe frame;
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct proc *p = curproc;
	u_quad_t sticks;
	int error;
	int args[8];
	u_int code;

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL)
		panic("syscall");
#endif
	sticks = p->p_sticks;
	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	if (params && (i = callp->sy_narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
		goto bad;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, callp->sy_narg);

	error = (*callp->sy_call)(p, args);

	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
		/* Traced syscall.
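		 * Clear PSL_T so single-stepping stops here, and post
		 * SIGTRAP so the debugger gets control at the syscall
		 * boundary.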
		 */
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	userret(p, &frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	userret(p, &frame, 0);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
}