/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD$
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_ktrace.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/acct.h>
#include <sys/kernel.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/../isa/isa_device.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include "isa.h"
#include "npx.h"

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int));
static void trap_fatal __P((struct trapframe *));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

static void userret __P((struct proc *p, struct trapframe *frame,
			 u_quad_t oticks));

static inline void
userret(p, frame, oticks)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
{
	int sig, s;

	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before we
		 * mi_switch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splhigh();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}
	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, frame->tf_eip,
			    (u_int)(p->p_sticks - oticks) * psratio);

	curpriority = p->p_priority;
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef DEBUG
	u_long eva;
#endif

	type = frame.tf_trapno;
	code = frame.tf_err;

	if (ISPL(frame.tf_cs) == SEL_UPL) {
		/* user trap */

		sticks = p->p_sticks;
		p->p_md.md_regs = (int *)&frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			astoff();
			cnt.v_soft++;
			if (p->p_flag & P_OWEUPC) {
				p->p_flag &= ~P_OWEUPC;
				addupc_task(p, p->p_stats->p_prof.pr_addr,
					    p->p_stats->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
		case T_STKFLT:		/* stack fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE);
			if (i == -1)
				return;
			if (i == 0)
				goto out;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV_TRAP;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
#ifdef DDB
			/* NMI can be hooked up to a pushbutton for debugging */
			printf ("NMI ... going to debugger\n");
			if (kdb_trap (type, 0, &frame))
				return;
#endif /* DDB */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) return;
			panic("NMI indicates hardware failure");
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF_TRAP;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_SUBRNG_TRAP;
			i = SIGFPE;
			break;

		case T_DNA:
#if NNPX > 0
			/* if a transparent fault (due to context switch "late") */
			if (npxdna())
				return;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					return;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			(void) trap_pfault(&frame, FALSE);
			return;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				return;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame.tf_eip == (int)where) {			\
			frame.tf_eip = (int)whereto;			\
			return;						\
		}							\
	} while (0)

			if (intr_nesting_level == 0) {
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				if (curpcb && curpcb->pcb_onfault) {
					frame.tf_eip = (int)curpcb->pcb_onfault;
					return;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				return;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				return;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				return;
			}
			/*
			 * Fall through.
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap (type, 0, &frame))
				return;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ	1193182
#endif
	handle_powerfail:
		{
			static unsigned lastalert = 0;

			if (time.tv_sec - lastalert > 10)
			{
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time.tv_sec;
			}
			return;
		}
#else /* !POWERFAIL_NMI */
#ifdef DDB
			/* NMI can be hooked up to a pushbutton for debugging */
			printf ("NMI ... going to debugger\n");
			if (kdb_trap (type, 0, &frame))
				return;
#endif /* DDB */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) return;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame);
		return;
	}

	trapsignal(p, i, ucode);

#ifdef DEBUG
	eva = rcr2();
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%x", eva);
		uprintf("\n");
	}
#endif

out:
	userret(p, &frame, sticks);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode)
	struct trapframe *frame;
	int usermode;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	int eva;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	eva = rcr2();
	va = trunc_page((vm_offset_t)eva);

	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (intr_nesting_level != 0 || curpcb == NULL ||
		      curpcb->pcb_onfault == NULL))) {
			trap_fatal(frame);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		if ((caddr_t)va > vm->vm_maxsaddr
		    && (caddr_t)va < (caddr_t)USRSTACK) {
			if (!grow(p, va)) {
				rv = KERN_FAILURE;
				--p->p_lock;
				goto nogo;
			}
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, FALSE);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses always have
		 * pte pages mapped, we just have to fault the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, FALSE);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

int
trap_pfault(frame, usermode)
	struct trapframe *frame;
	int usermode;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	int eva;
	struct proc *p = curproc;

	eva = rcr2();
	va = trunc_page((vm_offset_t)eva);

	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_READ | VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		if ((caddr_t)va > vm->vm_maxsaddr
		    && (caddr_t)va < (caddr_t)USRSTACK) {
			if (!grow(p, va)) {
				rv = KERN_FAILURE;
				--p->p_lock;
				goto nogo;
			}
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, FALSE);

		--p->p_lock;
	} else {
		/*
		 * Since we know that kernel virtual addresses always have
		 * pte pages mapped, we just have to fault the page.
		 */
		rv = vm_fault(map, va, ftype, FALSE);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame)
	struct trapframe *frame;
{
	int code, type, eva, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	eva = rcr2();
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
"protection violation" : "page not present"); 685 } 686 printf("instruction pointer = 0x%x:0x%x\n", 687 frame->tf_cs & 0xffff, frame->tf_eip); 688 if (ISPL(frame->tf_cs) == SEL_UPL) { 689 ss = frame->tf_ss & 0xffff; 690 esp = frame->tf_esp; 691 } else { 692 ss = GSEL(GDATA_SEL, SEL_KPL); 693 esp = (int)&frame->tf_esp; 694 } 695 printf("stack pointer = 0x%x:0x%x\n", ss, esp); 696 printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp); 697 printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n", 698 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 699 printf(" = DPL %d, pres %d, def32 %d, gran %d\n", 700 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32, 701 softseg.ssd_gran); 702 printf("processor eflags = "); 703 if (frame->tf_eflags & PSL_T) 704 printf("trace trap, "); 705 if (frame->tf_eflags & PSL_I) 706 printf("interrupt enabled, "); 707 if (frame->tf_eflags & PSL_NT) 708 printf("nested task, "); 709 if (frame->tf_eflags & PSL_RF) 710 printf("resume, "); 711 if (frame->tf_eflags & PSL_VM) 712 printf("vm86, "); 713 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12); 714 printf("current process = "); 715 if (curproc) { 716 printf("%lu (%s)\n", 717 (u_long)curproc->p_pid, curproc->p_comm ? 718 curproc->p_comm : ""); 719 } else { 720 printf("Idle\n"); 721 } 722 printf("interrupt mask = "); 723 if ((cpl & net_imask) == net_imask) 724 printf("net "); 725 if ((cpl & tty_imask) == tty_imask) 726 printf("tty "); 727 if ((cpl & bio_imask) == bio_imask) 728 printf("bio "); 729 if (cpl == 0) 730 printf("none"); 731 printf("\n"); 732 733 #ifdef KDB 734 if (kdb_trap(&psl)) 735 return; 736 #endif 737 #ifdef DDB 738 if (kdb_trap (type, 0, frame)) 739 return; 740 #endif 741 if (type <= MAX_TRAP_MSG) 742 panic(trap_msg[type]); 743 else 744 panic("unknown/reserved trap"); 745 } 746 747 /* 748 * Double fault handler. Called when a fault occurs while writing 749 * a frame for a trap/exception onto the stack. This usually occurs 750 * when the stack overflows (such is the case with infinite recursion, 751 * for example). 752 * 753 * XXX Note that the current PTD gets replaced by IdlePTD when the 754 * task switch occurs. This means that the stack that was active at 755 * the time of the double fault is not available at <kstack> unless 756 * the machine was idle when the double fault occurred. The downside 757 * of this is that "trace <ebp>" in ddb won't work. 758 */ 759 void 760 dblfault_handler() 761 { 762 struct pcb *pcb = curpcb; 763 764 if (pcb != NULL) { 765 printf("\nFatal double fault:\n"); 766 printf("eip = 0x%x\n", pcb->pcb_tss.tss_eip); 767 printf("esp = 0x%x\n", pcb->pcb_tss.tss_esp); 768 printf("ebp = 0x%x\n", pcb->pcb_tss.tss_ebp); 769 } 770 771 panic("double fault"); 772 } 773 774 /* 775 * Compensate for 386 brain damage (missing URKR). 776 * This is a little simpler than the pagefault handler in trap() because 777 * it the page tables have already been faulted in and high addresses 778 * are thrown out early for other reasons. 779 */ 780 int trapwrite(addr) 781 unsigned addr; 782 { 783 struct proc *p; 784 vm_offset_t va; 785 struct vmspace *vm; 786 int rv; 787 788 va = trunc_page((vm_offset_t)addr); 789 /* 790 * XXX - MAX is END. Changed > to >= for temp. fix. 
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if ((caddr_t)va >= vm->vm_maxsaddr
	    && (caddr_t)va < (caddr_t)USRSTACK) {
		if (!grow(p, va)) {
			--p->p_lock;
			return (1);
		}
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * System call request from POSIX system call gate interface to kernel.
 * Like trap(), the argument is passed by reference.
 */
void
syscall(frame)
	struct trapframe frame;
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct proc *p = curproc;
	u_quad_t sticks;
	int error;
	int args[8], rval[2];
	u_int code;

	sticks = p->p_sticks;
	if (ISPL(frame.tf_cs) != SEL_UPL)
		panic("syscall");

	p->p_md.md_regs = (int *)&frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	if (params && (i = callp->sy_narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
		goto bad;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
	rval[0] = 0;
	rval[1] = frame.tf_edx;

	error = (*callp->sy_call)(p, args, rval);

	switch (error) {

	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = rval[0];
		frame.tf_edx = rval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize)
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	if (frame.tf_eflags & PSL_T) {
		/* Traced syscall. */
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	userret(p, &frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
}