/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
 *      $Id: trap.c,v 1.106 1997/08/20 05:25:48 fsmp Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_ktrace.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/../isa/intr_machdep.h>
#include <machine/smp.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include "isa.h"
#include "npx.h"

extern struct i386tss common_tss;

int (*vm86_emulate) __P((struct vm86frame *));
int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int));
static void trap_fatal __P((struct trapframe *));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG            28
static char *trap_msg[] = {
        "",                                     /*  0 unused */
        "privileged instruction fault",         /*  1 T_PRIVINFLT */
        "",                                     /*  2 unused */
        "breakpoint instruction fault",         /*  3 T_BPTFLT */
        "",                                     /*  4 unused */
        "",                                     /*  5 unused */
        "arithmetic trap",                      /*  6 T_ARITHTRAP */
        "system forced exception",              /*  7 T_ASTFLT */
        "",                                     /*  8 unused */
        "general protection fault",             /*  9 T_PROTFLT */
        "trace trap",                           /* 10 T_TRCTRAP */
        "",                                     /* 11 unused */
        "page fault",                           /* 12 T_PAGEFLT */
        "",                                     /* 13 unused */
        "alignment fault",                      /* 14 T_ALIGNFLT */
        "",                                     /* 15 unused */
        "",                                     /* 16 unused */
        "",                                     /* 17 unused */
        "integer divide fault",                 /* 18 T_DIVIDE */
        "non-maskable interrupt trap",          /* 19 T_NMI */
        "overflow trap",                        /* 20 T_OFLOW */
        "FPU bounds check fault",               /* 21 T_BOUND */
        "FPU device not available",             /* 22 T_DNA */
        "double fault",                         /* 23 T_DOUBLEFLT */
        "FPU operand fetch fault",              /* 24 T_FPOPFLT */
        "invalid TSS fault",                    /* 25 T_TSSFLT */
        "segment not present fault",            /* 26 T_SEGNPFLT */
        "stack fault",                          /* 27 T_STKFLT */
        "machine check trap",                   /* 28 T_MCHK */
};

static void userret __P((struct proc *p, struct trapframe *frame,
                         u_quad_t oticks));
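
/*
 * Return-to-usermode work common to traps and system calls: deliver
 * any signals posted to the process, give up the CPU if a reschedule
 * was requested while we were in the kernel, and charge system time
 * to the profile if the process is being profiled.
 */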
static inline void
userret(p, frame, oticks)
        struct proc *p;
        struct trapframe *frame;
        u_quad_t oticks;
{
        int sig, s;

        while ((sig = CURSIG(p)) != 0)
                postsig(sig);

#if 0
        if (!want_resched &&
            (p->p_priority <= p->p_usrpri) &&
            (p->p_rtprio.type == RTP_PRIO_NORMAL)) {
                int newpriority;

                p->p_estcpu += 1;
                newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
                newpriority = min(newpriority, MAXPRI);
                p->p_usrpri = newpriority;
        }
#endif

        p->p_priority = p->p_usrpri;
        if (want_resched) {
                /*
                 * Since we are curproc, clock will normally just change
                 * our priority without moving us from one queue to another
                 * (since the running process is not on a queue).
                 * If that happened after we setrunqueue ourselves but
                 * before we mi_switch()'ed, we might not be on the queue
                 * indicated by our priority.
                 */
                s = splhigh();
                setrunqueue(p);
                p->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                splx(s);
                while ((sig = CURSIG(p)) != 0)
                        postsig(sig);
        }

        /*
         * Charge system time if profiling.
         */
        if (p->p_flag & P_PROFIL)
                addupc_task(p, frame->tf_eip,
                            (u_int)(p->p_sticks - oticks) * psratio);

        curpriority = p->p_priority;
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */
void
trap(frame)
        struct trapframe frame;
{
        struct proc *p = curproc;
        u_quad_t sticks = 0;
        int i = 0, ucode = 0, type, code;
#ifdef DEBUG
        u_long eva;
#endif

        type = frame.tf_trapno;
        code = frame.tf_err;

        if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
                /* user trap */

                sticks = p->p_sticks;
                p->p_md.md_regs = &frame;

                switch (type) {
                case T_PRIVINFLT:       /* privileged instruction fault */
                        ucode = type;
                        i = SIGILL;
                        break;

                case T_BPTFLT:          /* bpt instruction fault */
                case T_TRCTRAP:         /* trace trap */
                        frame.tf_eflags &= ~PSL_T;
                        i = SIGTRAP;
                        break;

                case T_ARITHTRAP:       /* arithmetic trap */
                        ucode = code;
                        i = SIGFPE;
                        break;

                case T_ASTFLT:          /* Allow process switch */
                        astoff();
                        cnt.v_soft++;
                        if (p->p_flag & P_OWEUPC) {
                                p->p_flag &= ~P_OWEUPC;
                                addupc_task(p, p->p_stats->p_prof.pr_addr,
                                            p->p_stats->p_prof.pr_ticks);
                        }
                        goto out;

                        /*
                         * The following two traps can happen in
                         * vm86 mode, and, if so, we want to handle
                         * them specially.
                         */
                case T_PROTFLT:         /* general protection fault */
                case T_STKFLT:          /* stack fault */
                        if (vm86_emulate && (frame.tf_eflags & PSL_VM)) {
                                i = (*vm86_emulate)((struct vm86frame *)&frame);
                                if (i == 0)
                                        goto out;
                                break;
                        }
                        /* FALL THROUGH */

                case T_SEGNPFLT:        /* segment not present fault */
                case T_TSSFLT:          /* invalid TSS fault */
                case T_DOUBLEFLT:       /* double fault */
                default:
                        ucode = code + BUS_SEGM_FAULT;
                        i = SIGBUS;
                        break;

                case T_PAGEFLT:         /* page fault */
                        i = trap_pfault(&frame, TRUE);
                        if (i == -1)
                                return;
                        if (i == 0)
                                goto out;

                        ucode = T_PAGEFLT;
                        break;

                case T_DIVIDE:          /* integer divide fault */
                        ucode = FPE_INTDIV_TRAP;
                        i = SIGFPE;
                        break;

#if NISA > 0
                case T_NMI:
#ifdef POWERFAIL_NMI
                        goto handle_powerfail;
#else /* !POWERFAIL_NMI */
#ifdef DDB
                        /* NMI can be hooked up to a pushbutton for debugging */
                        printf("NMI ... going to debugger\n");
                        if (kdb_trap(type, 0, &frame))
                                return;
#endif /* DDB */
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0)
                                return;
                        panic("NMI indicates hardware failure");
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

                case T_OFLOW:           /* integer overflow fault */
                        ucode = FPE_INTOVF_TRAP;
                        i = SIGFPE;
                        break;

                case T_BOUND:           /* bounds check fault */
                        ucode = FPE_SUBRNG_TRAP;
                        i = SIGFPE;
                        break;

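                        /*
                         * The FPU is marked not available.  Normally the
                         * FPU context just belongs to another process
                         * (lazy context switching) and npxdna() hands it
                         * back; failing that, fall back to a loaded
                         * floating point emulator, if any.
                         */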
                case T_DNA:
#if NNPX > 0
                        /* if a transparent fault (due to context switch "late") */
                        if (npxdna())
                                return;
#endif
                        if (!pmath_emulate) {
                                i = SIGFPE;
                                ucode = FPE_FPU_NP_TRAP;
                                break;
                        }
                        i = (*pmath_emulate)(&frame);
                        if (i == 0) {
                                if (!(frame.tf_eflags & PSL_T))
                                        return;
                                frame.tf_eflags &= ~PSL_T;
                                i = SIGTRAP;
                        }
                        /* else ucode = emulator_only_knows() XXX */
                        break;

                case T_FPOPFLT:         /* FPU operand fetch fault */
                        ucode = T_FPOPFLT;
                        i = SIGILL;
                        break;
                }
        } else {
                /* kernel trap */

                switch (type) {
                case T_PAGEFLT:         /* page fault */
                        (void) trap_pfault(&frame, FALSE);
                        return;

                case T_DNA:
#if NNPX > 0
                        /*
                         * The kernel is apparently using npx for copying.
                         * XXX this should be fatal unless the kernel has
                         * registered such use.
                         */
                        if (npxdna())
                                return;
#endif
                        break;

                case T_PROTFLT:         /* general protection fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        /*
                         * Invalid segment selectors and out of bounds
                         * %eip's and %esp's can be set up in user mode.
                         * This causes a fault in kernel mode when the
                         * kernel tries to return to user mode.  We want
                         * to get this fault so that we can fix the
                         * problem here and not have to check all the
                         * selectors and pointers when the user changes
                         * them.
                         */
#define MAYBE_DORETI_FAULT(where, whereto)                              \
        do {                                                            \
                if (frame.tf_eip == (int)where) {                       \
                        frame.tf_eip = (int)whereto;                    \
                        return;                                         \
                }                                                       \
        } while (0)

                        if (intr_nesting_level == 0) {
                                /*
                                 * Invalid %fs's and %gs's can be created using
                                 * procfs or PT_SETREGS or by invalidating the
                                 * underlying LDT entry.  This causes a fault
                                 * in kernel mode when the kernel attempts to
                                 * switch contexts.  Lose the bad context
                                 * (XXX) so that we can continue, and generate
                                 * a signal.
                                 */
                                if (frame.tf_eip == (int)cpu_switch_load_fs) {
                                        curpcb->pcb_fs = 0;
                                        psignal(p, SIGBUS);
                                        return;
                                }
                                if (frame.tf_eip == (int)cpu_switch_load_gs) {
                                        curpcb->pcb_gs = 0;
                                        psignal(p, SIGBUS);
                                        return;
                                }
                                MAYBE_DORETI_FAULT(doreti_iret,
                                                   doreti_iret_fault);
                                MAYBE_DORETI_FAULT(doreti_popl_ds,
                                                   doreti_popl_ds_fault);
                                MAYBE_DORETI_FAULT(doreti_popl_es,
                                                   doreti_popl_es_fault);
                                if (curpcb && curpcb->pcb_onfault) {
                                        frame.tf_eip = (int)curpcb->pcb_onfault;
                                        return;
                                }
                        }
                        break;

                case T_TSSFLT:
                        /*
                         * PSL_NT can be set in user mode and isn't cleared
                         * automatically when the kernel is entered.  This
                         * causes a TSS fault when the kernel attempts to
                         * `iret' because the TSS link is uninitialized.  We
                         * want to get this fault so that we can fix the
                         * problem here and not every time the kernel is
                         * entered.
                         */
                        if (frame.tf_eflags & PSL_NT) {
                                frame.tf_eflags &= ~PSL_NT;
                                return;
                        }
                        break;

                case T_TRCTRAP:         /* trace trap */
                        if (frame.tf_eip == (int)IDTVEC(syscall)) {
                                /*
                                 * We've just entered system mode via the
                                 * syscall lcall.  Continue single stepping
                                 * silently until the syscall handler has
                                 * saved the flags.
                                 */
                                return;
                        }
                        if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
                                /*
                                 * The syscall handler has now saved the
                                 * flags.  Stop single stepping it.
                                 */
                                frame.tf_eflags &= ~PSL_T;
                                return;
                        }
                        /*
                         * Fall through.
                         */
                case T_BPTFLT:
                        /*
                         * If DDB is enabled, let it handle the debugger trap.
                         * Otherwise, debugger traps "can't happen".
                         */
#ifdef DDB
                        if (kdb_trap(type, 0, &frame))
                                return;
#endif
                        break;

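                /*
                 * NMI in kernel mode.  With POWERFAIL_NMI, treat it as a
                 * power-fail warning and rate-limit the alert to one per
                 * 10 seconds; otherwise offer it to DDB first, then let
                 * isa_nmi() decode parity and other hardware failures.
                 */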
#if NISA > 0
                case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ     1193182
#endif
        handle_powerfail:
                {
                        static unsigned lastalert = 0;

                        if (time.tv_sec - lastalert > 10) {
                                log(LOG_WARNING, "NMI: power fail\n");
                                sysbeep(TIMER_FREQ/880, hz);
                                lastalert = time.tv_sec;
                        }
                        return;
                }
#else /* !POWERFAIL_NMI */
#ifdef DDB
                        /* NMI can be hooked up to a pushbutton for debugging */
                        printf("NMI ... going to debugger\n");
                        if (kdb_trap(type, 0, &frame))
                                return;
#endif /* DDB */
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0)
                                return;
                        /* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
                }

                trap_fatal(&frame);
                return;
        }

        trapsignal(p, i, ucode);

#ifdef DEBUG
        eva = rcr2();
        if (type <= MAX_TRAP_MSG) {
                uprintf("fatal process exception: %s",
                        trap_msg[type]);
                if ((type == T_PAGEFLT) || (type == T_PROTFLT))
                        uprintf(", fault VA = 0x%x", eva);
                uprintf("\n");
        }
#endif

out:
        userret(p, &frame, sticks);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode)
        struct trapframe *frame;
        int usermode;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        int eva;
        struct proc *p = curproc;

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_READ | VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        eva = rcr2();
        va = trunc_page((vm_offset_t)eva);

        if (va < VM_MIN_KERNEL_ADDRESS) {
                vm_offset_t v;
                vm_page_t mpte;

                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
                     (intr_nesting_level != 0 || curpcb == NULL ||
                      curpcb->pcb_onfault == NULL))) {
                        trap_fatal(frame);
                        return (-1);
                }

                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL.  If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                vm = p->p_vmspace;
                if (vm == NULL)
                        goto nogo;

                map = &vm->vm_map;

                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                ++p->p_lock;

                /*
                 * Grow the stack if necessary.
                 */
                if ((caddr_t)va > vm->vm_maxsaddr
                    && (caddr_t)va < (caddr_t)USRSTACK) {
                        if (!grow(p, va)) {
                                rv = KERN_FAILURE;
                                --p->p_lock;
                                goto nogo;
                        }
                }

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype,
                              (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);

                --p->p_lock;
        } else {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                /*
                 * Since we know that kernel virtual addresses always
                 * have pte pages mapped, we just have to fault the page.
                 */
                rv = vm_fault(kernel_map, va, ftype, FALSE);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
                        frame->tf_eip = (int)curpcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif
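
/*
 * Page fault handler actually in use.  Pick the kernel map or the
 * faulting process's map, grow the user stack if the faulting address
 * lies in the stack area, and call vm_fault().  Returns 0 if the fault
 * was handled, -1 if it was fatal, or the signal (SIGBUS/SIGSEGV) to
 * post to the process.
 */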
int
trap_pfault(frame, usermode)
        struct trapframe *frame;
        int usermode;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        int eva;
        struct proc *p = curproc;

        eva = rcr2();
        va = trunc_page((vm_offset_t)eva);

        if (va >= KERNBASE) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                map = kernel_map;
        } else {
                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL.  If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                if (p != NULL)
                        vm = p->p_vmspace;

                if (vm == NULL)
                        goto nogo;

                map = &vm->vm_map;
        }

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_READ | VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        if (map != kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                ++p->p_lock;

                /*
                 * Grow the stack if necessary.
                 */
                if ((caddr_t)va > vm->vm_maxsaddr
                    && (caddr_t)va < (caddr_t)USRSTACK) {
                        if (!grow(p, va)) {
                                rv = KERN_FAILURE;
                                --p->p_lock;
                                goto nogo;
                        }
                }

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype,
                              (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);

                --p->p_lock;
        } else {
                /*
                 * Don't have to worry about process locking or stacks
                 * in the kernel.
                 */
                rv = vm_fault(map, va, ftype, FALSE);
        }

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
                        frame->tf_eip = (int)curpcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
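
/*
 * Last-resort reporting for a fatal trap: dump the faulting address,
 * registers, code segment descriptor, eflags, and interrupt mask to
 * the console, give the kernel debugger a chance to intervene, and
 * panic.
 */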
static void
trap_fatal(frame)
        struct trapframe *frame;
{
        int code, type, eva, ss, esp;
        struct soft_segment_descriptor softseg;

        code = frame->tf_err;
        type = frame->tf_trapno;
        eva = rcr2();
        sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

        if (type <= MAX_TRAP_MSG)
                printf("\n\nFatal trap %d: %s while in %s mode\n",
                       type, trap_msg[type],
                       frame->tf_eflags & PSL_VM ? "vm86" :
                       ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
        printf("cpuid = %d\n", cpuid);
#endif
        if (type == T_PAGEFLT) {
                printf("fault virtual address   = 0x%x\n", eva);
                printf("fault code              = %s %s, %s\n",
                       code & PGEX_U ? "user" : "supervisor",
                       code & PGEX_W ? "write" : "read",
                       code & PGEX_P ? "protection violation" :
                                       "page not present");
        }
        printf("instruction pointer     = 0x%x:0x%x\n",
               frame->tf_cs & 0xffff, frame->tf_eip);
        if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
                ss = frame->tf_ss & 0xffff;
                esp = frame->tf_esp;
        } else {
                ss = GSEL(GDATA_SEL, SEL_KPL);
                esp = (int)&frame->tf_esp;
        }
        printf("stack pointer           = 0x%x:0x%x\n", ss, esp);
        printf("frame pointer           = 0x%x:0x%x\n", ss, frame->tf_ebp);
        printf("code segment            = base 0x%x, limit 0x%x, type 0x%x\n",
               softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
        printf("                        = DPL %d, pres %d, def32 %d, gran %d\n",
               softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
               softseg.ssd_gran);
        printf("processor eflags        = ");
        if (frame->tf_eflags & PSL_T)
                printf("trace trap, ");
        if (frame->tf_eflags & PSL_I)
                printf("interrupt enabled, ");
        if (frame->tf_eflags & PSL_NT)
                printf("nested task, ");
        if (frame->tf_eflags & PSL_RF)
                printf("resume, ");
        if (frame->tf_eflags & PSL_VM)
                printf("vm86, ");
        printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
        printf("current process         = ");
        if (curproc) {
                printf("%lu (%s)\n",
                       (u_long)curproc->p_pid, curproc->p_comm ?
                       curproc->p_comm : "");
        } else {
                printf("Idle\n");
        }
        printf("interrupt mask          = ");
        if ((cpl & net_imask) == net_imask)
                printf("net ");
        if ((cpl & tty_imask) == tty_imask)
                printf("tty ");
        if ((cpl & bio_imask) == bio_imask)
                printf("bio ");
        if (cpl == 0)
                printf("none");
#ifdef SMP
        /*
         * XXX FIXME:
         *      we probably SHOULD have stopped the other CPUs before now!
         *      another CPU COULD have been touching cpl at this moment...
         */
        printf(" <- SMP: XXX");
#endif
        printf("\n");

#ifdef KDB
        if (kdb_trap(&psl))
                return;
#endif
#ifdef DDB
        if (kdb_trap(type, 0, frame))
                return;
#endif
        printf("trap number             = %d\n", type);
        if (type <= MAX_TRAP_MSG)
                panic(trap_msg[type]);
        else
                panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
        printf("\nFatal double fault:\n");
        printf("eip = 0x%x\n", common_tss.tss_eip);
        printf("esp = 0x%x\n", common_tss.tss_esp);
        printf("ebp = 0x%x\n", common_tss.tss_ebp);
#ifdef SMP
        printf("cpuid = %d\n", cpuid);
#endif
        panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the page fault handler in trap()
 * because the page tables have already been faulted in and high
 * addresses are thrown out early for other reasons.
 */
int
trapwrite(addr)
        unsigned addr;
{
        struct proc *p;
        vm_offset_t va;
        struct vmspace *vm;
        int rv;

        va = trunc_page((vm_offset_t)addr);
        /*
         * XXX - MAX is END.  Changed > to >= for temp. fix.
         */
        if (va >= VM_MAXUSER_ADDRESS)
                return (1);

        p = curproc;
        vm = p->p_vmspace;

        ++p->p_lock;

        if ((caddr_t)va >= vm->vm_maxsaddr
            && (caddr_t)va < (caddr_t)USRSTACK) {
                if (!grow(p, va)) {
                        --p->p_lock;
                        return (1);
                }
        }

        /*
         * Fault the data page.
         */
        rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE,
                      VM_FAULT_DIRTY);

        --p->p_lock;

        if (rv != KERN_SUCCESS)
                return (1);

        return (0);
}

/*
 * System call request from POSIX system call gate interface to kernel.
 * Like trap(), argument is call by reference.
 */
void
syscall(frame)
        struct trapframe frame;
{
        caddr_t params;
        int i;
        struct sysent *callp;
        struct proc *p = curproc;
        u_quad_t sticks;
        int error;
        int args[8], rval[2];
        u_int code;

        sticks = p->p_sticks;
        if (ISPL(frame.tf_cs) != SEL_UPL)
                panic("syscall");

        p->p_md.md_regs = &frame;
        params = (caddr_t)frame.tf_esp + sizeof(int);
        code = frame.tf_eax;
        if (p->p_sysent->sv_prepsyscall) {
                (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
        } else {
                /*
                 * Need to check if this is a 32 bit or 64 bit syscall.
                 */
                if (code == SYS_syscall) {
                        /*
                         * Code is first argument, followed by actual args.
                         */
                        code = fuword(params);
                        params += sizeof(int);
                } else if (code == SYS___syscall) {
                        /*
                         * Like syscall, but code is a quad, so as to maintain
                         * quad alignment for the rest of the arguments.
                         */
                        code = fuword(params);
                        params += sizeof(quad_t);
                }
        }

        if (p->p_sysent->sv_mask)
                code &= p->p_sysent->sv_mask;

        if (code >= p->p_sysent->sv_size)
                callp = &p->p_sysent->sv_table[0];
        else
                callp = &p->p_sysent->sv_table[code];

        if (params && (i = callp->sy_narg * sizeof(int)) &&
            (error = copyin(params, (caddr_t)args, (u_int)i))) {
#ifdef KTRACE
                if (KTRPOINT(p, KTR_SYSCALL))
                        ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
                goto bad;
        }
#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSCALL))
                ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
        rval[0] = 0;
        rval[1] = frame.tf_edx;

        error = (*callp->sy_call)(p, args, rval);
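
        /*
         * Map the result onto the register/carry-flag convention the
         * user-mode syscall stubs expect: on success clear PSL_C and
         * return values in %eax/%edx; on error set PSL_C and put the
         * (possibly ABI-translated) error number in %eax.
         */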
        switch (error) {
        case 0:
                /*
                 * Reinitialize proc pointer `p' as it may be different
                 * if this is a child returning from fork syscall.
                 */
                p = curproc;
                frame.tf_eax = rval[0];
                frame.tf_edx = rval[1];
                frame.tf_eflags &= ~PSL_C;
                break;

        case ERESTART:
                /*
                 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
                 * int 0x80 is 2 bytes.  We saved this in tf_err.
                 */
                frame.tf_eip -= frame.tf_err;
                break;

        case EJUSTRETURN:
                break;

        default:
bad:
                if (p->p_sysent->sv_errsize) {
                        if (error >= p->p_sysent->sv_errsize)
                                error = -1;     /* XXX */
                        else
                                error = p->p_sysent->sv_errtbl[error];
                }
                frame.tf_eax = error;
                frame.tf_eflags |= PSL_C;
                break;
        }

        if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
                /* Traced syscall. */
                frame.tf_eflags &= ~PSL_T;
                trapsignal(p, SIGTRAP, 0);
        }

        userret(p, &frame, sticks);

#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSRET))
                ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 */
void
fork_return(p, frame)
        struct proc *p;
        struct trapframe frame;
{
        frame.tf_eax = 0;               /* Child returns zero */
        frame.tf_eflags &= ~PSL_C;      /* success */
        frame.tf_edx = 1;

        userret(p, &frame, 0);
#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSRET))
                ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
}