subr_trap.c: diff between revisions 0172c219f163c03bb69a33da7e3283ad1d2737e8 (trap.c,v 1.15) and b9d60b3f59eab96442d96d8102d2cbe3fdada7c3 (trap.c,v 1.16)
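In short: revision 1.16 adds a grow() helper and uses it to replace the duplicated inline stack-growth code (the "rude stack hack") in both trap() and trapwrite(); it also rejects user-mode faults on kernel addresses up front, folds the SLOCK window and the pte wiring into a single if (map != kernel_map) block, and deletes the dead #if 0 WP check and the unused pde_v() macro.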
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)trap.c	7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.15 1994/01/17 09:32:32 davidg Exp $
+ * $Id: trap.c,v 1.16 1994/02/01 23:07:35 davidg Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "isa.h"
#include "npx.h"

--- 33 unchanged lines hidden (view full) ---


#else /* not __GNUC__ */

u_short read_gs __P((void));
void write_gs __P((/* promoted u_short */ int gs));

#endif /* __GNUC__ */

+extern int grow(struct proc *,int);
+
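grow() itself is defined elsewhere and is not shown in this diff. Judging only from the inline code it replaces in trap() and trapwrite() below, its behavior is plausibly along these lines; every detail here is reconstructed, not the actual implementation:

/*
 * Hypothetical sketch of grow(), pieced together from the inline
 * stack-growth code that this revision deletes from trap() and
 * trapwrite().  Returns non-zero on success, 0 on failure.
 */
int
grow(struct proc *p, int va)
{
	struct vmspace *vm = p->p_vmspace;
	vm_offset_t v;
	int grow_amount;
	unsigned nss;

	/* Stack bytes needed to reach the faulting address. */
	nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);		/* would exceed the stack rlimit */

	if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) < nss) {
		/*
		 * Extend in DFLSSIZ-rounded steps for hysteresis; if a
		 * full step does not fit, extend to the maximum instead.
		 */
		grow_amount = roundup(nss - (vm->vm_ssize << PGSHIFT), DFLSSIZ);
		v = USRSTACK - roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) -
		    grow_amount;
		if (v < (vm_offset_t)vm->vm_maxsaddr) {
			v = (vm_offset_t)vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PGSHIFT);
		}
		if (vm_allocate(&vm->vm_map, &v, grow_amount, FALSE) !=
		    KERN_SUCCESS)
			return (0);
		/*
		 * The old inline code updated vm_ssize only after a
		 * successful fault; grow() may instead do it here.
		 * An assumption either way:
		 */
		vm->vm_ssize = nss >> PGSHIFT;
	}
	return (1);
}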
struct sysent sysent[];
int nsysent;
extern short cpl;
extern short netmask, ttymask, biomask;

#define MAX_TRAP_MSG 27
char *trap_msg[] = {
	"reserved addressing fault",		/* 0 T_RESADFLT */

--- 150 unchanged lines hidden (view full) ---

		i = SIGFPE;
		break;

	case T_ARITHTRAP|T_USER:
		ucode = code;
		i = SIGFPE;
		break;

-	case T_PAGEFLT:		/* allow page faults in kernel mode */
-#if 0
-		/* XXX - check only applies to 386's and 486's with WP off */
-		if (code & PGEX_P) goto we_re_toast;
-#endif
-
	pfault:
-		/* fall into */
+	case T_PAGEFLT:		/* allow page faults in kernel mode */
	case T_PAGEFLT|T_USER:	/* page fault */
	    {
-		register vm_offset_t va;
-		register struct vmspace *vm;
-		register vm_map_t map;
-		int rv=0;
+		vm_offset_t va;
+		struct vmspace *vm;
+		vm_map_t map = 0;
+		int rv = 0, oldflags;
		vm_prot_t ftype;
+		unsigned nss, v;
		extern vm_map_t kernel_map;
-		unsigned nss,v;
-		int oldflags;

		va = trunc_page((vm_offset_t)eva);
+
		/*
-		 * It is only a kernel address space fault iff:
-		 *	1. (type & T_USER) == 0 and
-		 *	2. pcb_onfault not set or
-		 *	3. pcb_onfault set but supervisor space fault
-		 * The last can occur during an exec() copyin where the
-		 * argument space is lazy-allocated.
+		 * Don't allow user-mode faults in kernel address space
		 */
+		if ((type == (T_PAGEFLT|T_USER)) && (va >= KERNBASE)) {
+			goto nogo;
+		}

		if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
			vm = 0;
			map = kernel_map;
		} else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		if (code & PGEX_W)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

-/*
- * keep swapout from messing with us during this
- * critical time.
- */
		oldflags = p->p_flag;
		if (map != kernel_map) {
-			p->p_flag |= SLOCK;
-		}
-		/*
-		 * XXX: rude hack to make stack limits "work"
-		 */
-
-		nss = 0;
-		if (map != kernel_map && (caddr_t)va >= vm->vm_maxsaddr
-		    && (caddr_t)va < (caddr_t)USRSTACK) {
-			caddr_t v;
-			nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
-			if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
-				rv = KERN_FAILURE;
-				p->p_flag &= ~SLOCK;
-				p->p_flag |= (oldflags & SLOCK);
-				goto nogo;
-			}
-
-			if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
-			    DFLSSIZ) < nss) {
-				int grow_amount;
-				/*
-				 * If necessary, grow the VM that the stack occupies
-				 * to allow for the rlimit. This allows us to not have
-				 * to allocate all of the VM up-front in execve (which
-				 * is expensive).
-				 * Grow the VM by the amount requested rounded up to
-				 * the nearest DFLSSIZ to provide for some hysteresis.
-				 */
-				grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
-				v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT,
-				    DFLSSIZ) - grow_amount;
-				/*
-				 * If there isn't enough room to extend by DFLSSIZ, then
-				 * just extend to the maximum size
-				 */
-				if (v < vm->vm_maxsaddr) {
-					v = vm->vm_maxsaddr;
-					grow_amount = MAXSSIZ - (vm->vm_ssize << PGSHIFT);
-				}
-				if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
-				    grow_amount, FALSE) != KERN_SUCCESS) {
-					p->p_flag &= ~SLOCK;
-					p->p_flag |= (oldflags & SLOCK);
-					goto nogo;
-				}
-			}
-		}
+			vm_offset_t pa;
+			vm_offset_t v = (vm_offset_t) vtopte(va);
+
+			/*
+			 * Keep swapout from messing with us during this
+			 * critical time.
+			 */
+			p->p_flag |= SLOCK;
+
+			/*
+			 * Grow the stack if necessary
+			 */
+			if ((caddr_t)va > vm->vm_maxsaddr
+			    && (caddr_t)va < (caddr_t)USRSTACK) {
+				if (!grow(p, va)) {
+					rv = KERN_FAILURE;
+					p->p_flag &= ~SLOCK;
+					p->p_flag |= (oldflags & SLOCK);
+					goto nogo;
+				}
+			}
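The arithmetic being deleted above (and presumably inherited by grow()) is worth a worked example: the stack only ever grows in DFLSSIZ-rounded steps, so a sequence of faults marching down the stack costs one vm_allocate() rather than one per page. A stand-alone user-space demonstration, with illustrative constants (USRSTACK, DFLSSIZ and the 600 KB fault depth below are made up for the example, not taken from this file):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define PGSHIFT		12
#define USRSTACK	0xFDBFE000u		/* illustrative address */
#define DFLSSIZ		(512u * 1024u)		/* illustrative default */

#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	unsigned va = USRSTACK - 600u * 1024u;	/* fault 600 KB below stack top */
	unsigned ssize_pages = 128;		/* current stack: 512 KB */
	unsigned nss, cur, grow_amount;

	nss = roundup(USRSTACK - va, PAGE_SIZE);	/* bytes needed */
	cur = ssize_pages << PGSHIFT;			/* bytes allocated */

	if (roundup(cur, DFLSSIZ) < nss) {
		/* Round the shortfall up to DFLSSIZ for hysteresis. */
		grow_amount = roundup(nss - cur, DFLSSIZ);
		printf("need %u, have %u: grow by %u\n", nss, cur, grow_amount);
	}
	return (0);
}

With these numbers it prints "need 614400, have 524288: grow by 524288": an 88 KB shortfall is rounded up to a full 512 KB step.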

+			/*
+			 * Check if page table is mapped, if not,
+			 * fault it first
+			 */

-		/* check if page table is mapped, if not, fault it first */
-#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
-		{
-
-		if (map != kernel_map) {
-			vm_offset_t pa;
-			vm_offset_t v = (vm_offset_t) vtopte(va);
-
			/* Fault the pte only if needed: */
			*(volatile char *)v += 0;

			/* Get the physical address: */
			pa = pmap_extract(vm_map_pmap(map), v);

			/* And wire the pte page at system vm level: */
			vm_page_wire(PHYS_TO_VM_PAGE(pa));

			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype, FALSE);

			/* Unwire the pte page: */
			vm_page_unwire(PHYS_TO_VM_PAGE(pa));

+			p->p_flag &= ~SLOCK;
+			p->p_flag |= (oldflags & SLOCK);
		} else {
			/*
			 * Since we know that kernel virtual address addresses
			 * always have pte pages mapped, we just have to fault
			 * the page.
			 */
			rv = vm_fault(map, va, ftype, FALSE);
		}

-		}
-		if (map != kernel_map) {
-			p->p_flag &= ~SLOCK;
-			p->p_flag |= (oldflags & SLOCK);
-		}
		if (rv == KERN_SUCCESS) {
-			/*
-			 * XXX: continuation of rude stack hack
-			 */
-			nss = nss >> PGSHIFT;
-			if (vm && nss > vm->vm_ssize) {
-				vm->vm_ssize = nss;
-			}
-			/*
-			 * va could be a page table address, if the fault
-			 */
			if (type == T_PAGEFLT)
				return;
			goto out;
		}
nogo:
		if (type == T_PAGEFLT) {
			if (curpcb->pcb_onfault)
				goto copyfault;
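A note on the wire/fault/unwire sequence that both revisions share: v is the address of the pte for va (via vtopte()), which on the i386 apparently lives inside the page tables' own recursive mapping, so the read-modify-write `*(volatile char *)v += 0;` forces the page-table page itself to be resident (re-entering this handler if it is not) before pmap_extract() looks up its physical address; wiring that page then keeps the pageout daemon from stealing it while vm_fault() runs, and it is unwired as soon as the fault completes.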

--- 149 unchanged lines hidden (view full) ---

 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	unsigned nss;
	struct proc *p;
-	vm_offset_t va;
+	vm_offset_t va, v;
	struct vmspace *vm;
	int oldflags;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END. Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);
-	/*
-	 * XXX: rude stack hack adapted from trap().
-	 */
-	nss = 0;
+
	p = curproc;
	vm = p->p_vmspace;

	oldflags = p->p_flag;
	p->p_flag |= SLOCK;

	if ((caddr_t)va >= vm->vm_maxsaddr
	    && (caddr_t)va < (caddr_t)USRSTACK) {
-		nss = roundup(((unsigned)USRSTACK - (unsigned)va), PAGE_SIZE);
-		if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
+		if (!grow(p, va)) {
			p->p_flag &= ~SLOCK;
			p->p_flag |= (oldflags & SLOCK);
			return (1);
		}
-
-		if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
-		    DFLSSIZ) < nss) {
-			caddr_t v;
-			int grow_amount;
-			/*
-			 * If necessary, grow the VM that the stack occupies
-			 * to allow for the rlimit. This allows us to not have
-			 * to allocate all of the VM up-front in execve (which
-			 * is expensive).
-			 * Grow the VM by the amount requested rounded up to
-			 * the nearest DFLSSIZ to provide for some hysteresis.
-			 */
-			grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
-			v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) -
-			    grow_amount;
-			/*
-			 * If there isn't enough room to extend by DFLSSIZ, then
-			 * just extend to the maximum size
-			 */
-			if (v < vm->vm_maxsaddr) {
-				v = vm->vm_maxsaddr;
-				grow_amount = MAXSSIZ - (vm->vm_ssize << PGSHIFT);
-			}
-			if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
-			    grow_amount, FALSE) != KERN_SUCCESS) {
-				p->p_flag &= ~SLOCK;
-				p->p_flag |= (oldflags & SLOCK);
-				return(1);
-			}
-			printf("new stack growth: %lx, %d\n", v, grow_amount);
-		}
	}

+	v = trunc_page(vtopte(va));

-	{
-		vm_offset_t v;
-		v = trunc_page(vtopte(va));
-		/*
-		 * wire the pte page
-		 */
-		if (va < USRSTACK) {
-			vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
-		}
-		/*
-		 * fault the data page
-		 */
-		rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
-		/*
-		 * unwire the pte page
-		 */
-		if (va < USRSTACK) {
-			vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
-		}
-	}
+	/*
+	 * wire the pte page
+	 */
+	if (va < USRSTACK) {
+		vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
+	}
+
+	/*
+	 * fault the data page
+	 */
+	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+
+	/*
+	 * unwire the pte page
+	 */
+	if (va < USRSTACK) {
+		vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
+	}
+
	p->p_flag &= ~SLOCK;
	p->p_flag |= (oldflags & SLOCK);

	if (rv != KERN_SUCCESS)
		return 1;
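Both functions bracket the fault with the same save/restore idiom around SLOCK, which this revision keeps. The point of restoring with `p->p_flag |= (oldflags & SLOCK)` instead of leaving the flag cleared is that a caller who already held SLOCK still holds it afterwards. A small user-space model of the idiom (the SLOCK bit value here is made up; the real one comes from the proc header):

#include <stdio.h>

#define SLOCK	0x00080000	/* illustrative value only */

int
main(void)
{
	int p_flag = SLOCK;	/* pretend the caller already holds SLOCK */
	int oldflags = p_flag;	/* save */

	p_flag |= SLOCK;	/* pin the process against swapout */
	/* ... the fault would be handled here ... */
	p_flag &= ~SLOCK;	/* drop our hold ... */
	p_flag |= (oldflags & SLOCK);	/* ... but put back the caller's */

	printf("SLOCK still held: %s\n", (p_flag & SLOCK) ? "yes" : "no");
	return (0);
}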
-	/*
-	 * XXX: continuation of rude stack hack
-	 */
-	nss >>= PGSHIFT;
-	if (nss > vm->vm_ssize) {
-		vm->vm_ssize = nss;
-	}
+
	return (0);
}

/*
 * syscall(frame):
 *	System call request from POSIX system call gate interface to kernel.
 *	Like trap(), argument is call by reference.
 */

--- 133 unchanged lines hidden ---