sys_machdep.c (02b0a160dcf4c6458cfcc86bc11aec043439bf87) vs. sys_machdep.c (05dfa22fe94a29b531209d56817b1e4ef4cd508b)

Side-by-side comparison of two revisions of sys_machdep.c. The revisions differ in three related areas: one manages the LDT reference count through refcount(9) (<sys/refcount.h>, refcount_init(), refcount_release()) while the other manipulates ldt_refcnt directly under dt_lock; the set_user_ldt_rv() SMP rendezvous callback is keyed on the thread in the former and on the process vmspace in the latter; and i386_ldt_grow() is structured differently, with the NULL_LDT_BASE/old_ldt_base bookkeeping present only in the latter. Unchanged context appears once below; where the revisions diverge, each side's lines appear under a "--- revision with/without <sys/refcount.h> ---" marker.
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 28 unchanged lines hidden (view full) ---

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>

--- revision with <sys/refcount.h> only ---
#include <sys/refcount.h>

--- both revisions ---
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

--- 7 unchanged lines hidden (view full) ---

#include <vm/vm_kern.h>	/* for kernel_map */

#define MAX_LD 8192
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num) ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
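--- standalone illustration (not part of either revision) ---
The sizing macros above work in whole pages: a segment descriptor is 8 bytes, so LD_PER_PAGE (512) descriptors fill one 4 KB page, NEW_MAX_LD() rounds a requested descriptor count up to a multiple of 512 (note that, as written, an exact multiple of 512 is still bumped up by one further page, because the macro adds LD_PER_PAGE rather than LD_PER_PAGE - 1), and SIZE_FROM_LARGEST_LD() converts that count to bytes with << 3. A small userland demonstration of the arithmetic:

#include <stdio.h>

#define LD_PER_PAGE 512
#define NEW_MAX_LD(num) ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)

int
main(void)
{
        int want[] = { 1, 20, 512, 513, 8000 };
        size_t i;

        for (i = 0; i < sizeof(want) / sizeof(want[0]); i++)
                printf("%4d descriptors -> table of %4d entries, %6d bytes\n",
                    want[i], NEW_MAX_LD(want[i]),
                    SIZE_FROM_LARGEST_LD(want[i]));
        return (0);
}
--- end illustration ---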
--- revision with <sys/refcount.h> ---
static int i386_set_ldt_data(struct thread *, int start, int num,
    union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);
#ifdef SMP
static void set_user_ldt_rv(struct thread *);
#endif

--- revision without <sys/refcount.h> ---
#ifdef SMP
#define NULL_LDT_BASE ((caddr_t)NULL)

static void set_user_ldt_rv(struct vmspace *vmsp);
#endif

static int i386_set_ldt_data(struct thread *, int start, int num,
    union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);

--- both revisions ---
#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
        int op;
        char *parms;
};
#endif

--- 288 unchanged lines hidden (view full) ---

        lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
        PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
        if (dtlocked)
                mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
static void
--- revision with <sys/refcount.h> ---
set_user_ldt_rv(struct thread *td)
{

        if (td->td_proc != curthread->td_proc)
                return;

        set_user_ldt(&td->td_proc->p_md);
}
#endif

/*
 * dt_lock must be held. Returns with dt_lock held.

--- revision without <sys/refcount.h> ---
set_user_ldt_rv(struct vmspace *vmsp)
{
        struct thread *td;

        td = curthread;
        if (vmsp != td->td_proc->p_vmspace)
                return;

        set_user_ldt(&td->td_proc->p_md);
}
#endif

/*
 * dt_lock must be held.

--- both revisions ---

--- 10 unchanged lines hidden (view full) ---

        new_ldt->ldt_len = len = NEW_MAX_LD(len);
        new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
            len * sizeof(union descriptor));
        if (new_ldt->ldt_base == NULL) {
                FREE(new_ldt, M_SUBPROC);
                return NULL;
        }

--- revision with <sys/refcount.h> ---
        refcount_init(&new_ldt->ldt_refcnt, 1);

--- revision without <sys/refcount.h> ---
        new_ldt->ldt_refcnt = 1;
--- both revisions ---
        new_ldt->ldt_active = 0;

        mtx_lock_spin(&dt_lock);
        gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
        gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
        ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

        if ((pldt = mdp->md_ldt) != NULL) {

--- 21 unchanged lines hidden (view full) ---

                return;

        if (td == PCPU_GET(curthread)) {
                lldt(_default_ldt);
                PCPU_SET(currentldt, _default_ldt);
        }

        mdp->md_ldt = NULL;

--- revision with <sys/refcount.h> ---
        mtx_unlock_spin(&dt_lock);
        if (refcount_release(&pldt->ldt_refcnt)) {
                kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
                    pldt->ldt_len * sizeof(union descriptor));
                FREE(pldt, M_SUBPROC);
        }
}

--- revision without <sys/refcount.h> ---
        if (--pldt->ldt_refcnt == 0) {
                mtx_unlock_spin(&dt_lock);
                kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
                    pldt->ldt_len * sizeof(union descriptor));
                FREE(pldt, M_SUBPROC);
        } else
                mtx_unlock_spin(&dt_lock);
}
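--- standalone illustration (not part of either revision) ---
Both variants above implement the same rule: whichever caller drops the last reference to the proc_ldt frees it. They differ only in whether the count is decremented directly while dt_lock is held or released through refcount(9) after the lock is dropped. A minimal userland sketch of that last-reference-frees pattern, using C11 atomics rather than the kernel's refcount(9) API; the obj_* names are illustrative only:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_uint refcnt;
        char payload[64];
};

static struct obj *
obj_alloc(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o != NULL)
                atomic_init(&o->refcnt, 1);     /* creator holds the first reference */
        return (o);
}

static void
obj_hold(struct obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);
}

static void
obj_drop(struct obj *o)
{
        /* fetch_sub returns the previous value; 1 means this was the last reference. */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                free(o);
}

int
main(void)
{
        struct obj *o = obj_alloc();

        if (o == NULL)
                return (1);
        obj_hold(o);            /* a second user takes a reference */
        obj_drop(o);            /* first drop: object survives */
        obj_drop(o);            /* last drop: object is freed */
        printf("done\n");
        return (0);
}
--- end illustration ---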

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
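--- standalone illustration (not part of either revision) ---
The copyout() discussed in the comment hands userland raw i386 descriptors in the CPU's own format. A hedged sketch of the consumer side on FreeBSD/i386, assuming the i386_get_ldt(3) libc wrapper around sysarch(2); illustration only, and see the manual page for the exact return-value convention:

#include <sys/types.h>
#include <machine/segments.h>   /* union descriptor */
#include <machine/sysarch.h>    /* i386_get_ldt() */
#include <stdio.h>
#include <string.h>

int
main(void)
{
        union descriptor descs[8];
        unsigned char raw[sizeof(union descriptor)];
        int i, n;

        memset(descs, 0, sizeof(descs));
        /* Fetch the first 8 LDT slots in raw, CPU-defined format. */
        n = i386_get_ldt(0, descs, 8);
        if (n < 0) {
                perror("i386_get_ldt");
                return (1);
        }
        for (i = 0; i < n && i < 8; i++) {
                memcpy(raw, &descs[i], sizeof(raw));
                printf("slot %d:", i);
                for (int j = (int)sizeof(raw) - 1; j >= 0; j--)
                        printf(" %02x", raw[j]);
                printf("\n");
        }
        return (0);
}
--- end illustration ---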

--- 222 unchanged lines hidden (view full) ---

            num * sizeof(union descriptor));
        return (0);
}

static int
i386_ldt_grow(struct thread *td, int len)
{
        struct mdproc *mdp = &td->td_proc->p_md;
--- revision with <sys/refcount.h> ---
        struct proc_ldt *pldt;
        caddr_t old_ldt_base;
        int old_ldt_len;

--- revision without <sys/refcount.h> ---
        struct proc_ldt *new_ldt, *pldt;
        caddr_t old_ldt_base = NULL_LDT_BASE;
        int old_ldt_len = 0;
--- both revisions ---

        mtx_assert(&dt_lock, MA_OWNED);

        if (len > MAX_LD)
                return (ENOMEM);
        if (len < NLDT + 1)
                len = NLDT + 1;

        /* Allocate a user ldt. */
        if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {

--- revision with <sys/refcount.h> only ---
                struct proc_ldt *new_ldt;

--- both revisions ---
                new_ldt = user_ldt_alloc(mdp, len);
                if (new_ldt == NULL)
                        return (ENOMEM);
                pldt = mdp->md_ldt;

                if (pldt != NULL) {
--- revision with <sys/refcount.h> ---
                        if (new_ldt->ldt_len > pldt->ldt_len) {
                                old_ldt_base = pldt->ldt_base;
                                old_ldt_len = pldt->ldt_len;
                                pldt->ldt_sd = new_ldt->ldt_sd;
                                pldt->ldt_base = new_ldt->ldt_base;
                                pldt->ldt_len = new_ldt->ldt_len;
                                mtx_unlock_spin(&dt_lock);
                                kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
                                    old_ldt_len * sizeof(union descriptor));
                                FREE(new_ldt, M_SUBPROC);
                                mtx_lock_spin(&dt_lock);
                        } else {
                                /*
                                 * If other threads already did the work,
                                 * do nothing.
                                 */
                                mtx_unlock_spin(&dt_lock);
                                kmem_free(kernel_map,
                                    (vm_offset_t)new_ldt->ldt_base,
                                    new_ldt->ldt_len * sizeof(union descriptor));
                                FREE(new_ldt, M_SUBPROC);
                                mtx_lock_spin(&dt_lock);
                                return (0);
                        }

--- revision without <sys/refcount.h> ---
                        if (new_ldt->ldt_len <= pldt->ldt_len) {
                                /*
                                 * We just lost the race for allocation, so
                                 * free the new object and return.
                                 */
                                mtx_unlock_spin(&dt_lock);
                                kmem_free(kernel_map,
                                    (vm_offset_t)new_ldt->ldt_base,
                                    new_ldt->ldt_len * sizeof(union descriptor));
                                FREE(new_ldt, M_SUBPROC);
                                mtx_lock_spin(&dt_lock);
                                return (0);
                        }

                        /*
                         * We have to substitute the current LDT entry for
                         * curproc with the new one since its size grew.
                         */
                        old_ldt_base = pldt->ldt_base;
                        old_ldt_len = pldt->ldt_len;
                        pldt->ldt_sd = new_ldt->ldt_sd;
                        pldt->ldt_base = new_ldt->ldt_base;
                        pldt->ldt_len = new_ldt->ldt_len;
--- both revisions ---
                } else
                        mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
                /*
                 * Signal other cpus to reload ldt. We need to unlock dt_lock
                 * here because other CPU will contest on it since their
                 * curthreads won't hold the lock and will block when trying
                 * to acquire it.
                 */
                mtx_unlock_spin(&dt_lock);
                smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
--- revision with <sys/refcount.h> ---
                    NULL, td);
                mtx_lock_spin(&dt_lock);
#else
                set_user_ldt(mdp);
#endif
        }
        return (0);
}

--- revision without <sys/refcount.h> ---
                    NULL, td->td_proc->p_vmspace);
#else
                set_user_ldt(td);
                mtx_unlock_spin(&dt_lock);
#endif
                if (old_ldt_base != NULL_LDT_BASE) {
                        kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
                            old_ldt_len * sizeof(union descriptor));
                        FREE(new_ldt, M_SUBPROC);
                }
                mtx_lock_spin(&dt_lock);
        }
        return (0);
}
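--- standalone illustration (not part of either revision) ---
Both versions of i386_ldt_grow() follow the same overall pattern: the replacement descriptor table is built while dt_lock is dropped (the allocation and free calls cannot be made under the spin mutex, and the rendezvous needs the lock free), and a caller that retakes the lock only to find the table already large enough discards its copy and returns. A standalone sketch of that grow-with-retry pattern, using a pthread mutex in place of the dt_lock spin mutex; table_grow() and friends are illustrative names, not from the source:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;              /* grows only; protected by table_lock */
static size_t table_len;        /* current capacity, in entries */

static int
table_grow(size_t want)
{
        int *new_tab, *old_tab;

        pthread_mutex_lock(&table_lock);
        if (want <= table_len) {                /* already big enough */
                pthread_mutex_unlock(&table_lock);
                return (0);
        }
        pthread_mutex_unlock(&table_lock);

        /* Allocate the replacement without holding the lock. */
        new_tab = calloc(want, sizeof(*new_tab));
        if (new_tab == NULL)
                return (-1);

        pthread_mutex_lock(&table_lock);
        if (want <= table_len) {
                /* Lost the race: another thread grew the table first. */
                pthread_mutex_unlock(&table_lock);
                free(new_tab);
                return (0);
        }
        if (table != NULL)
                memcpy(new_tab, table, table_len * sizeof(*new_tab));
        old_tab = table;
        table = new_tab;
        table_len = want;
        pthread_mutex_unlock(&table_lock);

        /* Free the superseded copy outside the lock, as the kernel code does. */
        free(old_tab);
        return (0);
}

int
main(void)
{
        table_grow(16);
        table_grow(8);          /* no-op: capacity is already sufficient */
        table_grow(64);
        free(table);
        return (0);
}
--- end illustration ---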