Diff of sys_machdep.c between revision 0ad5e7f3263f6ec29977404631ace10bc813d306 (old) and revision 02b0a160dcf4c6458cfcc86bc11aec043439bf87 (new)
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 414 unchanged lines hidden (view full) ---

423 len * sizeof(union descriptor));
424 } else
425 bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
426
427 return (new_ldt);
428}
429
430/*
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 414 unchanged lines hidden (view full) ---

423 len * sizeof(union descriptor));
424 } else
425 bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
426
427 return (new_ldt);
428}
429
430/*
431 * Must be called with dt_lock held.
431 * Must be called with dt_lock held. Returns with dt_lock unheld.
432 */
433void
434user_ldt_free(struct thread *td)
435{
436 struct mdproc *mdp = &td->td_proc->p_md;
437 struct proc_ldt *pldt;
438
439 mtx_assert(&dt_lock, MA_OWNED);
440 if ((pldt = mdp->md_ldt) == NULL)
441 return;
442
443 if (td == PCPU_GET(curthread)) {
444 lldt(_default_ldt);
445 PCPU_SET(currentldt, _default_ldt);
446 }
447
448 mdp->md_ldt = NULL;
432 */
433void
434user_ldt_free(struct thread *td)
435{
436 struct mdproc *mdp = &td->td_proc->p_md;
437 struct proc_ldt *pldt;
438
/* Caller must hold dt_lock (asserted below); this function releases it. */
439 mtx_assert(&dt_lock, MA_OWNED);
440 if ((pldt = mdp->md_ldt) == NULL)
441 return;
442
/*
 * If we are freeing the LDT of the running thread, switch this CPU
 * back to the default LDT before the custom one is torn down.
 */
443 if (td == PCPU_GET(curthread)) {
444 lldt(_default_ldt);
445 PCPU_SET(currentldt, _default_ldt);
446 }
447
448 mdp->md_ldt = NULL;
/*
 * Drop the spin lock before freeing: kmem_free()/FREE() must not be
 * called with a spin mutex held. Returns with dt_lock unheld.
 */
449 mtx_unlock_spin(&dt_lock);
/* Only the last reference actually frees the descriptor table. */
449 if (refcount_release(&pldt->ldt_refcnt)) {
450 kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
451 pldt->ldt_len * sizeof(union descriptor));
452 FREE(pldt, M_SUBPROC);
453 }
454}
455
456/*

--- 239 unchanged lines hidden (view full) ---

696 mtx_assert(&dt_lock, MA_OWNED);
697
698 if (len > MAX_LD)
699 return (ENOMEM);
700 if (len < NLDT + 1)
701 len = NLDT + 1;
702
703 /* Allocate a user ldt. */
450 if (refcount_release(&pldt->ldt_refcnt)) {
451 kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
452 pldt->ldt_len * sizeof(union descriptor));
453 FREE(pldt, M_SUBPROC);
454 }
455}
456
457/*

--- 239 unchanged lines hidden (view full) ---

697 mtx_assert(&dt_lock, MA_OWNED);
698
699 if (len > MAX_LD)
700 return (ENOMEM);
701 if (len < NLDT + 1)
702 len = NLDT + 1;
703
704 /* Allocate a user ldt. */
704 if ((pldt = mdp->md_ldt) != NULL || len > pldt->ldt_len) {
705 if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
705 struct proc_ldt *new_ldt;
706
707 new_ldt = user_ldt_alloc(mdp, len);
708 if (new_ldt == NULL)
709 return (ENOMEM);
710 pldt = mdp->md_ldt;
711
712 if (pldt != NULL) {
713 if (new_ldt->ldt_len > pldt->ldt_len) {
714 old_ldt_base = pldt->ldt_base;
715 old_ldt_len = pldt->ldt_len;
716 pldt->ldt_sd = new_ldt->ldt_sd;
717 pldt->ldt_base = new_ldt->ldt_base;
718 pldt->ldt_len = new_ldt->ldt_len;
706 struct proc_ldt *new_ldt;
707
708 new_ldt = user_ldt_alloc(mdp, len);
709 if (new_ldt == NULL)
710 return (ENOMEM);
711 pldt = mdp->md_ldt;
712
713 if (pldt != NULL) {
714 if (new_ldt->ldt_len > pldt->ldt_len) {
715 old_ldt_base = pldt->ldt_base;
716 old_ldt_len = pldt->ldt_len;
717 pldt->ldt_sd = new_ldt->ldt_sd;
718 pldt->ldt_base = new_ldt->ldt_base;
719 pldt->ldt_len = new_ldt->ldt_len;
720 mtx_unlock_spin(&dt_lock);
719 kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
720 old_ldt_len * sizeof(union descriptor));
721 FREE(new_ldt, M_SUBPROC);
721 kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
722 old_ldt_len * sizeof(union descriptor));
723 FREE(new_ldt, M_SUBPROC);
724 mtx_lock_spin(&dt_lock);
722 } else {
723 /*
724 * If other threads already did the work,
725 * do nothing.
726 */
725 } else {
726 /*
727 * If other threads already did the work,
728 * do nothing.
729 */
730 mtx_unlock_spin(&dt_lock);
727 kmem_free(kernel_map,
728 (vm_offset_t)new_ldt->ldt_base,
729 new_ldt->ldt_len * sizeof(union descriptor));
730 FREE(new_ldt, M_SUBPROC);
731 kmem_free(kernel_map,
732 (vm_offset_t)new_ldt->ldt_base,
733 new_ldt->ldt_len * sizeof(union descriptor));
734 FREE(new_ldt, M_SUBPROC);
735 mtx_lock_spin(&dt_lock);
731 return (0);
732 }
733 } else
734 mdp->md_ldt = pldt = new_ldt;
735#ifdef SMP
736 return (0);
737 }
738 } else
739 mdp->md_ldt = pldt = new_ldt;
740#ifdef SMP
736 /* signal other cpus to reload ldt */
741 /*
742 * Signal other cpus to reload ldt. We need to unlock dt_lock
743 * here because other CPU will contest on it since their
744 * curthreads won't hold the lock and will block when trying
745 * to acquire it.
746 */
747 mtx_unlock_spin(&dt_lock);
737 smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
738 NULL, td);
748 smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
749 NULL, td);
750 mtx_lock_spin(&dt_lock);
739#else
740 set_user_ldt(mdp);
741#endif
742 }
743 return (0);
744}
751#else
752 set_user_ldt(mdp);
753#endif
754 }
755 return (0);
756}