Lines Matching +full:protect +full:- +full:exec
60 * Page handling structures. This is set up as a list of per-page
61 * control structures (sc_page_ctl), with p->p_pagep pointing to
62 * the first. The per-page structures point to the actual pages
65 * All data is protected by p->p_sc_lock. Since this lock is
76 caddr_t spc_uaddr; /* user-level address of the page */
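The matched comment and field above describe the per-page bookkeeping. Every spc_* field used later in this listing (spc_next, spc_base, spc_end, spc_map, spc_space, spc_uaddr, spc_amp) plausibly lives in one sc_page_ctl structure; the sketch below is assembled from those matches only, not copied from the header, so treat the exact layout as an assumption.

/* Sketch of sc_page_ctl, inferred from the fields referenced in this listing. */
typedef struct sc_page_ctl {
	struct sc_page_ctl *spc_next;	/* next page on the process's list */
	sc_shared_t	*spc_base;	/* kernel address of the page */
	sc_shared_t	*spc_end;	/* end of usable space on the page */
	ulong_t		*spc_map;	/* bitmap: one bit per sc_shared_t slot */
	size_t		spc_space;	/* bytes still free on this page */
	caddr_t		spc_uaddr;	/* user-level address of the page */
	struct anon_map	*spc_amp;	/* anonymous memory backing the page */
} sc_page_ctl_t;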
115 if (t->t_schedctl == NULL) { in schedctl()
125 thread_lock(t); /* protect against ts_tick and ts_update */ in schedctl()
126 t->t_schedctl = ssp; in schedctl()
127 t->t_sc_uaddr = uaddr; in schedctl()
128 ssp->sc_cid = t->t_cid; in schedctl()
129 ssp->sc_cpri = t->t_cpri; in schedctl()
130 ssp->sc_priority = DISP_PRIO(t); in schedctl()
134 return ((caddr_t)t->t_sc_uaddr); in schedctl()
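Pieced together, the schedctl() matches above show the allocation fast path: the first call for a thread carves a slot out of a per-process shared page, publishes the pointers under thread_lock() so ts_tick() and ts_update() never see a half-initialized slot, and every later call just returns the cached user address. A hedged sketch, assuming schedctl_shared_alloc() hands back the kernel and user addresses through out-parameters (its real signature may differ):

/* Sketch only; assembled from the matches above, not verbatim source. */
caddr_t
schedctl(void)
{
	kthread_t	*t = curthread;
	sc_shared_t	*ssp;
	uintptr_t	uaddr;

	if (t->t_schedctl == NULL) {
		(void) schedctl_shared_alloc(&ssp, &uaddr);	/* assumed form */

		thread_lock(t);	/* protect against ts_tick and ts_update */
		t->t_schedctl = ssp;
		t->t_sc_uaddr = uaddr;
		ssp->sc_cid = t->t_cid;
		ssp->sc_cpri = t->t_cpri;
		ssp->sc_priority = DISP_PRIO(t);
		thread_unlock(t);
	}
	return ((caddr_t)t->t_sc_uaddr);
}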
145 sc_shared_t *ssp = t->t_schedctl; in schedctl_lwp_cleanup()
150 ASSERT(MUTEX_NOT_HELD(&p->p_lock)); in schedctl_lwp_cleanup()
152 thread_lock(t); /* protect against ts_tick and ts_update */ in schedctl_lwp_cleanup()
153 t->t_schedctl = NULL; in schedctl_lwp_cleanup()
154 t->t_sc_uaddr = 0; in schedctl_lwp_cleanup()
165 * User-level library code relies on this for adaptive mutex locking. in schedctl_lwp_cleanup()
167 mutex_enter(&p->p_sc_lock); in schedctl_lwp_cleanup()
168 ssp->sc_state = SC_FREE; in schedctl_lwp_cleanup()
170 index = (index_t)(ssp - pagep->spc_base); in schedctl_lwp_cleanup()
171 BT_CLEAR(pagep->spc_map, index); in schedctl_lwp_cleanup()
172 pagep->spc_space += sizeof (sc_shared_t); in schedctl_lwp_cleanup()
173 mutex_exit(&p->p_sc_lock); in schedctl_lwp_cleanup()
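Note the ordering in schedctl_lwp_cleanup(): the slot is stamped SC_FREE before its bitmap bit is cleared and the space returned, because user-level adaptive mutex code reads the owner's sc_state through this page and must stop treating a recycled slot as "owner is running". A hedged user-side sketch of that spin test (the names here are hypothetical; the real libc loop handles more cases):

/* Sketch, user side: spin only while the lock owner is actually on a CPU. */
volatile sc_shared_t *owner_scp = lock_owner_schedctl(mp);	/* hypothetical */
while (lock_is_held(mp)) {					/* hypothetical */
	if (owner_scp == NULL || owner_scp->sc_state != SC_ONPROC)
		break;		/* owner off-CPU or slot freed: block instead */
	cpu_relax();		/* hypothetical pause between probes */
}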
179 * Called from exec() and exit() system calls.
188 ASSERT(p->p_lwpcnt == 1); /* we are single-threaded now */ in schedctl_proc_cleanup()
189 ASSERT(curthread->t_schedctl == NULL); in schedctl_proc_cleanup()
192 * Since we are single-threaded, we don't have to hold p->p_sc_lock. in schedctl_proc_cleanup()
194 pagep = p->p_pagep; in schedctl_proc_cleanup()
195 p->p_pagep = NULL; in schedctl_proc_cleanup()
197 ASSERT(pagep->spc_space == sc_pagesize); in schedctl_proc_cleanup()
198 next = pagep->spc_next; in schedctl_proc_cleanup()
202 (void) as_unmap(p->p_as, pagep->spc_uaddr, PAGESIZE); in schedctl_proc_cleanup()
203 schedctl_freepage(pagep->spc_amp, (caddr_t)(pagep->spc_base)); in schedctl_proc_cleanup()
204 kmem_free(pagep->spc_map, sizeof (ulong_t) * sc_bitmap_words); in schedctl_proc_cleanup()
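The schedctl_proc_cleanup() matches are fragments of a single teardown loop run at exec()/exit() time, once the process is back to one LWP. A hedged reconstruction of how they likely fit together (the loop framing and the final kmem_free of the control structure are assumptions):

/* Sketch: release every shared page and its bookkeeping. */
pagep = p->p_pagep;
p->p_pagep = NULL;
while (pagep != NULL) {
	ASSERT(pagep->spc_space == sc_pagesize);	/* every slot already freed */
	next = pagep->spc_next;
	(void) as_unmap(p->p_as, pagep->spc_uaddr, PAGESIZE);
	schedctl_freepage(pagep->spc_amp, (caddr_t)(pagep->spc_base));
	kmem_free(pagep->spc_map, sizeof (ulong_t) * sc_bitmap_words);
	kmem_free(pagep, sizeof (sc_page_ctl_t));
	pagep = next;
}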
220 ssp->sc_state = curthread->t_state; in schedctl_save()
233 ssp->sc_state = SC_ONPROC; in schedctl_restore()
234 ssp->sc_cpu = CPU->cpu_id; in schedctl_restore()
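schedctl_save() and schedctl_restore() are the context-switch half of the interface: when the thread switches off a CPU its run state is copied into sc_state, and when it switches back on sc_state becomes SC_ONPROC and sc_cpu records the CPU id, so user code can poll both without a system call. A hedged sketch of the pair, with the argument handling simplified:

/* Sketch: save/restore hooks for the shared data, simplified. */
static void
schedctl_save(sc_shared_t *ssp)
{
	ssp->sc_state = curthread->t_state;	/* e.g. sleeping or runnable */
}

static void
schedctl_restore(sc_shared_t *ssp)
{
	ssp->sc_state = SC_ONPROC;		/* running again */
	ssp->sc_cpu = CPU->cpu_id;		/* on this CPU */
}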
250 ASSERT(ct->t_schedctl == NULL); in schedctl_fork()
257 if (pt != curthread || (cp->p_flag & SVFORK)) in schedctl_fork()
260 mutex_enter(&pp->p_sc_lock); in schedctl_fork()
261 for (pagep = pp->p_pagep; pagep != NULL; pagep = pagep->spc_next) in schedctl_fork()
262 (void) as_unmap(cp->p_as, pagep->spc_uaddr, PAGESIZE); in schedctl_fork()
263 mutex_exit(&pp->p_sc_lock); in schedctl_fork()
268 * Returns non-zero if the specified thread shouldn't be preempted at this time.
275 return (t->t_schedctl->sc_preemptctl.sc_nopreempt); in schedctl_get_nopreempt()
287 t->t_schedctl->sc_preemptctl.sc_nopreempt = val; in schedctl_set_nopreempt()
302 t->t_schedctl->sc_preemptctl.sc_yield = val; in schedctl_set_yield()
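Together these accessors implement preemption control: user code sets sc_nopreempt around a short critical section, the dispatcher asks schedctl_get_nopreempt() before preempting, and when it grants a reprieve it sets sc_yield so the user code knows to give up the CPU as soon as it leaves the critical section. A minimal kernel-side sketch, assuming a hypothetical grace-period check (the real scheduling-class code is more involved):

/* Sketch: honoring sc_nopreempt at clock-tick time (illustrative only). */
thread_lock(t);
if (t->t_schedctl != NULL && schedctl_get_nopreempt(t) &&
    within_nopreempt_grace(t)) {	/* hypothetical limit check */
	schedctl_set_yield(t, 1);	/* ask user code to yield soon */
	thread_unlock(t);
	return;				/* skip the preemption this tick */
}
thread_unlock(t);
/* ... normal time-slice handling ... */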
314 sc_shared_t *tdp = t->t_schedctl; in schedctl_set_cidpri()
317 tdp->sc_cid = t->t_cid; in schedctl_set_cidpri()
318 tdp->sc_cpri = t->t_cpri; in schedctl_set_cidpri()
319 tdp->sc_priority = DISP_PRIO(t); in schedctl_set_cidpri()
325 * Returns non-zero if the specified thread has requested that all
326 * signals be blocked. Called by signal-related code that tests
333 sc_shared_t *tdp = t->t_schedctl; in schedctl_sigblock()
336 return (tdp->sc_sigblock); in schedctl_sigblock()
344 * accomplishes what user-level code requested to be done when it set
345 * tdp->sc_shared->sc_sigblock non-zero.
347 * This is generally called by signal-related code in the current thread. In
357 sc_shared_t *tdp = t->t_schedctl; in schedctl_finish_sigblock()
359 ASSERT(t == curthread || MUTEX_HELD(&ttoproc(t)->p_lock)); in schedctl_finish_sigblock()
361 if (tdp != NULL && tdp->sc_sigblock) { in schedctl_finish_sigblock()
362 t->t_hold.__sigbits[0] = FILLSET0 & ~CANTMASK0; in schedctl_finish_sigblock()
363 t->t_hold.__sigbits[1] = FILLSET1 & ~CANTMASK1; in schedctl_finish_sigblock()
364 t->t_hold.__sigbits[2] = FILLSET2 & ~CANTMASK2; in schedctl_finish_sigblock()
365 tdp->sc_sigblock = 0; in schedctl_finish_sigblock()
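The sc_sigblock protocol lets a thread block all maskable signals with a single store: the library sets sc_sigblock in the shared page, and whenever the kernel next cares about the signal mask it calls schedctl_finish_sigblock(), which converts the request into a real t_hold mask and clears the flag. A hedged user-side sketch of the pattern, using hypothetical names rather than the actual libc helpers:

/* Sketch, user side (hypothetical names): */
volatile sc_shared_t *scp = my_schedctl_page;	/* mapping returned by schedctl() */

scp->sc_sigblock = 1;		/* block everything: one store, no syscall */
/* ... async-signal-free critical section ... */
if (scp->sc_sigblock) {
	scp->sc_sigblock = 0;	/* kernel never noticed; cheap unblock */
} else {
	/* kernel materialized the block into t_hold; restore the real
	 * signal mask with a sigprocmask()-style call */
}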
371 * Return non-zero if the current thread has declared that it has
379 sc_shared_t *tdp = curthread->t_schedctl; in schedctl_cancel_pending()
382 (tdp->sc_flgs & SC_CANCEL_FLG) && in schedctl_cancel_pending()
383 !tdp->sc_sigblock && in schedctl_cancel_pending()
384 !sigismember(&curthread->t_hold, SIGCANCEL)) in schedctl_cancel_pending()
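schedctl_cancel_pending() reports a pending pthread cancellation: SC_CANCEL_FLG is set, signals are not blocked via sc_sigblock, and SIGCANCEL itself is not held. A blocking primitive can use it to bail out early, and schedctl_cancel_eintr() then records SC_EINTR_FLG so user-level code can tell the resulting EINTR came from cancellation rather than from some other signal. A hedged sketch of a consumer (the surrounding wait path is illustrative, not a specific kernel routine):

/* Sketch: letting a blocking operation honor pending cancellation. */
if (schedctl_cancel_pending()) {
	schedctl_cancel_eintr();	/* mark: this EINTR means "cancelled" */
	return (set_errno(EINTR));	/* unwind so libc can run cleanup */
}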
394 * than because of some other signal. User-level code can try to
401 sc_shared_t *tdp = curthread->t_schedctl; in schedctl_cancel_eintr()
404 tdp->sc_flgs |= SC_EINTR_FLG; in schedctl_cancel_eintr()
409 * Return non-zero if the current thread has declared that
415 sc_shared_t *tdp = curthread->t_schedctl; in schedctl_is_park()
418 return ((tdp->sc_flgs & SC_PARK_FLG) != 0); in schedctl_is_park()
444 sc_shared_t *tdp = curthread->t_schedctl; in schedctl_set_park()
446 tdp->sc_flgs |= SC_PARK_FLG; in schedctl_set_park()
456 sc_shared_t *tdp = curthread->t_schedctl; in schedctl_unpark()
459 tdp->sc_flgs &= ~SC_PARK_FLG; in schedctl_unpark()
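SC_PARK_FLG marks the thread's next blocking syscall as a "park" on a user-level synchronization object. The library normally sets and clears the bit directly in the shared page around the park call; schedctl_set_park() and schedctl_unpark() let kernel code do the same on the thread's behalf, and schedctl_is_park() is the query. A minimal user-side sketch of the handshake, with a hypothetical variable name:

/* Sketch, user side: scp is the sc_shared_t mapping returned by schedctl(). */
scp->sc_flgs |= SC_PARK_FLG;	/* the next blocking call is a park */
/* ... issue the park system call; kernel code can consult
 * schedctl_is_park() while the thread is blocked ... */
scp->sc_flgs &= ~SC_PARK_FLG;	/* clear the mark once the wait returns */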
475 sc_pagesize = PAGESIZE - (PAGESIZE % sizeof (sc_shared_t)); in schedctl_init()
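schedctl_init() trims the usable part of each page to a whole number of sc_shared_t slots so no structure straddles a page boundary. As a worked example with an assumed structure size (illustrative only; the real sizeof (sc_shared_t) may differ):

/* Assume PAGESIZE == 4096 and sizeof (sc_shared_t) == 96 (assumption): */
/*   sc_pagesize    = 4096 - (4096 % 96) = 4096 - 64 = 4032                */
/*   slots per page = 4032 / 96          = 42 (presumably sc_bitmap_len)   */
/*   leftover       = 64 bytes at the end of the page, never handed out    */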
495 ASSERT(MUTEX_NOT_HELD(&p->p_lock)); in schedctl_shared_alloc()
496 mutex_enter(&p->p_sc_lock); in schedctl_shared_alloc()
502 for (pagep = p->p_pagep; pagep != NULL; pagep = pagep->spc_next) in schedctl_shared_alloc()
503 if (pagep->spc_space != 0) in schedctl_shared_alloc()
507 base = pagep->spc_uaddr; in schedctl_shared_alloc()
518 mutex_exit(&p->p_sc_lock); in schedctl_shared_alloc()
523 mutex_exit(&p->p_sc_lock); in schedctl_shared_alloc()
531 pagep->spc_amp = amp; in schedctl_shared_alloc()
532 pagep->spc_base = (sc_shared_t *)kaddr; in schedctl_shared_alloc()
533 pagep->spc_end = (sc_shared_t *)(kaddr + sc_pagesize); in schedctl_shared_alloc()
534 pagep->spc_uaddr = base; in schedctl_shared_alloc()
536 pagep->spc_map = kmem_zalloc(sizeof (ulong_t) * sc_bitmap_words, in schedctl_shared_alloc()
538 pagep->spc_space = sc_pagesize; in schedctl_shared_alloc()
540 pagep->spc_next = p->p_pagep; in schedctl_shared_alloc()
541 p->p_pagep = pagep; in schedctl_shared_alloc()
548 ASSERT(pagep != NULL && pagep->spc_space >= sizeof (sc_shared_t)); in schedctl_shared_alloc()
549 index = bt_availbit(pagep->spc_map, sc_bitmap_len); in schedctl_shared_alloc()
550 ASSERT(index != -1); in schedctl_shared_alloc()
556 ssp = pagep->spc_base + index; in schedctl_shared_alloc()
557 BT_SET(pagep->spc_map, index); in schedctl_shared_alloc()
558 pagep->spc_space -= sizeof (sc_shared_t); in schedctl_shared_alloc()
560 mutex_exit(&p->p_sc_lock); in schedctl_shared_alloc()
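Slot allocation in schedctl_shared_alloc() is a bitmap search: bt_availbit() finds the first clear bit, the slot's kernel address is spc_base plus that index, and the user address is presumably the same offset applied to spc_uaddr. A hedged sketch of the tail of the function (the out-parameter names are assumptions):

/* Sketch: turning a free bitmap slot into kernel and user addresses. */
index = bt_availbit(pagep->spc_map, sc_bitmap_len);
ASSERT(index != -1);
ssp = pagep->spc_base + index;			/* kernel view of the slot */
BT_SET(pagep->spc_map, index);
pagep->spc_space -= sizeof (sc_shared_t);
*kaddrp = ssp;					/* assumed out-parameter */
*uaddrp = (uintptr_t)pagep->spc_uaddr +		/* assumed out-parameter */
    ((uintptr_t)ssp - (uintptr_t)pagep->spc_base);
mutex_exit(&p->p_sc_lock);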
580 ASSERT(MUTEX_HELD(&p->p_sc_lock)); in schedctl_page_lookup()
581 for (pagep = p->p_pagep; pagep != NULL; pagep = pagep->spc_next) { in schedctl_page_lookup()
582 if (ssp >= pagep->spc_base && ssp < pagep->spc_end) in schedctl_page_lookup()
599 struct as *as = curproc->p_as; in schedctl_map()
656 amp->refcnt--; in schedctl_getpage()
674 * This is called when the process is doing exit() or exec().
684 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); in schedctl_freepage()
690 if (--amp->refcnt == 0) { in schedctl_freepage()
697 anon_free(amp->ahp, 0, PAGESIZE); in schedctl_freepage()
698 ANON_LOCK_EXIT(&amp->a_rwlock); in schedctl_freepage()
701 ANON_LOCK_EXIT(&amp->a_rwlock); in schedctl_freepage()