Lines Matching refs:t

333 	kthread_t *t;
378 t = (kthread_t *)(stk + stksize);
379 bzero(t, sizeof (kthread_t));
381 audit_thread_create(t);
382 t->t_stk = stk + stksize;
383 t->t_stkbase = stk;
386 t = (kthread_t *)(stk);
387 bzero(t, sizeof (kthread_t));
388 t->t_stk = stk + sizeof (kthread_t);
389 t->t_stkbase = stk + stksize + sizeof (kthread_t);
391 t->t_flag |= T_TALLOCSTK;
392 t->t_swap = stk;
394 t = kmem_cache_alloc(thread_cache, KM_SLEEP);
395 bzero(t, sizeof (kthread_t));
396 ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
398 audit_thread_create(t);
404 t->t_stk = stk + stksize;
405 t->t_stkbase = stk;
407 t->t_stk = stk; /* 3b2-like */
408 t->t_stkbase = stk + stksize;
413 stkinfo_begin(t);
416 t->t_ts = ts;
424 crhold(t->t_cred = pp->p_cred);
426 t->t_start = gethrestime_sec();
427 t->t_startpc = proc;
428 t->t_procp = pp;
429 t->t_clfuncs = &sys_classfuncs.thread;
430 t->t_cid = syscid;
431 t->t_pri = pri;
432 t->t_stime = ddi_get_lbolt();
433 t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
434 t->t_bind_cpu = PBIND_NONE;
435 t->t_bindflag = (uchar_t)default_binding_mode;
436 t->t_bind_pset = PS_NONE;
437 t->t_plockp = &pp->p_lock;
438 t->t_copyops = NULL;
439 t->t_taskq = NULL;
440 t->t_anttime = 0;
441 t->t_hatdepth = 0;
443 t->t_dtrace_vtime = 1; /* assure vtimestamp is always non-zero */
448 tnf_thread_create(t);
450 LOCK_INIT_CLEAR(&t->t_lock);
457 t->t_stk = thread_stk_init(t->t_stk);
458 thread_load(t, proc, arg, len);
466 t->t_proj = project_hold(proj0p);
468 lgrp_affinity_init(&t->t_lgrp_affinity);
472 t->t_did = next_t_id++;
473 t->t_prev = curthread->t_prev;
474 t->t_next = curthread;
485 curthread->t_prev->t_next = t;
486 curthread->t_prev = t;
496 * (if this isn't a kernel thread, t_cpupart will be changed
499 t->t_cpupart = &cp_default;
508 lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
511 * Inherit the current cpu. If this cpu isn't part of the chosen
516 t->t_cpu = CPU;
518 t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
519 t->t_pri, NULL);
521 t->t_disp_queue = t->t_cpu->cpu_disp;
532 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
533 CL_SETRUN(t);
534 thread_unlock(t);
538 THREAD_ONPROC(t, t->t_cpu);
548 THREAD_FREEINTR(t, CPU);
552 THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
559 return (t);
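
When a stack is passed in, thread_create() carves the kthread_t out of the stack allocation itself, in one of two layouts depending on stack growth direction (lines 378-392); otherwise the struct comes from thread_cache (line 394). A minimal userland sketch of the pointer math, assuming a two-field kthread_t; the kernel additionally rounds stksize with SA() and aligns to PTR24_ALIGN, which is omitted here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct kthread {
	char *t_stk;		/* initial stack pointer */
	char *t_stkbase;	/* opposite end of the usable stack */
} kthread_t;

/*
 * Stack grows down (lines 378-383): reserve the struct at the high end
 * of the allocation; the stack pointer starts just below it and grows
 * toward t_stkbase.
 */
static kthread_t *
carve_down(char *stk, size_t stksize)
{
	kthread_t *t;

	stksize -= sizeof (kthread_t);		/* room for the struct */
	t = (kthread_t *)(stk + stksize);
	memset(t, 0, sizeof (*t));
	t->t_stk = stk + stksize;		/* SP starts at the struct */
	t->t_stkbase = stk;			/* lowest usable address */
	return (t);
}

/*
 * Stack grows up (lines 386-389): the struct sits at the low end and
 * the stack grows away from it toward higher addresses.
 */
static kthread_t *
carve_up(char *stk, size_t stksize)
{
	kthread_t *t;

	stksize -= sizeof (kthread_t);
	t = (kthread_t *)stk;
	memset(t, 0, sizeof (*t));
	t->t_stk = stk + sizeof (kthread_t);
	t->t_stkbase = stk + stksize + sizeof (kthread_t);
	return (t);
}

int
main(void)
{
	char *stk = malloc(8192);
	kthread_t *td = carve_down(stk, 8192);
	kthread_t *tu = carve_up(stk, 8192);

	printf("down: struct %p stk %p base %p\n",
	    (void *)td, (void *)td->t_stk, (void *)td->t_stkbase);
	printf("up:   struct %p stk %p base %p\n",
	    (void *)tu, (void *)tu->t_stk, (void *)tu->t_stkbase);
	free(stk);
	return (0);
}
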
566 thread_rele(kthread_t *t)
570 thread_lock(t);
572 ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
573 kpj = ttoproj(t);
574 t->t_proj = proj0p;
576 thread_unlock(t);
587 kthread_t *t = curthread;
589 if ((t->t_proc_flag & TP_ZTHREAD) != 0)
600 ASSERT(t->t_pollstate == NULL);
601 ASSERT(t->t_schedctl == NULL);
602 if (t->t_door)
607 if (t->t_tnf_tpdp)
611 thread_rele(t);
612 t->t_preempt++;
619 t->t_next->t_prev = t->t_prev;
620 t->t_prev->t_next = t->t_next;
621 ASSERT(allthreads != t); /* t0 never exits */
622 cv_broadcast(&t->t_joincv); /* wake up anyone in thread_join */
625 if (t->t_ctx != NULL)
626 exitctx(t);
627 if (t->t_procp->p_pctx != NULL)
628 exitpctx(t->t_procp);
631 stkinfo_end(t);
634 t->t_state = TS_ZOMB; /* set zombie thread */
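
Lines 472-486 link the new thread into the circular allthreads list just before curthread, and lines 619-622 unlink it again at exit and wake any joiners; the assert at line 621 holds because t0 anchors the ring and never exits. A self-contained sketch of the two list operations on a circular doubly linked ring, with a simplified node type:

#include <assert.h>

typedef struct node {
	struct node *t_next;	/* forward link in the circular list */
	struct node *t_prev;	/* backward link */
} node_t;

/* Insert t just before pos, as thread_create() does at lines 473-486. */
static void
list_insert_before(node_t *pos, node_t *t)
{
	t->t_prev = pos->t_prev;
	t->t_next = pos;
	pos->t_prev->t_next = t;
	pos->t_prev = t;
}

/* Unlink t from the ring, as thread_exit() does at lines 619-620. */
static void
list_unlink(node_t *t, node_t *anchor)
{
	assert(t != anchor);		/* "t0 never exits" (line 621) */
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
}

int
main(void)
{
	node_t t0, t1;

	t0.t_next = t0.t_prev = &t0;	/* singleton ring: just the anchor */
	list_insert_before(&t0, &t1);
	assert(t0.t_next == &t1 && t1.t_next == &t0);
	list_unlink(&t1, &t0);
	assert(t0.t_next == &t0 && t0.t_prev == &t0);
	return (0);
}
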
649 kthread_t *t;
652 for (t = curthread->t_next; t != curthread; t = t->t_next) {
653 if (t->t_did == tid)
656 if (t->t_did == tid)
657 return (t);
670 kthread_t *t;
679 * a cv that's already been freed. In other words, don't cache
685 * The broadcast doesn't have to happen right away, but it
686 * shouldn't be postponed indefinitely (e.g., by doing it in
690 while ((t = did_to_thread(tid)) != NULL)
691 cv_wait(&t->t_joincv, &pidlock);
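
The comment fragments at lines 679-686 explain why thread_join() loops on did_to_thread(): the joiner must not cache &t->t_joincv, since the thread (and the cv embedded in it) may be freed the moment it exits, so the thread is re-looked-up by its t_did under pidlock on every wakeup. A runnable userland model of that idiom; registry_find(), registry_lock, exit_cv, and live_id are all illustrative stand-ins, not kernel APIs:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER; /* ~pidlock */
static pthread_cond_t exit_cv = PTHREAD_COND_INITIALIZER;	/* ~t_joincv */
static int live_id = 42;		/* registry of one: id 42 is alive */

static int *
registry_find(int id)			/* ~did_to_thread(): NULL once gone */
{
	return (live_id == id ? &live_id : NULL);
}

static void *
worker(void *arg)
{
	(void) arg;
	sleep(1);
	pthread_mutex_lock(&registry_lock);
	live_id = -1;			/* "exit": drop out of the registry */
	pthread_cond_broadcast(&exit_cv); /* ~cv_broadcast(&t->t_joincv) */
	pthread_mutex_unlock(&registry_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, worker, NULL);
	pthread_mutex_lock(&registry_lock);
	while (registry_find(42) != NULL)	/* fresh lookup each pass */
		pthread_cond_wait(&exit_cv, &registry_lock);
	pthread_mutex_unlock(&registry_lock);
	printf("joined\n");
	pthread_join(w, NULL);
	return (0);
}
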
696 thread_free_prevent(kthread_t *t)
700 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
705 thread_free_allow(kthread_t *t)
709 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
714 thread_free_barrier(kthread_t *t)
718 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
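
thread_free_prevent(), thread_free_allow(), and thread_free_barrier() (lines 700-718) all hash the thread pointer into an array of locks, so consumers who need a thread pinned in memory rarely contend with one another, and thread_free() (line 783) can take-and-drop the same lock as a barrier before actually freeing. A sketch of the scheme; the array size and shift are illustrative, not the kernel's values:

#include <pthread.h>
#include <stdint.h>

#define	FREE_LOCKS	64	/* power of two, so masking works */
#define	FREE_SHIFT	8	/* skip low bits shared by all pointers */
#define	FREE_HASH(p)	(((uintptr_t)(p) >> FREE_SHIFT) & (FREE_LOCKS - 1))

static pthread_mutex_t free_locks[FREE_LOCKS];

static void
free_locks_init(void)
{
	for (int i = 0; i < FREE_LOCKS; i++)
		pthread_mutex_init(&free_locks[i], NULL);
}

/* Pin p: while held, p cannot be freed (cf. thread_free_prevent). */
static void
free_prevent(void *p)
{
	pthread_mutex_lock(&free_locks[FREE_HASH(p)]);
}

static void
free_allow(void *p)
{
	pthread_mutex_unlock(&free_locks[FREE_HASH(p)]);
}

/*
 * Take and drop p's hash lock before freeing (cf. thread_free_barrier):
 * once acquired, any critical section begun under free_prevent() is over.
 */
static void
free_barrier(void *p)
{
	pthread_mutex_lock(&free_locks[FREE_HASH(p)]);
	pthread_mutex_unlock(&free_locks[FREE_HASH(p)]);
}

int
main(void)
{
	int obj;

	free_locks_init();
	free_prevent(&obj);	/* reader: obj can't be freed now */
	free_allow(&obj);
	free_barrier(&obj);	/* freer: waits out any such reader */
	return (0);
}
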
724 thread_free(kthread_t *t)
726 boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
727 klwp_t *lwp = t->t_lwp;
728 caddr_t swap = t->t_swap;
730 ASSERT(t != &t0 && t->t_state == TS_FREE);
731 ASSERT(t->t_door == NULL);
732 ASSERT(t->t_schedctl == NULL);
733 ASSERT(t->t_pollstate == NULL);
735 t->t_pri = 0;
736 t->t_pc = 0;
737 t->t_sp = 0;
738 t->t_wchan0 = NULL;
739 t->t_wchan = NULL;
740 if (t->t_cred != NULL) {
741 crfree(t->t_cred);
742 t->t_cred = NULL;
744 if (t->t_pdmsg) {
745 kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
746 t->t_pdmsg = NULL;
749 audit_thread_free(t);
751 if (t->t_tnf_tpdp)
752 tnf_thread_free(t);
754 if (t->t_cldata) {
755 CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
757 if (t->t_rprof != NULL) {
758 kmem_free(t->t_rprof, sizeof (*t->t_rprof));
759 t->t_rprof = NULL;
761 t->t_lockp = NULL; /* nothing should try to lock this thread now */
764 if (t->t_ctx)
765 freectx(t, 0);
766 t->t_stk = NULL;
769 lock_clear(&t->t_lock);
771 if (t->t_ts->ts_waiters > 0)
774 kmem_cache_free(turnstile_cache, t->t_ts);
776 free_afd(&t->t_activefd);
783 thread_free_barrier(t);
785 ASSERT(ttoproj(t) == proj0p);
786 project_rele(ttoproj(t));
788 lgrp_affinity_free(&t->t_lgrp_affinity);
798 t->t_lwp = NULL;
799 t->t_swap = NULL;
808 kmem_cache_free(thread_cache, t);
840 thread_reap_list(kthread_t *t)
844 while (t != NULL) {
845 next = t->t_forw;
846 thread_free(t);
847 t = next;
855 kthread_t *t, *l;
861 t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
883 thread_reap_list(t);
897 kthread_t *t, *l;
919 t = thread_deathrow;
944 thread_reap_list(t);
961 reapq_move_lq_to_tq(kthread_t *t)
963 ASSERT(t->t_state == TS_FREE);
965 t->t_forw = thread_deathrow;
966 thread_deathrow = t;
978 reapq_add(kthread_t *t)
990 if (t->t_flag & T_LWPREUSE) {
991 ASSERT(ttolwp(t) != NULL);
992 t->t_forw = lwp_deathrow;
993 lwp_deathrow = t;
996 t->t_forw = thread_deathrow;
997 thread_deathrow = t;
1002 t->t_state = TS_FREE;
1003 lock_clear(&t->t_lock);
1022 thread_lock(t);
1023 thread_unlock(t);
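
reapq_add() (lines 990-1003) pushes a dead thread LIFO onto one of two singly linked deathrow lists threaded through t_forw: lwp_deathrow when the lwp can be recycled by a later lwp_create(), thread_deathrow otherwise, and then marks the thread TS_FREE. A sketch of the push with simplified types; the T_LWPREUSE value here is illustrative:

#include <assert.h>
#include <stddef.h>

#define	T_LWPREUSE	0x1	/* illustrative flag value */

typedef struct dthread {
	unsigned t_flag;
	struct dthread *t_forw;	/* deathrow linkage */
} dthread_t;

static dthread_t *thread_deathrow;
static dthread_t *lwp_deathrow;

static void
reapq_push(dthread_t *t)
{
	if (t->t_flag & T_LWPREUSE) {
		t->t_forw = lwp_deathrow;	/* LIFO push, lines 992-993 */
		lwp_deathrow = t;
	} else {
		t->t_forw = thread_deathrow;	/* lines 996-997 */
		thread_deathrow = t;
	}
}

int
main(void)
{
	dthread_t a = { 0, NULL }, b = { T_LWPREUSE, NULL };

	reapq_push(&a);
	reapq_push(&b);
	assert(thread_deathrow == &a && lwp_deathrow == &b);
	return (0);
}
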
1033 kthread_t *t,
1052 ctx->next = t->t_ctx;
1053 t->t_ctx = ctx;
1061 kthread_t *t,
1087 ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
1088 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1091 * Serialize modifications to t->t_ctx to prevent the agent thread
1094 mutex_enter(&t->t_ctx_lock);
1097 for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
1105 t->t_ctx = ctx->next;
1106 mutex_exit(&t->t_ctx_lock);
1115 mutex_exit(&t->t_ctx_lock);
1122 savectx(kthread_t *t)
1126 ASSERT(t == curthread);
1127 for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1133 restorectx(kthread_t *t)
1137 ASSERT(t == curthread);
1138 for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1144 forkctx(kthread_t *t, kthread_t *ct)
1148 for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1150 (ctx->fork_op)(t, ct);
1159 lwp_createctx(kthread_t *t, kthread_t *ct)
1163 for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1165 (ctx->lwp_create_op)(t, ct);
1173 * clean-up that can't wait until thread_free().
1176 exitctx(kthread_t *t)
1180 for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1182 (ctx->exit_op)(t);
1190 freectx(kthread_t *t, int isexec)
1195 while ((ctx = t->t_ctx) != NULL) {
1196 t->t_ctx = ctx->next;
1208 * freed by the thread reaper so free_op implementations shouldn't rely
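
installctx(), removectx(), savectx()/restorectx(), exitctx(), and freectx() (lines 1033-1208) manage a per-thread singly linked stack of callback records: install pushes at the head, the context-switch and exit paths walk the list invoking the matching hook, and freectx() pops and frees everything. A trimmed-down sketch keeping only a save hook; the real struct ctxop also carries restore, fork, lwp_create, exit, and free operations:

#include <stdio.h>
#include <stdlib.h>

typedef struct ctxop {
	void (*save_op)(void *);	/* hook run when going off-cpu */
	void *arg;
	struct ctxop *next;
} ctxop_t;

typedef struct cthread {
	ctxop_t *t_ctx;			/* head of the ops list */
} cthread_t;

static void
installctx(cthread_t *t, void *arg, void (*save)(void *))
{
	ctxop_t *ctx = malloc(sizeof (*ctx));

	ctx->save_op = save;
	ctx->arg = arg;
	ctx->next = t->t_ctx;		/* push at head (lines 1052-1053) */
	t->t_ctx = ctx;
}

static void
savectx(cthread_t *t)
{
	for (ctxop_t *ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->save_op != NULL)
			ctx->save_op(ctx->arg);	/* per-hook callout */
}

static void
freectx(cthread_t *t)
{
	ctxop_t *ctx;

	while ((ctx = t->t_ctx) != NULL) {	/* pop-and-free (1195-1196) */
		t->t_ctx = ctx->next;
		free(ctx);
	}
}

static void
fpu_save(void *arg)
{
	printf("saving %s state\n", (const char *)arg);
}

int
main(void)
{
	cthread_t t = { NULL };

	installctx(&t, "fpu", fpu_save);
	savectx(&t);	/* would run at every off-cpu switch */
	freectx(&t);	/* at thread destruction */
	return (0);
}
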
1232 setrun_locked(kthread_t *t)
1234 ASSERT(THREAD_LOCK_HELD(t));
1235 if (t->t_state == TS_SLEEP) {
1239 SOBJ_UNSLEEP(t->t_sobj_ops, t);
1240 } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1245 } else if (t->t_state == TS_WAIT) {
1246 waitq_setrun(t);
1247 } else if (t->t_state == TS_STOPPED) {
1256 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
1257 * The thread won't be stopped unless one of these
1260 * These flags must be set before calling setrun_locked(t).
1261 * They can't be passed as arguments because the streams
1266 if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1271 t->t_whystop = 0;
1272 t->t_whatstop = 0;
1279 t->t_schedflag &= ~TS_ALLSTART;
1280 THREAD_TRANSITION(t); /* drop stopped-thread lock */
1281 ASSERT(t->t_lockp == &transition_lock);
1282 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1286 CL_SETRUN(t);
1291 setrun(kthread_t *t)
1293 thread_lock(t);
1294 setrun_locked(t);
1295 thread_unlock(t);
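
For a TS_STOPPED thread, setrun_locked() only proceeds once every stop reason has been cleared: TS_ALLSTART is a union of per-cause start bits, and the check at line 1266 bails out unless all of them are set (the fragments at lines 1256-1261 explain why these are flags rather than arguments). A small model of that gate; the flag names echo the kernel's but the values and the exact set are illustrative:

#include <stdio.h>

#define	TS_CSTART	0x01	/* continued after /proc stop */
#define	TS_UNPAUSE	0x02	/* unpaused */
#define	TS_XSTART	0x04	/* continued after SIGSTOP */
#define	TS_PSTART	0x08	/* continued after lwp_suspend() */
#define	TS_ALLSTART	(TS_CSTART | TS_UNPAUSE | TS_XSTART | TS_PSTART)

static int
try_setrun(unsigned *schedflag)
{
	if ((*schedflag & TS_ALLSTART) != TS_ALLSTART)
		return (0);		/* still stopped for another reason */
	*schedflag &= ~TS_ALLSTART;	/* consumed; cf. line 1279 */
	return (1);			/* caller may now CL_SETRUN() */
}

int
main(void)
{
	unsigned f = TS_CSTART | TS_UNPAUSE | TS_XSTART;

	printf("%d\n", try_setrun(&f));	/* 0: lwp_suspend() not undone */
	f |= TS_PSTART;
	printf("%d\n", try_setrun(&f));	/* 1: all stop reasons cleared */
	return (0);
}
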
1313 kthread_t *t = curthread; /* current thread */
1318 ASSERT(t->t_intr != NULL);
1320 itp = t->t_intr; /* interrupted thread */
1321 t->t_intr = NULL; /* clear interrupt ptr */
1328 i = intr_passivate(t, itp);
1332 i, t, t, itp, itp);
1337 t->t_lwp = NULL;
1395 * Don't make a user-requested binding on this thread so that
1562 tsd_agent_get(kthread_t *t, uint_t key)
1564 struct tsd_thread *tsd = t->t_tsd;
1566 ASSERT(t == curthread ||
1567 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1584 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1586 struct tsd_thread *tsd = t->t_tsd;
1588 ASSERT(t == curthread ||
1589 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1594 tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1756 * The thread_change(e)pri() function doesn't drop the thread
1760 thread_change_epri(kthread_t *t, pri_t disp_pri)
1764 ASSERT(THREAD_LOCK_HELD(t));
1767 * If the inherited priority hasn't actually changed,
1770 if (t->t_epri == disp_pri)
1773 state = t->t_state;
1779 t->t_epri = disp_pri;
1781 cpu_t *cp = t->t_disp_queue->disp_cpu;
1783 if (t == cp->cpu_dispthread)
1784 cp->cpu_dispatch_pri = DISP_PRIO(t);
1794 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1800 if (disp_pri != t->t_epri)
1801 waitq_change_pri(t, disp_pri);
1809 (void) dispdeq(t);
1810 t->t_epri = disp_pri;
1811 setbackdq(t);
1813 schedctl_set_cidpri(t);
1823 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1828 ASSERT(THREAD_LOCK_HELD(t));
1830 state = t->t_state;
1831 THREAD_WILLCHANGE_PRI(t, disp_pri);
1837 t->t_pri = disp_pri;
1840 cpu_t *cp = t->t_disp_queue->disp_cpu;
1842 if (t == cp->cpu_dispthread)
1843 cp->cpu_dispatch_pri = DISP_PRIO(t);
1853 if (disp_pri != t->t_pri)
1854 SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1860 if (disp_pri != t->t_pri)
1861 waitq_change_pri(t, disp_pri);
1873 on_rq = dispdeq(t);
1875 t->t_pri = disp_pri;
1877 setfrontdq(t);
1879 setbackdq(t);
1882 schedctl_set_cidpri(t);
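
For a thread already on a run queue, both thread_change_epri() (lines 1809-1811) and thread_change_pri() (lines 1873-1879) follow the standard rule that a queue key must never be mutated in place: dequeue, update the priority, then requeue at the back (or front, when the thread has not used up its time slice). A sketch with a sorted singly linked run queue; dispdeq and setbackdq here are simplified stand-ins for the dispatcher's versions:

#include <assert.h>
#include <stddef.h>

typedef struct qnode {
	int pri;
	struct qnode *next;
} qnode_t;

static qnode_t *runq;		/* sorted, highest priority first */

static void
dispdeq(qnode_t *t)		/* unlink t, cf. line 1873 */
{
	qnode_t **pp;

	for (pp = &runq; *pp != NULL; pp = &(*pp)->next)
		if (*pp == t) {
			*pp = t->next;
			return;
		}
}

static void
setbackdq(qnode_t *t)		/* behind equal priority; > gives setfrontdq */
{
	qnode_t **pp = &runq;

	while (*pp != NULL && (*pp)->pri >= t->pri)
		pp = &(*pp)->next;
	t->next = *pp;
	*pp = t;
}

static void
change_pri(qnode_t *t, int newpri)
{
	dispdeq(t);		/* off the queue first (line 1873) */
	t->pri = newpri;	/* safe to change only while dequeued */
	setbackdq(t);		/* requeue in the right place (line 1879) */
}

int
main(void)
{
	qnode_t a = { 10, NULL }, b = { 20, NULL };

	setbackdq(&a);
	setbackdq(&b);
	assert(runq == &b);	/* higher priority runs first */
	change_pri(&a, 30);
	assert(runq == &a);	/* boosted past b */
	return (0);
}
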
1891 stkinfo_begin(kthread_t *t)
1901 if (t->t_stk > t->t_stkbase) {
1903 start = t->t_stkbase;
1904 end = t->t_stk;
1907 start = t->t_stk;
1908 end = t->t_stkbase;
1935 * Tunable kmem_stackinfo is set: create the stackinfo log if it doesn't already exist,
1940 stkinfo_end(kthread_t *t)
1953 /* create the stackinfo log, if it doesn't already exist */
1970 if (t->t_stk > t->t_stkbase) {
1972 start = t->t_stkbase;
1973 end = t->t_stk;
1976 start = t->t_stk;
1977 end = t->t_stkbase;
1999 if (t->t_stk > t->t_stkbase) {
2034 DTRACE_PROBE3(stack__usage, kthread_t *, t,
2069 kmem_stkinfo_log[index].kthread = (caddr_t)t;
2070 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
2074 kmem_stkinfo_log[index].t_tid = t->t_tid;
2076 if (t->t_tid != 0) {
2077 stksz = strlen((t->t_procp)->p_user.u_comm);
2085 (t->t_procp)->p_user.u_comm, stksz);
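
stkinfo_begin() and stkinfo_end() (lines 1891-2085) measure stack consumption with a classic watermark trick: the fresh stack is filled with a recognizable pattern, and at exit a scan from the quiet end finds the first overwritten word, i.e. the deepest point the stack ever reached, from which a percent-used figure is logged. A runnable model of the technique; the pattern constant and 64-bit word granularity are illustrative, not the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define	STKPATTERN	0xbadcafefeedfaceULL	/* illustrative fill value */

/* Fill the unused stack with the pattern, cf. stkinfo_begin(). */
static void
stk_fill(uint64_t *base, size_t words)
{
	for (size_t i = 0; i < words; i++)
		base[i] = STKPATTERN;
}

/*
 * Scan from the far (base) end toward the in-use end; with a downward-
 * growing stack, the first non-pattern word is the deepest point ever
 * touched, cf. stkinfo_end(). Returns the percent of the stack used.
 */
static unsigned
stk_percent_used(const uint64_t *base, size_t words)
{
	size_t i = 0;

	while (i < words && base[i] == STKPATTERN)
		i++;
	return ((unsigned)(((words - i) * 100) / words));
}

int
main(void)
{
	uint64_t stack[512];

	stk_fill(stack, 512);
	memset(stack + 384, 0, 128 * sizeof (uint64_t)); /* "use" top 25% */
	printf("%u%% used\n", stk_percent_used(stack, 512));
	return (0);
}
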