/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Routines to support shuttle synchronization objects
 */
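
/*
 * A shuttle lets one thread hand its CPU directly to another: the caller
 * goes to sleep on the shuttle and the target thread is resumed in its
 * place without passing through the dispatcher queues.  The main consumer
 * is the doors IPC facility (door_call()/door_return()).
 */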

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/class.h>
#include <sys/debug.h>
#include <sys/sobject.h>
#include <sys/cpuvar.h>
#include <sys/schedctl.h>
#include <sys/sdt.h>

static	disp_lock_t	shuttle_lock;	/* lock on shuttle objects */

/*
 * Take the thread off the shuttle it is sleeping on and place it on a
 * run queue.
 */
static void
shuttle_unsleep(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));

	/* Waiting on a shuttle */
	ASSERT(t->t_wchan0 == (caddr_t)1 && t->t_wchan == NULL);
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;
	THREAD_TRANSITION(t);
	CL_SETRUN(t);
}

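/*
 * Shuttle objects have no notion of an owner, so the owner operation
 * always returns NULL.
 */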
static kthread_t *
shuttle_owner()
{
	return (NULL);
}

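/*
 * Change the priority of a thread sleeping on a shuttle.  Shuttles do
 * not use sleep queues, so there is nothing to reposition; simply
 * record the new priority.
 */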
/*ARGSUSED*/
static void
shuttle_change_pri(kthread_t *t, pri_t p, pri_t *t_prip)
{
	ASSERT(THREAD_LOCK_HELD(t));
	*t_prip = p;
}

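/*
 * Operations vector for shuttle synchronization objects.
 */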
static sobj_ops_t shuttle_sobj_ops = {
	SOBJ_SHUTTLE, shuttle_owner, shuttle_unsleep, shuttle_change_pri
};

/*
 * Mark the current thread as sleeping on a shuttle object, and
 * resume the specified thread. The 't' thread must be marked as ONPROC.
 *
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_resume(kthread_t *t, kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);
	cpu_t	*cp;
	disp_lock_t *oldtlp;

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/*
	 * Set cpu_dispthread before changing the thread state so that
	 * kernel preemption is deferred until after swtch_to().
	 */
	cp = CPU;
	cp->cpu_dispthread = t;
	cp->cpu_dispatch_pri = DISP_PRIO(t);
	/*
	 * Set the wchan0 field so that /proc won't just do a setrun
	 * on this thread when trying to stop a process. Instead,
	 * /proc will mark the thread as VSTOPPED similar to threads
	 * that are blocked on user level condition variables.
	 */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);

	/*
	 * Update the microstate accounting records (there is no waitrq
	 * transition for a shuttle sleep).
	 */
	(void) new_mstate(curthread, LMS_SLEEP);

	thread_lock_high(t);
	oldtlp = t->t_lockp;

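	/*
	 * Clear any remnants of 't's own shuttle sleep; the caller has
	 * already marked 't' as ONPROC.
	 */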
	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;

	/*
	 * Make sure we end up on the right CPU if we are dealing with bound
	 * CPUs or processor partitions.
	 */
	if (t->t_bound_cpu != NULL || t->t_cpupart != cp->cpu_part) {
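		/*
		 * Force a preemption after swtch_to() so that the
		 * dispatcher can move 't' to a CPU it is allowed to
		 * run on.
		 */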
		aston(t);
		cp->cpu_runrun = 1;
	}

	/*
	 * We re-assign t_disp_queue and t_lockp of 't' here because
	 * 't' could have been preempted.
	 */
	if (t->t_disp_queue != cp->cpu_disp) {
		t->t_disp_queue = cp->cpu_disp;
		thread_onproc(t, cp);
	}

	/*
	 * We can't call thread_unlock_high() here because t's thread lock
	 * could have been changed by the thread_onproc() call above to
	 * point to CPU->cpu_thread_lock.
	 */
	disp_lock_exit_high(oldtlp);

	mutex_exit(l);
	/*
	 * Make sure we didn't receive any important events while
	 * we weren't looking.
	 */
	if (lwp && (ISSIG(curthread, JUSTLOOKING) ||
	    MUSTRETURN(curproc, curthread) || schedctl_cancel_pending()))
		setrun(curthread);

	swtch_to(t);
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}

/*
 * Mark the current thread as sleeping on a shuttle object, and
 * switch to a new thread.
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_swtch(kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
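	/*
	 * Unlike shuttle_resume() and shuttle_sleep(), there is no check
	 * for lwp == NULL here; callers are expected to be user threads
	 * in system-call context, which always have an lwp.
	 */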
	lwp->lwp_asleep = 1;			/* /proc */
	lwp->lwp_sysabort = 0;			/* /proc */
	lwp->lwp_ru.nvcsw++;
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	(void) new_mstate(curthread, LMS_SLEEP);
	disp_lock_exit_high(&shuttle_lock);
	mutex_exit(l);
	if (ISSIG(curthread, JUSTLOOKING) ||
	    MUSTRETURN(curproc, curthread) || schedctl_cancel_pending())
		setrun(curthread);
	swtch();
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}

/*
 * Mark the specified thread as once again sleeping on a shuttle object.  This
 * routine is called to put a server thread -- one that was dequeued but for
 * which shuttle_resume() was _not_ called -- back to sleep on a shuttle
 * object.  Because we don't hit the sched:::wakeup DTrace probe until
 * shuttle_resume(), we do _not_ have a sched:::sleep probe here.
 */
void
shuttle_sleep(kthread_t *t)
{
	klwp_t	*lwp = ttolwp(t);
	proc_t	*p = ttoproc(t);

	thread_lock(t);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;			/* /proc */
		lwp->lwp_sysabort = 0;			/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	t->t_flag |= T_WAKEABLE;
	t->t_sobj_ops = &shuttle_sobj_ops;
	t->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(t);
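	/*
	 * The thread is still in the LMS_SLEEP microstate from its
	 * previous shuttle sleep, so there is no new_mstate() call here.
	 */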
	ASSERT(t->t_mstate == LMS_SLEEP);
	THREAD_SLEEP(t, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);
	if (lwp && (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t)))
		setrun(t);
}
239