/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/lockstat.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

/*
 * We check CPU_ON_INTR(CPU) when exiting a disp lock, rather than when
 * entering it, for a purely pragmatic reason: when exiting a disp lock
 * we know that we must be at PIL 10, and thus not preemptible; therefore
 * we can safely load the CPU pointer without worrying about it changing.
 */
static void
disp_onintr_panic(void)
{
	panic("dispatcher invoked from high-level interrupt handler");
}

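/*
 * Initialize a dispatcher lock.  The name argument is accepted for
 * symmetry with other lock initializers but is currently unused.
 */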
/* ARGSUSED */
void
disp_lock_init(disp_lock_t *lp, char *name)
{
	DISP_LOCK_INIT(lp);
}

/* ARGSUSED */
void
disp_lock_destroy(disp_lock_t *lp)
{
	DISP_LOCK_DESTROY(lp);
}

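/*
 * Acquire a dispatcher lock from a caller already running at high
 * (DISP_LEVEL or above) interrupt priority; no spl manipulation is done.
 */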
void
disp_lock_enter_high(disp_lock_t *lp)
{
	lock_set(lp);
}

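/*
 * Release a dispatcher lock acquired via disp_lock_enter_high().  The
 * interrupt priority level is left unchanged.
 */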
void
disp_lock_exit_high(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear(lp);
}

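/*
 * Acquire a dispatcher lock, raising the interrupt priority level to
 * DISP_LEVEL and saving the caller's previous spl in curthread->t_oldspl.
 */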
void
disp_lock_enter(disp_lock_t *lp)
{
	lock_set_spl(lp, ipltospl(DISP_LEVEL), &curthread->t_oldspl);
}

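/*
 * Release a dispatcher lock and restore the spl saved by
 * disp_lock_enter(), honoring any kernel preemption request that
 * arrived while the lock was held.
 */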
void
disp_lock_exit(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
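	/*
	 * Both paths release the lock and restore the old spl before any
	 * preemption can be taken, so kpreempt() is never entered while a
	 * dispatcher lock is still held.
	 */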
	if (CPU->cpu_kprunrun) {
		lock_clear_splx(lp, curthread->t_oldspl);
		kpreempt(KPREEMPT_SYNC);
	} else {
		lock_clear_splx(lp, curthread->t_oldspl);
	}
}

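/*
 * Like disp_lock_exit(), but never enters the kernel preemption path;
 * for callers that must not be preempted here even if cpu_kprunrun is set.
 */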
void
disp_lock_exit_nopreempt(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear_splx(lp, curthread->t_oldspl);
}

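/*
 * For illustration, a hedged sketch of how these primitives pair up in a
 * caller.  The lock name below is invented for the example and does not
 * exist in the kernel:
 *
 *	disp_lock_t example_lock;
 *
 *	disp_lock_init(&example_lock, "example lock");
 *	...
 *	disp_lock_enter(&example_lock);		raises spl to DISP_LEVEL
 *	...					manipulate dispatcher state
 *	disp_lock_exit(&example_lock);		drops lock, restores spl,
 *						may enter kpreempt()
 */
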
/*
 * Thread_lock() - get the correct dispatcher lock for the thread.
 */
void
thread_lock(kthread_id_t t)
{
	int s = splhigh();

	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
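			/*
			 * t->t_lockp may have changed (e.g., if the thread
			 * moved to another dispatch queue) between loading
			 * it and acquiring the lock; if so, drop the stale
			 * lock and retry.
			 */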
			if (lp == *tlpp) {
				curthread->t_oldspl = (ushort_t)s;
				return;
			}
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_SPIN);
			/*
			 * Lower spl and spin on lock with non-atomic load
			 * to avoid cache activity.  Spin until the lock
			 * becomes available or spontaneously changes.
			 */
			splx(s);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr) {
					curthread->t_oldspl = splhigh();
					return;
				}
				SMT_PAUSE();
			}

			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_SPIN,
			    lp, spin_time);
			s = splhigh();
		}
	}
}

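/*
 * A hedged usage sketch: thread_lock() is normally paired with the
 * thread_unlock() macro from <sys/thread.h>, which releases whatever
 * dispatcher lock t_lockp currently points at:
 *
 *	thread_lock(t);
 *	... examine or update t's scheduling state ...
 *	thread_unlock(t);
 */
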
/*
 * Thread_lock_high() - get the correct dispatcher lock for the thread.
 *	This version is called when already at high spl.
 */
void
thread_lock_high(kthread_id_t t)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
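			/*
			 * As in thread_lock(), recheck that t_lockp still
			 * points at the lock we acquired; retry if not.
			 */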
			if (lp == *tlpp)
				return;
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_HIGH_SPIN);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr)
					return;
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_HIGH_SPIN,
			    lp, spin_time);
		}
	}
}

/*
 * Called by the THREAD_TRANSITION macro to change the thread state to
 * the intermediate state-in-transition state.
 */
void
thread_transition(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &transition_lock);

	lp = t->t_lockp;
	t->t_lockp = &transition_lock;
	disp_lock_exit_high(lp);
}

/*
 * Put the thread in the stopped state and set its lock pointer to
 * stop_lock.  This effectively drops the lock on the thread, since
 * stop_lock is never held.
 * Eventually, stop_lock could be hashed if there is too much contention.
 */
void
thread_stop(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &stop_lock);

	lp = t->t_lockp;
	t->t_state = TS_STOPPED;
	/*
	 * Ensure that t_state reaches global visibility before t_lockp
	 */
	membar_producer();
	t->t_lockp = &stop_lock;
	disp_lock_exit(lp);
}
229