/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/lockstat.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

/*
 * We check CPU_ON_INTR(CPU) when exiting a disp lock, rather than when
 * entering it, for a purely pragmatic reason: when exiting a disp lock
 * we know that we must be at DISP_LEVEL, and thus not preemptible; therefore
 * we can safely load the CPU pointer without worrying about it changing.
 */
static void
disp_onintr_panic(void)
{
	panic("dispatcher invoked from high-level interrupt handler");
}

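/*
 * Initialize a dispatcher lock.  The name argument is currently unused.
 */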
/* ARGSUSED */
void
disp_lock_init(disp_lock_t *lp, char *name)
{
	DISP_LOCK_INIT(lp);
}

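/*
 * Destroy a dispatcher lock.
 */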
/* ARGSUSED */
void
disp_lock_destroy(disp_lock_t *lp)
{
	DISP_LOCK_DESTROY(lp);
}

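/*
 * Acquire a dispatcher lock without changing the spl; the caller is
 * assumed to already be at DISP_LEVEL or above.
 */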
void
disp_lock_enter_high(disp_lock_t *lp)
{
	lock_set(lp);
}

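/*
 * Release a dispatcher lock without restoring the previously saved spl.
 */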
void
disp_lock_exit_high(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear(lp);
}

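/*
 * Raise the spl to DISP_LEVEL, saving the caller's previous spl in
 * curthread->t_oldspl, and acquire the dispatcher lock.
 */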
void
disp_lock_enter(disp_lock_t *lp)
{
	lock_set_spl(lp, ipltospl(DISP_LEVEL), &curthread->t_oldspl);
}

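/*
 * Release a dispatcher lock, restore the spl saved by disp_lock_enter(),
 * and handle any kernel preemption request that arrived while the lock
 * was held.
 */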
void
disp_lock_exit(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	if (CPU->cpu_kprunrun) {
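		/*
		 * Drop the lock and restore the spl before honoring the
		 * pending preemption request, so that we are not preempted
		 * while holding a dispatcher lock.
		 */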
		lock_clear_splx(lp, curthread->t_oldspl);
		kpreempt(KPREEMPT_SYNC);
	} else {
		lock_clear_splx(lp, curthread->t_oldspl);
	}
}

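/*
 * Like disp_lock_exit(), but does not check for a pending kernel
 * preemption request.
 */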
void
disp_lock_exit_nopreempt(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear_splx(lp, curthread->t_oldspl);
}

/*
 * Thread_lock() - get the correct dispatcher lock for the thread.
 */
void
thread_lock(kthread_id_t t)
{
	int s = splhigh();

	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
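		/*
		 * t_lockp may be changed by another CPU while we are
		 * acquiring the lock it points to, so re-check it after
		 * lock_try() succeeds; if it no longer matches, drop the
		 * lock and retry.
		 */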
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp) {
				curthread->t_oldspl = (ushort_t)s;
				return;
			}
			lock_clear(lp);
		} else {
			uint_t spin_count = 1;
			/*
			 * Lower spl and spin on lock with non-atomic load
			 * to avoid cache activity.  Spin until the lock
			 * becomes available or spontaneously changes.
			 */
			splx(s);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr) {
					curthread->t_oldspl = splhigh();
					return;
				}
				spin_count++;
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD(LS_THREAD_LOCK_SPIN, lp, spin_count);
			s = splhigh();
		}
	}
}

/*
 * Thread_lock_high() - get the correct dispatcher lock for the thread.
 *	This version is called when already at high spl.
 */
void
thread_lock_high(kthread_id_t t)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
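		/*
		 * Same acquire-and-verify loop as thread_lock(), but the
		 * spl is left untouched.
		 */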
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp)
				return;
			lock_clear(lp);
		} else {
			uint_t spin_count = 1;
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr)
					return;
				spin_count++;
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD(LS_THREAD_LOCK_HIGH_SPIN,
			    lp, spin_count);
		}
	}
}

/*
 * Called by the THREAD_TRANSITION macro to change the thread state to
 * the intermediate state-in-transition state.
 */
void
thread_transition(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &transition_lock);

	lp = t->t_lockp;
	t->t_lockp = &transition_lock;
	disp_lock_exit_high(lp);
}

/*
 * Put thread in stop state, and set the lock pointer to the stop_lock.
 * This effectively drops the lock on the thread, since the stop_lock
 * isn't held.
 * Eventually, stop_lock could be hashed if there is too much contention.
 */
void
thread_stop(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &stop_lock);

	lp = t->t_lockp;
	t->t_state = TS_STOPPED;
	/*
	 * Ensure that t_state reaches global visibility before t_lockp
	 */
	membar_producer();
	t->t_lockp = &stop_lock;
	disp_lock_exit(lp);
}
228