/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"

/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, send SIGCANCEL to request that the target thread
 * cancel itself.
 */
#pragma weak pthread_cancel = _pthread_cancel
int
_pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = __lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}

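/*
 * Example (illustrative only, not part of this file): canceling a
 * worker thread through the public API.  With the default deferred
 * cancellation type, the target acts on the cancel at its next
 * cancellation point; pause() is one such point.  The names worker,
 * tid and status are hypothetical.
 *
 *	void *
 *	worker(void *arg)
 *	{
 *		for (;;)
 *			pause();
 *		return (NULL);
 *	}
 *
 *	pthread_t tid;
 *	void *status;
 *	(void) pthread_create(&tid, NULL, worker, NULL);
 *	(void) pthread_cancel(tid);
 *	(void) pthread_join(tid, &status);
 *	assert(status == PTHREAD_CANCELED);
 */
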
/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
#pragma weak pthread_setcancelstate = _pthread_setcancelstate
int
_pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical(), which
	 * defers SIGCANCEL until exit_critical() is called from
	 * ulwp_unlock(self).  (self->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		_pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}

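/*
 * Example (illustrative only): a caller shields a non-reentrant
 * critical section from cancellation and then restores the previous
 * state.  Restoring oldstate rather than blindly re-enabling keeps
 * the sequence composable with callers that have already disabled
 * cancellation.  update_shared_state() is a hypothetical function.
 *
 *	int oldstate;
 *
 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	update_shared_state();
 *	(void) pthread_setcancelstate(oldstate, NULL);
 */
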
/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, the call itself becomes
 * a cancellation point if there is a cancellation pending.
 */
#pragma weak pthread_setcanceltype = _pthread_setcanceltype
int
_pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		_pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}

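/*
 * Example (illustrative only): a compute-bound loop that never reaches
 * a cancellation point can opt into asynchronous cancellation so that
 * a pending cancel takes effect immediately.  Note that the call
 * itself acts on a pending cancel once the type becomes ASYNCHRONOUS,
 * as described above.  pure_computation() is a hypothetical function
 * that is safe to interrupt at any instruction.
 *
 *	int oldtype;
 *
 *	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 *	pure_computation();
 *	(void) pthread_setcanceltype(oldtype, NULL);
 */
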
/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a cancel is pending, act on
 * it by calling _pthread_exit(), which takes care of calling
 * the cleanup handlers.
 */
#pragma weak _private_testcancel = _pthread_testcancel
#pragma weak pthread_testcancel = _pthread_testcancel
void
_pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		_pthread_exit(PTHREAD_CANCELED);
}

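/*
 * Example (illustrative only): a long-running loop with no natural
 * cancellation points can poll for a pending cancel once per
 * iteration.  process_item() is a hypothetical work function.
 *
 *	for (i = 0; i < nitems; i++) {
 *		pthread_testcancel();
 *		process_item(&items[i]);
 *	}
 */
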
/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from functions that want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon(void)
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			_pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls _pthread_exit().
 * It is called from functions which are cancellation points, like cond_wait().
 */
void
_canceloff(void)
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			_pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * Same as _canceloff() but don't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel(void)
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

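/*
 * Sketch (illustrative only) of how a blocking cancellation point
 * brackets its wait with the three routines above, following the
 * cond_wait()/sema_wait() usage cited in their comments: _canceloff()
 * when the wait was interrupted, _canceloff_nocancel() when it was
 * not.  block_in_kernel() stands in for the primitive's actual
 * blocking call; _cancelon() and _canceloff() may not return, since
 * each calls _pthread_exit(PTHREAD_CANCELED) if a cancel is pending.
 *
 *	_cancelon();
 *	error = block_in_kernel();
 *	if (error == EINTR)
 *		_canceloff();
 *	else
 *		_canceloff_nocancel();
 */
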
/*
 * __pthread_cleanup_push: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_push().  The macro allocates the cleanup
 * struct in the caller's block and calls this routine to push the
 * handler onto curthread's list of cleanup handlers.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
	void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_pop().  The macro calls this routine to pop
 * the handler off curthread's list and execute it if necessary.
 */
/* ARGSUSED1 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}

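/*
 * Example (illustrative only): the public macros keep push and pop
 * lexically paired; their expansions call the two routines above with
 * a _cleanup_t allocated in the enclosing block.  Here the mutex is
 * released whether the wait completes or the thread is canceled in
 * pthread_cond_wait().  unlock_fn() is a hypothetical handler that
 * casts its argument and calls pthread_mutex_unlock().
 *
 *	(void) pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_fn, &m);
 *	while (!ready)
 *		(void) pthread_cond_wait(&cv, &m);
 *	pthread_cleanup_pop(1);
 */
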
/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

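/*
 * Summary of the transitions made above when schedctl data is
 * available and neither ul_vfork nor ul_nocancel is set:
 *
 *	clear_flags	pending && !disabled	effect on sc_flgs
 *	-----------	--------------------	-----------------
 *	 nonzero	 (any)			clear SC_CANCEL_FLG
 *						and SC_EINTR_FLG
 *	 zero		 true			set SC_CANCEL_FLG
 *	 zero		 false			clear SC_CANCEL_FLG
 */
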
/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		__lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}

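/*
 * Sketch (illustrative only) of the parking protocol described above,
 * reduced to its bare shape.  If a signal arrives after the flag is
 * set but before the thread reaches the kernel, sigacthandler()
 * clears the flag and the subsequent __lwp_park() returns immediately
 * rather than sleeping through its wakeup.  tsp is the optional
 * timeout; the real waiters pass additional state.
 *
 *	set_parking_flag(self, 1);
 *	error = __lwp_park(tsp, 0);
 *	set_parking_flag(self, 0);
 */
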
/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}