/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"

/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, send SIGCANCEL to request that the target thread
 * cancel itself.
 */
int
pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = _lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}
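
/*
 * Illustrative example (not part of this file): a typical caller
 * cancels a worker thread and then joins it; pthread_join() reports
 * PTHREAD_CANCELED as the exit status if the cancellation took
 * effect.  A minimal sketch, assuming a hypothetical worker()
 * routine that contains a cancellation point:
 *
 *	#include <pthread.h>
 *
 *	pthread_t tid;
 *	void *status;
 *
 *	(void) pthread_create(&tid, NULL, worker, NULL);
 *	(void) pthread_cancel(tid);
 *	(void) pthread_join(tid, &status);
 *	if (status == PTHREAD_CANCELED)
 *		... the worker was cancelled, not a normal return ...
 */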

/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical(), which
	 * defers SIGCANCEL until exit_critical() is called from
	 * ulwp_unlock(self).  (self->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}
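
/*
 * Illustrative example (not part of this file): disabling
 * cancellation around a region that must not be torn down
 * half-way, then restoring the caller's previous state.
 * A minimal sketch:
 *
 *	#include <pthread.h>
 *
 *	int oldstate;
 *
 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	... update shared data structures; no cancellation here ...
 *	(void) pthread_setcancelstate(oldstate, NULL);
 */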

/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, then this becomes
 * a cancellation point if there is a cancellation pending.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}
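
/*
 * Illustrative example (not part of this file): a compute-bound
 * loop with no cancellation points can opt in to asynchronous
 * cancellation so that pthread_cancel() takes effect immediately.
 * A minimal sketch; note that only async-cancel-safe code may run
 * while the type is ASYNCHRONOUS:
 *
 *	#include <pthread.h>
 *
 *	int oldtype;
 *
 *	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,
 *	    &oldtype);
 *	... long-running, async-cancel-safe computation ...
 *	(void) pthread_setcanceltype(oldtype, NULL);
 */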

/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a request is pending, act on
 * it by calling pthread_exit(), which takes care of calling
 * the cancellation cleanup handlers.
 */
void
pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		pthread_exit(PTHREAD_CANCELED);
}
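
/*
 * Illustrative example (not part of this file): a deferred-mode
 * worker loop that contains no blocking calls can create its own
 * cancellation points by polling.  A minimal sketch:
 *
 *	#include <pthread.h>
 *
 *	for (;;) {
 *		do_some_work();		(hypothetical work function)
 *		pthread_testcancel();	(exits here if cancelled)
 *	}
 */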

/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from functions that want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon(void)
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls pthread_exit().
 * It is called from functions which are cancellation points, like cond_wait().
 */
void
_canceloff(void)
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * Same as _canceloff() but does not actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel(void)
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}
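
/*
 * Illustrative sketch (not part of this file) of how the three
 * routines above bracket a blocking operation to make it a
 * deferred-mode cancellation point; cond_wait() and sema_wait()
 * follow this general pattern.  This is a simplified assumption
 * of the call sequence, not the actual implementation:
 *
 *	_cancelon();			(may pthread_exit() right away)
 *	error = blocking_operation();	(hypothetical blocking call)
 *	if (error == EINTR)
 *		_canceloff();		(acts on a pending cancel)
 *	else
 *		_canceloff_nocancel();	(just drops cancelability)
 */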

/*
 * __pthread_cleanup_push: called by the macro in pthread.h that defines
 * POSIX.1c pthread_cleanup_push().  The macro allocates the cleanup
 * struct and calls this routine to push the handler onto the
 * curthread's struct.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
	void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h that defines
 * POSIX.1c pthread_cleanup_pop().  It calls this routine to pop the
 * handler off the curthread's struct and execute it if necessary.
 */
/* ARGSUSED1 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}
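
/*
 * Illustrative example (not part of this file): the POSIX macros
 * that sit on top of the two routines above.  pthread_cleanup_push()
 * and pthread_cleanup_pop() must appear paired in the same lexical
 * scope, since they expand to a block that holds the _cleanup_t.
 * A minimal sketch, assuming a hypothetical mutex mp:
 *
 *	#include <pthread.h>
 *
 *	void
 *	cleanup_handler(void *arg)
 *	{
 *		(void) pthread_mutex_unlock(arg);
 *	}
 *
 *	(void) pthread_mutex_lock(&mp);
 *	pthread_cleanup_push(cleanup_handler, &mp);
 *	... code containing cancellation points ...
 *	pthread_cleanup_pop(1);		(nonzero: run the handler now)
 */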

/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}
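
/*
 * Illustrative sketch (not part of this file): conceptually, the
 * PROLOGUE code in scalls.c does something like the following
 * before issuing a cancellation-point system call.  This is a
 * simplified assumption for exposition, not the actual macro:
 *
 *	if (nocancel) {			(cannot act on a cancel here)
 *		if (self->ul_cancel_pending && !self->ul_cancel_disabled)
 *			set_cancel_eintr_flag(self);
 *	} else if (!self->ul_cancel_disabled) {
 *		if (self->ul_cancel_pending)
 *			pthread_exit(PTHREAD_CANCELED);
 *	}
 */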

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		(void) __lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}

/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}
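
/*
 * Illustrative sketch (not part of this file): callers inside libc
 * use cancel_active() to decide whether an EINTR return means the
 * thread is about to be cancelled, in which case the operation
 * should not be silently restarted.  A simplified assumption of
 * such a caller:
 *
 *	do {
 *		error = interruptible_wait();	(hypothetical call)
 *	} while (error == EINTR && !cancel_active());
 */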