xref: /titanic_44/usr/src/lib/libc/port/threads/cancel.c (revision 643e2e74e1c00e6b3d1896a6a67dbdb7308135c3)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, send SIGCANCEL to request that the thread cancel itself.
 */
int
pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = _lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}
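
/*
 * Illustrative caller-side sketch (not part of this library): cancel
 * a worker thread and reap it with pthread_join().  The worker()
 * start routine is hypothetical.
 *
 *	pthread_t tid;
 *	void *status;
 *
 *	(void) pthread_create(&tid, NULL, worker, NULL);
 *	...
 *	if (pthread_cancel(tid) == 0 &&
 *	    pthread_join(tid, &status) == 0 &&
 *	    status == PTHREAD_CANCELED) {
 *		... the worker acted on the cancellation request ...
 *	}
 */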

/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical() and this
	 * defers SIGCANCEL until ulwp_unlock(self) when exit_critical()
	 * is called.  (self->ul_cancel_pending is set in the SIGCANCEL
	 * handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}
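
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the usual pattern of disabling cancellation around a region that
 * must not be torn down mid-way, then restoring the previous state.
 * A request that arrives while disabled is acted on at the next
 * cancellation point after the state returns to ENABLED.
 *
 *	int oldstate;
 *
 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	... update shared state; no cancellation can occur here ...
 *	(void) pthread_setcancelstate(oldstate, NULL);
 */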

/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, this call becomes
 * a cancellation point if there is a cancellation pending.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}
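
/*
 * Illustrative sketch (hypothetical caller): asynchronous cancellation
 * is only safe around async-cancel-safe code, typically a pure compute
 * loop that calls no library functions; restore the type afterwards.
 *
 *	int oldtype;
 *
 *	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 *	while (!done)			done is a hypothetical flag
 *		crunch();		async-cancel-safe work only
 *	(void) pthread_setcanceltype(oldtype, NULL);
 */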

/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a request is pending, act on
 * it by calling pthread_exit(), which takes care of calling
 * the cleanup handlers.
 */
void
pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		pthread_exit(PTHREAD_CANCELED);
}
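
/*
 * Illustrative sketch (hypothetical caller): a deferred-mode loop that
 * never blocks in a cancellation point polls for cancellation itself.
 *
 *	for (;;) {
 *		do_chunk();		hypothetical unit of work
 *		pthread_testcancel();	exits here if a request is pending
 *	}
 */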

/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from functions that want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls pthread_exit().
 * It is called from functions which are cancellation points, like cond_wait().
 */
235  */
236 void
237 _canceloff()
238 {
239 	ulwp_t *self = curthread;
240 
241 	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
242 	if (!self->ul_cancel_disabled) {
243 		if (self->ul_cancel_pending)
244 			pthread_exit(PTHREAD_CANCELED);
245 		self->ul_cancelable--;
246 		ASSERT(self->ul_cancelable >= 0);
247 	}
248 }

/*
 * Same as _canceloff() but doesn't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}
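
/*
 * Illustrative sketch of how these three routines bracket a blocking
 * operation, simplified from the pattern cond_wait() and sema_wait()
 * follow (wait_for_event() is hypothetical):
 *
 *	_cancelon();			cancellation point on entry
 *	error = wait_for_event();	the blocking operation
 *	if (error == EINTR)
 *		_canceloff();		may call pthread_exit()
 *	else
 *		_canceloff_nocancel();	just drop cancelability
 */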

/*
 * __pthread_cleanup_push: called by the macro in pthread.h that defines
 * POSIX.1c pthread_cleanup_push().  The macro allocates the cleanup
 * struct on the caller's stack and calls this routine to push the
 * handler onto curthread's cleanup list.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
	void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h that defines
 * POSIX.1c pthread_cleanup_pop().  The macro calls this routine to pop
 * the handler off curthread's cleanup list and execute it if necessary.
 */
/* ARGSUSED1 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}
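
/*
 * Illustrative sketch (hypothetical caller): the push and pop macros
 * must be paired in the same lexical scope.  Here a handler releases
 * a mutex if the thread is cancelled inside the wait; popping with a
 * nonzero argument runs the handler on the normal path as well.
 *
 *	void unlock_fn(void *mp);	hypothetical handler that calls
 *					pthread_mutex_unlock(mp)
 *
 *	(void) pthread_mutex_lock(&lock);
 *	pthread_cleanup_push(unlock_fn, &lock);
 *	while (!ready)
 *		(void) pthread_cond_wait(&cv, &lock);	cancellation point
 *	pthread_cleanup_pop(1);		pops and runs unlock_fn(&lock)
 */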

/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		__lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}
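
/*
 * Illustrative sketch of the protocol described above, simplified from
 * the library's queue-sleeping code (argument details elided):
 *
 *	set_parking_flag(self, 1);	tell the kernel we will park
 *	... final wakeup checks, release locks ...
 *	error = __lwp_park(...);	may get a spurious wakeup
 *	set_parking_flag(self, 0);	clear SC_PARK_FLG, restore
 *					SC_CANCEL_FLG if necessary
 */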

/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}
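
/*
 * Illustrative sketch (hypothetical caller): a wrapper that received
 * EINTR can consult cancel_active() to decide whether restarting the
 * interrupted operation is pointless because the thread is about to
 * act on a cancellation and exit.
 *
 *	if (error == EINTR && !cancel_active())
 *		goto restart;
 */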
418