/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"

/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, send SIGCANCEL to request that the thread cancel itself.
 */
int
pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = _lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}
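/*
 * Illustrative sketch (not part of this file): typical caller-side use
 * of pthread_cancel().  The worker() function and the variable names
 * below are hypothetical; any thread blocked at a cancellation point,
 * such as pause(), could be cancelled the same way.
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		for (;;)
 *			(void) pause();		// a cancellation point
 *		return (NULL);
 *	}
 *
 *	pthread_t tid;
 *	void *status;
 *
 *	(void) pthread_create(&tid, NULL, worker, NULL);
 *	(void) pthread_cancel(tid);	// may send SIGCANCEL, as above
 *	(void) pthread_join(tid, &status);
 *	// status == PTHREAD_CANCELED
 */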
/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical() and this
	 * defers SIGCANCEL until ulwp_unlock(self) when exit_critical()
	 * is called.  (self->ul_cancel_pending is set in the SIGCANCEL
	 * handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}

/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, then this becomes
 * a cancellation point if there is a cancellation pending.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}

/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a cancel request is pending,
 * act on it by calling pthread_exit(), which takes care of
 * calling the cleanup handlers.
 */
void
pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		pthread_exit(PTHREAD_CANCELED);
}
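/*
 * Illustrative sketch (not part of this file): the save/restore idiom
 * for pthread_setcancelstate() and a deferred-mode polling loop built
 * on pthread_testcancel().  update_shared_state() and do_work() are
 * hypothetical helpers.
 *
 *	int oldstate;
 *
 *	// keep a non-cancellation-safe region from being interrupted
 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	update_shared_state();
 *	(void) pthread_setcancelstate(oldstate, NULL);
 *
 *	// a long computation with no natural cancellation points
 *	for (;;) {
 *		do_work();
 *		pthread_testcancel();	// may call pthread_exit()
 *	}
 */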
/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from the functions which want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls pthread_exit().
 * It is called from functions which are cancellation points, like cond_wait().
 */
void
_canceloff()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * Same as _canceloff() but don't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * __pthread_cleanup_push: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_push().  The macro in pthread.h allocates the
 * cleanup struct and calls this routine to push the handler onto the
 * current thread's list of cleanup handlers.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
    void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_pop().  It calls this routine to pop the
 * handler off the current thread's list and execute it if necessary.
 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info __unused)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}
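/*
 * Illustrative sketch (not part of this file): the POSIX cleanup-handler
 * pairing that reaches __pthread_cleanup_push() and __pthread_cleanup_pop()
 * through the pthread.h macros.  unlock_it(), m, cv, and condition_met
 * are hypothetical.
 *
 *	static void
 *	unlock_it(void *arg)
 *	{
 *		(void) pthread_mutex_unlock(arg);
 *	}
 *
 *	(void) pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_it, &m);
 *	while (!condition_met)
 *		(void) pthread_cond_wait(&cv, &m);	// cancellation point
 *	pthread_cleanup_pop(1);		// pop the handler and run it
 */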
/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		(void) __lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}

/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}
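/*
 * Illustrative sketch (not part of this file, and a simplification of
 * the actual PROLOGUE/EPILOGUE macros in scalls.c): how a cancellable
 * system call wrapper might combine set_cancel_eintr_flag() and
 * cancel_active().  __blocking_syscall() is a hypothetical stand-in.
 *
 *	ulwp_t *self = curthread;
 *	int rv;
 *
 *	set_cancel_eintr_flag(self);	// mark a cancellation point
 *	rv = __blocking_syscall(...);	// returns EINTR immediately
 *					// if SC_CANCEL_FLG is set
 *	if (rv == EINTR && cancel_active())
 *		pthread_exit(PTHREAD_CANCELED);	// runs cleanup handlers
 */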