/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/cmn_err.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/contract/process_impl.h>

kthread_t *
idtot(proc_t *p, id_t lwpid)
{
        lwpdir_t *ldp;

        if ((ldp = lwp_hash_lookup(p, lwpid)) != NULL)
                return (ldp->ld_entry->le_thread);
        return (NULL);
}

/*
 * Stop an lwp of the current process
 */
int
syslwp_suspend(id_t lwpid)
{
        kthread_t *t;
        int error;
        proc_t *p = ttoproc(curthread);

        mutex_enter(&p->p_lock);
        if ((t = idtot(p, lwpid)) == NULL)
                error = ESRCH;
        else
                error = lwp_suspend(t);
        mutex_exit(&p->p_lock);
        if (error)
                return (set_errno(error));
        return (0);
}

int
syslwp_continue(id_t lwpid)
{
        kthread_t *t;
        proc_t *p = ttoproc(curthread);

        mutex_enter(&p->p_lock);
        if ((t = idtot(p, lwpid)) == NULL) {
                mutex_exit(&p->p_lock);
                return (set_errno(ESRCH));
        }
        lwp_continue(t);
        mutex_exit(&p->p_lock);
        return (0);
}

int
lwp_kill(id_t lwpid, int sig)
{
        sigqueue_t *sqp;
        kthread_t *t;
        proc_t *p = ttoproc(curthread);

        if (sig < 0 || sig >= NSIG)
                return (set_errno(EINVAL));
        if (sig != 0)
                sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
        mutex_enter(&p->p_lock);
        if ((t = idtot(p, lwpid)) == NULL) {
                mutex_exit(&p->p_lock);
                if (sig != 0)
                        kmem_free(sqp, sizeof (sigqueue_t));
                return (set_errno(ESRCH));
        }
        if (sig == 0) {
                mutex_exit(&p->p_lock);
                return (0);
        }
        sqp->sq_info.si_signo = sig;
        sqp->sq_info.si_code = SI_LWP;
        sqp->sq_info.si_pid = p->p_pid;
        sqp->sq_info.si_ctid = PRCTID(p);
        sqp->sq_info.si_zoneid = getzoneid();
        sqp->sq_info.si_uid = crgetruid(CRED());
        sigaddqa(p, t, sqp);
        mutex_exit(&p->p_lock);
        return (0);
}

/*
 * This is the specification of lwp_wait() from the _lwp_wait(2) manual page:
 *
 * The lwp_wait() function blocks the current lwp until the lwp specified
 * by 'lwpid' terminates.  If the specified lwp terminated prior to the call
 * to lwp_wait(), then lwp_wait() returns immediately.  If 'lwpid' is zero,
 * then lwp_wait() waits for any undetached lwp in the current process.
 * If 'lwpid' is not zero, then it must specify an undetached lwp in the
 * current process.  If 'departed' is not NULL, then it points to a location
 * where the id of the exited lwp is stored.
 *
 * When an lwp exits and there are one or more lwps in the process waiting
 * for this specific lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully.  Any other lwps waiting for
 * this same lwp to exit are also unblocked, however, they return from
 * lwp_wait() with the error ESRCH.  If there are no lwps in the process
 * waiting for this specific lwp to exit but there are one or more lwps
 * waiting for any lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully.
 *
 * If an lwp is waiting for any lwp to exit, it blocks until an undetached
 * lwp for which no other lwp is waiting terminates, at which time it returns
 * successfully, or until all other lwps in the process are either daemon
 * lwps or lwps waiting in lwp_wait(), in which case it returns EDEADLK.
 */
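/*
 * Illustrative note, not part of the manual page text quoted above: from
 * user level, the corresponding _lwp_wait(2) interface might be used
 * roughly as sketched here; 'target' is a hypothetical lwp id.
 *
 *	lwpid_t departed;
 *	int err = _lwp_wait(target, &departed);
 *
 * On success (err == 0), 'departed' holds the id of the lwp that exited;
 * EDEADLK indicates that all other lwps in the process are daemon lwps
 * or are themselves blocked in lwp_wait().
 */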
int
lwp_wait(id_t lwpid, id_t *departed)
{
        proc_t *p = ttoproc(curthread);
        int error = 0;
        int daemon = (curthread->t_proc_flag & TP_DAEMON)? 1 : 0;
        lwpent_t *target_lep;
        lwpdir_t *ldp;
        lwpent_t *lep;

        /*
         * lwp_wait() is not supported for the /proc agent lwp.
         */
        if (curthread == p->p_agenttp)
                return (set_errno(ENOTSUP));

        mutex_enter(&p->p_lock);
        prbarrier(p);

        curthread->t_waitfor = lwpid;
        p->p_lwpwait++;
        p->p_lwpdwait += daemon;

        if (lwpid != 0) {
                if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
                        target_lep = NULL;
                else {
                        target_lep = ldp->ld_entry;
                        target_lep->le_waiters++;
                        target_lep->le_dwaiters += daemon;
                }
        }

        while (error == 0) {
                kthread_t *t;
                id_t tid;
                int i;

                if (lwpid != 0) {
                        /*
                         * Look for a specific zombie lwp.
                         */
                        if (target_lep == NULL)
                                error = ESRCH;
                        else if ((t = target_lep->le_thread) != NULL) {
                                if (!(t->t_proc_flag & TP_TWAIT))
                                        error = EINVAL;
                        } else {
                                /*
                                 * We found the zombie we are waiting for.
                                 */
                                ASSERT(p->p_zombcnt > 0);
                                p->p_zombcnt--;
                                p->p_lwpwait--;
                                p->p_lwpdwait -= daemon;
                                curthread->t_waitfor = -1;
                                lwp_hash_out(p, lwpid);
                                mutex_exit(&p->p_lock);
                                if (departed != NULL &&
                                    copyout(&lwpid, departed, sizeof (id_t)))
                                        return (set_errno(EFAULT));
                                return (0);
                        }
                } else {
                        /*
                         * Look for any zombie lwp.
                         */
                        int some_non_daemon_will_return = 0;

                        /* for each entry in the lwp directory... */
                        ldp = p->p_lwpdir;
                        for (i = 0; i < p->p_lwpdir_sz; i++, ldp++) {

                                if ((lep = ldp->ld_entry) == NULL ||
                                    lep->le_thread != NULL)
                                        continue;

                                /*
                                 * We found a zombie lwp.  If there is some
                                 * other thread waiting specifically for the
                                 * zombie we just found, then defer to the other
                                 * waiting thread and continue searching for
                                 * another zombie.  Also check to see if there
                                 * is some non-daemon thread sleeping here in
                                 * lwp_wait() that will succeed and return when
                                 * we drop p->p_lock.  This is tested below.
                                 */
                                tid = lep->le_lwpid;
                                if (lep->le_waiters != 0) {
                                        if (lep->le_waiters - lep->le_dwaiters)
                                                some_non_daemon_will_return = 1;
                                        continue;
                                }

                                /*
                                 * We found a zombie that no one else
                                 * is specifically waiting for.
                                 */
                                ASSERT(p->p_zombcnt > 0);
                                p->p_zombcnt--;
                                p->p_lwpwait--;
                                p->p_lwpdwait -= daemon;
                                curthread->t_waitfor = -1;
                                lwp_hash_out(p, tid);
                                mutex_exit(&p->p_lock);
                                if (departed != NULL &&
                                    copyout(&tid, departed, sizeof (id_t)))
                                        return (set_errno(EFAULT));
                                return (0);
                        }

                        /*
                         * We are waiting for anyone.  If all non-daemon lwps
                         * are waiting here, and if we determined above that
                         * no non-daemon lwp will return, we have deadlock.
                         */
                        if (!some_non_daemon_will_return &&
                            p->p_lwpcnt == p->p_lwpdaemon +
                            (p->p_lwpwait - p->p_lwpdwait))
                                error = EDEADLK;
                }

                if (error == 0 && lwpid != 0) {
                        /*
                         * We are waiting for a specific non-zombie lwp.
                         * Fail if there is a deadlock loop.
                         */
                        for (;;) {
                                if (t == curthread) {
                                        error = EDEADLK;
                                        break;
                                }
                                /* who is he waiting for? */
                                if ((tid = t->t_waitfor) == -1)
                                        break;
                                if (tid == 0) {
                                        /*
                                         * The lwp we are waiting for is
                                         * waiting for anyone (transitively).
                                         * If there are no zombies right now
                                         * and if we would have deadlock due
                                         * to all non-daemon lwps waiting here,
                                         * wake up the lwp that is waiting for
                                         * anyone so it can return EDEADLK.
                                         */
                                        if (p->p_zombcnt == 0 &&
                                            p->p_lwpcnt == p->p_lwpdaemon +
                                            p->p_lwpwait - p->p_lwpdwait)
                                                cv_broadcast(&p->p_lwpexit);
                                        break;
                                }
                                if ((ldp = lwp_hash_lookup(p, tid)) == NULL ||
                                    (t = ldp->ld_entry->le_thread) == NULL)
                                        break;
                        }
                }

                if (error)
                        break;

                /*
                 * Wait for some lwp to terminate.
                 */
                if (!cv_wait_sig(&p->p_lwpexit, &p->p_lock))
                        error = EINTR;
                prbarrier(p);

                if (lwpid != 0) {
                        if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
                                target_lep = NULL;
                        else
                                target_lep = ldp->ld_entry;
                }
        }

        if (lwpid != 0 && target_lep != NULL) {
                target_lep->le_waiters--;
                target_lep->le_dwaiters -= daemon;
        }
        p->p_lwpwait--;
        p->p_lwpdwait -= daemon;
        curthread->t_waitfor = -1;
        mutex_exit(&p->p_lock);
        return (set_errno(error));
}

int
lwp_detach(id_t lwpid)
{
        kthread_t *t;
        proc_t *p = ttoproc(curthread);
        lwpdir_t *ldp;
        int error = 0;

        mutex_enter(&p->p_lock);
        prbarrier(p);
        if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
                error = ESRCH;
        else if ((t = ldp->ld_entry->le_thread) != NULL) {
                if (!(t->t_proc_flag & TP_TWAIT))
                        error = EINVAL;
                else {
                        t->t_proc_flag &= ~TP_TWAIT;
                        cv_broadcast(&p->p_lwpexit);
                }
        } else {
                ASSERT(p->p_zombcnt > 0);
                p->p_zombcnt--;
                lwp_hash_out(p, lwpid);
        }
        mutex_exit(&p->p_lock);

        if (error)
                return (set_errno(error));
        return (0);
}

/*
 * Unpark the specified lwp.
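 *
 * The target lwp, if it is blocked at all, is sleeping in lwp_park()
 * below on its t_delay_cv.  Setting t_unpark before signalling the
 * condition variable, with t_delay_lock held, ensures that the wakeup
 * is not lost if the target has not yet gone to sleep: lwp_park()
 * tests t_unpark under the same lock before and after sleeping.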
 */
static int
lwp_unpark(id_t lwpid)
{
        proc_t *p = ttoproc(curthread);
        kthread_t *t;
        int error = 0;

        mutex_enter(&p->p_lock);
        if ((t = idtot(p, lwpid)) == NULL)
                error = ESRCH;
        else {
                mutex_enter(&t->t_delay_lock);
                t->t_unpark = 1;
                cv_signal(&t->t_delay_cv);
                mutex_exit(&t->t_delay_lock);
        }
        mutex_exit(&p->p_lock);
        return (error);
}

/*
 * Sleep until we are set running by lwp_unpark() or until we are
 * interrupted by a signal or until we exhaust our timeout.
 * timeoutp is an in/out parameter.  On entry, it contains the relative
 * time until timeout.  On exit, we copyout the residual time left to it.
 */
static int
lwp_park(timespec_t *timeoutp, id_t lwpid)
{
        timespec_t rqtime;
        timespec_t rmtime;
        timespec_t now;
        timespec_t *rqtp = NULL;
        kthread_t *t = curthread;
        int timecheck = 0;
        int error = 0;
        model_t datamodel = ttoproc(t)->p_model;

        if (lwpid != 0)         /* unpark the other lwp, if any */
                (void) lwp_unpark(lwpid);

        if (timeoutp) {
                timecheck = timechanged;
                gethrestime(&now);
                if (datamodel == DATAMODEL_NATIVE) {
                        if (copyin(timeoutp, &rqtime, sizeof (timespec_t))) {
                                error = EFAULT;
                                goto out;
                        }
                } else {
                        timespec32_t timeout32;

                        if (copyin(timeoutp, &timeout32, sizeof (timeout32))) {
                                error = EFAULT;
                                goto out;
                        }
                        TIMESPEC32_TO_TIMESPEC(&rqtime, &timeout32)
                }

                if (itimerspecfix(&rqtime)) {
                        error = EINVAL;
                        goto out;
                }
                /*
                 * Convert the timespec value into absolute time.
                 */
                timespecadd(&rqtime, &now);
                rqtp = &rqtime;
        }

        (void) new_mstate(t, LMS_USER_LOCK);

        mutex_enter(&t->t_delay_lock);
        if (!schedctl_is_park())
                error = EINTR;
        while (error == 0 && t->t_unpark == 0) {
                switch (cv_waituntil_sig(&t->t_delay_cv,
                    &t->t_delay_lock, rqtp, timecheck)) {
                case 0:
                        error = EINTR;
                        break;
                case -1:
                        error = ETIME;
                        break;
                }
        }
        t->t_unpark = 0;
        mutex_exit(&t->t_delay_lock);

        if (timeoutp != NULL) {
                rmtime.tv_sec = rmtime.tv_nsec = 0;
                if (error != ETIME) {
                        gethrestime(&now);
                        if ((now.tv_sec < rqtime.tv_sec) ||
                            ((now.tv_sec == rqtime.tv_sec) &&
                            (now.tv_nsec < rqtime.tv_nsec))) {
                                rmtime = rqtime;
                                timespecsub(&rmtime, &now);
                        }
                }
                if (datamodel == DATAMODEL_NATIVE) {
                        if (copyout(&rmtime, timeoutp, sizeof (rmtime)))
                                error = EFAULT;
                } else {
                        timespec32_t rmtime32;

                        TIMESPEC_TO_TIMESPEC32(&rmtime32, &rmtime);
                        if (copyout(&rmtime32, timeoutp, sizeof (rmtime32)))
                                error = EFAULT;
                }
        }
out:
        schedctl_unpark();
        if (t->t_mstate == LMS_USER_LOCK)
                (void) new_mstate(t, LMS_SYSTEM);
        return (error);
}
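
/*
 * Illustrative sketch, not taken from this file: a user-level thread
 * library typically builds blocking primitives on top of park/unpark.
 * The waiter records itself, re-checks the wakeup condition, and only
 * then parks; the waker makes the condition true and then unparks the
 * recorded lwp.  The names __lwp_park(), __lwp_unpark() and 'waiter'
 * below are hypothetical user-level wrappers and variables.
 *
 *	(waiter)
 *	waiter = _lwp_self();
 *	while (condition is not yet true)
 *		(void) __lwp_park(NULL, 0);	(no timeout, nothing to unpark)
 *
 *	(waker)
 *	make the condition true;
 *	(void) __lwp_unpark(waiter);
 *
 * Because lwp_park() above does not sleep when t_unpark is already set,
 * an unpark issued after the waiter's condition check but before it
 * actually parks is not lost.
 */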

#define MAXLWPIDS 1024

/*
 * Unpark all of the specified lwps.
 * Do it in chunks of MAXLWPIDS to avoid allocating too much memory.
 */
static int
lwp_unpark_all(id_t *lwpidp, int nids)
{
        proc_t *p = ttoproc(curthread);
        kthread_t *t;
        int error = 0;
        id_t *lwpid;
        size_t lwpidsz;
        int n;
        int i;

        if (nids <= 0)
                return (EINVAL);

        lwpidsz = MIN(nids, MAXLWPIDS) * sizeof (id_t);
        lwpid = kmem_alloc(lwpidsz, KM_SLEEP);
        while (nids > 0) {
                n = MIN(nids, MAXLWPIDS);
                if (copyin(lwpidp, lwpid, n * sizeof (id_t))) {
                        error = EFAULT;
                        break;
                }
                mutex_enter(&p->p_lock);
                for (i = 0; i < n; i++) {
                        if ((t = idtot(p, lwpid[i])) == NULL)
                                error = ESRCH;
                        else {
                                mutex_enter(&t->t_delay_lock);
                                t->t_unpark = 1;
                                cv_signal(&t->t_delay_cv);
                                mutex_exit(&t->t_delay_lock);
                        }
                }
                mutex_exit(&p->p_lock);
                lwpidp += n;
                nids -= n;
        }
        kmem_free(lwpid, lwpidsz);
        return (error);
}

/*
 * SYS_lwp_park() system call.
 */
int
syslwp_park(int which, uintptr_t arg1, uintptr_t arg2)
{
        int error;

        switch (which) {
        case 0:
                error = lwp_park((timespec_t *)arg1, (id_t)arg2);
                break;
        case 1:
                error = lwp_unpark((id_t)arg1);
                break;
        case 2:
                error = lwp_unpark_all((id_t *)arg1, (int)arg2);
                break;
        default:
                error = EINVAL;
                break;
        }

        if (error)
                return (set_errno(error));
        return (0);
}