/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/cmn_err.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/contract/process_impl.h>

/*
 * Translate an lwpid to its kthread in process 'p'.
 * Returns NULL if no such lwp exists or the lwp is a zombie.
 */
kthread_t *
idtot(proc_t *p, id_t lwpid)
{
	lwpdir_t *ldp;

	if ((ldp = lwp_hash_lookup(p, lwpid)) != NULL)
		return (ldp->ld_entry->le_thread);
	return (NULL);
}

/*
 * Same as idtot(), but acquire and return
 * the tid hash table entry lock on success.
 * This allows lwp_unpark() to do its job without acquiring
 * p->p_lock (and thereby causing congestion problems when
 * the application calls lwp_unpark() too often).
 */
static kthread_t *
idtot_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
{
	lwpdir_t *ldp;
	kthread_t *t;

	if ((ldp = lwp_hash_lookup_and_lock(p, lwpid, mpp)) != NULL) {
		if ((t = ldp->ld_entry->le_thread) == NULL)
			mutex_exit(*mpp);
		return (t);
	}
	return (NULL);
}

/*
 * Stop an lwp of the current process
 */
int
syslwp_suspend(id_t lwpid)
{
	kthread_t *t;
	int error;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL)
		error = ESRCH;
	else
		error = lwp_suspend(t);
	mutex_exit(&p->p_lock);
	if (error)
		return (set_errno(error));
	return (0);
}

/*
 * Continue (resume) a stopped lwp of the current process.
 */
int
syslwp_continue(id_t lwpid)
{
	kthread_t *t;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		mutex_exit(&p->p_lock);
		return (set_errno(ESRCH));
	}
	lwp_continue(t);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * Send the signal 'sig' to the lwp named by 'lwpid' in the current process.
 * A 'sig' of zero sends no signal but still validates that 'lwpid' exists.
 */
int
lwp_kill(id_t lwpid, int sig)
{
	sigqueue_t *sqp;
	kthread_t *t;
	proc_t *p = ttoproc(curthread);

	if (sig < 0 || sig >= NSIG)
		return (set_errno(EINVAL));
	if (sig != 0)
		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		mutex_exit(&p->p_lock);
		if (sig != 0)
			kmem_free(sqp, sizeof (sigqueue_t));
		return (set_errno(ESRCH));
	}
	if (sig == 0) {
		mutex_exit(&p->p_lock);
		return (0);
	}
	sqp->sq_info.si_signo = sig;
	sqp->sq_info.si_code = SI_LWP;
	sqp->sq_info.si_pid = p->p_pid;
	sqp->sq_info.si_ctid = PRCTID(p);
	sqp->sq_info.si_zoneid = getzoneid();
	sqp->sq_info.si_uid = crgetruid(CRED());
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * This is the specification of lwp_wait() from the _lwp_wait(2) manual page:
 *
 * The lwp_wait() function blocks the current lwp until the lwp specified
 * by 'lwpid' terminates. If the specified lwp terminated prior to the call
 * to lwp_wait(), then lwp_wait() returns immediately. If 'lwpid' is zero,
 * then lwp_wait() waits for any undetached lwp in the current process.
 * If 'lwpid' is not zero, then it must specify an undetached lwp in the
 * current process. If 'departed' is not NULL, then it points to a location
 * where the id of the exited lwp is stored.
 *
 * When an lwp exits and there are one or more lwps in the process waiting
 * for this specific lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully. Any other lwps waiting for
 * this same lwp to exit are also unblocked, however, they return from
 * lwp_wait() with the error ESRCH. If there are no lwps in the process
 * waiting for this specific lwp to exit but there are one or more lwps
 * waiting for any lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully.
 *
 * If an lwp is waiting for any lwp to exit, it blocks until an undetached
 * lwp for which no other lwp is waiting terminates, at which time it returns
 * successfully, or until all other lwps in the process are either daemon
 * lwps or lwps waiting in lwp_wait(), in which case it returns EDEADLK.
 */
int
lwp_wait(id_t lwpid, id_t *departed)
{
	proc_t *p = ttoproc(curthread);
	int error = 0;
	int daemon = (curthread->t_proc_flag & TP_DAEMON)? 1 : 0;
	lwpent_t *target_lep;
	lwpdir_t *ldp;
	lwpent_t *lep;

	/*
	 * lwp_wait() is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	mutex_enter(&p->p_lock);
	prbarrier(p);

	curthread->t_waitfor = lwpid;
	p->p_lwpwait++;
	p->p_lwpdwait += daemon;

	if (lwpid != 0) {
		if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
			target_lep = NULL;
		else {
			target_lep = ldp->ld_entry;
			target_lep->le_waiters++;
			target_lep->le_dwaiters += daemon;
		}
	}

	while (error == 0) {
		kthread_t *t;
		id_t tid;
		int i;

		if (lwpid != 0) {
			/*
			 * Look for a specific zombie lwp.
			 */
			if (target_lep == NULL)
				error = ESRCH;
			else if ((t = target_lep->le_thread) != NULL) {
				if (!(t->t_proc_flag & TP_TWAIT))
					error = EINVAL;
			} else {
				/*
				 * We found the zombie we are waiting for.
				 */
				ASSERT(p->p_zombcnt > 0);
				p->p_zombcnt--;
				p->p_lwpwait--;
				p->p_lwpdwait -= daemon;
				curthread->t_waitfor = -1;
				lwp_hash_out(p, lwpid);
				mutex_exit(&p->p_lock);
				if (departed != NULL &&
				    copyout(&lwpid, departed, sizeof (id_t)))
					return (set_errno(EFAULT));
				return (0);
			}
		} else {
			/*
			 * Look for any zombie lwp.
			 */
			int some_non_daemon_will_return = 0;

			/* for each entry in the lwp directory... */
			ldp = p->p_lwpdir;
			for (i = 0; i < p->p_lwpdir_sz; i++, ldp++) {

				if ((lep = ldp->ld_entry) == NULL ||
				    lep->le_thread != NULL)
					continue;

				/*
				 * We found a zombie lwp. If there is some
				 * other thread waiting specifically for the
				 * zombie we just found, then defer to the other
				 * waiting thread and continue searching for
				 * another zombie. Also check to see if there
				 * is some non-daemon thread sleeping here in
				 * lwp_wait() that will succeed and return when
				 * we drop p->p_lock. This is tested below.
				 */
				tid = lep->le_lwpid;
				if (lep->le_waiters != 0) {
					if (lep->le_waiters - lep->le_dwaiters)
						some_non_daemon_will_return = 1;
					continue;
				}

				/*
				 * We found a zombie that no one else
				 * is specifically waiting for.
				 */
				ASSERT(p->p_zombcnt > 0);
				p->p_zombcnt--;
				p->p_lwpwait--;
				p->p_lwpdwait -= daemon;
				curthread->t_waitfor = -1;
				lwp_hash_out(p, tid);
				mutex_exit(&p->p_lock);
				if (departed != NULL &&
				    copyout(&tid, departed, sizeof (id_t)))
					return (set_errno(EFAULT));
				return (0);
			}

			/*
			 * We are waiting for anyone. If all non-daemon lwps
			 * are waiting here, and if we determined above that
			 * no non-daemon lwp will return, we have deadlock.
			 */
			if (!some_non_daemon_will_return &&
			    p->p_lwpcnt == p->p_lwpdaemon +
			    (p->p_lwpwait - p->p_lwpdwait))
				error = EDEADLK;
		}

		if (error == 0 && lwpid != 0) {
			/*
			 * We are waiting for a specific non-zombie lwp.
			 * Fail if there is a deadlock loop.
			 */
			for (;;) {
				if (t == curthread) {
					error = EDEADLK;
					break;
				}
				/* who is he waiting for? */
				if ((tid = t->t_waitfor) == -1)
					break;
				if (tid == 0) {
					/*
					 * The lwp we are waiting for is
					 * waiting for anyone (transitively).
					 * If there are no zombies right now
					 * and if we would have deadlock due
					 * to all non-daemon lwps waiting here,
					 * wake up the lwp that is waiting for
					 * anyone so it can return EDEADLK.
					 */
					if (p->p_zombcnt == 0 &&
					    p->p_lwpcnt == p->p_lwpdaemon +
					    p->p_lwpwait - p->p_lwpdwait)
						cv_broadcast(&p->p_lwpexit);
					break;
				}
				if ((ldp = lwp_hash_lookup(p, tid)) == NULL ||
				    (t = ldp->ld_entry->le_thread) == NULL)
					break;
			}
		}

		if (error)
			break;

		/*
		 * Wait for some lwp to terminate.
		 */
		if (!cv_wait_sig(&p->p_lwpexit, &p->p_lock))
			error = EINTR;
		prbarrier(p);

		if (lwpid != 0) {
			if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
				target_lep = NULL;
			else
				target_lep = ldp->ld_entry;
		}
	}

	if (lwpid != 0 && target_lep != NULL) {
		target_lep->le_waiters--;
		target_lep->le_dwaiters -= daemon;
	}
	p->p_lwpwait--;
	p->p_lwpdwait -= daemon;
	curthread->t_waitfor = -1;
	mutex_exit(&p->p_lock);
	return (set_errno(error));
}
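
/*
 * For illustration only (not part of the kernel build): userland code
 * normally reaches the semantics described above through the Solaris
 * threads interface thr_join(3C). Given a nonzero thread id it waits
 * for that specific thread to terminate; given zero it waits for any
 * undetached thread, mirroring the 'lwpid' argument of lwp_wait():
 *
 *	#include <thread.h>
 *
 *	thread_t departed;
 *	int err;
 *
 *	err = thr_join(tid, &departed, NULL);	wait for a specific thread
 *	err = thr_join(0, &departed, NULL);	wait for any undetached thread
 *
 * This is only a sketch of the caller's view; the exact error returns
 * (ESRCH, EINVAL, EDEADLK) follow the specification in the comment above.
 */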

/*
 * Detach the lwp named by 'lwpid' so that it is no longer waited for.
 * If the lwp has already become a zombie, reap it now; if it is alive
 * but already detached, fail with EINVAL.
 */
int
lwp_detach(id_t lwpid)
{
	kthread_t *t;
	proc_t *p = ttoproc(curthread);
	lwpdir_t *ldp;
	int error = 0;

	mutex_enter(&p->p_lock);
	prbarrier(p);
	if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
		error = ESRCH;
	else if ((t = ldp->ld_entry->le_thread) != NULL) {
		if (!(t->t_proc_flag & TP_TWAIT))
			error = EINVAL;
		else {
			t->t_proc_flag &= ~TP_TWAIT;
			cv_broadcast(&p->p_lwpexit);
		}
	} else {
		ASSERT(p->p_zombcnt > 0);
		p->p_zombcnt--;
		lwp_hash_out(p, lwpid);
	}
	mutex_exit(&p->p_lock);

	if (error)
		return (set_errno(error));
	return (0);
}

/*
 * Unpark the specified lwp.
 */
static int
lwp_unpark(id_t lwpid)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	kmutex_t *mp;
	int error = 0;

	if ((t = idtot_and_lock(p, lwpid, &mp)) == NULL) {
		error = ESRCH;
	} else {
		mutex_enter(&t->t_delay_lock);
		t->t_unpark = 1;
		cv_signal(&t->t_delay_cv);
		mutex_exit(&t->t_delay_lock);
		mutex_exit(mp);
	}
	return (error);
}

/*
 * Cancel a previous unpark for the specified lwp.
 *
 * This interface exists ONLY to support older versions of libthread, which
 * called lwp_unpark(self) to force calls to lwp_park(self) to return
 * immediately. These older libthreads required a mechanism to cancel the
 * lwp_unpark(self).
 *
 * libc does not call this interface. Instead, the sc_park flag in the
 * schedctl page is cleared to force calls to lwp_park() to return
 * immediately.
 */
static int
lwp_unpark_cancel(id_t lwpid)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	kmutex_t *mp;
	int error = 0;

	if ((t = idtot_and_lock(p, lwpid, &mp)) == NULL) {
		error = ESRCH;
	} else {
		mutex_enter(&t->t_delay_lock);
		t->t_unpark = 0;
		mutex_exit(&t->t_delay_lock);
		mutex_exit(mp);
	}
	return (error);
}

/*
 * Sleep until we are set running by lwp_unpark() or until we are
 * interrupted by a signal or until we exhaust our timeout.
 * timeoutp is an in/out parameter. On entry, it contains the relative
 * time until timeout. On exit, we copyout the residual time left to it.
 */
static int
lwp_park(timespec_t *timeoutp, id_t lwpid)
{
	timespec_t rqtime;
	timespec_t rmtime;
	timespec_t now;
	timespec_t *rqtp = NULL;
	kthread_t *t = curthread;
	int timecheck = 0;
	int error = 0;
	model_t datamodel = ttoproc(t)->p_model;

	if (lwpid != 0)		/* unpark the other lwp, if any */
		(void) lwp_unpark(lwpid);

	if (timeoutp) {
		timecheck = timechanged;
		gethrestime(&now);
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyin(timeoutp, &rqtime, sizeof (timespec_t))) {
				error = EFAULT;
				goto out;
			}
		} else {
			timespec32_t timeout32;

			if (copyin(timeoutp, &timeout32, sizeof (timeout32))) {
				error = EFAULT;
				goto out;
			}
			TIMESPEC32_TO_TIMESPEC(&rqtime, &timeout32)
		}

		if (itimerspecfix(&rqtime)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Convert the timespec value into absolute time.
		 */
		timespecadd(&rqtime, &now);
		rqtp = &rqtime;
	}

	(void) new_mstate(t, LMS_USER_LOCK);

	mutex_enter(&t->t_delay_lock);
	if (!schedctl_is_park())
		error = EINTR;
	while (error == 0 && t->t_unpark == 0) {
		switch (cv_waituntil_sig(&t->t_delay_cv,
		    &t->t_delay_lock, rqtp, timecheck)) {
		case 0:
			error = EINTR;
			break;
		case -1:
			error = ETIME;
			break;
		}
	}
	t->t_unpark = 0;
	mutex_exit(&t->t_delay_lock);

	if (timeoutp != NULL) {
		rmtime.tv_sec = rmtime.tv_nsec = 0;
		if (error != ETIME) {
			gethrestime(&now);
			if ((now.tv_sec < rqtime.tv_sec) ||
			    ((now.tv_sec == rqtime.tv_sec) &&
			    (now.tv_nsec < rqtime.tv_nsec))) {
				rmtime = rqtime;
				timespecsub(&rmtime, &now);
			}
		}
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyout(&rmtime, timeoutp, sizeof (rmtime)))
				error = EFAULT;
		} else {
			timespec32_t rmtime32;

			TIMESPEC_TO_TIMESPEC32(&rmtime32, &rmtime);
			if (copyout(&rmtime32, timeoutp, sizeof (rmtime32)))
				error = EFAULT;
		}
	}
out:
	schedctl_unpark();
	if (t->t_mstate == LMS_USER_LOCK)
		(void) new_mstate(t, LMS_SYSTEM);
	return (error);
}
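
/*
 * A worked example of the residual-timeout behavior above, for
 * illustration: if a caller passes a relative timeout of
 * { 1, 500000000 } (1.5 seconds) and is unparked 0.9 seconds later,
 * roughly { 0, 600000000 } is copied back through 'timeoutp'; if the
 * wait times out (ETIME) or the deadline has already passed, zero is
 * copied back. A caller that must park again (for example after a
 * spurious wakeup) can therefore pass the same timespec back in
 * without recomputing its deadline.
 */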

#define	MAXLWPIDS	1024

/*
 * Unpark all of the specified lwps.
 * Do it in chunks of MAXLWPIDS to avoid allocating too much memory.
 */
static int
lwp_unpark_all(id_t *lwpidp, int nids)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	kmutex_t *mp;
	int error = 0;
	id_t *lwpid;
	size_t lwpidsz;
	int n;
	int i;

	if (nids <= 0)
		return (EINVAL);

	lwpidsz = MIN(nids, MAXLWPIDS) * sizeof (id_t);
	lwpid = kmem_alloc(lwpidsz, KM_SLEEP);
	while (nids > 0) {
		n = MIN(nids, MAXLWPIDS);
		if (copyin(lwpidp, lwpid, n * sizeof (id_t))) {
			error = EFAULT;
			break;
		}
		for (i = 0; i < n; i++) {
			if ((t = idtot_and_lock(p, lwpid[i], &mp)) == NULL) {
				error = ESRCH;
			} else {
				mutex_enter(&t->t_delay_lock);
				t->t_unpark = 1;
				cv_signal(&t->t_delay_cv);
				mutex_exit(&t->t_delay_lock);
				mutex_exit(mp);
			}
		}
		lwpidp += n;
		nids -= n;
	}
	kmem_free(lwpid, lwpidsz);
	return (error);
}

/*
 * SYS_lwp_park() system call.
 */
int
syslwp_park(int which, uintptr_t arg1, uintptr_t arg2)
{
	int error;

	switch (which) {
	case 0:
		error = lwp_park((timespec_t *)arg1, (id_t)arg2);
		break;
	case 1:
		error = lwp_unpark((id_t)arg1);
		break;
	case 2:
		error = lwp_unpark_all((id_t *)arg1, (int)arg2);
		break;
	case 3:
		/*
		 * This subcode is not used by libc. It exists ONLY to
		 * support older versions of libthread which do not use
		 * the sc_park flag in the schedctl page.
		 *
		 * These versions of libthread need to be modified or emulated
		 * to change calls to syslwp_park(1, tid, 0) to
		 * syslwp_park(3, tid).
		 */
		error = lwp_unpark_cancel((id_t)arg1);
		break;
	case 4:
		/*
		 * This subcode is not used by libc. It exists ONLY to
		 * support older versions of libthread which do not use
		 * the sc_park flag in the schedctl page.
		 *
		 * These versions of libthread need to be modified or emulated
		 * to change calls to syslwp_park(0, ts, tid) to
		 * syslwp_park(4, ts, tid).
		 */
		schedctl_set_park();
		error = lwp_park((timespec_t *)arg1, (id_t)arg2);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (set_errno(error));
	return (0);
}
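
/*
 * Putting the subcodes together, for illustration only: park/unpark is
 * a one-shot wakeup protocol between lwps of the same process. A
 * sleeping lwp enters via subcode 0 (libc first sets the sc_park flag
 * in the schedctl page), and a waker uses subcode 1, as in the
 * old-libthread call shapes quoted above:
 *
 *	syslwp_park(0, ts, tid);	park, optionally unparking 'tid' first
 *	syslwp_park(1, tid, 0);		unpark the lwp named by 'tid'
 *
 * Because lwp_unpark() sets t_unpark under t_delay_lock and lwp_park()
 * tests it under the same lock, an unpark that arrives before the
 * target has actually gone to sleep still takes effect: the subsequent
 * park returns immediately rather than blocking. Subcode 2 unparks an
 * array of lwpids; subcodes 3 and 4 exist only for old-libthread
 * compatibility, as noted in the comments above.
 */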