/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/cmn_err.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/contract/process_impl.h>

kthread_t *
idtot(proc_t *p, id_t lwpid)
{
	lwpdir_t *ldp;

	if ((ldp = lwp_hash_lookup(p, lwpid)) != NULL)
		return (ldp->ld_entry->le_thread);
	return (NULL);
}

/*
 * Same as idtot(), but acquire and return
 * the tid hash table entry lock on success.
 * This allows lwp_unpark() to do its job without acquiring
 * p->p_lock (and thereby causing congestion problems when
 * the application calls lwp_unpark() too often).
 */
static kthread_t *
idtot_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
{
	lwpdir_t *ldp;
	kthread_t *t;

	if ((ldp = lwp_hash_lookup_and_lock(p, lwpid, mpp)) != NULL) {
		if ((t = ldp->ld_entry->le_thread) == NULL)
			mutex_exit(*mpp);
		return (t);
	}
	return (NULL);
}

/*
 * Stop an lwp of the current process
 */
int
syslwp_suspend(id_t lwpid)
{
	kthread_t *t;
	int error;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL)
		error = ESRCH;
	else
		error = lwp_suspend(t);
	mutex_exit(&p->p_lock);
	if (error)
		return (set_errno(error));
	return (0);
}

int
syslwp_continue(id_t lwpid)
{
	kthread_t *t;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		mutex_exit(&p->p_lock);
		return (set_errno(ESRCH));
	}
	lwp_continue(t);
	mutex_exit(&p->p_lock);
	return (0);
}

int
lwp_kill(id_t lwpid, int sig)
{
	sigqueue_t *sqp;
	kthread_t *t;
	proc_t *p = ttoproc(curthread);

	if (sig < 0 || sig >= NSIG)
		return (set_errno(EINVAL));
	if (sig != 0)
		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		mutex_exit(&p->p_lock);
		if (sig != 0)
			kmem_free(sqp, sizeof (sigqueue_t));
		return (set_errno(ESRCH));
	}
	if (sig == 0) {
		mutex_exit(&p->p_lock);
		return (0);
	}
	sqp->sq_info.si_signo = sig;
	sqp->sq_info.si_code = SI_LWP;
	sqp->sq_info.si_pid = p->p_pid;
	sqp->sq_info.si_ctid = PRCTID(p);
	sqp->sq_info.si_zoneid = getzoneid();
	sqp->sq_info.si_uid = crgetruid(CRED());
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * This is the specification of lwp_wait() from the _lwp_wait(2) manual page:
 *
 * The lwp_wait() function blocks the current lwp until the lwp specified
 * by 'lwpid' terminates. If the specified lwp terminated prior to the call
 * to lwp_wait(), then lwp_wait() returns immediately. If 'lwpid' is zero,
 * then lwp_wait() waits for any undetached lwp in the current process.
 * If 'lwpid' is not zero, then it must specify an undetached lwp in the
 * current process. If 'departed' is not NULL, then it points to a location
 * where the id of the exited lwp is stored.
 *
 * When an lwp exits and there are one or more lwps in the process waiting
 * for this specific lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully. Any other lwps waiting for
 * this same lwp to exit are also unblocked, however, they return from
 * lwp_wait() with the error ESRCH. If there are no lwps in the process
 * waiting for this specific lwp to exit but there are one or more lwps
 * waiting for any lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully.
 *
 * If an lwp is waiting for any lwp to exit, it blocks until an undetached
 * lwp for which no other lwp is waiting terminates, at which time it returns
 * successfully, or until all other lwps in the process are either daemon
 * lwps or lwps waiting in lwp_wait(), in which case it returns EDEADLK.
 */
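/*
 * A minimal user-level sketch of the semantics described above, assuming
 * the _lwp_wait(2) wrapper declared in <sys/lwp.h>; the function name
 * reap_any_lwp and the error handling shown are illustrative only and are
 * not part of this file:
 *
 *	#include <sys/lwp.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	void
 *	reap_any_lwp(void)
 *	{
 *		lwpid_t departed;
 *		int err;
 *
 *		// 0 means "wait for any undetached lwp in this process"
 *		err = _lwp_wait(0, &departed);
 *		if (err == 0)
 *			(void) printf("lwp %d exited\n", (int)departed);
 *		else if (err == EDEADLK)
 *			(void) printf("all remaining lwps are daemons "
 *			    "or are themselves waiting\n");
 *	}
 */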
int
lwp_wait(id_t lwpid, id_t *departed)
{
	proc_t *p = ttoproc(curthread);
	int error = 0;
	int daemon = (curthread->t_proc_flag & TP_DAEMON)? 1 : 0;
	lwpent_t *target_lep;
	lwpdir_t *ldp;
	lwpent_t *lep;

	/*
	 * lwp_wait() is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	mutex_enter(&p->p_lock);
	prbarrier(p);

	curthread->t_waitfor = lwpid;
	p->p_lwpwait++;
	p->p_lwpdwait += daemon;
	target_lep = NULL;

	if (lwpid != 0) {
		if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
			target_lep = NULL;
		else {
			target_lep = ldp->ld_entry;
			target_lep->le_waiters++;
			target_lep->le_dwaiters += daemon;
		}
	}

	while (error == 0) {
		kthread_t *t;
		id_t tid;
		int i;

		if (lwpid != 0) {
			/*
			 * Look for a specific zombie lwp.
			 */
			if (target_lep == NULL)
				error = ESRCH;
			else if ((t = target_lep->le_thread) != NULL) {
				if (!(t->t_proc_flag & TP_TWAIT))
					error = EINVAL;
			} else {
				/*
				 * We found the zombie we are waiting for.
				 */
				ASSERT(p->p_zombcnt > 0);
				p->p_zombcnt--;
				p->p_lwpwait--;
				p->p_lwpdwait -= daemon;
				curthread->t_waitfor = -1;
				lwp_hash_out(p, lwpid);
				mutex_exit(&p->p_lock);
				if (departed != NULL &&
				    copyout(&lwpid, departed, sizeof (id_t)))
					return (set_errno(EFAULT));
				return (0);
			}
		} else {
			/*
			 * Look for any zombie lwp.
			 */
			int some_non_daemon_will_return = 0;

			/* for each entry in the lwp directory... */
			ldp = p->p_lwpdir;
			for (i = 0; i < p->p_lwpdir_sz; i++, ldp++) {

				if ((lep = ldp->ld_entry) == NULL ||
				    lep->le_thread != NULL)
					continue;

				/*
				 * We found a zombie lwp. If there is some
				 * other thread waiting specifically for the
				 * zombie we just found, then defer to the other
				 * waiting thread and continue searching for
				 * another zombie. Also check to see if there
				 * is some non-daemon thread sleeping here in
				 * lwp_wait() that will succeed and return when
				 * we drop p->p_lock. This is tested below.
				 */
				tid = lep->le_lwpid;
				if (lep->le_waiters != 0) {
					if (lep->le_waiters - lep->le_dwaiters)
						some_non_daemon_will_return = 1;
					continue;
				}

				/*
				 * We found a zombie that no one else
				 * is specifically waiting for.
				 */
				ASSERT(p->p_zombcnt > 0);
				p->p_zombcnt--;
				p->p_lwpwait--;
				p->p_lwpdwait -= daemon;
				curthread->t_waitfor = -1;
				lwp_hash_out(p, tid);
				mutex_exit(&p->p_lock);
				if (departed != NULL &&
				    copyout(&tid, departed, sizeof (id_t)))
					return (set_errno(EFAULT));
				return (0);
			}

			/*
			 * We are waiting for anyone. If all non-daemon lwps
			 * are waiting here, and if we determined above that
			 * no non-daemon lwp will return, we have deadlock.
			 */
			if (!some_non_daemon_will_return &&
			    p->p_lwpcnt == p->p_lwpdaemon +
			    (p->p_lwpwait - p->p_lwpdwait))
				error = EDEADLK;
		}

		if (error == 0 && lwpid != 0) {
			/*
			 * We are waiting for a specific non-zombie lwp.
			 * Fail if there is a deadlock loop.
			 */
			for (;;) {
				if (t == curthread) {
					error = EDEADLK;
					break;
				}
				/* who are they waiting for? */
				if ((tid = t->t_waitfor) == -1)
					break;
				if (tid == 0) {
					/*
					 * The lwp we are waiting for is
					 * waiting for anyone (transitively).
					 * If there are no zombies right now
					 * and if we would have deadlock due
					 * to all non-daemon lwps waiting here,
					 * wake up the lwp that is waiting for
					 * anyone so it can return EDEADLK.
					 */
					if (p->p_zombcnt == 0 &&
					    p->p_lwpcnt == p->p_lwpdaemon +
					    p->p_lwpwait - p->p_lwpdwait)
						cv_broadcast(&p->p_lwpexit);
					break;
				}
				if ((ldp = lwp_hash_lookup(p, tid)) == NULL ||
				    (t = ldp->ld_entry->le_thread) == NULL)
					break;
			}
		}

		if (error)
			break;

		/*
		 * Wait for some lwp to terminate.
		 */
		if (!cv_wait_sig(&p->p_lwpexit, &p->p_lock))
			error = EINTR;
		prbarrier(p);

		if (lwpid != 0) {
			if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
				target_lep = NULL;
			else
				target_lep = ldp->ld_entry;
		}
	}

	if (lwpid != 0 && target_lep != NULL) {
		target_lep->le_waiters--;
		target_lep->le_dwaiters -= daemon;
	}
	p->p_lwpwait--;
	p->p_lwpdwait -= daemon;
	curthread->t_waitfor = -1;
	mutex_exit(&p->p_lock);
	return (set_errno(error));
}

int
lwp_detach(id_t lwpid)
{
	kthread_t *t;
	proc_t *p = ttoproc(curthread);
	lwpdir_t *ldp;
	int error = 0;

	mutex_enter(&p->p_lock);
	prbarrier(p);
	if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
		error = ESRCH;
	else if ((t = ldp->ld_entry->le_thread) != NULL) {
		if (!(t->t_proc_flag & TP_TWAIT))
			error = EINVAL;
		else {
			t->t_proc_flag &= ~TP_TWAIT;
			cv_broadcast(&p->p_lwpexit);
		}
	} else {
		ASSERT(p->p_zombcnt > 0);
		p->p_zombcnt--;
		lwp_hash_out(p, lwpid);
	}
	mutex_exit(&p->p_lock);

	if (error)
		return (set_errno(error));
	return (0);
}

/*
 * Unpark the specified lwp.
 */
static int
lwp_unpark(id_t lwpid)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	kmutex_t *mp;
	int error = 0;

	if ((t = idtot_and_lock(p, lwpid, &mp)) == NULL) {
		error = ESRCH;
	} else {
		mutex_enter(&t->t_delay_lock);
		t->t_unpark = 1;
		cv_signal(&t->t_delay_cv);
		mutex_exit(&t->t_delay_lock);
		mutex_exit(mp);
	}
	return (error);
}

/*
 * Cancel a previous unpark for the specified lwp.
 *
 * This interface exists ONLY to support older versions of libthread, which
 * called lwp_unpark(self) to force calls to lwp_park(self) to return
 * immediately. These older libthreads required a mechanism to cancel the
 * lwp_unpark(self).
 *
 * libc does not call this interface. Instead, the sc_park flag in the
 * schedctl page is cleared to force calls to lwp_park() to return
 * immediately.
 */
static int
lwp_unpark_cancel(id_t lwpid)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	kmutex_t *mp;
	int error = 0;

	if ((t = idtot_and_lock(p, lwpid, &mp)) == NULL) {
		error = ESRCH;
	} else {
		mutex_enter(&t->t_delay_lock);
		t->t_unpark = 0;
		mutex_exit(&t->t_delay_lock);
		mutex_exit(mp);
	}
	return (error);
}

/*
 * Sleep until we are set running by lwp_unpark() or until we are
 * interrupted by a signal or until we exhaust our timeout.
 * timeoutp is an in/out parameter. On entry, it contains the relative
 * time until timeout. On exit, we copyout the residual time left to it.
 */
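/*
 * A worked example of the in/out timeout behavior (illustrative numbers
 * only): if the caller passes a relative timeout of { 5, 0 } (five seconds)
 * and is unparked after roughly two seconds, a residual of approximately
 * { 3, 0 } is copied back through timeoutp; if the timeout expires instead,
 * ETIME is returned and { 0, 0 } is copied back.
 */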
static int
lwp_park(timespec_t *timeoutp, id_t lwpid)
{
	timespec_t rqtime;
	timespec_t rmtime;
	timespec_t now;
	timespec_t *rqtp = NULL;
	kthread_t *t = curthread;
	int timecheck = 0;
	int error = 0;
	model_t datamodel = ttoproc(t)->p_model;

	if (lwpid != 0)		/* unpark the other lwp, if any */
		(void) lwp_unpark(lwpid);

	if (timeoutp) {
		timecheck = timechanged;
		gethrestime(&now);
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyin(timeoutp, &rqtime, sizeof (timespec_t))) {
				error = EFAULT;
				goto out;
			}
		} else {
			timespec32_t timeout32;

			if (copyin(timeoutp, &timeout32, sizeof (timeout32))) {
				error = EFAULT;
				goto out;
			}
			TIMESPEC32_TO_TIMESPEC(&rqtime, &timeout32)
		}

		if (itimerspecfix(&rqtime)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Convert the timespec value into absolute time.
		 */
		timespecadd(&rqtime, &now);
		rqtp = &rqtime;
	}

	(void) new_mstate(t, LMS_USER_LOCK);

	mutex_enter(&t->t_delay_lock);
	if (!schedctl_is_park())
		error = EINTR;
	while (error == 0 && t->t_unpark == 0) {
		switch (cv_waituntil_sig(&t->t_delay_cv,
		    &t->t_delay_lock, rqtp, timecheck)) {
		case 0:
			error = EINTR;
			break;
		case -1:
			error = ETIME;
			break;
		}
	}
	t->t_unpark = 0;
	mutex_exit(&t->t_delay_lock);

	if (timeoutp != NULL) {
		rmtime.tv_sec = rmtime.tv_nsec = 0;
		if (error != ETIME) {
			gethrestime(&now);
			if ((now.tv_sec < rqtime.tv_sec) ||
			    ((now.tv_sec == rqtime.tv_sec) &&
			    (now.tv_nsec < rqtime.tv_nsec))) {
				rmtime = rqtime;
				timespecsub(&rmtime, &now);
			}
		}
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyout(&rmtime, timeoutp, sizeof (rmtime)))
				error = EFAULT;
		} else {
			timespec32_t rmtime32;

			TIMESPEC_TO_TIMESPEC32(&rmtime32, &rmtime);
			if (copyout(&rmtime32, timeoutp, sizeof (rmtime32)))
				error = EFAULT;
		}
	}
out:
	schedctl_unpark();
	if (t->t_mstate == LMS_USER_LOCK)
		(void) new_mstate(t, LMS_SYSTEM);
	return (error);
}

#define	MAXLWPIDS	1024

/*
 * Unpark all of the specified lwps.
 * Do it in chunks of MAXLWPIDS to avoid allocating too much memory.
 */
static int
lwp_unpark_all(id_t *lwpidp, int nids)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	kmutex_t *mp;
	int error = 0;
	id_t *lwpid;
	size_t lwpidsz;
	int n;
	int i;

	if (nids <= 0)
		return (EINVAL);

	lwpidsz = MIN(nids, MAXLWPIDS) * sizeof (id_t);
	lwpid = kmem_alloc(lwpidsz, KM_SLEEP);
	while (nids > 0) {
		n = MIN(nids, MAXLWPIDS);
		if (copyin(lwpidp, lwpid, n * sizeof (id_t))) {
			error = EFAULT;
			break;
		}
		for (i = 0; i < n; i++) {
			if ((t = idtot_and_lock(p, lwpid[i], &mp)) == NULL) {
				error = ESRCH;
			} else {
				mutex_enter(&t->t_delay_lock);
				t->t_unpark = 1;
				cv_signal(&t->t_delay_cv);
				mutex_exit(&t->t_delay_lock);
				mutex_exit(mp);
			}
		}
		lwpidp += n;
		nids -= n;
	}
	kmem_free(lwpid, lwpidsz);
	return (error);
}

/*
 * SYS_lwp_park() system call.
 */
int
syslwp_park(int which, uintptr_t arg1, uintptr_t arg2)
{
	int error;

	switch (which) {
	case 0:
		error = lwp_park((timespec_t *)arg1, (id_t)arg2);
		break;
	case 1:
		error = lwp_unpark((id_t)arg1);
		break;
	case 2:
		error = lwp_unpark_all((id_t *)arg1, (int)arg2);
		break;
	case 3:
		/*
		 * This subcode is not used by libc. It exists ONLY to
		 * support older versions of libthread which do not use
		 * the sc_park flag in the schedctl page.
		 *
		 * These versions of libthread need to be modified or emulated
		 * to change calls to syslwp_park(1, tid, 0) to
		 * syslwp_park(3, tid).
		 */
		error = lwp_unpark_cancel((id_t)arg1);
		break;
	case 4:
		/*
		 * This subcode is not used by libc. It exists ONLY to
		 * support older versions of libthread which do not use
		 * the sc_park flag in the schedctl page.
		 *
		 * These versions of libthread need to be modified or emulated
		 * to change calls to syslwp_park(0, ts, tid) to
		 * syslwp_park(4, ts, tid).
		 */
		schedctl_set_park();
		error = lwp_park((timespec_t *)arg1, (id_t)arg2);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (set_errno(error));
	return (0);
}