/* xref: /titanic_50/usr/src/uts/common/syscall/lwpsys.c (revision 1573d361e97690e67db291a1e0dc9a9b58f73fd8) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/cmn_err.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/contract/process_impl.h>

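/*
 * idtot() translates an lwpid into the corresponding kthread pointer by
 * looking it up in the process's lwp directory.  A NULL return means the
 * lwpid is not in the directory or names a zombie lwp (le_thread == NULL).
 * Every caller in this file holds p->p_lock across the lookup so the
 * directory cannot change underneath it.
 */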
kthread_t *
idtot(proc_t *p, id_t lwpid)
{
	lwpdir_t *ldp;

	if ((ldp = lwp_hash_lookup(p, lwpid)) != NULL)
		return (ldp->ld_entry->le_thread);
	return (NULL);
}

/*
 * Stop an lwp of the current process
 */
int
syslwp_suspend(id_t lwpid)
{
	kthread_t *t;
	int error;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL)
		error = ESRCH;
	else
		error = lwp_suspend(t);
	mutex_exit(&p->p_lock);
	if (error)
		return (set_errno(error));
	return (0);
}

int
syslwp_continue(id_t lwpid)
{
	kthread_t *t;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		mutex_exit(&p->p_lock);
		return (set_errno(ESRCH));
	}
	lwp_continue(t);
	mutex_exit(&p->p_lock);
	return (0);
}

int
lwp_kill(id_t lwpid, int sig)
{
	sigqueue_t *sqp;
	kthread_t *t;
	proc_t *p = ttoproc(curthread);

	if (sig < 0 || sig >= NSIG)
		return (set_errno(EINVAL));
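	/*
	 * Pre-allocate the siginfo queue element before acquiring p->p_lock;
	 * a KM_SLEEP allocation may block and should not be attempted while
	 * the process lock is held.
	 */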
	if (sig != 0)
		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		mutex_exit(&p->p_lock);
		if (sig != 0)
			kmem_free(sqp, sizeof (sigqueue_t));
		return (set_errno(ESRCH));
	}
	if (sig == 0) {
		mutex_exit(&p->p_lock);
		return (0);
	}
	sqp->sq_info.si_signo = sig;
	sqp->sq_info.si_code = SI_LWP;
	sqp->sq_info.si_pid = p->p_pid;
	sqp->sq_info.si_ctid = PRCTID(p);
	sqp->sq_info.si_zoneid = getzoneid();
	sqp->sq_info.si_uid = crgetruid(CRED());
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * This is the specification of lwp_wait() from the _lwp_wait(2) manual page:
 *
 * The lwp_wait() function blocks the current lwp until the lwp specified
 * by 'lwpid' terminates.  If the specified lwp terminated prior to the call
 * to lwp_wait(), then lwp_wait() returns immediately.  If 'lwpid' is zero,
 * then lwp_wait() waits for any undetached lwp in the current process.
 * If 'lwpid' is not zero, then it must specify an undetached lwp in the
 * current process.  If 'departed' is not NULL, then it points to a location
 * where the id of the exited lwp is stored.
 *
 * When an lwp exits and there are one or more lwps in the process waiting
 * for this specific lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully.  Any other lwps waiting for
 * this same lwp to exit are also unblocked, however, they return from
 * lwp_wait() with the error ESRCH.  If there are no lwps in the process
 * waiting for this specific lwp to exit but there are one or more lwps
 * waiting for any lwp to exit, then one of the waiting lwps is unblocked
 * and it returns from lwp_wait() successfully.
 *
 * If an lwp is waiting for any lwp to exit, it blocks until an undetached
 * lwp for which no other lwp is waiting terminates, at which time it returns
 * successfully, or until all other lwps in the process are either daemon
 * lwps or lwps waiting in lwp_wait(), in which case it returns EDEADLK.
 */
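/*
 * Illustrative userland usage (a hypothetical sketch, not part of this
 * file), assuming the historical _lwp_wait(2) wrapper that returns an
 * error number rather than setting errno:
 *
 *	lwpid_t departed;
 *	int err;
 *
 *	while ((err = _lwp_wait(0, &departed)) == 0)
 *		reaped(departed);	/- some undetached lwp terminated
 *
 *	err == EDEADLK: only daemon lwps and lwp_wait()ers remain
 *	err == EINTR:   the wait was interrupted by a signal
 *
 * (reaped() above is a made-up placeholder for whatever the caller does
 * with the departed lwpid.)
 */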
int
lwp_wait(id_t lwpid, id_t *departed)
{
	proc_t *p = ttoproc(curthread);
	int error = 0;
	int daemon = (curthread->t_proc_flag & TP_DAEMON)? 1 : 0;
	lwpent_t *target_lep;
	lwpdir_t *ldp;
	lwpent_t *lep;

	/*
	 * lwp_wait() is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	mutex_enter(&p->p_lock);
	prbarrier(p);

	curthread->t_waitfor = lwpid;
	p->p_lwpwait++;
	p->p_lwpdwait += daemon;

	if (lwpid != 0) {
		if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
			target_lep = NULL;
		else {
			target_lep = ldp->ld_entry;
			target_lep->le_waiters++;
			target_lep->le_dwaiters += daemon;
		}
	}

	while (error == 0) {
		kthread_t *t;
		id_t tid;
		int i;

		if (lwpid != 0) {
			/*
			 * Look for a specific zombie lwp.
			 */
			if (target_lep == NULL)
				error = ESRCH;
			else if ((t = target_lep->le_thread) != NULL) {
				if (!(t->t_proc_flag & TP_TWAIT))
					error = EINVAL;
			} else {
				/*
				 * We found the zombie we are waiting for.
				 */
				ASSERT(p->p_zombcnt > 0);
				p->p_zombcnt--;
				p->p_lwpwait--;
				p->p_lwpdwait -= daemon;
				curthread->t_waitfor = -1;
				lwp_hash_out(p, lwpid);
				mutex_exit(&p->p_lock);
				if (departed != NULL &&
				    copyout(&lwpid, departed, sizeof (id_t)))
					return (set_errno(EFAULT));
				return (0);
			}
		} else {
			/*
			 * Look for any zombie lwp.
			 */
			int some_non_daemon_will_return = 0;

			/* for each entry in the lwp directory... */
			ldp = p->p_lwpdir;
			for (i = 0; i < p->p_lwpdir_sz; i++, ldp++) {

				if ((lep = ldp->ld_entry) == NULL ||
				    lep->le_thread != NULL)
					continue;

				/*
				 * We found a zombie lwp.  If there is some
				 * other thread waiting specifically for the
				 * zombie we just found, then defer to the other
				 * waiting thread and continue searching for
				 * another zombie.  Also check to see if there
				 * is some non-daemon thread sleeping here in
				 * lwp_wait() that will succeed and return when
				 * we drop p->p_lock.  This is tested below.
				 */
				tid = lep->le_lwpid;
				if (lep->le_waiters != 0) {
					if (lep->le_waiters - lep->le_dwaiters)
						some_non_daemon_will_return = 1;
					continue;
				}

				/*
				 * We found a zombie that no one else
				 * is specifically waiting for.
				 */
				ASSERT(p->p_zombcnt > 0);
				p->p_zombcnt--;
				p->p_lwpwait--;
				p->p_lwpdwait -= daemon;
				curthread->t_waitfor = -1;
				lwp_hash_out(p, tid);
				mutex_exit(&p->p_lock);
				if (departed != NULL &&
				    copyout(&tid, departed, sizeof (id_t)))
					return (set_errno(EFAULT));
				return (0);
			}

			/*
			 * We are waiting for anyone.  If all non-daemon lwps
			 * are waiting here, and if we determined above that
			 * no non-daemon lwp will return, we have deadlock.
			 */
			if (!some_non_daemon_will_return &&
			    p->p_lwpcnt == p->p_lwpdaemon +
			    (p->p_lwpwait - p->p_lwpdwait))
				error = EDEADLK;
		}

		if (error == 0 && lwpid != 0) {
			/*
			 * We are waiting for a specific non-zombie lwp.
			 * Fail if there is a deadlock loop.
			 */
			for (;;) {
				if (t == curthread) {
					error = EDEADLK;
					break;
				}
				/* who is he waiting for? */
				if ((tid = t->t_waitfor) == -1)
					break;
				if (tid == 0) {
					/*
					 * The lwp we are waiting for is
					 * waiting for anyone (transitively).
					 * If there are no zombies right now
					 * and if we would have deadlock due
					 * to all non-daemon lwps waiting here,
					 * wake up the lwp that is waiting for
					 * anyone so it can return EDEADLK.
					 */
					if (p->p_zombcnt == 0 &&
					    p->p_lwpcnt == p->p_lwpdaemon +
					    p->p_lwpwait - p->p_lwpdwait)
						cv_broadcast(&p->p_lwpexit);
					break;
				}
				if ((ldp = lwp_hash_lookup(p, tid)) == NULL ||
				    (t = ldp->ld_entry->le_thread) == NULL)
					break;
			}
		}

		if (error)
			break;

		/*
		 * Wait for some lwp to terminate.
		 */
		if (!cv_wait_sig(&p->p_lwpexit, &p->p_lock))
			error = EINTR;
		prbarrier(p);

		if (lwpid != 0) {
			if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
				target_lep = NULL;
			else
				target_lep = ldp->ld_entry;
		}
	}

	if (lwpid != 0 && target_lep != NULL) {
		target_lep->le_waiters--;
		target_lep->le_dwaiters -= daemon;
	}
	p->p_lwpwait--;
	p->p_lwpdwait -= daemon;
	curthread->t_waitfor = -1;
	mutex_exit(&p->p_lock);
	return (set_errno(error));
}

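/*
 * lwp_detach() makes the lwp identified by lwpid detached, so that no other
 * lwp can wait for it.  If the lwp is still running, its TP_TWAIT flag is
 * cleared and any lwp_wait()ers are awakened; detaching an lwp that is
 * already detached fails with EINVAL.  If the lwp is already a zombie, it
 * is reaped here: the zombie count is decremented and the entry is removed
 * from the lwp directory.
 */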
int
lwp_detach(id_t lwpid)
{
	kthread_t *t;
	proc_t *p = ttoproc(curthread);
	lwpdir_t *ldp;
	int error = 0;

	mutex_enter(&p->p_lock);
	prbarrier(p);
	if ((ldp = lwp_hash_lookup(p, lwpid)) == NULL)
		error = ESRCH;
	else if ((t = ldp->ld_entry->le_thread) != NULL) {
		if (!(t->t_proc_flag & TP_TWAIT))
			error = EINVAL;
		else {
			t->t_proc_flag &= ~TP_TWAIT;
			cv_broadcast(&p->p_lwpexit);
		}
	} else {
		ASSERT(p->p_zombcnt > 0);
		p->p_zombcnt--;
		lwp_hash_out(p, lwpid);
	}
	mutex_exit(&p->p_lock);

	if (error)
		return (set_errno(error));
	return (0);
}

/*
 * Unpark the specified lwp.
 */
static int
lwp_unpark(id_t lwpid)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	int error = 0;

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL)
		error = ESRCH;
	else {
		mutex_enter(&t->t_delay_lock);
		t->t_unpark = 1;
		cv_signal(&t->t_delay_cv);
		mutex_exit(&t->t_delay_lock);
	}
	mutex_exit(&p->p_lock);
	return (error);
}

/*
 * Cancel a previous unpark for the specified lwp.
 *
 * This interface exists ONLY to support older versions of libthread, which
 * called lwp_unpark(self) to force calls to lwp_park(self) to return
 * immediately.  These older libthreads required a mechanism to cancel the
 * lwp_unpark(self).
 *
 * libc does not call this interface.  Instead, the sc_park flag in the
 * schedctl page is cleared to force calls to lwp_park() to return
 * immediately.
 */
static int
lwp_unpark_cancel(id_t lwpid)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	int error = 0;

	mutex_enter(&p->p_lock);
	if ((t = idtot(p, lwpid)) == NULL) {
		error = ESRCH;
	} else {
		mutex_enter(&t->t_delay_lock);
		t->t_unpark = 0;
		mutex_exit(&t->t_delay_lock);
	}
	mutex_exit(&p->p_lock);
	return (error);
}

/*
 * Sleep until we are set running by lwp_unpark() or until we are
 * interrupted by a signal or until we exhaust our timeout.
 * timeoutp is an in/out parameter.  On entry, it contains the relative
 * time until timeout.  On exit, we copyout the residual time left to it.
 */
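/*
 * Park/unpark handshake: lwp_unpark() sets the target's t_unpark flag and
 * signals t_delay_cv while holding t_delay_lock; lwp_park() tests t_unpark
 * under the same lock before sleeping, so an unpark that arrives before
 * the target actually parks is not lost.  In addition, if the sc_park flag
 * in the schedctl page has been cleared (schedctl_is_park() is false),
 * lwp_park() returns EINTR immediately rather than sleeping.
 */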
static int
lwp_park(timespec_t *timeoutp, id_t lwpid)
{
	timespec_t rqtime;
	timespec_t rmtime;
	timespec_t now;
	timespec_t *rqtp = NULL;
	kthread_t *t = curthread;
	int timecheck = 0;
	int error = 0;
	model_t datamodel = ttoproc(t)->p_model;

	if (lwpid != 0)		/* unpark the other lwp, if any */
		(void) lwp_unpark(lwpid);

	if (timeoutp) {
		timecheck = timechanged;
		gethrestime(&now);
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyin(timeoutp, &rqtime, sizeof (timespec_t))) {
				error = EFAULT;
				goto out;
			}
		} else {
			timespec32_t timeout32;

			if (copyin(timeoutp, &timeout32, sizeof (timeout32))) {
				error = EFAULT;
				goto out;
			}
			TIMESPEC32_TO_TIMESPEC(&rqtime, &timeout32)
		}

		if (itimerspecfix(&rqtime)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Convert the timespec value into absolute time.
		 */
		timespecadd(&rqtime, &now);
		rqtp = &rqtime;
	}

	(void) new_mstate(t, LMS_USER_LOCK);

	mutex_enter(&t->t_delay_lock);
	if (!schedctl_is_park())
		error = EINTR;
	while (error == 0 && t->t_unpark == 0) {
		switch (cv_waituntil_sig(&t->t_delay_cv,
		    &t->t_delay_lock, rqtp, timecheck)) {
		case 0:
			error = EINTR;
			break;
		case -1:
			error = ETIME;
			break;
		}
	}
	t->t_unpark = 0;
	mutex_exit(&t->t_delay_lock);

	if (timeoutp != NULL) {
		rmtime.tv_sec = rmtime.tv_nsec = 0;
		if (error != ETIME) {
			gethrestime(&now);
			if ((now.tv_sec < rqtime.tv_sec) ||
			    ((now.tv_sec == rqtime.tv_sec) &&
			    (now.tv_nsec < rqtime.tv_nsec))) {
				rmtime = rqtime;
				timespecsub(&rmtime, &now);
			}
		}
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyout(&rmtime, timeoutp, sizeof (rmtime)))
				error = EFAULT;
		} else {
			timespec32_t rmtime32;

			TIMESPEC_TO_TIMESPEC32(&rmtime32, &rmtime);
			if (copyout(&rmtime32, timeoutp, sizeof (rmtime32)))
				error = EFAULT;
		}
	}
out:
	schedctl_unpark();
	if (t->t_mstate == LMS_USER_LOCK)
		(void) new_mstate(t, LMS_SYSTEM);
	return (error);
}

#define	MAXLWPIDS	1024

/*
 * Unpark all of the specified lwps.
 * Do it in chunks of MAXLWPIDS to avoid allocating too much memory.
 */
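/*
 * Note on error semantics: an lwpid that does not name an lwp in this
 * process yields ESRCH, but the scan continues and the remaining ids are
 * still unparked; the error is remembered and returned once the whole
 * list has been processed.  A copyin() failure terminates the loop with
 * EFAULT.
 */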
static int
lwp_unpark_all(id_t *lwpidp, int nids)
{
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	int error = 0;
	id_t *lwpid;
	size_t lwpidsz;
	int n;
	int i;

	if (nids <= 0)
		return (EINVAL);

	lwpidsz = MIN(nids, MAXLWPIDS) * sizeof (id_t);
	lwpid = kmem_alloc(lwpidsz, KM_SLEEP);
	while (nids > 0) {
		n = MIN(nids, MAXLWPIDS);
		if (copyin(lwpidp, lwpid, n * sizeof (id_t))) {
			error = EFAULT;
			break;
		}
		mutex_enter(&p->p_lock);
		for (i = 0; i < n; i++) {
			if ((t = idtot(p, lwpid[i])) == NULL)
				error = ESRCH;
			else {
				mutex_enter(&t->t_delay_lock);
				t->t_unpark = 1;
				cv_signal(&t->t_delay_cv);
				mutex_exit(&t->t_delay_lock);
			}
		}
		mutex_exit(&p->p_lock);
		lwpidp += n;
		nids -= n;
	}
	kmem_free(lwpid, lwpidsz);
	return (error);
}

/*
 * SYS_lwp_park() system call.
 */
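/*
 * Subcode dispatch:
 *	0 - park the calling lwp; arg1 is an optional timeout, arg2 is an
 *	    lwpid to unpark first (or 0)
 *	1 - unpark the lwp named by arg1
 *	2 - unpark the array of arg2 lwpids pointed to by arg1
 *	3 - cancel a pending unpark for arg1 (legacy libthread only)
 *	4 - set the schedctl park flag, then park as for subcode 0
 *	    (legacy libthread only)
 */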
int
syslwp_park(int which, uintptr_t arg1, uintptr_t arg2)
{
	int error;

	switch (which) {
	case 0:
		error = lwp_park((timespec_t *)arg1, (id_t)arg2);
		break;
	case 1:
		error = lwp_unpark((id_t)arg1);
		break;
	case 2:
		error = lwp_unpark_all((id_t *)arg1, (int)arg2);
		break;
	case 3:
		/*
		 * This subcode is not used by libc.  It exists ONLY to
		 * support older versions of libthread which do not use
		 * the sc_park flag in the schedctl page.
		 *
		 * These versions of libthread need to be modified or emulated
		 * to change calls to syslwp_park(1, tid, 0) to
		 * syslwp_park(3, tid).
		 */
		error = lwp_unpark_cancel((id_t)arg1);
		break;
	case 4:
		/*
		 * This subcode is not used by libc.  It exists ONLY to
		 * support older versions of libthread which do not use
		 * the sc_park flag in the schedctl page.
		 *
		 * These versions of libthread need to be modified or emulated
		 * to change calls to syslwp_park(0, ts, tid) to
		 * syslwp_park(4, ts, tid).
		 */
		schedctl_set_park();
		error = lwp_park((timespec_t *)arg1, (id_t)arg2);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (set_errno(error));
	return (0);
}
619