/* xref: /freebsd/sys/kern/kern_thr.c (revision 4a5216a6dc0c3ce4cf5f2d3ee8af0c3ff3402c4f) */
/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

#ifdef COMPAT_IA32

extern struct sysentvec ia32_freebsd_sysvec;

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
	int error;

	if (curproc->p_sysent != &ia32_freebsd_sysvec)
		error = suword(addr, lwpid);
	else
		error = suword32(addr, lwpid);
	return (error);
}

#else
#define suword_lwpid	suword
#endif

extern int max_threads_per_proc;

static int create_thread(struct thread *td, mcontext_t *ctx,
			 void (*start_func)(void *), void *arg,
			 char *stack_base, size_t stack_size,
			 char *tls_base,
			 long *child_tid, long *parent_tid,
			 int flags, struct rtprio *rtp);

/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	ucontext_t ctx;
	int error;

	if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
		return (error);

	error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
		NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
	return (error);
}
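
/*
 * Example (not part of the original source): a minimal userland sketch of
 * the legacy thr_create(2) interface, assuming the declaration
 * int thr_create(ucontext_t *ctx, long *id, int flags) from <sys/thr.h>,
 * a hypothetical entry point thread_entry() and stack size STACKSZ:
 *
 *	ucontext_t uc;
 *	long tid;
 *
 *	getcontext(&uc);
 *	uc.uc_stack.ss_sp = malloc(STACKSZ);
 *	uc.uc_stack.ss_size = STACKSZ;
 *	makecontext(&uc, thread_entry, 0);
 *	if (thr_create(&uc, &tid, 0) != 0)
 *		err(1, "thr_create");
 *
 * libthr normally uses thr_new(2) below instead; this legacy path only
 * consumes the mcontext embedded in the supplied ucontext_t.
 */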

int
thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < 0 || uap->param_size > sizeof(param))
		return (EINVAL);
	bzero(&param, sizeof(param));
	if ((error = copyin(uap->param, &param, uap->param_size)))
		return (error);
	return (kern_thr_new(td, &param));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
	struct rtprio rtp, *rtpp;
	int error;

	rtpp = NULL;
	if (param->rtp != 0) {
		error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
		if (error)
			return (error);
		rtpp = &rtp;
	}
	error = create_thread(td, NULL, param->start_func, param->arg,
		param->stack_base, param->stack_size, param->tls_base,
		param->child_tid, param->parent_tid, param->flags,
		rtpp);
	return (error);
}
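
/*
 * Example (not part of the original source): a hedged sketch of how
 * userland might invoke thr_new(2), assuming the struct thr_param layout
 * from <sys/thr.h> and a caller-provided stack, TLS area and entry point:
 *
 *	static long child_tid, parent_tid;
 *	struct thr_param p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.start_func = thread_entry;		// void thread_entry(void *)
 *	p.arg = NULL;
 *	p.stack_base = stack;			// e.g. an mmap(2)ed region
 *	p.stack_size = stacksz;
 *	p.tls_base = tls;			// machine-dependent TCB
 *	p.child_tid = &child_tid;
 *	p.parent_tid = &parent_tid;
 *	if (thr_new(&p, sizeof(p)) != 0)
 *		err(1, "thr_new");
 *
 * Because thr_new() zeroes the structure and copies in only param_size
 * bytes, an older, shorter thr_param remains acceptable; rtp may be left
 * NULL so the new thread inherits the parent's scheduling class.
 */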

static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	/* This check is racy, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc)
		return (EPROCLIM);

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

	/* Initialize our td */
	newtd = thread_alloc();
	if (newtd == NULL)
		return (ENOMEM);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * The tid is copied out to two places, one for the child and one
	 * for the parent, because pthread can create a detached thread:
	 * if the parent wants to access the child's tid safely, it has to
	 * provide its own storage, since the child may exit quickly and
	 * have its memory freed before the parent can read it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		return (EFAULT);
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	cpu_set_upcall(newtd, td);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Set up the user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	newtd->td_sigmask = td->td_sigmask;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);
	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (error);
}

int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}

int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	PROC_LOCK(p);
	sigqueue_flush(&td->td_sigqueue);
	PROC_SLOCK(p);

	/*
	 * If this is not the last thread in the process, exit just this
	 * thread.  Otherwise fall through: shutting down the last thread
	 * will actually call exit() in the trampoline when it returns.
	 */
	if (p->p_numthreads != 1) {
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	return (0);
}
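
/*
 * Example (not part of the original source): the userland counterpart of
 * the uap->state handshake above.  A hedged sketch, not libthr's actual
 * code, assuming `state' is the long whose address the exiting thread
 * passed to thr_exit() and that _umtx_op(2) with UMTX_OP_WAIT is used:
 *
 *	// Wait until the kernel has stored 1 into the state word and
 *	// issued the wake; only then is the dead thread's stack reusable.
 *	while (state == 0)		// re-check: the wait can be interrupted
 *		_umtx_op(&state, UMTX_OP_WAIT, 0, NULL, NULL);
 *	munmap(stack_base, stack_size);	// now safe to free the stack
 */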

int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	error = 0;
	PROC_LOCK(p);
	if (uap->id == -1) {
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdsignal(p, ttd, uap->sig, NULL);
				}
			}
		}
	} else {
		if (uap->id != td->td_tid)
			ttd = thread_find(p, uap->id);
		else
			ttd = td;
		if (ttd == NULL)
			error = ESRCH;
		else if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdsignal(p, ttd, uap->sig, NULL);
	}
	PROC_UNLOCK(p);
	return (error);
}
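
/*
 * Example (not part of the original source): a short userland sketch of
 * thr_kill(2), assuming `tid' holds another thread's id:
 *
 *	thr_kill(-1, SIGUSR1);		// signal every other thread in the process
 *	if (thr_kill(tid, 0) == -1 && errno == ESRCH)
 *		;			// thread `tid' no longer exists
 *
 * With id == -1 the signal is delivered to all threads except the caller,
 * and sig == 0 performs only the target-existence check, much like
 * kill(2) with signal 0.
 */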

int
thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
	struct thread *ttd;
	struct proc *p;
	int error;

	AUDIT_ARG(signum, uap->sig);

	if (uap->pid == td->td_proc->p_pid) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else if ((p = pfind(uap->pid)) == NULL) {
		return (ESRCH);
	}
	AUDIT_ARG(process, p);

	error = p_cansignal(td, p, uap->sig);
	if (error == 0) {
		if (uap->id == -1) {
			if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
				error = EINVAL;
			} else {
				error = ESRCH;
				FOREACH_THREAD_IN_PROC(p, ttd) {
					if (ttd != td) {
						error = 0;
						if (uap->sig == 0)
							break;
						tdsignal(p, ttd, uap->sig, NULL);
					}
				}
			}
		} else {
			if (uap->id != td->td_tid)
				ttd = thread_find(p, uap->id);
			else
				ttd = td;
			if (ttd == NULL)
				error = ESRCH;
			else if (uap->sig == 0)
				;
			else if (!_SIG_VALID(uap->sig))
				error = EINVAL;
			else
				tdsignal(p, ttd, uap->sig, NULL);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}

int
thr_suspend(struct thread *td, struct thr_suspend_args *uap)
	/* const struct timespec *timeout */
{
	struct timespec ts, *tsp;
	int error;

	error = 0;
	tsp = NULL;
	if (uap->timeout != NULL) {
		error = copyin((const void *)uap->timeout, (void *)&ts,
		    sizeof(struct timespec));
		if (error != 0)
			return (error);
		tsp = &ts;
	}

	return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
	struct timeval tv;
	int error = 0, hz = 0;

	if (tsp != NULL) {
		if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			return (ETIMEDOUT);
		TIMESPEC_TO_TIMEVAL(&tv, tsp);
		hz = tvtohz(&tv);
	}

	if (td->td_pflags & TDP_WAKEUP) {
		td->td_pflags &= ~TDP_WAKEUP;
		return (0);
	}

	PROC_LOCK(td->td_proc);
	if ((td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &td->td_proc->p_mtx, PCATCH, "lthr",
		    hz);
	if (td->td_flags & TDF_THRWAKEUP) {
		thread_lock(td);
		td->td_flags &= ~TDF_THRWAKEUP;
		thread_unlock(td);
		PROC_UNLOCK(td->td_proc);
		return (0);
	}
	PROC_UNLOCK(td->td_proc);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (hz != 0)
			error = EINTR;
	}
	return (error);
}

int
thr_wake(struct thread *td, struct thr_wake_args *uap)
	/* long id */
{
	struct proc *p;
	struct thread *ttd;

	if (uap->id == td->td_tid) {
		td->td_pflags |= TDP_WAKEUP;
		return (0);
	}

	p = td->td_proc;
	PROC_LOCK(p);
	ttd = thread_find(p, uap->id);
	if (ttd == NULL) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	thread_lock(ttd);
	ttd->td_flags |= TDF_THRWAKEUP;
	thread_unlock(ttd);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}
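
/*
 * Example (not part of the original source): thr_suspend(2) and thr_wake(2)
 * form a simple park/unpark pair.  A hedged sketch, assuming `tid' was
 * obtained with thr_self() in the sleeping thread:
 *
 *	// Sleeping thread: park for at most one second.
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	if (thr_suspend(&ts) == -1)
 *		;			// errno is ETIMEDOUT or EINTR
 *
 *	// Another thread: unpark it.  The wakeup is latched in
 *	// TDF_THRWAKEUP (or TDP_WAKEUP for a self-wake), so it is not
 *	// lost if it arrives before the target calls thr_suspend().
 *	thr_wake(tid);
 */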

int
thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
	struct proc *p = td->td_proc;
	char name[MAXCOMLEN + 1];
	struct thread *ttd;
	int error;

	error = 0;
	name[0] = '\0';
	if (uap->name != NULL) {
		error = copyinstr(uap->name, name, sizeof(name),
			NULL);
		if (error)
			return (error);
	}
	PROC_LOCK(p);
	if (uap->id == td->td_tid)
		ttd = td;
	else
		ttd = thread_find(p, uap->id);
	if (ttd != NULL)
		strcpy(ttd->td_name, name);
	else
		error = ESRCH;
	PROC_UNLOCK(p);
	return (error);
}