xref: /freebsd/sys/kern/kern_thr.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

#ifdef COMPAT_IA32

extern struct sysentvec ia32_freebsd_sysvec;

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
	int error;

	if (curproc->p_sysent != &ia32_freebsd_sysvec)
		error = suword(addr, lwpid);
	else
		error = suword32(addr, lwpid);
	return (error);
}

#else
#define suword_lwpid	suword
#endif

extern int max_threads_per_proc;

static int create_thread(struct thread *td, mcontext_t *ctx,
			 void (*start_func)(void *), void *arg,
			 char *stack_base, size_t stack_size,
			 char *tls_base,
			 long *child_tid, long *parent_tid,
			 int flags, struct rtprio *rtp);

/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	ucontext_t ctx;
	int error;

	if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
		return (error);

	error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
		NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
	return (error);
}

int
thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < 0 || uap->param_size > sizeof(param))
		return (EINVAL);
	bzero(&param, sizeof(param));
	if ((error = copyin(uap->param, &param, uap->param_size)))
		return (error);
	return (kern_thr_new(td, &param));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
	struct rtprio rtp, *rtpp;
	int error;

	rtpp = NULL;
	if (param->rtp != 0) {
		error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
		if (error)
			return (error);
		rtpp = &rtp;
	}
	error = create_thread(td, NULL, param->start_func, param->arg,
		param->stack_base, param->stack_size, param->tls_base,
		param->child_tid, param->parent_tid, param->flags,
		rtpp);
	return (error);
}

static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;

	/* This unlocked check is racy, but the race is harmless and cheap. */
	if (p->p_numthreads >= max_threads_per_proc)
		return (EPROCLIM);

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only privileged users may set a realtime policy. */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

	/* Initialize our td */
	newtd = thread_alloc();
	if (newtd == NULL)
		return (ENOMEM);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * The tid is copied out to two places, one for the child and
	 * one for the parent, because pthreads may create a detached
	 * thread: if the parent wants to access the child's tid safely,
	 * it has to supply its own storage, since the child may exit
	 * quickly and have its memory freed before the parent can look
	 * at it.  (See the userland sketch following this function.)
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		return (EFAULT);
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	cpu_set_upcall(newtd, td);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Setup user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	newtd->td_sigmask = td->td_sigmask;
	PROC_SLOCK(p);
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	/* if ((flags & THR_SUSPENDED) == 0) */
		sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (error);
}
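
/*
 * An illustrative, hypothetical userland caller of thr_new(2); it is not
 * part of this file and is not how libthr does it.  It assumes the
 * thr_param layout declared in <sys/thr.h> at this revision and omits
 * error handling and TLS setup.  Two details it is meant to show: because
 * kern_thr_new() zeroes the structure before the partial copyin, a caller
 * may pass the sizeof() of an older, shorter thr_param; and parent_tid
 * points at creator-owned storage (ptid) that stays valid even if the new
 * thread exits before spawn() looks at it, which is why the tid is copied
 * out to two places above.
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *	#include <sys/thr.h>
 *	#include <string.h>
 *
 *	#define	STACKSZ	(64 * 1024)
 *
 *	static long ctid, ptid;
 *
 *	static void
 *	entry(void *arg)
 *	{
 *		thr_exit(NULL);		// a raw thr_new() thread must not return
 *	}
 *
 *	static int
 *	spawn(void)
 *	{
 *		struct thr_param p;
 *
 *		memset(&p, 0, sizeof(p));
 *		p.start_func = entry;
 *		p.arg = NULL;
 *		p.stack_base = mmap(NULL, STACKSZ, PROT_READ | PROT_WRITE,
 *		    MAP_ANON, -1, 0);
 *		p.stack_size = STACKSZ;
 *		p.child_tid = &ctid;	// storage the child can see
 *		p.parent_tid = &ptid;	// storage owned by the creator
 *		return (thr_new(&p, sizeof(p)));
 *	}
 */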

int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}

int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * Signal userland that it can free the stack.  (See the sketch
	 * following this function.)
	 */
	if (uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX);
	}

	PROC_LOCK(p);
	sigqueue_flush(&td->td_sigqueue);
	PROC_SLOCK(p);

	/*
	 * If this is not the last thread in the process, exit just this
	 * thread.  The last thread falls through and returns instead;
	 * the trampoline it returns to will then actually call exit()
	 * and shut the process down.
	 */
	if (p->p_numthreads != 1) {
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	return (0);
}
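
/*
 * An illustrative sketch of the userland side of the state/umtx protocol
 * above; hypothetical names, not the libthr implementation.  The exiting
 * thread hands thr_exit(2) the address of a long that was initialized to
 * 0.  A thread that wants to reclaim the stack sleeps on that address
 * with _umtx_op(2)/UMTX_OP_WAIT; once the kernel has stored 1 and woken
 * the address, the exiting thread can no longer be running on its stack,
 * so the stack may be unmapped or reused.
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *	#include <sys/thr.h>
 *	#include <sys/umtx.h>
 *
 *	static volatile long terminated;	// 0 until the thread is gone
 *
 *	// In the exiting thread:
 *	static void
 *	leave(void)
 *	{
 *		thr_exit((long *)&terminated);
 *	}
 *
 *	// In the reaping thread:
 *	static void
 *	reap(void *stack_base, size_t stack_size)
 *	{
 *		while (terminated == 0)
 *			_umtx_op((void *)&terminated, UMTX_OP_WAIT, 0,
 *			    NULL, NULL);
 *		munmap(stack_base, stack_size);
 *	}
 */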

int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	error = 0;
	PROC_LOCK(p);
	if (uap->id == -1) {
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdsignal(p, ttd, uap->sig, NULL);
				}
			}
		}
	} else {
		if (uap->id != td->td_tid)
			ttd = thread_find(p, uap->id);
		else
			ttd = td;
		if (ttd == NULL)
			error = ESRCH;
		else if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdsignal(p, ttd, uap->sig, NULL);
	}
	PROC_UNLOCK(p);
	return (error);
}
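
/*
 * An illustrative sketch of the id == -1 convention handled above;
 * hypothetical usage, not taken from libthr.  An id of -1 broadcasts the
 * signal to every thread in the process except the caller, and sig == 0
 * sends nothing but reports (via ESRCH) whether any such thread exists.
 *
 *	#include <sys/thr.h>
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void
 *	poke_siblings(void)
 *	{
 *		thr_kill(-1, SIGUSR1);		// signal all other threads
 *		if (thr_kill(-1, 0) == -1 && errno == ESRCH)
 *			printf("caller is the only thread\n");
 *	}
 */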

int
thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
	struct thread *ttd;
	struct proc *p;
	int error;

	AUDIT_ARG(signum, uap->sig);

	if (uap->pid == td->td_proc->p_pid) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else if ((p = pfind(uap->pid)) == NULL) {
		return (ESRCH);
	}
	AUDIT_ARG(process, p);

	error = p_cansignal(td, p, uap->sig);
	if (error == 0) {
		if (uap->id == -1) {
			if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
				error = EINVAL;
			} else {
				error = ESRCH;
				FOREACH_THREAD_IN_PROC(p, ttd) {
					if (ttd != td) {
						error = 0;
						if (uap->sig == 0)
							break;
						tdsignal(p, ttd, uap->sig, NULL);
					}
				}
			}
		} else {
			if (uap->id != td->td_tid)
				ttd = thread_find(p, uap->id);
			else
				ttd = td;
			if (ttd == NULL)
				error = ESRCH;
			else if (uap->sig == 0)
				;
			else if (!_SIG_VALID(uap->sig))
				error = EINVAL;
			else
				tdsignal(p, ttd, uap->sig, NULL);
		}
	}
	PROC_UNLOCK(p);
	return (error);
}

int
thr_suspend(struct thread *td, struct thr_suspend_args *uap)
	/* const struct timespec *timeout */
{
	struct timespec ts, *tsp;
	int error;

	error = 0;
	tsp = NULL;
	if (uap->timeout != NULL) {
		error = copyin((const void *)uap->timeout, (void *)&ts,
		    sizeof(struct timespec));
		if (error != 0)
			return (error);
		tsp = &ts;
	}

	return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
	struct timeval tv;
	int error = 0, hz = 0;

	if (tsp != NULL) {
		if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			return (ETIMEDOUT);
		TIMESPEC_TO_TIMEVAL(&tv, tsp);
		hz = tvtohz(&tv);
	}

	if (td->td_pflags & TDP_WAKEUP) {
		td->td_pflags &= ~TDP_WAKEUP;
		return (0);
	}

	PROC_LOCK(td->td_proc);
	if ((td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &td->td_proc->p_mtx, PCATCH, "lthr",
		    hz);
	if (td->td_flags & TDF_THRWAKEUP) {
		thread_lock(td);
		td->td_flags &= ~TDF_THRWAKEUP;
		thread_unlock(td);
		PROC_UNLOCK(td->td_proc);
		return (0);
	}
	PROC_UNLOCK(td->td_proc);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (hz != 0)
			error = EINTR;
	}
	return (error);
}

int
thr_wake(struct thread *td, struct thr_wake_args *uap)
	/* long id */
{
	struct proc *p;
	struct thread *ttd;

	if (uap->id == td->td_tid) {
		td->td_pflags |= TDP_WAKEUP;
		return (0);
	}

	p = td->td_proc;
	PROC_LOCK(p);
	ttd = thread_find(p, uap->id);
	if (ttd == NULL) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	thread_lock(ttd);
	ttd->td_flags |= TDF_THRWAKEUP;
	thread_unlock(ttd);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}
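
/*
 * An illustrative sketch of pairing thr_suspend(2) with thr_wake(2);
 * hypothetical helpers, not the libthr idiom.  A wake that arrives before
 * the suspend is not lost: a peer's thr_wake() leaves TDF_THRWAKEUP set,
 * so the next thr_suspend() returns immediately, and a thread can likewise
 * pre-arm its own next suspend (the TDP_WAKEUP case above) by calling
 * thr_wake() on its own tid, for example from a signal handler.
 *
 *	#include <sys/thr.h>
 *	#include <errno.h>
 *
 *	// Parking side: publish our tid where the waker can find it, then
 *	// sleep until woken, retrying if a signal interrupts the sleep.
 *	static void
 *	park(volatile long *mailbox)
 *	{
 *		thr_self((long *)mailbox);
 *		while (thr_suspend(NULL) == -1 && errno == EINTR)
 *			continue;
 *	}
 *
 *	// Waking side:
 *	static void
 *	unpark(volatile long *mailbox)
 *	{
 *		thr_wake(*mailbox);
 *	}
 */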

int
thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
	struct proc *p = td->td_proc;
	char name[MAXCOMLEN + 1];
	struct thread *ttd;
	int error;

	error = 0;
	name[0] = '\0';
	if (uap->name != NULL) {
		error = copyinstr(uap->name, name, sizeof(name),
			NULL);
		if (error)
			return (error);
	}
	PROC_LOCK(p);
	if (uap->id == td->td_tid)
		ttd = td;
	else
		ttd = thread_find(p, uap->id);
	if (ttd != NULL)
		strcpy(ttd->td_name, name);
	else
		error = ESRCH;
	PROC_UNLOCK(p);
	return (error);
}