xref: /freebsd/lib/libthr/thread/thr_create.c (revision cab6a39d7b343596a5823e65c0f7b426551ec22d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Daniel M. Eischen <deischen@gdeb.com>
 * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <sys/types.h>
#include <sys/rtprio.h>
#include <sys/signalvar.h>
#include <errno.h>
#include <link.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "libc_private.h"
#include "thr_private.h"

static int  create_stack(struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread);

__weak_reference(_pthread_create, pthread_create);

int
_pthread_create(pthread_t * __restrict thread,
    const pthread_attr_t * __restrict attr, void *(*start_routine) (void *),
    void * __restrict arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others that they now need locking to protect
	 * their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		_thr_setthreaded(1);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

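	/*
	 * No kernel thread exists yet; thr_new() will store the real tid
	 * via the child_tid/parent_tid pointers set up below.
	 */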
	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Check whether the thread should be created suspended: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return the thread pointer early so the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
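	/* Fill in the thr_new(2) parameters for the kernel. */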
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
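	/* The kernel stores the new thread's tid at both addresses. */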
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
			&sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
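		/*
		 * Block (almost) all signals in the parent so that the new
		 * thread inherits a fully blocked signal mask; the parent's
		 * original mask, minus SIGCANCEL, is saved in
		 * new_thread->sigmask and restored by thread_start().
		 * SIGTRAP is left unblocked, presumably for debuggers.
		 */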
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into the well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

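	/*
	 * If thr_new() failed, mark the new thread dead and detached
	 * and let the garbage collector reclaim its resources.
	 */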
	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
				TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				/* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}

static int
create_stack(struct pthread_attr *pattr)
{
	int ret;

	/* Check if a stack was specified in the thread attributes: */
	if ((pattr->stackaddr_attr) != NULL) {
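		/*
		 * A user-supplied stack gets no guard page and is flagged
		 * as user-owned so the library will not free it.
		 */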
		pattr->guardsize_attr = 0;
		pattr->flags |= THR_STACK_USER;
		ret = 0;
	} else
		ret = _thr_stack_alloc(pattr);
	return (ret);
}

static void
thread_start(struct pthread *curthread)
{
	sigset_t set;

	if (curthread->attr.suspend == THR_CREATE_SUSPENDED)
		set = curthread->sigmask;
	_thr_signal_block_setup(curthread);

	/*
	 * This is used as a serialization point: it allows the parent to
	 * report the 'new thread' event to the debugger, or to tweak the
	 * new thread's attributes, before the new thread does any real
	 * work.
	 */
	THR_LOCK(curthread);
	THR_UNLOCK(curthread);

	if (curthread->force_exit)
		_pthread_exit(PTHREAD_CANCELED);

	if (curthread->attr.suspend == THR_CREATE_SUSPENDED) {
#if 0
		/* Done in THR_UNLOCK() */
		_thr_ast(curthread);
#endif

		/*
		 * The parent thread has stored the signal mask for us;
		 * restore it now.
		 */
		__sys_sigprocmask(SIG_SETMASK, &set, NULL);
	}

#ifdef _PTHREAD_FORCED_UNWIND
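	/* Record the stack end as the limit for forced unwinding. */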
	curthread->unwind_stackend = (char *)curthread->attr.stackaddr_attr +
		curthread->attr.stacksize_attr;
#endif

	/* Run the current thread's start routine with argument: */
	_pthread_exit(curthread->start_routine(curthread->arg));

	/* This point should never be reached. */
	PANIC("Thread has resumed after exit");
}
297