xref: /freebsd/sys/compat/linuxkpi/common/src/linux_current.c (revision fe75646a0234a261c0013bf1840fdac4acaf0cec)
/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef __amd64__
#define	DEV_APIC
#elif defined(__i386__)
#include "opt_apic.h"
#endif

#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>

#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#if defined(__aarch64__) || defined(__arm__) || defined(__amd64__) ||	\
    defined(__i386__)
#include <machine/fpu.h>
#endif

#ifdef DEV_APIC
extern u_int first_msi_irq, num_msi_irqs;
#endif

static eventhandler_tag linuxkpi_thread_dtor_tag;

static uma_zone_t linux_current_zone;
static uma_zone_t linux_mm_zone;

/* check if another thread already has a mm_struct */
static struct mm_struct *
find_other_mm(struct proc *p)
{
	struct thread *td;
	struct task_struct *ts;
	struct mm_struct *mm;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		ts = td->td_lkpi_task;
		if (ts == NULL)
			continue;
		mm = ts->mm;
		if (mm == NULL)
			continue;
		/* try to share other mm_struct */
		if (atomic_inc_not_zero(&mm->mm_users))
			return (mm);
	}
	return (NULL);
}

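/*
 * Allocate a Linux task_struct for the given FreeBSD thread.  The
 * mm_struct is shared with any other thread of the same process that
 * already has one; a freshly zeroed mm_struct is used only as a
 * fallback.  In contexts that must not sleep the allocation is demoted
 * to M_NOWAIT and satisfied from the preallocated zone reserve.
 */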
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct task_struct *ts;
	struct mm_struct *mm, *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	if ((td->td_pflags & TDP_ITHREAD) != 0 || !THREAD_CAN_SLEEP()) {
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT | M_USE_RESERVE;
	}

	ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
	if (ts == NULL) {
		if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
			panic("linux_alloc_current: failed to allocate task");
		return (ENOMEM);
	}
	mm = NULL;

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	ts->group_leader = ts;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	PROC_LOCK(proc);
	mm_other = find_other_mm(proc);

	/* use allocated mm_struct as a fallback */
	if (mm_other == NULL) {
		PROC_UNLOCK(proc);
		mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
		if (mm == NULL) {
			if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
				panic(
			    "linux_alloc_current: failed to allocate mm");
			/* free the task_struct; "mm" is known to be NULL here */
			uma_zfree(linux_current_zone, ts);
			return (ENOMEM);
		}

		PROC_LOCK(proc);
		mm_other = find_other_mm(proc);
		if (mm_other == NULL) {
			/* setup new mm_struct */
			init_rwsem(&mm->mmap_sem);
			atomic_set(&mm->mm_count, 1);
			atomic_set(&mm->mm_users, 1);
			/* set mm_struct pointer */
			ts->mm = mm;
			/* clear pointer to not free memory */
			mm = NULL;
		} else {
			ts->mm = mm_other;
		}
	} else {
		ts->mm = mm_other;
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free mm_struct pointer, if any */
	uma_zfree(linux_mm_zone, mm);

	return (0);
}

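/*
 * Lazily allocate a kernel FPU context for the task.  The allocation
 * is skipped inside a critical section, where fpu_kern_alloc_ctx()
 * must not be called.
 */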
int
linux_set_fpu_ctx(struct task_struct *task)
{
#if defined(__aarch64__) || defined(__arm__) || defined(__amd64__) ||	\
    defined(__i386__)
	if (task->fpu_ctx == NULL && curthread->td_critnest == 0)
		task->fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NOWAIT);
#endif
	return (task->fpu_ctx != NULL ? 0 : ENOMEM);
}

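/*
 * Counterpart of Linux' get_task_mm(): return the task's mm_struct
 * with an extra user reference taken, or NULL.  The caller is
 * expected to drop the reference with mmput().
 */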
struct mm_struct *
linux_get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = task->mm;
	if (mm != NULL) {
		atomic_inc(&mm->mm_users);
		return (mm);
	}
	return (NULL);
}

void
linux_mm_dtor(struct mm_struct *mm)
{
	uma_zfree(linux_mm_zone, mm);
}

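/*
 * Release the resources held by a task_struct once its last reference
 * has been dropped: the mm reference, the FPU context if one was
 * allocated, and finally the structure itself.
 */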
void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
#if defined(__aarch64__) || defined(__arm__) || defined(__amd64__) ||	\
    defined(__i386__)
	if (ts->fpu_ctx != NULL)
		fpu_kern_free_ctx(ts->fpu_ctx);
#endif
	uma_zfree(linux_current_zone, ts);
}

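/*
 * thread_dtor eventhandler: drop the task_struct reference held by a
 * FreeBSD thread when that thread is destroyed.
 */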
static void
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
{
	struct task_struct *ts;

	ts = td->td_lkpi_task;
	if (ts == NULL)
		return;

	td->td_lkpi_task = NULL;
	put_task_struct(ts);
}

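/*
 * Look up the task_struct behind a Linux pid.  Values above PID_MAX
 * are thread IDs and are resolved via tdfind(); smaller values name a
 * process and are resolved via pfind(), taking the first thread that
 * has a task_struct attached.  When "do_get" is true an additional
 * reference is returned with the task.
 */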
static struct task_struct *
linux_get_pid_task_int(pid_t pid, const bool do_get)
{
	struct thread *td;
	struct proc *p;
	struct task_struct *ts;

	if (pid > PID_MAX) {
		/* try to find corresponding thread */
		td = tdfind(pid, -1);
		if (td != NULL) {
			ts = td->td_lkpi_task;
			if (do_get && ts != NULL)
				get_task_struct(ts);
			PROC_UNLOCK(td->td_proc);
			return (ts);
		}
	} else {
		/* try to find corresponding process */
		p = pfind(pid);
		if (p != NULL) {
			FOREACH_THREAD_IN_PROC(p, td) {
				ts = td->td_lkpi_task;
				if (ts != NULL) {
					if (do_get)
						get_task_struct(ts);
					PROC_UNLOCK(p);
					return (ts);
				}
			}
			PROC_UNLOCK(p);
		}
	}
	return (NULL);
}

struct task_struct *
linux_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, false));
}

struct task_struct *
linux_get_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, true));
}

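/*
 * Return true if the process behind the task is currently exiting.
 * The task's pid may name either a thread or a process, so both
 * lookups are tried, mirroring linux_get_pid_task_int() above.
 */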
bool
linux_task_exiting(struct task_struct *task)
{
	struct thread *td;
	struct proc *p;
	bool ret;

	ret = false;

	/* try to find corresponding thread */
	td = tdfind(task->pid, -1);
	if (td != NULL) {
		p = td->td_proc;
	} else {
		/* try to find corresponding process */
		p = pfind(task->pid);
	}

	if (p != NULL) {
		if ((p->p_flag & P_WEXIT) != 0)
			ret = true;
		PROC_UNLOCK(p);
	}
	return (ret);
}

static int lkpi_task_resrv;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, task_struct_reserve,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lkpi_task_resrv, 0,
    "Number of struct task and struct mm to reserve for non-sleepable "
    "allocations");

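/*
 * Create the task_struct and mm_struct zones and preallocate the
 * reserve used for non-sleepable allocations, sized by the tunable or,
 * failing that, by an estimate of the number of threads that may need
 * a task_struct in a non-sleepable context.
 */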
static void
linux_current_init(void *arg __unused)
{
	TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve",
	    &lkpi_task_resrv);
	if (lkpi_task_resrv == 0) {
#ifdef DEV_APIC
		/*
		 * Number of interrupt threads plus per-cpu callout
		 * SWI threads.
		 */
		lkpi_task_resrv = first_msi_irq + num_msi_irqs + MAXCPU;
#else
		lkpi_task_resrv = 1024;		/* XXXKIB arbitrary */
#endif
	}
	linux_current_zone = uma_zcreate("lkpicurr",
	    sizeof(struct task_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_current_zone, lkpi_task_resrv);
	uma_prealloc(linux_current_zone, lkpi_task_resrv);
	linux_mm_zone = uma_zcreate("lkpimm",
	    sizeof(struct mm_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_mm_zone, lkpi_task_resrv);
	uma_prealloc(linux_mm_zone, lkpi_task_resrv);

	atomic_thread_fence_seq_cst();

	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
	lkpi_alloc_current = linux_alloc_current;
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
    linux_current_init, NULL);

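/*
 * Tear down in the reverse order: stop handing out new task_structs,
 * strip the task_struct from every thread in the system, wait for
 * pending thread destructions to complete, and only then deregister
 * the destructor and destroy the zones.
 */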
static void
linux_current_uninit(void *arg __unused)
{
	struct proc *p;
	struct task_struct *ts;
	struct thread *td;

	lkpi_alloc_current = linux_alloc_current_noop;

	atomic_thread_fence_seq_cst();

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((ts = td->td_lkpi_task) != NULL) {
				td->td_lkpi_task = NULL;
				put_task_struct(ts);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	thread_reap_barrier();

	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);

	uma_zdestroy(linux_current_zone);
	uma_zdestroy(linux_mm_zone);
}
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
    linux_current_uninit, NULL);