xref: /freebsd/sys/compat/linuxkpi/common/src/linux_current.c (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef __amd64__
#define	DEV_APIC
#elif defined(__i386__)
#include "opt_apic.h"
#endif

#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>

#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

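/*
 * The MSI IRQ range is used below to size the reserve of task and mm
 * structures kept for allocations from non-sleepable contexts such as
 * interrupt threads.
 */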
#ifdef DEV_APIC
extern u_int first_msi_irq, num_msi_irqs;
#endif

static eventhandler_tag linuxkpi_thread_dtor_tag;

static uma_zone_t linux_current_zone;
static uma_zone_t linux_mm_zone;

/* check if another thread already has a mm_struct */
static struct mm_struct *
find_other_mm(struct proc *p)
{
	struct thread *td;
	struct task_struct *ts;
	struct mm_struct *mm;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		ts = td->td_lkpi_task;
		if (ts == NULL)
			continue;
		mm = ts->mm;
		if (mm == NULL)
			continue;
		/* try to share other mm_struct */
		if (atomic_inc_not_zero(&mm->mm_users))
			return (mm);
	}
	return (NULL);
}

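/*
 * Allocate and attach a LinuxKPI task_struct to the given FreeBSD
 * thread.  Interrupt threads and other contexts that may not sleep are
 * downgraded to M_NOWAIT allocations backed by the preallocated zone
 * reserve.  The mm_struct is shared with any sibling thread that
 * already has one; otherwise a new one is allocated while the process
 * lock is dropped, and the search is repeated before installing it
 * (classic double-checked locking).
 */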
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct task_struct *ts;
	struct mm_struct *mm, *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	if ((td->td_pflags & TDP_ITHREAD) != 0 || !THREAD_CAN_SLEEP()) {
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT | M_USE_RESERVE;
	}

	ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
	if (ts == NULL) {
		if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
			panic("linux_alloc_current: failed to allocate task");
		return (ENOMEM);
	}
	mm = NULL;

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	ts->group_leader = ts;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	PROC_LOCK(proc);
	mm_other = find_other_mm(proc);

	/* use allocated mm_struct as a fallback */
	if (mm_other == NULL) {
		PROC_UNLOCK(proc);
		mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
		if (mm == NULL) {
			if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
				panic(
			    "linux_alloc_current: failed to allocate mm");
			uma_zfree(linux_current_zone, ts);
			return (ENOMEM);
		}

		PROC_LOCK(proc);
		mm_other = find_other_mm(proc);
		if (mm_other == NULL) {
			/* setup new mm_struct */
			init_rwsem(&mm->mmap_sem);
			atomic_set(&mm->mm_count, 1);
			atomic_set(&mm->mm_users, 1);
			/* set mm_struct pointer */
			ts->mm = mm;
			/* clear pointer to not free memory */
			mm = NULL;
		} else {
			ts->mm = mm_other;
		}
	} else {
		ts->mm = mm_other;
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free mm_struct pointer, if any */
	uma_zfree(linux_mm_zone, mm);

	return (0);
}

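/*
 * Return the task's mm_struct with an additional mm_users reference,
 * or NULL if the task has none.  The caller is expected to drop the
 * reference with mmput().
 */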
struct mm_struct *
linux_get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = task->mm;
	if (mm != NULL) {
		atomic_inc(&mm->mm_users);
		return (mm);
	}
	return (NULL);
}

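/*
 * Final destructor for an mm_struct, presumably reached via mmdrop()
 * once the last mm_count reference is gone.
 */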
void
linux_mm_dtor(struct mm_struct *mm)
{
	uma_zfree(linux_mm_zone, mm);
}

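/*
 * Release a task_struct whose usage count has dropped to zero,
 * typically from put_task_struct().  The associated mm_struct
 * reference is dropped through mmput().
 */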
void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
	uma_zfree(linux_current_zone, ts);
}

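/*
 * thread_dtor eventhandler: detach and release the task_struct, if
 * any, when a FreeBSD thread is destroyed.
 */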
static void
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
{
	struct task_struct *ts;

	ts = td->td_lkpi_task;
	if (ts == NULL)
		return;

	td->td_lkpi_task = NULL;
	put_task_struct(ts);
}

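/*
 * Look up the task_struct behind a Linux "pid".  LinuxKPI uses the
 * FreeBSD thread ID as the task pid, and thread IDs live above
 * PID_MAX, so larger values are resolved with tdfind() while smaller
 * values are treated as process IDs and resolved to the first thread
 * of the process that has a task_struct.  Both tdfind() and pfind()
 * return with the process lock held, hence the PROC_UNLOCK() calls.
 * When do_get is true, a reference is taken on the result, which the
 * caller must drop, e.g. (sketch):
 *
 *	struct task_struct *ts = linux_get_pid_task(pid);
 *	if (ts != NULL) {
 *		...
 *		put_task_struct(ts);
 *	}
 */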
static struct task_struct *
linux_get_pid_task_int(pid_t pid, const bool do_get)
{
	struct thread *td;
	struct proc *p;
	struct task_struct *ts;

	if (pid > PID_MAX) {
		/* try to find corresponding thread */
		td = tdfind(pid, -1);
		if (td != NULL) {
			ts = td->td_lkpi_task;
			if (do_get && ts != NULL)
				get_task_struct(ts);
			PROC_UNLOCK(td->td_proc);
			return (ts);
		}
	} else {
		/* try to find corresponding process */
		p = pfind(pid);
		if (p != NULL) {
			FOREACH_THREAD_IN_PROC(p, td) {
				ts = td->td_lkpi_task;
				if (ts != NULL) {
					if (do_get)
						get_task_struct(ts);
					PROC_UNLOCK(p);
					return (ts);
				}
			}
			PROC_UNLOCK(p);
		}
	}
	return (NULL);
}

struct task_struct *
linux_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, false));
}

struct task_struct *
linux_get_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, true));
}

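/*
 * Report whether the FreeBSD process behind the given task is exiting
 * (P_WEXIT set).
 */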
bool
linux_task_exiting(struct task_struct *task)
{
	struct thread *td;
	struct proc *p;
	bool ret;

	ret = false;

	/* try to find corresponding thread */
	td = tdfind(task->pid, -1);
	if (td != NULL) {
		p = td->td_proc;
	} else {
		/* try to find corresponding process */
		p = pfind(task->pid);
	}

	if (p != NULL) {
		if ((p->p_flag & P_WEXIT) != 0)
			ret = true;
		PROC_UNLOCK(p);
	}
	return (ret);
}

static int lkpi_task_resrv;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, task_struct_reserve,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lkpi_task_resrv, 0,
    "Number of struct task and struct mm to reserve for non-sleepable "
    "allocations");

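/*
 * Create the task_struct and mm_struct zones and reserve enough items
 * for allocations from contexts that cannot sleep.  Unless overridden
 * by the compat.linuxkpi.task_struct_reserve tunable, the reserve is
 * sized after the potential number of interrupt threads.
 */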
static void
linux_current_init(void *arg __unused)
{
	TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve",
	    &lkpi_task_resrv);
	if (lkpi_task_resrv == 0) {
#ifdef DEV_APIC
		/*
		 * Number of interrupt threads plus per-cpu callout
		 * SWI threads.
		 */
		lkpi_task_resrv = first_msi_irq + num_msi_irqs + MAXCPU;
#else
		lkpi_task_resrv = 1024;		/* XXXKIB arbitrary */
#endif
	}
	linux_current_zone = uma_zcreate("lkpicurr",
	    sizeof(struct task_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_current_zone, lkpi_task_resrv);
	uma_prealloc(linux_current_zone, lkpi_task_resrv);
	linux_mm_zone = uma_zcreate("lkpimm",
	    sizeof(struct mm_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_mm_zone, lkpi_task_resrv);
	uma_prealloc(linux_mm_zone, lkpi_task_resrv);

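	/*
	 * Make sure the zone setup above is globally visible before
	 * the allocation hook is published below.
	 */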
	atomic_thread_fence_seq_cst();

	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
	lkpi_alloc_current = linux_alloc_current;
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
    linux_current_init, NULL);

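/*
 * Tear down in reverse: stop new allocations by restoring the no-op
 * hook, strip the task_struct from every thread in the system, wait
 * for exiting threads to pass their dtor via thread_reap_barrier(),
 * and only then deregister the eventhandler and destroy the zones.
 */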
static void
linux_current_uninit(void *arg __unused)
{
	struct proc *p;
	struct task_struct *ts;
	struct thread *td;

	lkpi_alloc_current = linux_alloc_current_noop;

	atomic_thread_fence_seq_cst();

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((ts = td->td_lkpi_task) != NULL) {
				td->td_lkpi_task = NULL;
				put_task_struct(ts);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	thread_reap_barrier();

	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);

	uma_zdestroy(linux_current_zone);
	uma_zdestroy(linux_mm_zone);
}
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
    linux_current_uninit, NULL);
346