/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>

#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#if defined(__i386__) || defined(__amd64__)
/* used by linux_current_init() below to size the allocation reserve */
extern u_int first_msi_irq, num_msi_irqs;
#endif

static eventhandler_tag linuxkpi_thread_dtor_tag;

static uma_zone_t linux_current_zone;
static uma_zone_t linux_mm_zone;

/* check if another thread in the process already has an mm_struct */
static struct mm_struct *
find_other_mm(struct proc *p)
{
	struct thread *td;
	struct task_struct *ts;
	struct mm_struct *mm;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		ts = td->td_lkpi_task;
		if (ts == NULL)
			continue;
		mm = ts->mm;
		if (mm == NULL)
			continue;
		/* try to share the other thread's mm_struct */
		if (atomic_inc_not_zero(&mm->mm_users))
			return (mm);
	}
	return (NULL);
}

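/*
 * Allocate a task_struct for the given thread, sharing an existing
 * mm_struct within the process when possible.  The proc lock cannot be
 * held across a potentially sleeping zone allocation, so the lock is
 * dropped around uma_zalloc() and find_other_mm() is re-run afterwards
 * to catch a racing thread that attached an mm_struct in the meantime.
 */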
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct task_struct *ts;
	struct mm_struct *mm, *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	if ((td->td_pflags & TDP_ITHREAD) != 0 || !THREAD_CAN_SLEEP()) {
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT | M_USE_RESERVE;
	}

	ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
	if (ts == NULL) {
		if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
			panic("linux_alloc_current: failed to allocate task");
		return (ENOMEM);
	}
	mm = NULL;

	/* set up the new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	ts->group_leader = ts;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	PROC_LOCK(proc);
	mm_other = find_other_mm(proc);

	/* allocate a new mm_struct as a fallback */
	if (mm_other == NULL) {
		PROC_UNLOCK(proc);
		mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
		if (mm == NULL) {
			if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
				panic(
			    "linux_alloc_current: failed to allocate mm");
			/* free the task structure, not the NULL mm pointer */
			uma_zfree(linux_current_zone, ts);
			return (ENOMEM);
		}

		PROC_LOCK(proc);
		mm_other = find_other_mm(proc);
		if (mm_other == NULL) {
			/* set up the new mm_struct */
			init_rwsem(&mm->mmap_sem);
			atomic_set(&mm->mm_count, 1);
			atomic_set(&mm->mm_users, 1);
			/* set the mm_struct pointer */
			ts->mm = mm;
			/* clear the pointer so the mm_struct is not freed below */
			mm = NULL;
		} else {
			ts->mm = mm_other;
		}
	} else {
		ts->mm = mm_other;
	}

	/* store the pointer to the task structure */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free the unused fallback mm_struct, if any (NULL is a no-op) */
	uma_zfree(linux_mm_zone, mm);

	return (0);
}

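/*
 * Usage note (a sketch; the consumer lives in the linuxkpi headers):
 * the "current" macro is expected to call through the lkpi_alloc_current
 * function pointer, which linux_current_init() below points at
 * linux_alloc_current(), whenever a thread has no task_struct attached.
 */
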
/*
 * Return the task's mm_struct with an extra mm_users reference, or
 * NULL.  The caller is expected to drop the reference with mmput(),
 * as linux_free_current() below does.
 */
struct mm_struct *
linux_get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = task->mm;
	if (mm != NULL) {
		atomic_inc(&mm->mm_users);
		return (mm);
	}
	return (NULL);
}

void
linux_mm_dtor(struct mm_struct *mm)
{
	uma_zfree(linux_mm_zone, mm);
}

void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
	uma_zfree(linux_current_zone, ts);
}

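/*
 * Thread destructor: detach and release the task_struct when the
 * backing FreeBSD thread is destroyed.  Registered as a thread_dtor
 * eventhandler in linux_current_init() below.
 */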
static void
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
{
	struct task_struct *ts;

	ts = td->td_lkpi_task;
	if (ts == NULL)
		return;

	td->td_lkpi_task = NULL;
	put_task_struct(ts);
}

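/*
 * Look up a task_struct by Linux "pid".  Values above PID_MAX are
 * treated as FreeBSD thread IDs (task pids are assigned from td_tid in
 * linux_alloc_current() above); smaller values name processes, for
 * which the first thread carrying a task_struct is returned.
 */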
static struct task_struct *
linux_get_pid_task_int(pid_t pid, const bool do_get)
{
	struct thread *td;
	struct proc *p;
	struct task_struct *ts;

	if (pid > PID_MAX) {
		/* try to find the corresponding thread */
		td = tdfind(pid, -1);
		if (td != NULL) {
			ts = td->td_lkpi_task;
			if (do_get && ts != NULL)
				get_task_struct(ts);
			PROC_UNLOCK(td->td_proc);
			return (ts);
		}
	} else {
		/* try to find the corresponding process */
		p = pfind(pid);
		if (p != NULL) {
			FOREACH_THREAD_IN_PROC(p, td) {
				ts = td->td_lkpi_task;
				if (ts != NULL) {
					if (do_get)
						get_task_struct(ts);
					PROC_UNLOCK(p);
					return (ts);
				}
			}
			PROC_UNLOCK(p);
		}
	}
	return (NULL);
}

struct task_struct *
linux_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, false));
}

struct task_struct *
linux_get_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, true));
}

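/*
 * Usage sketch: linux_get_pid_task() returns a referenced task_struct
 * which the caller must drop with put_task_struct(), e.g.:
 *
 *	struct task_struct *ts;
 *
 *	ts = linux_get_pid_task(pid);
 *	if (ts != NULL) {
 *		...
 *		put_task_struct(ts);
 *	}
 *
 * linux_pid_task() returns an unreferenced pointer and is only safe
 * when the caller otherwise prevents the task from going away.
 */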
bool
linux_task_exiting(struct task_struct *task)
{
	struct thread *td;
	struct proc *p;
	bool ret;

	ret = false;

	/* try to find the corresponding thread */
	td = tdfind(task->pid, -1);
	if (td != NULL) {
		p = td->td_proc;
	} else {
		/* try to find the corresponding process */
		p = pfind(task->pid);
	}

	if (p != NULL) {
		if ((p->p_flag & P_WEXIT) != 0)
			ret = true;
		PROC_UNLOCK(p);
	}
	return (ret);
}

static int lkpi_task_resrv;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, task_struct_reserve,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lkpi_task_resrv, 0,
    "Number of struct task and struct mm to reserve for non-sleepable "
    "allocations");

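/*
 * The reserve is a loader tunable; for example (value is illustrative):
 *
 *	compat.linuxkpi.task_struct_reserve="2048"
 *
 * in loader.conf(5).  When left at 0, linux_current_init() below picks
 * an architecture-dependent default.
 */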
static void
linux_current_init(void *arg __unused)
{
	TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve",
	    &lkpi_task_resrv);
	if (lkpi_task_resrv == 0) {
#if defined(__i386__) || defined(__amd64__)
		/*
		 * Number of interrupt threads plus per-cpu callout
		 * SWI threads.
		 */
		lkpi_task_resrv = first_msi_irq + num_msi_irqs + MAXCPU;
#else
		lkpi_task_resrv = 1024;		/* XXXKIB arbitrary */
#endif
	}
	linux_current_zone = uma_zcreate("lkpicurr",
	    sizeof(struct task_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_current_zone, lkpi_task_resrv);
	uma_prealloc(linux_current_zone, lkpi_task_resrv);
	linux_mm_zone = uma_zcreate("lkpimm",
	    sizeof(struct mm_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_mm_zone, lkpi_task_resrv);
	uma_prealloc(linux_mm_zone, lkpi_task_resrv);

	atomic_thread_fence_seq_cst();

	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
	lkpi_alloc_current = linux_alloc_current;
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND,
    linux_current_init, NULL);

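/*
 * Teardown: switch the allocation hook to a no-op first, then sweep all
 * threads to release any attached task_structs, wait for exiting
 * threads to be reaped, and only then deregister the destructor and
 * destroy the zones.
 */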
static void
linux_current_uninit(void *arg __unused)
{
	struct proc *p;
	struct task_struct *ts;
	struct thread *td;

	lkpi_alloc_current = linux_alloc_current_noop;

	atomic_thread_fence_seq_cst();

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((ts = td->td_lkpi_task) != NULL) {
				td->td_lkpi_task = NULL;
				put_task_struct(ts);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	thread_reap_barrier();

	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);

	uma_zdestroy(linux_current_zone);
	uma_zdestroy(linux_mm_zone);
}
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND,
    linux_current_uninit, NULL);