/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/kthread.h>

#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>

static eventhandler_tag linuxkpi_thread_dtor_tag;

static MALLOC_DEFINE(M_LINUX_CURRENT, "linuxcurrent", "LinuxKPI task structure");

int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct thread *td_other;
	struct task_struct *ts;
	struct task_struct *ts_other;
	struct mm_struct *mm;
	struct mm_struct *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	ts = malloc(sizeof(*ts), M_LINUX_CURRENT, flags | M_ZERO);
	if (ts == NULL)
		return (ENOMEM);

	mm = malloc(sizeof(*mm), M_LINUX_CURRENT, flags | M_ZERO);
	if (mm == NULL) {
		free(ts, M_LINUX_CURRENT);
		return (ENOMEM);
	}

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	atomic_set(&ts->usage, 1);
	ts->state = TASK_RUNNING;

	proc = td->td_proc;

	/* check if another thread already has a mm_struct */
	PROC_LOCK(proc);
	FOREACH_THREAD_IN_PROC(proc, td_other) {
		ts_other = td_other->td_lkpi_task;
		if (ts_other == NULL)
			continue;

		mm_other = ts_other->mm;
		if (mm_other == NULL)
			continue;

		/* try to share other mm_struct */
		if (atomic_inc_not_zero(&mm_other->mm_users)) {
			/* set mm_struct pointer */
			ts->mm = mm_other;
			break;
		}
	}

	/* use allocated mm_struct as a fallback */
	if (ts->mm == NULL) {
		/* setup new mm_struct */
		init_rwsem(&mm->mmap_sem);
		atomic_set(&mm->mm_count, 1);
		atomic_set(&mm->mm_users, 1);
		mm->vmspace = vmspace_acquire_ref(proc);
		/* set mm_struct pointer */
		ts->mm = mm;
		/* clear pointer to not free memory */
		mm = NULL;
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free mm_struct pointer, if any */
	free(mm, M_LINUX_CURRENT);

	return (0);
}

struct mm_struct *
linux_get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = task->mm;
	if (mm != NULL && mm->vmspace != NULL) {
		atomic_inc(&mm->mm_users);
		return (mm);
	}
	return (NULL);
}

void
linux_mm_dtor(struct mm_struct *mm)
{
	if (mm->vmspace != NULL)
		vmspace_free(mm->vmspace);
	free(mm, M_LINUX_CURRENT);
}

void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
	free(ts, M_LINUX_CURRENT);
}

static void
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
{
	struct task_struct *ts;

	ts = td->td_lkpi_task;
	if (ts == NULL)
		return;

	td->td_lkpi_task = NULL;
	put_task_struct(ts);
}

struct task_struct *
linux_pid_task(pid_t pid)
{
	struct thread *td;
	struct proc *p;

	/* try to find corresponding thread */
	td = tdfind(pid, -1);
	if (td != NULL) {
		struct task_struct *ts = td->td_lkpi_task;
		PROC_UNLOCK(td->td_proc);
		return (ts);
	}

	/* try to find corresponding procedure */
	p = pfind(pid);
	if (p != NULL) {
		FOREACH_THREAD_IN_PROC(p, td) {
			struct task_struct *ts = td->td_lkpi_task;
			if (ts != NULL) {
				PROC_UNLOCK(p);
				return (ts);
			}
		}
		PROC_UNLOCK(p);
	}
	return (NULL);
}

struct task_struct *
linux_get_pid_task(pid_t pid)
{
	struct thread *td;
	struct proc *p;

	/* try to find corresponding thread */
	td = tdfind(pid, -1);
	if (td != NULL) {
		struct task_struct *ts = td->td_lkpi_task;
		if (ts != NULL)
			get_task_struct(ts);
		PROC_UNLOCK(td->td_proc);
		return (ts);
	}

	/* try to find corresponding procedure */
	p = pfind(pid);
	if (p != NULL) {
		FOREACH_THREAD_IN_PROC(p, td) {
			struct task_struct *ts = td->td_lkpi_task;
			if (ts != NULL) {
				get_task_struct(ts);
				PROC_UNLOCK(p);
				return (ts);
			}
		}
		PROC_UNLOCK(p);
	}
	return (NULL);
}

static void
linux_current_init(void *arg __unused)
{
	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, linux_current_init, NULL);

static void
linux_current_uninit(void *arg __unused)
{
	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);
}
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, linux_current_uninit, NULL);