/*
 * SPDX-License-Identifier: CDDL 1.0
 *
 * Copyright 2022 Christos Margiolis <christos@FreeBSD.org>
 * Copyright 2022 Mark Johnston <markj@FreeBSD.org>
 */

#include <sys/param.h>
#include <sys/bitset.h>
#include <sys/cred.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>

#include <cddl/dev/dtrace/dtrace_cddl.h>

#include "kinst.h"
#include "kinst_isa.h"

#define KINST_TRAMP_FILL_PATTERN    ((kinst_patchval_t []){KINST_PATCHVAL})
#define KINST_TRAMP_FILL_SIZE       sizeof(kinst_patchval_t)

#define KINST_TRAMPCHUNK_SIZE       PAGE_SIZE
#define KINST_TRAMPS_PER_CHUNK      (KINST_TRAMPCHUNK_SIZE / KINST_TRAMP_SIZE)

struct trampchunk {
        TAILQ_ENTRY(trampchunk) next;
        uint8_t *addr;
        /* 0 -> allocated, 1 -> free */
        BITSET_DEFINE(, KINST_TRAMPS_PER_CHUNK) free;
};

static TAILQ_HEAD(, trampchunk) kinst_trampchunks =
    TAILQ_HEAD_INITIALIZER(kinst_trampchunks);
static struct sx kinst_tramp_sx;
SX_SYSINIT(kinst_tramp_sx, &kinst_tramp_sx, "kinst tramp");
static eventhandler_tag kinst_thread_ctor_handler;
static eventhandler_tag kinst_thread_dtor_handler;

/*
 * Fill the trampolines with KINST_TRAMP_FILL_PATTERN so that the kernel will
 * crash cleanly if things somehow go wrong.
 */
static void
kinst_trampoline_fill(uint8_t *addr, int size)
{
        int i;

        for (i = 0; i < size; i += KINST_TRAMP_FILL_SIZE) {
                memcpy(&addr[i], KINST_TRAMP_FILL_PATTERN,
                    KINST_TRAMP_FILL_SIZE);
        }
}

static struct trampchunk *
kinst_trampchunk_alloc(void)
{
        struct trampchunk *chunk;
        vm_offset_t trampaddr;
        int error __diagused;

        sx_assert(&kinst_tramp_sx, SX_XLOCKED);

#ifdef __amd64__
        /*
         * To simplify population of trampolines, we follow the amd64 kernel's
         * code model and allocate them above KERNBASE, i.e., in the top 2GB of
         * the kernel's virtual address space (not the case for other
         * platforms).
         */
        trampaddr = KERNBASE;
#else
        trampaddr = VM_MIN_KERNEL_ADDRESS;
#endif
        /*
         * Allocate virtual memory for the trampoline chunk. The returned
         * address is saved in "trampaddr". Trampolines must be executable, so
         * max_prot must include VM_PROT_EXECUTE.
         */
        error = vm_map_find(kernel_map, NULL, 0, &trampaddr,
            KINST_TRAMPCHUNK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
            0);
        if (error != KERN_SUCCESS) {
                KINST_LOG("trampoline chunk allocation failed: %d", error);
                return (NULL);
        }

        error = kmem_back(kernel_object, trampaddr, KINST_TRAMPCHUNK_SIZE,
            M_WAITOK | M_EXEC);
        KASSERT(error == KERN_SUCCESS, ("kmem_back failed: %d", error));

        kinst_trampoline_fill((uint8_t *)trampaddr, KINST_TRAMPCHUNK_SIZE);

        /* Allocate a tracker for this chunk. */
        chunk = malloc(sizeof(*chunk), M_KINST, M_WAITOK);
        chunk->addr = (void *)trampaddr;
        BIT_FILL(KINST_TRAMPS_PER_CHUNK, &chunk->free);

        TAILQ_INSERT_HEAD(&kinst_trampchunks, chunk, next);

        return (chunk);
}
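
/*
 * Illustrative sketch, comment only: a freshly allocated chunk behaves like a
 * small slab of fixed-size slots. Assuming, for example, a 4096-byte page and
 * a hypothetical KINST_TRAMP_SIZE of 32 bytes, a chunk would hold 128
 * trampolines. Slot "off" starts at chunk->addr + off * KINST_TRAMP_SIZE, and
 * bit "off" in chunk->free stays set while the slot is unused:
 *
 *      chunk = kinst_trampchunk_alloc();       // all bits set, i.e. all free
 *      off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free) - 1;
 *      BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
 *      tramp = chunk->addr + off * KINST_TRAMP_SIZE;
 */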

static void
kinst_trampchunk_free(struct trampchunk *chunk)
{
        sx_assert(&kinst_tramp_sx, SX_XLOCKED);

        TAILQ_REMOVE(&kinst_trampchunks, chunk, next);
        kmem_unback(kernel_object, (vm_offset_t)chunk->addr,
            KINST_TRAMPCHUNK_SIZE);
        (void)vm_map_remove(kernel_map, (vm_offset_t)chunk->addr,
            (vm_offset_t)(chunk->addr + KINST_TRAMPCHUNK_SIZE));
        free(chunk, M_KINST);
}

static uint8_t *
kinst_trampoline_alloc_locked(int how)
{
        struct trampchunk *chunk;
        uint8_t *tramp;
        int off;

        sx_assert(&kinst_tramp_sx, SX_XLOCKED);

        TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
                /* All trampolines from this chunk are already allocated. */
                if ((off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free)) == 0)
                        continue;
                /* BIT_FFS() returns indices starting at 1 instead of 0. */
                off--;
                break;
        }
        if (chunk == NULL) {
                if ((how & M_NOWAIT) != 0)
                        return (NULL);

                /*
                 * We did not find a free trampoline in the current list, so
                 * allocate a new chunk. If that fails, the provider will no
                 * longer be reliable, so try to warn the user.
                 */
                if ((chunk = kinst_trampchunk_alloc()) == NULL) {
                        static bool once = true;

                        if (once) {
                                once = false;
                                KINST_LOG(
                                    "kinst: failed to allocate trampoline, "
                                    "probes may not fire");
                        }
                        return (NULL);
                }
                off = 0;
        }
        BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
        tramp = chunk->addr + off * KINST_TRAMP_SIZE;
        return (tramp);
}

uint8_t *
kinst_trampoline_alloc(int how)
{
        uint8_t *tramp;

        sx_xlock(&kinst_tramp_sx);
        tramp = kinst_trampoline_alloc_locked(how);
        sx_xunlock(&kinst_tramp_sx);
        return (tramp);
}

static void
kinst_trampoline_dealloc_locked(uint8_t *tramp, bool freechunks)
{
        struct trampchunk *chunk;
        int off;

        sx_assert(&kinst_tramp_sx, SX_XLOCKED);

        if (tramp == NULL)
                return;

        TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
                for (off = 0; off < KINST_TRAMPS_PER_CHUNK; off++) {
                        if (chunk->addr + off * KINST_TRAMP_SIZE == tramp) {
                                kinst_trampoline_fill(tramp, KINST_TRAMP_SIZE);
                                BIT_SET(KINST_TRAMPS_PER_CHUNK, off,
                                    &chunk->free);
                                if (freechunks &&
                                    BIT_ISFULLSET(KINST_TRAMPS_PER_CHUNK,
                                    &chunk->free))
                                        kinst_trampchunk_free(chunk);
                                return;
                        }
                }
        }
        panic("%s: did not find trampoline chunk for %p", __func__, tramp);
}

void
kinst_trampoline_dealloc(uint8_t *tramp)
{
        sx_xlock(&kinst_tramp_sx);
        kinst_trampoline_dealloc_locked(tramp, true);
        sx_xunlock(&kinst_tramp_sx);
}

static void
kinst_thread_ctor(void *arg __unused, struct thread *td)
{
        td->t_kinst_tramp = kinst_trampoline_alloc(M_WAITOK);
}

static void
kinst_thread_dtor(void *arg __unused, struct thread *td)
{
        void *tramp;

        tramp = td->t_kinst_tramp;
        td->t_kinst_tramp = NULL;

        /*
         * This assumes that the thread_dtor event permits sleeping, which
         * appears to be true for the time being.
         */
        kinst_trampoline_dealloc(tramp);
}
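
/*
 * Illustrative calling pattern, comment only: outside of the per-thread
 * ctor/dtor path above, a consumer of this allocator would pair the calls
 * roughly as follows. The "..." step is a placeholder; how a trampoline body
 * is populated is architecture-specific and lives outside this file.
 *
 *      uint8_t *tramp;
 *
 *      tramp = kinst_trampoline_alloc(M_WAITOK);
 *      if (tramp == NULL)
 *              return (ENOMEM);
 *      ...copy the traced instruction and a jump back into tramp...
 *      kinst_trampoline_dealloc(tramp);
 */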

int
kinst_trampoline_init(void)
{
        struct proc *p;
        struct thread *td;
        void *tramp;
        int error;

        kinst_thread_ctor_handler = EVENTHANDLER_REGISTER(thread_ctor,
            kinst_thread_ctor, NULL, EVENTHANDLER_PRI_ANY);
        kinst_thread_dtor_handler = EVENTHANDLER_REGISTER(thread_dtor,
            kinst_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);

        error = 0;
        tramp = NULL;

        sx_slock(&allproc_lock);
        sx_xlock(&kinst_tramp_sx);
        FOREACH_PROC_IN_SYSTEM(p) {
retry:
                PROC_LOCK(p);
                FOREACH_THREAD_IN_PROC(p, td) {
                        if (td->t_kinst_tramp != NULL)
                                continue;
                        if (tramp == NULL) {
                                /*
                                 * Try to allocate a trampoline without
                                 * dropping the process lock. If all chunks
                                 * are fully utilized, we must release the
                                 * lock and try again.
                                 */
                                tramp = kinst_trampoline_alloc_locked(M_NOWAIT);
                                if (tramp == NULL) {
                                        PROC_UNLOCK(p);
                                        tramp = kinst_trampoline_alloc_locked(
                                            M_WAITOK);
                                        if (tramp == NULL) {
                                                /*
                                                 * Let the unload handler
                                                 * clean up.
                                                 */
                                                error = ENOMEM;
                                                goto out;
                                        } else
                                                goto retry;
                                }
                        }
                        td->t_kinst_tramp = tramp;
                        tramp = NULL;
                }
                PROC_UNLOCK(p);
        }
out:
        sx_xunlock(&kinst_tramp_sx);
        sx_sunlock(&allproc_lock);
        return (error);
}

int
kinst_trampoline_deinit(void)
{
        struct trampchunk *chunk, *tmp;
        struct proc *p;
        struct thread *td;

        EVENTHANDLER_DEREGISTER(thread_ctor, kinst_thread_ctor_handler);
        EVENTHANDLER_DEREGISTER(thread_dtor, kinst_thread_dtor_handler);

        sx_slock(&allproc_lock);
        sx_xlock(&kinst_tramp_sx);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                FOREACH_THREAD_IN_PROC(p, td) {
                        kinst_trampoline_dealloc_locked(td->t_kinst_tramp,
                            false);
                        td->t_kinst_tramp = NULL;
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);
        TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
                kinst_trampchunk_free(chunk);
        sx_xunlock(&kinst_tramp_sx);

        return (0);
}
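
/*
 * Illustrative pairing, comment only: kinst_trampoline_init() and
 * kinst_trampoline_deinit() are assumed to be called from the provider's
 * load and unload paths, roughly:
 *
 *      if ((error = kinst_trampoline_init()) != 0)
 *              return (error);
 *      ...
 *      (void)kinst_trampoline_deinit();
 *
 * If kinst_trampoline_init() fails, the unload path is expected to run so
 * that any chunks and per-thread trampolines already handed out are freed,
 * matching the "Let the unload handler clean up" comment above.
 */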