/*
 * SPDX-License-Identifier: CDDL-1.0
 *
 * Copyright (c) 2022 Christos Margiolis <christos@FreeBSD.org>
 * Copyright (c) 2022 Mark Johnston <markj@FreeBSD.org>
 * Copyright (c) 2023 The FreeBSD Foundation
 *
 * Portions of this software were developed by Christos Margiolis
 * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/param.h>
#include <sys/bitset.h>
#include <sys/cred.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>

#include <cddl/dev/dtrace/dtrace_cddl.h>

#include "kinst.h"
#include "kinst_isa.h"

#define	KINST_TRAMP_FILL_PATTERN	((kinst_patchval_t []){KINST_PATCHVAL})
#define	KINST_TRAMP_FILL_SIZE		sizeof(kinst_patchval_t)

#define	KINST_TRAMPCHUNK_SIZE		PAGE_SIZE
#define	KINST_TRAMPS_PER_CHUNK		(KINST_TRAMPCHUNK_SIZE / KINST_TRAMP_SIZE)

struct trampchunk {
	TAILQ_ENTRY(trampchunk) next;
	uint8_t *addr;
	/* 0 -> allocated, 1 -> free */
	BITSET_DEFINE(, KINST_TRAMPS_PER_CHUNK) free;
};

static TAILQ_HEAD(, trampchunk) kinst_trampchunks =
    TAILQ_HEAD_INITIALIZER(kinst_trampchunks);
static struct sx kinst_tramp_sx;
SX_SYSINIT(kinst_tramp_sx, &kinst_tramp_sx, "kinst tramp");
#ifdef __amd64__
static eventhandler_tag kinst_thread_ctor_handler;
static eventhandler_tag kinst_thread_dtor_handler;
#endif
/*
 * Fill the trampolines with KINST_TRAMP_FILL_PATTERN so that the kernel will
 * crash cleanly if things somehow go wrong.
 */
static void
kinst_trampoline_fill(uint8_t *addr, int size)
{
	int i;

	for (i = 0; i < size; i += KINST_TRAMP_FILL_SIZE) {
		memcpy(&addr[i], KINST_TRAMP_FILL_PATTERN,
		    KINST_TRAMP_FILL_SIZE);
	}
}

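/*
 * Allocate a page-sized chunk of executable kernel memory and carve it into
 * KINST_TRAMPS_PER_CHUNK trampoline slots, all initially marked free.
 * Returns NULL if the address space reservation fails.
 */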
static struct trampchunk *
kinst_trampchunk_alloc(void)
{
	struct trampchunk *chunk;
	vm_offset_t trampaddr;
	int error __diagused;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

#ifdef __amd64__
	/*
	 * To simplify population of trampolines, we follow the amd64 kernel's
	 * code model and allocate them above KERNBASE, i.e., in the top 2GB of
	 * the kernel's virtual address space (not the case for other
	 * platforms).
	 */
	trampaddr = KERNBASE;
#else
	trampaddr = VM_MIN_KERNEL_ADDRESS;
#endif
	/*
	 * Allocate virtual memory for the trampoline chunk. The returned
	 * address is saved in "trampaddr". Trampolines must be executable so
	 * max_prot must include VM_PROT_EXECUTE.
	 */
	error = vm_map_find(kernel_map, NULL, 0, &trampaddr,
	    KINST_TRAMPCHUNK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    0);
	if (error != KERN_SUCCESS) {
		KINST_LOG("trampoline chunk allocation failed: %d", error);
		return (NULL);
	}

	error = kmem_back(kernel_object, trampaddr, KINST_TRAMPCHUNK_SIZE,
	    M_WAITOK | M_EXEC);
	KASSERT(error == KERN_SUCCESS, ("kmem_back failed: %d", error));

	kinst_trampoline_fill((uint8_t *)trampaddr, KINST_TRAMPCHUNK_SIZE);

	/* Allocate a tracker for this chunk. */
	chunk = malloc(sizeof(*chunk), M_KINST, M_WAITOK);
	chunk->addr = (void *)trampaddr;
	BIT_FILL(KINST_TRAMPS_PER_CHUNK, &chunk->free);

	TAILQ_INSERT_HEAD(&kinst_trampchunks, chunk, next);

	return (chunk);
}

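/*
 * Unmap a trampoline chunk's backing pages, release its virtual address
 * range, and free the tracker.  The trampoline lock must be held.
 */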
static void
kinst_trampchunk_free(struct trampchunk *chunk)
{
	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	TAILQ_REMOVE(&kinst_trampchunks, chunk, next);
	kmem_unback(kernel_object, (vm_offset_t)chunk->addr,
	    KINST_TRAMPCHUNK_SIZE);
	(void)vm_map_remove(kernel_map, (vm_offset_t)chunk->addr,
	    (vm_offset_t)(chunk->addr + KINST_TRAMPCHUNK_SIZE));
	free(chunk, M_KINST);
}

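/*
 * Return the address of a free trampoline slot, scanning existing chunks
 * first and allocating a new chunk if they are all full.  With M_NOWAIT the
 * search fails instead of sleeping to allocate a new chunk.
 */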
static uint8_t *
kinst_trampoline_alloc_locked(int how)
{
	struct trampchunk *chunk;
	uint8_t *tramp;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		/* All trampolines from this chunk are already allocated. */
		if ((off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free)) == 0)
			continue;
		/* BIT_FFS() returns indices starting at 1 instead of 0. */
		off--;
		break;
	}
	if (chunk == NULL) {
		if ((how & M_NOWAIT) != 0)
			return (NULL);

		/*
		 * We didn't find any free trampoline in the current list, so
		 * allocate a new chunk.  If that fails the provider will no
		 * longer be reliable, so try to warn the user.
		 */
		if ((chunk = kinst_trampchunk_alloc()) == NULL) {
#ifdef __amd64__
			static bool once = true;

			if (once) {
				once = false;
				KINST_LOG(
				    "kinst: failed to allocate trampoline, "
				    "probes may not fire");
			}
#endif
			return (NULL);
		}
		off = 0;
	}
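	/* Claim the slot and compute the trampoline's address. */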
	BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
	tramp = chunk->addr + off * KINST_TRAMP_SIZE;
	return (tramp);
}

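/* Allocate a trampoline with the trampoline lock held across the search. */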
uint8_t *
kinst_trampoline_alloc(int how)
{
	uint8_t *tramp;

	sx_xlock(&kinst_tramp_sx);
	tramp = kinst_trampoline_alloc_locked(how);
	sx_xunlock(&kinst_tramp_sx);
	return (tramp);
}

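/*
 * Return a trampoline to the chunk that owns it, refilling the slot with the
 * fill pattern.  If "freechunks" is true and the chunk becomes entirely free,
 * release the chunk itself.  Passing an address that belongs to no chunk is a
 * bug and panics.
 */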
static void
kinst_trampoline_dealloc_locked(uint8_t *tramp, bool freechunks)
{
	struct trampchunk *chunk;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	if (tramp == NULL)
		return;

	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		for (off = 0; off < KINST_TRAMPS_PER_CHUNK; off++) {
			if (chunk->addr + off * KINST_TRAMP_SIZE == tramp) {
				kinst_trampoline_fill(tramp, KINST_TRAMP_SIZE);
				BIT_SET(KINST_TRAMPS_PER_CHUNK, off,
				    &chunk->free);
				if (freechunks &&
				    BIT_ISFULLSET(KINST_TRAMPS_PER_CHUNK,
				    &chunk->free))
					kinst_trampchunk_free(chunk);
				return;
			}
		}
	}
	panic("%s: did not find trampoline chunk for %p", __func__, tramp);
}

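/*
 * Free a trampoline, taking the trampoline lock and releasing any chunk that
 * becomes empty as a result.
 */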
void
kinst_trampoline_dealloc(uint8_t *tramp)
{
	sx_xlock(&kinst_tramp_sx);
	kinst_trampoline_dealloc_locked(tramp, true);
	sx_xunlock(&kinst_tramp_sx);
}

#ifdef __amd64__
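/*
 * On amd64 each thread carries a private trampoline, allocated when the
 * thread is created and freed when it is destroyed.
 */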
static void
kinst_thread_ctor(void *arg __unused, struct thread *td)
{
	td->t_kinst_tramp = kinst_trampoline_alloc(M_WAITOK);
}

static void
kinst_thread_dtor(void *arg __unused, struct thread *td)
{
	void *tramp;

	tramp = td->t_kinst_tramp;
	td->t_kinst_tramp = NULL;

	/*
	 * This assumes that the thread_dtor event permits sleeping, which
	 * appears to be true for the time being.
	 */
	kinst_trampoline_dealloc(tramp);
}
#endif

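/*
 * Module initialization: on amd64, register the thread constructor and
 * destructor hooks and allocate a trampoline for every thread already in the
 * system.
 */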
int
kinst_trampoline_init(void)
{
#ifdef __amd64__
	struct proc *p;
	struct thread *td;
	void *tramp;
	int error;

	kinst_thread_ctor_handler = EVENTHANDLER_REGISTER(thread_ctor,
	    kinst_thread_ctor, NULL, EVENTHANDLER_PRI_ANY);
	kinst_thread_dtor_handler = EVENTHANDLER_REGISTER(thread_dtor,
	    kinst_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);

	error = 0;
	tramp = NULL;

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
retry:
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if (td->t_kinst_tramp != NULL)
				continue;
			if (tramp == NULL) {
				/*
				 * Try to allocate a trampoline without
				 * dropping the process lock.  If all chunks
				 * are fully utilized, we must release the
				 * lock and try again.
				 */
				tramp = kinst_trampoline_alloc_locked(M_NOWAIT);
				if (tramp == NULL) {
					PROC_UNLOCK(p);
					tramp = kinst_trampoline_alloc_locked(
					    M_WAITOK);
					if (tramp == NULL) {
						/*
						 * Let the unload handler clean
						 * up.
						 */
						error = ENOMEM;
						goto out;
					} else
						goto retry;
				}
			}
			td->t_kinst_tramp = tramp;
			tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
out:
	sx_xunlock(&kinst_tramp_sx);
	sx_sunlock(&allproc_lock);
#else
	int error = 0;

	sx_xlock(&kinst_tramp_sx);
	TAILQ_INIT(&kinst_trampchunks);
	sx_xunlock(&kinst_tramp_sx);
#endif

	return (error);
}

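/*
 * Module teardown: deregister the thread hooks (on amd64), reclaim every
 * thread's trampoline, and free all trampoline chunks.
 */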
int
kinst_trampoline_deinit(void)
{
#ifdef __amd64__
	struct trampchunk *chunk, *tmp;
	struct proc *p;
	struct thread *td;

	EVENTHANDLER_DEREGISTER(thread_ctor, kinst_thread_ctor_handler);
	EVENTHANDLER_DEREGISTER(thread_dtor, kinst_thread_dtor_handler);

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			kinst_trampoline_dealloc_locked(td->t_kinst_tramp,
			    false);
			td->t_kinst_tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
		kinst_trampchunk_free(chunk);
	sx_xunlock(&kinst_tramp_sx);
#else
	struct trampchunk *chunk, *tmp;

	sx_xlock(&kinst_tramp_sx);
	TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
		kinst_trampchunk_free(chunk);
	sx_xunlock(&kinst_tramp_sx);
#endif

	return (0);
}