xref: /freebsd/sys/cddl/dev/kinst/trampoline.c (revision 911f0260390e18cf85f3dbf2c719b593efdc1e3c)
/*
 * SPDX-License-Identifier: CDDL 1.0
 *
 * Copyright (c) 2022 Christos Margiolis <christos@FreeBSD.org>
 * Copyright (c) 2022 Mark Johnston <markj@FreeBSD.org>
 * Copyright (c) 2023 The FreeBSD Foundation
 *
 * Portions of this software were developed by Christos Margiolis
 * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/param.h>
#include <sys/bitset.h>
#include <sys/cred.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>

#include <cddl/dev/dtrace/dtrace_cddl.h>

#include "kinst.h"
#include "kinst_isa.h"

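/*
 * The fill pattern is the ISA-specific patch value from kinst_isa.h, repeated
 * to cover a whole trampoline; see kinst_trampoline_fill() below.
 */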
#define KINST_TRAMP_FILL_PATTERN	((kinst_patchval_t []){KINST_PATCHVAL})
#define KINST_TRAMP_FILL_SIZE		sizeof(kinst_patchval_t)

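/*
 * Trampolines are allocated in page-sized chunks, each of which is carved
 * into KINST_TRAMPS_PER_CHUNK slots of KINST_TRAMP_SIZE bytes.
 */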
#define KINST_TRAMPCHUNK_SIZE		PAGE_SIZE
#define KINST_TRAMPS_PER_CHUNK		(KINST_TRAMPCHUNK_SIZE / KINST_TRAMP_SIZE)

struct trampchunk {
	TAILQ_ENTRY(trampchunk) next;
	uint8_t *addr;
	/* 0 -> allocated, 1 -> free */
	BITSET_DEFINE(, KINST_TRAMPS_PER_CHUNK) free;
};

static TAILQ_HEAD(, trampchunk)	kinst_trampchunks =
    TAILQ_HEAD_INITIALIZER(kinst_trampchunks);
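/* Serializes access to the chunk list and the per-chunk free bitsets. */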
static struct sx		kinst_tramp_sx;
SX_SYSINIT(kinst_tramp_sx, &kinst_tramp_sx, "kinst tramp");
static eventhandler_tag		kinst_thread_ctor_handler;
static eventhandler_tag		kinst_thread_dtor_handler;

/*
 * Fill the trampolines with KINST_TRAMP_FILL_PATTERN so that the kernel will
 * crash cleanly if things somehow go wrong.
 */
static void
kinst_trampoline_fill(uint8_t *addr, int size)
{
	int i;

	for (i = 0; i < size; i += KINST_TRAMP_FILL_SIZE) {
		memcpy(&addr[i], KINST_TRAMP_FILL_PATTERN,
		    KINST_TRAMP_FILL_SIZE);
	}
}

static struct trampchunk *
kinst_trampchunk_alloc(void)
{
	struct trampchunk *chunk;
	vm_offset_t trampaddr;
	int error __diagused;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

#ifdef __amd64__
	/*
	 * To simplify population of trampolines, we follow the amd64 kernel's
	 * code model and allocate them above KERNBASE, i.e., in the top 2GB of
	 * the kernel's virtual address space (not the case for other
	 * platforms).
	 */
	trampaddr = KERNBASE;
#else
	trampaddr = VM_MIN_KERNEL_ADDRESS;
#endif
	/*
	 * Allocate virtual memory for the trampoline chunk. The returned
	 * address is saved in "trampaddr". Trampolines must be executable so
	 * max_prot must include VM_PROT_EXECUTE.
	 */
	error = vm_map_find(kernel_map, NULL, 0, &trampaddr,
	    KINST_TRAMPCHUNK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    0);
	if (error != KERN_SUCCESS) {
		KINST_LOG("trampoline chunk allocation failed: %d", error);
		return (NULL);
	}

	error = kmem_back(kernel_object, trampaddr, KINST_TRAMPCHUNK_SIZE,
	    M_WAITOK | M_EXEC);
	KASSERT(error == KERN_SUCCESS, ("kmem_back failed: %d", error));

	kinst_trampoline_fill((uint8_t *)trampaddr, KINST_TRAMPCHUNK_SIZE);

	/* Allocate a tracker for this chunk. */
	chunk = malloc(sizeof(*chunk), M_KINST, M_WAITOK);
	chunk->addr = (void *)trampaddr;
	BIT_FILL(KINST_TRAMPS_PER_CHUNK, &chunk->free);

	TAILQ_INSERT_HEAD(&kinst_trampchunks, chunk, next);

	return (chunk);
}

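/*
 * Release a trampoline chunk: unlink it from the global list, then unmap and
 * free its backing memory.
 */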
static void
kinst_trampchunk_free(struct trampchunk *chunk)
{
	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	TAILQ_REMOVE(&kinst_trampchunks, chunk, next);
	kmem_unback(kernel_object, (vm_offset_t)chunk->addr,
	    KINST_TRAMPCHUNK_SIZE);
	(void)vm_map_remove(kernel_map, (vm_offset_t)chunk->addr,
	    (vm_offset_t)(chunk->addr + KINST_TRAMPCHUNK_SIZE));
	free(chunk, M_KINST);
}

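/*
 * Return a free trampoline slot from an existing chunk, or allocate a new
 * chunk if all slots are in use.  Allocating a chunk may sleep, so callers
 * that cannot sleep must pass M_NOWAIT and handle a NULL return.
 */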
static uint8_t *
kinst_trampoline_alloc_locked(int how)
{
	struct trampchunk *chunk;
	uint8_t *tramp;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		/* All trampolines from this chunk are already allocated. */
		if ((off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free)) == 0)
			continue;
		/* BIT_FFS() returns indices starting at 1 instead of 0. */
		off--;
		break;
	}
	if (chunk == NULL) {
		if ((how & M_NOWAIT) != 0)
			return (NULL);

		/*
		 * We did not find a free trampoline in any existing chunk, so
		 * allocate a new chunk.  If that fails the provider will no
		 * longer be reliable, so try to warn the user.
		 */
		if ((chunk = kinst_trampchunk_alloc()) == NULL) {
			static bool once = true;

			if (once) {
				once = false;
				KINST_LOG(
				    "kinst: failed to allocate trampoline, "
				    "probes may not fire");
			}
			return (NULL);
		}
		off = 0;
	}
	BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
	tramp = chunk->addr + off * KINST_TRAMP_SIZE;
	return (tramp);
}

uint8_t *
kinst_trampoline_alloc(int how)
{
	uint8_t *tramp;

	sx_xlock(&kinst_tramp_sx);
	tramp = kinst_trampoline_alloc_locked(how);
	sx_xunlock(&kinst_tramp_sx);
	return (tramp);
}

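/*
 * Return a trampoline to its chunk's free set.  If freechunks is true and the
 * chunk ends up completely free, release the chunk as well.
 */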
static void
kinst_trampoline_dealloc_locked(uint8_t *tramp, bool freechunks)
{
	struct trampchunk *chunk;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	if (tramp == NULL)
		return;

	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		for (off = 0; off < KINST_TRAMPS_PER_CHUNK; off++) {
			if (chunk->addr + off * KINST_TRAMP_SIZE == tramp) {
				kinst_trampoline_fill(tramp, KINST_TRAMP_SIZE);
				BIT_SET(KINST_TRAMPS_PER_CHUNK, off,
				    &chunk->free);
				if (freechunks &&
				    BIT_ISFULLSET(KINST_TRAMPS_PER_CHUNK,
				    &chunk->free))
					kinst_trampchunk_free(chunk);
				return;
			}
		}
	}
	panic("%s: did not find trampoline chunk for %p", __func__, tramp);
}

void
kinst_trampoline_dealloc(uint8_t *tramp)
{
	sx_xlock(&kinst_tramp_sx);
	kinst_trampoline_dealloc_locked(tramp, true);
	sx_xunlock(&kinst_tramp_sx);
}

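/*
 * Each thread owns a private trampoline (td->t_kinst_tramp), allocated when
 * the thread is created and released when it is destroyed.
 */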
static void
kinst_thread_ctor(void *arg __unused, struct thread *td)
{
	td->t_kinst_tramp = kinst_trampoline_alloc(M_WAITOK);
}

static void
kinst_thread_dtor(void *arg __unused, struct thread *td)
{
	void *tramp;

	tramp = td->t_kinst_tramp;
	td->t_kinst_tramp = NULL;

	/*
	 * This assumes that the thread_dtor event permits sleeping, which
	 * appears to be true for the time being.
	 */
	kinst_trampoline_dealloc(tramp);
}

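/*
 * Register the thread constructor/destructor hooks and allocate a trampoline
 * for every thread that already exists.  Allocation may need to sleep, which
 * is not permitted while holding a process lock, hence the M_NOWAIT attempt
 * followed by a drop-and-retry with M_WAITOK.
 */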
int
kinst_trampoline_init(void)
{
	struct proc *p;
	struct thread *td;
	void *tramp;
	int error;

	kinst_thread_ctor_handler = EVENTHANDLER_REGISTER(thread_ctor,
	    kinst_thread_ctor, NULL, EVENTHANDLER_PRI_ANY);
	kinst_thread_dtor_handler = EVENTHANDLER_REGISTER(thread_dtor,
	    kinst_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);

	error = 0;
	tramp = NULL;

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
retry:
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if (td->t_kinst_tramp != NULL)
				continue;
			if (tramp == NULL) {
				/*
				 * Try to allocate a trampoline without dropping
				 * the process lock.  If all chunks are fully
				 * utilized, we must release the lock and try
				 * again.
				 */
				tramp = kinst_trampoline_alloc_locked(M_NOWAIT);
				if (tramp == NULL) {
					PROC_UNLOCK(p);
					tramp = kinst_trampoline_alloc_locked(
					    M_WAITOK);
					if (tramp == NULL) {
						/*
						 * Let the unload handler clean
						 * up.
						 */
						error = ENOMEM;
						goto out;
					} else
						goto retry;
				}
			}
			td->t_kinst_tramp = tramp;
			tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
out:
	sx_xunlock(&kinst_tramp_sx);
	sx_sunlock(&allproc_lock);
	return (error);
}

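/*
 * Tear down the trampoline allocator: deregister the thread hooks, strip the
 * trampoline from every thread, and free all chunks.
 */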
int
kinst_trampoline_deinit(void)
{
	struct trampchunk *chunk, *tmp;
	struct proc *p;
	struct thread *td;

	EVENTHANDLER_DEREGISTER(thread_ctor, kinst_thread_ctor_handler);
	EVENTHANDLER_DEREGISTER(thread_dtor, kinst_thread_dtor_handler);

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			kinst_trampoline_dealloc_locked(td->t_kinst_tramp,
			    false);
			td->t_kinst_tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
		kinst_trampchunk_free(chunk);
	sx_xunlock(&kinst_tramp_sx);

	return (0);
}