xref: /freebsd/sys/cddl/dev/kinst/trampoline.c (revision 54b955f4df5e76b5679ba7f3eb6bb2d5fc62923d)
1 /*
2  * SPDX-License-Identifier: CDDL 1.0
3  *
4  * Copyright 2022 Christos Margiolis <christos@FreeBSD.org>
5  * Copyright 2022 Mark Johnston <markj@FreeBSD.org>
6  */
7 
8 #include <sys/param.h>
9 #include <sys/bitset.h>
10 #include <sys/cred.h>
11 #include <sys/eventhandler.h>
12 #include <sys/kernel.h>
13 #include <sys/lock.h>
14 #include <sys/malloc.h>
15 #include <sys/proc.h>
16 #include <sys/queue.h>
17 #include <sys/sx.h>
18 
19 #include <vm/vm.h>
20 #include <vm/vm_param.h>
21 #include <vm/pmap.h>
22 #include <vm/vm_map.h>
23 #include <vm/vm_kern.h>
24 #include <vm/vm_object.h>
25 
26 #include <cddl/dev/dtrace/dtrace_cddl.h>
27 
28 #include "kinst.h"
29 #include "kinst_isa.h"
30 
/* Number of trampoline slots carved out of each chunk. */
#define KINST_TRAMPS_PER_CHUNK	(KINST_TRAMPCHUNK_SIZE / KINST_TRAMP_SIZE)

/*
 * Tracker for one contiguous block of executable KVA from which individual
 * trampolines are handed out.  Chunks live on the global kinst_trampchunks
 * list and are created/destroyed under kinst_tramp_sx.
 */
struct trampchunk {
	TAILQ_ENTRY(trampchunk) next;
	/* Base address of the chunk's executable mapping. */
	uint8_t *addr;
	/* 0 -> allocated, 1 -> free */
	BITSET_DEFINE(, KINST_TRAMPS_PER_CHUNK) free;
};

/* All chunks ever allocated, most recently allocated first. */
static TAILQ_HEAD(, trampchunk)	kinst_trampchunks =
    TAILQ_HEAD_INITIALIZER(kinst_trampchunks);
/* Serializes all chunk and trampoline allocation/deallocation. */
static struct sx		kinst_tramp_sx;
SX_SYSINIT(kinst_tramp_sx, &kinst_tramp_sx, "kinst tramp");
/* Hooks used to manage per-thread trampolines; see kinst_trampoline_init(). */
static eventhandler_tag		kinst_thread_ctor_handler;
static eventhandler_tag		kinst_thread_dtor_handler;
46 
47 /*
48  * Fill the trampolines with KINST_TRAMP_FILL_PATTERN so that the kernel will
49  * crash cleanly if things somehow go wrong.
50  */
51 static void
52 kinst_trampoline_fill(uint8_t *addr, int size)
53 {
54 	int i;
55 
56 	for (i = 0; i < size; i += KINST_TRAMP_FILL_SIZE) {
57 		memcpy(&addr[i], KINST_TRAMP_FILL_PATTERN,
58 		    KINST_TRAMP_FILL_SIZE);
59 	}
60 }
61 
/*
 * Allocate a new chunk of executable memory for trampolines, fill it with
 * the crash pattern, and link a tracker for it onto the global chunk list.
 * Returns NULL if the KVA reservation fails.  The caller must hold
 * kinst_tramp_sx exclusively.  May sleep (kmem_back with M_WAITOK).
 */
static struct trampchunk *
kinst_trampchunk_alloc(void)
{
	struct trampchunk *chunk;
	vm_offset_t trampaddr;
	int error __diagused;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

#ifdef __amd64__
	/*
	 * To simplify population of trampolines, we follow the amd64 kernel's
	 * code model and allocate them above KERNBASE, i.e., in the top 2GB of
	 * the kernel's virtual address space (not the case for other
	 * platforms).
	 */
	trampaddr = KERNBASE;
#else
	trampaddr = VM_MIN_KERNEL_ADDRESS;
#endif
	/*
	 * Allocate virtual memory for the trampoline chunk. The returned
	 * address is saved in "trampaddr". Trampolines must be executable so
	 * max_prot must include VM_PROT_EXECUTE.
	 */
	error = vm_map_find(kernel_map, NULL, 0, &trampaddr,
	    KINST_TRAMPCHUNK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    0);
	if (error != KERN_SUCCESS) {
		KINST_LOG("trampoline chunk allocation failed: %d", error);
		return (NULL);
	}

	/*
	 * Back the reservation with pages.  M_WAITOK means this cannot fail
	 * with a resource shortage, hence the KASSERT rather than a branch.
	 */
	error = kmem_back(kernel_object, trampaddr, KINST_TRAMPCHUNK_SIZE,
	    M_WAITOK | M_EXEC);
	KASSERT(error == KERN_SUCCESS, ("kmem_back failed: %d", error));

	/* Poison the whole chunk before any trampoline is handed out. */
	kinst_trampoline_fill((uint8_t *)trampaddr, KINST_TRAMPCHUNK_SIZE);

	/* Allocate a tracker for this chunk. */
	chunk = malloc(sizeof(*chunk), M_KINST, M_WAITOK);
	chunk->addr = (void *)trampaddr;
	/* All slots start out free. */
	BIT_FILL(KINST_TRAMPS_PER_CHUNK, &chunk->free);

	TAILQ_INSERT_HEAD(&kinst_trampchunks, chunk, next);

	return (chunk);
}
110 
/*
 * Tear down a trampoline chunk: unlink it from the global list, release the
 * backing pages, unmap its KVA, and free the tracker.  The caller must hold
 * kinst_tramp_sx exclusively and guarantee no trampoline in the chunk is
 * still in use.
 */
static void
kinst_trampchunk_free(struct trampchunk *chunk)
{
	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	/* Unlink first so the chunk cannot be found once torn down. */
	TAILQ_REMOVE(&kinst_trampchunks, chunk, next);
	kmem_unback(kernel_object, (vm_offset_t)chunk->addr,
	    KINST_TRAMPCHUNK_SIZE);
	(void)vm_map_remove(kernel_map, (vm_offset_t)chunk->addr,
	    (vm_offset_t)(chunk->addr + KINST_TRAMPCHUNK_SIZE));
	free(chunk, M_KINST);
}
123 
124 static uint8_t *
125 kinst_trampoline_alloc_locked(int how)
126 {
127 	struct trampchunk *chunk;
128 	uint8_t *tramp;
129 	int off;
130 
131 	sx_assert(&kinst_tramp_sx, SX_XLOCKED);
132 
133 	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
134 		/* All trampolines from this chunk are already allocated. */
135 		if ((off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free)) == 0)
136 			continue;
137 		/* BIT_FFS() returns indices starting at 1 instead of 0. */
138 		off--;
139 		break;
140 	}
141 	if (chunk == NULL) {
142 		if ((how & M_NOWAIT) != 0)
143 			return (NULL);
144 
145 		/*
146 		 * We didn't find any free trampoline in the current list,
147 		 * allocate a new one.  If that fails the provider will no
148 		 * longer be reliable, so try to warn the user.
149 		 */
150 		if ((chunk = kinst_trampchunk_alloc()) == NULL) {
151 			static bool once = true;
152 
153 			if (once) {
154 				once = false;
155 				KINST_LOG(
156 				    "kinst: failed to allocate trampoline, "
157 				    "probes may not fire");
158 			}
159 			return (NULL);
160 		}
161 		off = 0;
162 	}
163 	BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
164 	tramp = chunk->addr + off * KINST_TRAMP_SIZE;
165 	return (tramp);
166 }
167 
168 uint8_t *
169 kinst_trampoline_alloc(int how)
170 {
171 	uint8_t *tramp;
172 
173 	sx_xlock(&kinst_tramp_sx);
174 	tramp = kinst_trampoline_alloc_locked(how);
175 	sx_xunlock(&kinst_tramp_sx);
176 	return (tramp);
177 }
178 
/*
 * Return "tramp" to its owning chunk's free set, re-poisoning its bytes
 * first.  If "freechunks" is true and the chunk becomes entirely free, the
 * chunk itself is released.  A NULL "tramp" is a no-op; a pointer that does
 * not match any chunk slot exactly is a bug and panics.  The caller must
 * hold kinst_tramp_sx exclusively.
 */
static void
kinst_trampoline_dealloc_locked(uint8_t *tramp, bool freechunks)
{
	struct trampchunk *chunk;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	if (tramp == NULL)
		return;

	/* Linear search for the chunk/slot that owns this trampoline. */
	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		for (off = 0; off < KINST_TRAMPS_PER_CHUNK; off++) {
			if (chunk->addr + off * KINST_TRAMP_SIZE == tramp) {
				/* Re-poison so stale jumps crash cleanly. */
				kinst_trampoline_fill(tramp, KINST_TRAMP_SIZE);
				BIT_SET(KINST_TRAMPS_PER_CHUNK, off,
				    &chunk->free);
				if (freechunks &&
				    BIT_ISFULLSET(KINST_TRAMPS_PER_CHUNK,
				    &chunk->free))
					kinst_trampchunk_free(chunk);
				return;
			}
		}
	}
	panic("%s: did not find trampoline chunk for %p", __func__, tramp);
}
206 
/*
 * Locked wrapper around kinst_trampoline_dealloc_locked(), releasing the
 * owning chunk as well if it becomes entirely free.
 */
void
kinst_trampoline_dealloc(uint8_t *tramp)
{
	sx_xlock(&kinst_tramp_sx);
	kinst_trampoline_dealloc_locked(tramp, true);
	sx_xunlock(&kinst_tramp_sx);
}
214 
/*
 * thread_ctor event handler: give every newly created thread its own
 * trampoline.  M_WAITOK may sleep, which is permitted in thread_ctor
 * context.
 */
static void
kinst_thread_ctor(void *arg __unused, struct thread *td)
{
	td->t_kinst = kinst_trampoline_alloc(M_WAITOK);
}
220 
/*
 * thread_dtor event handler: release the thread's trampoline.  The pointer
 * is cleared before deallocation so the dying thread never holds a
 * reference to a freed trampoline.
 */
static void
kinst_thread_dtor(void *arg __unused, struct thread *td)
{
	void *tramp;

	tramp = td->t_kinst;
	td->t_kinst = NULL;

	/*
	 * This assumes that the thread_dtor event permits sleeping, which
	 * appears to be true for the time being.
	 */
	kinst_trampoline_dealloc(tramp);
}
235 
/*
 * Provider initialization: register the thread ctor/dtor hooks (so threads
 * created from now on get trampolines automatically), then walk every
 * existing thread in the system and assign each one a trampoline.  Returns
 * 0 on success or ENOMEM if a trampoline chunk could not be allocated, in
 * which case the unload handler is expected to clean up.
 */
int
kinst_trampoline_init(void)
{
	struct proc *p;
	struct thread *td;
	void *tramp;
	int error;

	kinst_thread_ctor_handler = EVENTHANDLER_REGISTER(thread_ctor,
	    kinst_thread_ctor, NULL, EVENTHANDLER_PRI_ANY);
	kinst_thread_dtor_handler = EVENTHANDLER_REGISTER(thread_dtor,
	    kinst_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);

	error = 0;
	/*
	 * "tramp" carries an allocated-but-unassigned trampoline across the
	 * retry path below; it is consumed when assigned to a thread.
	 */
	tramp = NULL;

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
retry:
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			/* Already populated by the ctor hook, perhaps. */
			if (td->t_kinst != NULL)
				continue;
			if (tramp == NULL) {
				/*
				 * Try to allocate a trampoline without dropping
				 * the process lock.  If all chunks are fully
				 * utilized, we must release the lock and try
				 * again.
				 */
				tramp = kinst_trampoline_alloc_locked(M_NOWAIT);
				if (tramp == NULL) {
					PROC_UNLOCK(p);
					/* Sleeping allocation; proc unlocked. */
					tramp = kinst_trampoline_alloc_locked(
					    M_WAITOK);
					if (tramp == NULL) {
						/*
						 * Let the unload handler clean
						 * up.
						 */
						error = ENOMEM;
						goto out;
					} else
						goto retry;
				}
			}
			td->t_kinst = tramp;
			tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
out:
	sx_xunlock(&kinst_tramp_sx);
	sx_sunlock(&allproc_lock);
	return (error);
}
293 
/*
 * Provider teardown, mirroring kinst_trampoline_init(): deregister the
 * thread hooks first (so no new trampolines are handed out), mark every
 * thread's trampoline free without releasing chunks (freechunks == false
 * avoids per-trampoline chunk teardown during the scan), then free all
 * chunks in a single pass.  Always returns 0.
 */
int
kinst_trampoline_deinit(void)
{
	struct trampchunk *chunk, *tmp;
	struct proc *p;
	struct thread *td;

	EVENTHANDLER_DEREGISTER(thread_ctor, kinst_thread_ctor_handler);
	EVENTHANDLER_DEREGISTER(thread_dtor, kinst_thread_dtor_handler);

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			kinst_trampoline_dealloc_locked(td->t_kinst, false);
			td->t_kinst = NULL;
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	/* All trampolines are free now; release every chunk. */
	TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
		kinst_trampchunk_free(chunk);
	sx_xunlock(&kinst_tramp_sx);

	return (0);
}
321