/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
 * Copyright (C) 2018, 2019 Andrew Turner
 *
 * This software was developed by Mitchell Horne under sponsorship of
 * the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kcov.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type");

#define	KCOV_ELEMENT_SIZE	sizeof(uint64_t)

/*
 * To know what the code can safely perform at any point in time we use a
 * state machine. In the normal case the state transitions are:
 *
 * OPEN -> READY -> RUNNING -> DYING
 *  |       | ^        |        ^ ^
 *  |       | +--------+        | |
 *  |       +-------------------+ |
 *  +-----------------------------+
 *
 * The states are:
 *  OPEN:   The kcov fd has been opened, but no buffer is available to store
 *          coverage data.
 *  READY:  The buffer to store coverage data has been allocated. Userspace
 *          can set this by using ioctl(fd, KIOSETBUFSIZE, entries);. When
 *          this has been set the buffer can be written to by the kernel,
 *          and mmaped by userspace.
 * RUNNING: The coverage probes are able to store coverage data in the buffer.
 *          This is entered with ioctl(fd, KIOENABLE, mode);. The RUNNING
 *          state can be exited with ioctl(fd, KIODISABLE); or by exiting the
 *          thread, either of which returns to the READY state to allow
 *          tracing to be reused, or by closing the kcov fd to enter the
 *          DYING state.
 * DYING:   The fd has been closed. All states can enter into this state when
 *          userspace closes the kcov fd.
 *
 * We need to be careful when moving into and out of the RUNNING state. As
 * an interrupt may happen while this is happening the ordering of memory
 * operations is important so struct kcov_info is valid for the tracing
 * functions.
 *
 * When moving into the RUNNING state prior stores to struct kcov_info need
 * to be observed before the state is set. This allows for interrupts that
 * may call into one of the coverage functions to fire at any point while
 * being enabled and see a consistent struct kcov_info.
 *
 * When moving out of the RUNNING state any later stores to struct kcov_info
 * need to be observed after the state is set. As with entering this is to
 * present a consistent struct kcov_info to interrupts.
 */
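
/*
 * A rough userspace sketch of driving this state machine (illustrative
 * only; the ioctl and mode definitions come from sys/kcov.h, error
 * handling is omitted, and "entries" is a caller-chosen element count):
 *
 *	int fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KIOSETBUFSIZE, entries);		// OPEN -> READY
 *	uint64_t *buf = mmap(NULL, entries * sizeof(uint64_t),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	buf[0] = 0;					// reset the record count
 *	ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC);	// READY -> RUNNING
 *	// ... exercise the kernel code to be traced ...
 *	ioctl(fd, KIODISABLE);				// RUNNING -> READY
 *	// buf[1] .. buf[buf[0]] now hold the recorded PCs
 *	close(fd);					// any state -> DYING
 */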
typedef enum {
	KCOV_STATE_INVALID,
	KCOV_STATE_OPEN,	/* The device is open, but with no buffer */
	KCOV_STATE_READY,	/* The buffer has been allocated */
	KCOV_STATE_RUNNING,	/* Recording trace data */
	KCOV_STATE_DYING,	/* The fd was closed */
} kcov_state_t;

/*
 * (l) Set while holding the kcov_lock mutex and not in the RUNNING state.
 * (o) Only set once while in the OPEN state. Cleaned up while in the DYING
 *     state, and with no thread associated with the struct kcov_info.
 * (s) Set atomically to enter or exit the RUNNING state, non-atomically
 *     otherwise. See above for a description of the other constraints while
 *     moving into or out of the RUNNING state.
 */
struct kcov_info {
	struct thread	*thread;	/* (l) */
	vm_object_t	bufobj;		/* (o) */
	vm_offset_t	kvaddr;		/* (o) */
	size_t		entries;	/* (o) */
	size_t		bufsize;	/* (o) */
	kcov_state_t	state;		/* (s) */
	int		mode;		/* (l) */
};

/* Prototypes */
static d_open_t		kcov_open;
static d_close_t	kcov_close;
static d_mmap_single_t	kcov_mmap_single;
static d_ioctl_t	kcov_ioctl;

static int  kcov_alloc(struct kcov_info *info, size_t entries);
static void kcov_free(struct kcov_info *info);
static void kcov_init(const void *unused);

static struct cdevsw kcov_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	kcov_open,
	.d_close =	kcov_close,
	.d_mmap_single = kcov_mmap_single,
	.d_ioctl =	kcov_ioctl,
	.d_name =	"kcov",
};

SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW, 0, "Kernel coverage");

static u_int kcov_max_entries = KCOV_MAXENTRIES;
SYSCTL_UINT(_kern_kcov, OID_AUTO, max_entries, CTLFLAG_RW,
    &kcov_max_entries, 0,
    "Maximum number of entries in the kcov buffer");

static struct mtx kcov_lock;
static int active_count;

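/*
 * The tracing functions below (trace_pc() and trace_cmp()) are registered
 * with the coverage machinery via cov_register_pc()/cov_register_cmp() when
 * the first kcov instance is enabled, and unregistered when the last one is
 * disabled; see kcov_ioctl() and kcov_thread_dtor(). get_kinfo() is the
 * common guard they use to decide whether the current thread is traced.
 */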
static struct kcov_info *
get_kinfo(struct thread *td)
{
	struct kcov_info *info;

	/* We might have a NULL thread when releasing the secondary CPUs */
	if (td == NULL)
		return (NULL);

	/*
	 * If we are in an interrupt, stop tracing as it is not explicitly
	 * part of a syscall.
	 */
	if (td->td_intr_nesting_level > 0 || td->td_intr_frame != NULL)
		return (NULL);

	/*
	 * If info is NULL or the state is not running, we are not tracing.
	 */
	info = td->td_kcov_info;
	if (info == NULL ||
	    atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING)
		return (NULL);

	return (info);
}
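
/*
 * Store one PC (KCOV_MODE_TRACE_PC). The buffer is an array of uint64_t:
 * buf[0] holds the number of return addresses recorded so far and
 * buf[1 .. entries - 1] hold the addresses themselves, so a new address
 * lands in buf[index + 1] and the count is then bumped.
 */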
static void
trace_pc(uintptr_t ret)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return;

	/*
	 * Check we are in the PC-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_PC)
		return;

	KASSERT(info->kvaddr != 0,
	    ("__sanitizer_cov_trace_pc: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];
	if (index + 2 > info->entries)
		return;

	buf[index + 1] = ret;
	buf[0] = index + 1;
}
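
/*
 * Store one comparison record (KCOV_MODE_TRACE_CMP). Each record is four
 * uint64_t values: the comparison type, both arguments and the return
 * address. buf[0] holds the record count, so record n occupies
 * buf[4n + 1] .. buf[4n + 4].
 */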
static bool
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uint64_t ret)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return (false);

	/*
	 * Check we are in the comparison-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_CMP)
		return (false);

	KASSERT(info->kvaddr != 0,
	    ("trace_cmp: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];

	/* Check we have space to store all elements */
	if (index * 4 + 4 + 1 > info->entries)
		return (false);

	while (1) {
		buf[index * 4 + 1] = type;
		buf[index * 4 + 2] = arg1;
		buf[index * 4 + 3] = arg2;
		buf[index * 4 + 4] = ret;

		if (atomic_cmpset_64(&buf[0], index, index + 1))
			break;
		buf[0] = index;
	}

	return (true);
}

/*
 * The fd is being closed, clean up everything we can.
 */
static void
kcov_mmap_cleanup(void *arg)
{
	struct kcov_info *info = arg;
	struct thread *thread;

	mtx_lock_spin(&kcov_lock);
	/*
	 * Move to KCOV_STATE_DYING to stop adding new entries.
	 *
	 * If the thread is running we need to wait until thread exit to
	 * clean up as it may currently be adding a new entry. If this is
	 * the case, being in KCOV_STATE_DYING will signal that the buffer
	 * needs to be cleaned up.
	 */
	atomic_store_int(&info->state, KCOV_STATE_DYING);
	atomic_thread_fence_seq_cst();
	thread = info->thread;
	mtx_unlock_spin(&kcov_lock);

	if (thread != NULL)
		return;

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state with no thread associated.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * The lack of a thread means nothing is currently using the buffers.
	 */
	kcov_free(info);
}

static int
kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK);
	info->state = KCOV_STATE_OPEN;
	info->thread = NULL;
	info->mode = -1;

	if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0)
		kcov_mmap_cleanup(info);

	return (error);
}

static int
kcov_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	KASSERT(info != NULL, ("kcov_close with no kcov_info structure"));

	/* Trying to close, but haven't disabled */
	if (info->state == KCOV_STATE_RUNNING)
		return (EBUSY);

	return (0);
}

static int
kcov_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	struct kcov_info *info;
	int error;

	if ((nprot & (PROT_EXEC | PROT_READ | PROT_WRITE)) !=
	    (PROT_READ | PROT_WRITE))
		return (EINVAL);

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries)
		return (EINVAL);

	vm_object_reference(info->bufobj);
	*offset = 0;
	*object = info->bufobj;
	return (0);
}
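
/*
 * Allocate the coverage buffer. It is mapped twice: into the kernel at
 * info->kvaddr so the trace functions can write to it, and into userspace
 * via the OBJT_PHYS object handed out by kcov_mmap_single() above. The
 * pages are wired so the trace functions never fault when storing entries.
 */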
static int
kcov_alloc(struct kcov_info *info, size_t entries)
{
	size_t n, pages;
	vm_page_t m;

	KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer"));
	KASSERT(info->state == KCOV_STATE_OPEN,
	    ("kcov_alloc: Not in open state (%x)", info->state));

	if (entries < 2 || entries > kcov_max_entries)
		return (EINVAL);

	/* Align to page size so mmap can't access other kernel memory */
	info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
	pages = info->bufsize / PAGE_SIZE;

	if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
		return (ENOMEM);

	info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
	    PROT_READ | PROT_WRITE, 0, curthread->td_ucred);

	VM_OBJECT_WLOCK(info->bufobj);
	for (n = 0; n < pages; n++) {
		m = vm_page_grab(info->bufobj, n,
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
	}
	VM_OBJECT_WUNLOCK(info->bufobj);

	info->entries = entries;

	return (0);
}

static void
kcov_free(struct kcov_info *info)
{
	vm_page_t m;
	size_t i;

	if (info->kvaddr != 0) {
		pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
		kva_free(info->kvaddr, info->bufsize);
	}
	if (info->bufobj != NULL) {
		VM_OBJECT_WLOCK(info->bufobj);
		m = vm_page_lookup(info->bufobj, 0);
		for (i = 0; i < info->bufsize / PAGE_SIZE; i++) {
			vm_page_lock(m);
			vm_page_unwire_noq(m);
			vm_page_unlock(m);

			m = vm_page_next(m);
		}
		VM_OBJECT_WUNLOCK(info->bufobj);
		vm_object_deallocate(info->bufobj);
	}
	free(info, M_KCOV_INFO);
}

static int
kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused,
    struct thread *td)
{
	struct kcov_info *info;
	int mode, error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (cmd == KIOSETBUFSIZE) {
		/*
		 * Set the size of the coverage buffer. Should be called
		 * before enabling coverage collection for that thread.
		 */
		if (info->state != KCOV_STATE_OPEN) {
			return (EBUSY);
		}
		error = kcov_alloc(info, *(u_int *)data);
		if (error == 0)
			info->state = KCOV_STATE_READY;
		return (error);
	}

	mtx_lock_spin(&kcov_lock);
	switch (cmd) {
	case KIOENABLE:
		if (info->state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		if (td->td_kcov_info != NULL) {
			error = EINVAL;
			break;
		}
		mode = *(int *)data;
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}

		/* Let's hope nobody opens this 2 billion times */
		KASSERT(active_count < INT_MAX,
		    ("%s: Open too many times", __func__));
		active_count++;
		if (active_count == 1) {
			cov_register_pc(&trace_pc);
			cov_register_cmp(&trace_cmp);
		}

		KASSERT(info->thread == NULL,
		    ("Enabling kcov when already enabled"));
		info->thread = td;
		info->mode = mode;
		/*
		 * Ensure the mode has been set before starting coverage
		 * tracing.
		 */
		atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING);
		td->td_kcov_info = info;
		break;
	case KIODISABLE:
		/* Only the currently enabled thread may disable itself */
		if (info->state != KCOV_STATE_RUNNING ||
		    info != td->td_kcov_info) {
			error = EINVAL;
			break;
		}
		KASSERT(active_count > 0, ("%s: Open count is zero", __func__));
		active_count--;
		if (active_count == 0) {
			cov_unregister_pc();
			cov_unregister_cmp();
		}

		td->td_kcov_info = NULL;
		atomic_store_int(&info->state, KCOV_STATE_READY);
		/*
		 * Ensure we have exited the RUNNING state before clearing the
		 * rest of the info struct.
		 */
		atomic_thread_fence_rel();
		info->mode = -1;
		info->thread = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	mtx_unlock_spin(&kcov_lock);

	return (error);
}

static void
kcov_thread_dtor(void *arg __unused, struct thread *td)
{
	struct kcov_info *info;

	info = td->td_kcov_info;
	if (info == NULL)
		return;

	mtx_lock_spin(&kcov_lock);
	KASSERT(active_count > 0, ("%s: Open count is zero", __func__));
	active_count--;
	if (active_count == 0) {
		cov_unregister_pc();
		cov_unregister_cmp();
	}
	td->td_kcov_info = NULL;
	if (info->state != KCOV_STATE_DYING) {
		/*
		 * The kcov file is still open. Mark it as unused and
		 * wait for it to be closed before cleaning up.
		 */
		atomic_store_int(&info->state, KCOV_STATE_READY);
		atomic_thread_fence_seq_cst();
		/* This info struct is unused */
		info->thread = NULL;
		mtx_unlock_spin(&kcov_lock);
		return;
	}
	mtx_unlock_spin(&kcov_lock);

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state and the only thread associated with it is
	 * the current thread, which is about to exit.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * It also stops the current thread from trying to use the info struct.
	 */
	kcov_free(info);
}

static void
kcov_init(const void *unused)
{
	struct make_dev_args args;
	struct cdev *dev;

	mtx_init(&kcov_lock, "kcov lock", NULL, MTX_SPIN);

	make_dev_args_init(&args);
	args.mda_devsw = &kcov_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = 0600;
	if (make_dev_s(&args, &dev, "kcov") != 0) {
		printf("Failed to create kcov device\n");
		return;
	}

	EVENTHANDLER_REGISTER(thread_dtor, kcov_thread_dtor, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(kcovdev, SI_SUB_LAST, SI_ORDER_ANY, kcov_init, NULL);