xref: /freebsd/sys/kern/kern_kcov.c (revision 28f4385e45a2681c14bd04b83fe1796eaefe8265)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
 * Copyright (C) 2018, 2019 Andrew Turner
 *
 * This software was developed by Mitchell Horne under sponsorship of
 * the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kcov.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <vm/pmap.h>

MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type");

#define	KCOV_ELEMENT_SIZE	sizeof(uint64_t)

/*
 * To know what the code can safely perform at any point in time we use a
 * state machine. In the normal case the state transitions are:
 *
 * OPEN -> READY -> RUNNING -> DYING
 *  |       | ^        |        ^ ^
 *  |       | +--------+        | |
 *  |       +-------------------+ |
 *  +-----------------------------+
 *
 * The states are:
 *  OPEN:   The kcov fd has been opened, but no buffer is available to store
 *          coverage data.
 *  READY:  The buffer to store coverage data has been allocated. Userspace
 *          can set this by using ioctl(fd, KIOSETBUFSIZE, entries). Once
 *          this has been set the buffer can be written to by the kernel,
 *          and mmapped by userspace.
 * RUNNING: The coverage probes are able to store coverage data in the
 *          buffer. This state is entered with ioctl(fd, KIOENABLE, mode).
 *          It is left for the READY state, so tracing can be reused, by
 *          ioctl(fd, KIODISABLE) or by the traced thread exiting, and for
 *          the DYING state by closing the kcov fd.
 * DYING:   The fd has been closed. All states can enter into this state when
 *          userspace closes the kcov fd.
 *
 * We need to be careful when moving into and out of the RUNNING state. As
 * an interrupt may fire during the transition, the ordering of memory
 * operations is important so that struct kcov_info stays valid for the
 * tracing functions.
 *
 * When moving into the RUNNING state, prior stores to struct kcov_info need
 * to be observed before the state is set. This allows interrupts that
 * may call into one of the coverage functions to fire at any point while
 * tracing is being enabled and still see a consistent struct kcov_info.
 *
 * When moving out of the RUNNING state, any later stores to struct kcov_info
 * need to be observed after the state is set. As with entering, this is to
 * present a consistent struct kcov_info to interrupts.
 */
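
/*
 * As an illustrative, uncompiled sketch of the flow described above (the
 * 1024-entry buffer size is an arbitrary example; the KIO* requests and
 * KCOV_MODE_TRACE_PC come from <sys/kcov.h>), a userspace consumer would
 * drive the state machine like so:
 *
 *	int fd = open("/dev/kcov", O_RDWR);		(OPEN)
 *	ioctl(fd, KIOSETBUFSIZE, 1024);			(OPEN -> READY)
 *	uint64_t *buf = mmap(NULL, 1024 * sizeof(uint64_t),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC);	(READY -> RUNNING)
 *	... run the code to be traced ...
 *	ioctl(fd, KIODISABLE);				(RUNNING -> READY)
 *	close(fd);					(-> DYING)
 */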
typedef enum {
	KCOV_STATE_INVALID,
	KCOV_STATE_OPEN,	/* The device is open, but with no buffer */
	KCOV_STATE_READY,	/* The buffer has been allocated */
	KCOV_STATE_RUNNING,	/* Recording trace data */
	KCOV_STATE_DYING,	/* The fd was closed */
} kcov_state_t;

/*
 * (l) Set while holding the kcov_lock mutex and not in the RUNNING state.
 * (o) Only set once while in the OPEN state. Cleaned up while in the DYING
 *     state, and with no thread associated with the struct kcov_info.
 * (s) Set atomically to enter or exit the RUNNING state, non-atomically
 *     otherwise. See above for a description of the other constraints while
 *     moving into or out of the RUNNING state.
 */
struct kcov_info {
	struct thread	*thread;	/* (l) */
	vm_object_t	bufobj;		/* (o) */
	vm_offset_t	kvaddr;		/* (o) */
	size_t		entries;	/* (o) */
	size_t		bufsize;	/* (o) */
	kcov_state_t	state;		/* (s) */
	int		mode;		/* (l) */
	bool		mmap;
};

/* Prototypes */
static d_open_t		kcov_open;
static d_close_t	kcov_close;
static d_mmap_single_t	kcov_mmap_single;
static d_ioctl_t	kcov_ioctl;

void __sanitizer_cov_trace_pc(void);
void __sanitizer_cov_trace_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_const_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_const_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_const_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_const_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_switch(uint64_t, uint64_t *);

static int  kcov_alloc(struct kcov_info *info, size_t entries);
static void kcov_init(const void *unused);

static struct cdevsw kcov_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	kcov_open,
	.d_close =	kcov_close,
	.d_mmap_single = kcov_mmap_single,
	.d_ioctl =	kcov_ioctl,
	.d_name =	"kcov",
};

SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW, 0, "Kernel coverage");

static u_int kcov_max_entries = KCOV_MAXENTRIES;
SYSCTL_UINT(_kern_kcov, OID_AUTO, max_entries, CTLFLAG_RW,
    &kcov_max_entries, 0,
    "Maximum number of entries in the kcov buffer");

static struct mtx kcov_lock;

static struct kcov_info *
get_kinfo(struct thread *td)
{
	struct kcov_info *info;

	/* We might have a NULL thread when releasing the secondary CPUs */
	if (td == NULL)
		return (NULL);

	/*
	 * We are in an interrupt; stop tracing as it is not explicitly
	 * part of a syscall.
	 */
	if (td->td_intr_nesting_level > 0 || td->td_intr_frame != NULL)
		return (NULL);

	/*
	 * If info is NULL or the state is not RUNNING, we are not tracing.
	 */
	info = td->td_kcov_info;
	if (info == NULL ||
	    atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING)
		return (NULL);

	return (info);
}

/*
 * Main entry point. A call to this function will be inserted by the
 * compiler at every edge, and if coverage is enabled for the thread
 * this function will add the PC to the buffer.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	/*
	 * To guarantee curthread is properly set, exit early while the
	 * kernel is cold, i.e. before it has finished booting.
	 */
	if (cold)
		return;

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return;

	/*
	 * Check we are in PC-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_PC)
		return;

	KASSERT(info->kvaddr != 0,
	    ("__sanitizer_cov_trace_pc: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];
	if (index + 2 > info->entries)
		return;

	buf[index + 1] = (uint64_t)__builtin_return_address(0);
	buf[0] = index + 1;
}

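/*
 * For reference, the layout produced above is: buf[0] holds the number of
 * recorded entries and the PCs follow from buf[1]. As an uncompiled,
 * illustrative sketch, a userspace reader could walk the mmapped buffer
 * like this:
 *
 *	uint64_t i, n = buf[0];
 *	for (i = 0; i < n; i++)
 *		printf("0x%jx\n", (uintmax_t)buf[i + 1]);
 */
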
static bool
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uint64_t ret)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	/*
	 * To guarantee curthread is properly set, exit early while the
	 * kernel is cold, i.e. before it has finished booting.
	 */
	if (cold)
		return (false);

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return (false);

	/*
	 * Check we are in comparison-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_CMP)
		return (false);

	KASSERT(info->kvaddr != 0,
	    ("trace_cmp: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];

	/* Check we have space to store all elements */
	if (index * 4 + 4 + 1 > info->entries)
		return (false);

	buf[index * 4 + 1] = type;
	buf[index * 4 + 2] = arg1;
	buf[index * 4 + 3] = arg2;
	buf[index * 4 + 4] = ret;
	buf[0] = index + 1;

	return (true);
}

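/*
 * For reference, the records written above are packed four 64-bit words
 * per comparison after the count in buf[0], so record i starts at
 * buf[4 * i + 1] and holds { type, arg1, arg2, return address }. A
 * hypothetical userspace reader could walk them like this (uncompiled
 * sketch):
 *
 *	uint64_t i, n = buf[0];
 *	for (i = 0; i < n; i++) {
 *		uint64_t *rec = &buf[4 * i + 1];
 *		printf("type %ju: %ju vs %ju at 0x%jx\n", (uintmax_t)rec[0],
 *		    (uintmax_t)rec[1], (uintmax_t)rec[2], (uintmax_t)rec[3]);
 *	}
 */
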
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

/*
 * val is the switch operand
 * cases[0] is the number of case constants
 * cases[1] is the size of val in bits
 * cases[2..n] are the case constants
 */
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, count, ret, type;

	count = cases[0];
	ret = (uint64_t)__builtin_return_address(0);

	switch (cases[1]) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}

	type |= KCOV_CMP_CONST;

	for (i = 0; i < count; i++)
		if (!trace_cmp(type, val, cases[i + 2], ret))
			return;
}

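/*
 * As an illustrative example (not actual compiler output): for a 32-bit
 * operand,
 *
 *	switch (x) { case 1: ...; case 5: ...; }
 *
 * the instrumentation passes cases = { 2, 32, 1, 5 }, and the loop above
 * records one KCOV_CMP_SIZE(2) | KCOV_CMP_CONST comparison of x against
 * each of the constants 1 and 5.
 */
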
/*
 * The fd is being closed, clean up everything we can.
 */
static void
kcov_mmap_cleanup(void *arg)
{
	struct kcov_info *info = arg;
	struct thread *thread;

	mtx_lock_spin(&kcov_lock);
	/*
	 * Move to KCOV_STATE_DYING to stop adding new entries.
	 *
	 * If the thread is running we need to wait until thread exit to
	 * clean up as it may currently be adding a new entry. If this is
	 * the case, being in KCOV_STATE_DYING will signal that the buffer
	 * needs to be cleaned up.
	 */
	atomic_store_int(&info->state, KCOV_STATE_DYING);
	atomic_thread_fence_seq_cst();
	thread = info->thread;
	mtx_unlock_spin(&kcov_lock);

	if (thread != NULL)
		return;

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state with no thread associated.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * The lack of a thread means nothing is currently using the buffers.
	 */

	if (info->kvaddr != 0) {
		pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
		kva_free(info->kvaddr, info->bufsize);
	}
	if (info->bufobj != NULL && !info->mmap)
		vm_object_deallocate(info->bufobj);
	free(info, M_KCOV_INFO);
}

static int
kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK);
	info->state = KCOV_STATE_OPEN;
	info->thread = NULL;
	info->mode = -1;
	info->mmap = false;

	if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0)
		kcov_mmap_cleanup(info);

	return (error);
}

static int
kcov_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	KASSERT(info != NULL, ("kcov_close with no kcov_info structure"));

	/* Trying to close, but haven't disabled */
	if (info->state == KCOV_STATE_RUNNING)
		return (EBUSY);

	return (0);
}

static int
kcov_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	struct kcov_info *info;
	int error;

	if ((nprot & (PROT_EXEC | PROT_READ | PROT_WRITE)) !=
	    (PROT_READ | PROT_WRITE))
		return (EINVAL);

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries ||
	    info->mmap != false)
		return (EINVAL);

	info->mmap = true;
	*offset = 0;
	*object = info->bufobj;
	return (0);
}

static int
kcov_alloc(struct kcov_info *info, size_t entries)
{
	size_t n, pages;
	vm_page_t *m;

	KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer"));
	KASSERT(info->state == KCOV_STATE_OPEN,
	    ("kcov_alloc: Not in open state (%x)", info->state));

	if (entries < 2 || entries > kcov_max_entries)
		return (EINVAL);

	/* Align to page size so mmap can't access other kernel memory */
	info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
	pages = info->bufsize / PAGE_SIZE;

	if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
		return (ENOMEM);

	info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
	    PROT_READ | PROT_WRITE, 0, curthread->td_ucred);

	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
	VM_OBJECT_WLOCK(info->bufobj);
	for (n = 0; n < pages; n++) {
		m[n] = vm_page_grab(info->bufobj, n,
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		m[n]->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(info->bufobj);
	pmap_qenter(info->kvaddr, m, pages);
	free(m, M_TEMP);

	info->entries = entries;

	return (0);
}

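/*
 * To put example numbers on the rounding above: a request for 1000 entries
 * needs 1000 * 8 = 8000 bytes of buffer, which roundup2() pads to 8192
 * bytes (two 4 KiB pages), so the pages later exposed to userspace through
 * mmap contain nothing but this buffer.
 */
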
static int
kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused,
    struct thread *td)
{
	struct kcov_info *info;
	int mode, error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (cmd == KIOSETBUFSIZE) {
		/*
		 * Set the size of the coverage buffer. Should be called
		 * before enabling coverage collection for that thread.
		 */
		if (info->state != KCOV_STATE_OPEN) {
			return (EBUSY);
		}
		error = kcov_alloc(info, *(u_int *)data);
		if (error == 0)
			info->state = KCOV_STATE_READY;
		return (error);
	}

	mtx_lock_spin(&kcov_lock);
	switch (cmd) {
	case KIOENABLE:
		if (info->state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		if (td->td_kcov_info != NULL) {
			error = EINVAL;
			break;
		}
		mode = *(int *)data;
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		KASSERT(info->thread == NULL,
		    ("Enabling kcov when already enabled"));
		info->thread = td;
		info->mode = mode;
		/*
		 * Ensure the mode has been set before starting coverage
		 * tracing.
		 */
		atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING);
		td->td_kcov_info = info;
		break;
	case KIODISABLE:
		/* Only the currently enabled thread may disable itself */
		if (info->state != KCOV_STATE_RUNNING ||
		    info != td->td_kcov_info) {
			error = EINVAL;
			break;
		}
		td->td_kcov_info = NULL;
		atomic_store_int(&info->state, KCOV_STATE_READY);
		/*
		 * Ensure we have exited the RUNNING state before clearing the
		 * rest of the info struct.
		 */
		atomic_thread_fence_rel();
		info->mode = -1;
		info->thread = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	mtx_unlock_spin(&kcov_lock);

	return (error);
}

static void
kcov_thread_dtor(void *arg __unused, struct thread *td)
{
	struct kcov_info *info;

	info = td->td_kcov_info;
	if (info == NULL)
		return;

	mtx_lock_spin(&kcov_lock);
	td->td_kcov_info = NULL;
	if (info->state != KCOV_STATE_DYING) {
		/*
		 * The kcov file is still open. Mark it as unused and
		 * wait for it to be closed before cleaning up.
		 */
		atomic_store_int(&info->state, KCOV_STATE_READY);
		atomic_thread_fence_seq_cst();
		/* This info struct is unused */
		info->thread = NULL;
		mtx_unlock_spin(&kcov_lock);
		return;
	}
	mtx_unlock_spin(&kcov_lock);

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state where the info struct is associated with
	 * the current thread that's about to exit.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * It also stops the current thread from trying to use the info struct.
	 */

	if (info->kvaddr != 0) {
		pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
		kva_free(info->kvaddr, info->bufsize);
	}
	if (info->bufobj != NULL && !info->mmap)
		vm_object_deallocate(info->bufobj);
	free(info, M_KCOV_INFO);
}

static void
kcov_init(const void *unused)
{
	struct make_dev_args args;
	struct cdev *dev;

	mtx_init(&kcov_lock, "kcov lock", NULL, MTX_SPIN);

	make_dev_args_init(&args);
	args.mda_devsw = &kcov_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = 0600;
	if (make_dev_s(&args, &dev, "kcov") != 0) {
		printf("Failed to create kcov device\n");
		return;
	}

	EVENTHANDLER_REGISTER(thread_dtor, kcov_thread_dtor, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(kcovdev, SI_SUB_DEVFS, SI_ORDER_ANY, kcov_init, NULL);