/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
 * Copyright (C) 2018, 2019 Andrew Turner
 *
 * This software was developed by Mitchell Horne under sponsorship of
 * the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Interceptors are required for KMSAN.
 *
 * NOTE(review): the comment above mentions KMSAN but the guard below only
 * tests KASAN/KCSAN — presumably SAN_RUNTIME must stay undefined under
 * KMSAN precisely so its interceptors remain active; confirm against the
 * sanitizer runtime build glue.
 */
#if defined(KASAN) || defined(KCSAN)
#define	SAN_RUNTIME
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kcov.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type");

/* Each trace record slot in the shared buffer is a 64-bit word. */
#define	KCOV_ELEMENT_SIZE	sizeof(uint64_t)

/*
 * To know what the code can safely perform at any point in time we use a
 * state machine. In the normal case the state transitions are:
 *
 * OPEN -> READY -> RUNNING -> DYING
 *  |        | ^      |  ^       ^
 *  |        | +------+  |       |
 *  |        +-----------+       |
 *  +----------------------------+
 *
 * The states are:
 *  OPEN:    The kcov fd has been opened, but no buffer is available to store
 *           coverage data.
 *  READY:   The buffer to store coverage data has been allocated. Userspace
 *           can set this by using ioctl(fd, KIOSETBUFSIZE, entries);. When
 *           this has been set the buffer can be written to by the kernel,
 *           and mmaped by userspace.
 * RUNNING:  The coverage probes are able to store coverage data in the buffer.
 *           This is entered with ioctl(fd, KIOENABLE, mode);. The READY state
 *           can be exited by ioctl(fd, KIODISABLE); or exiting the thread to
 *           return to the READY state to allow tracing to be reused, or by
 *           closing the kcov fd to enter the DYING state.
 * DYING:    The fd has been closed. All states can enter into this state when
 *           userspace closes the kcov fd.
 *
 * We need to be careful when moving into and out of the RUNNING state. As
 * an interrupt may happen while this is happening the ordering of memory
 * operations is important so struct kcov_info is valid for the tracing
 * functions.
 *
 * When moving into the RUNNING state prior stores to struct kcov_info need
 * to be observed before the state is set. This allows for interrupts that
 * may call into one of the coverage functions to fire at any point while
 * being enabled and see a consistent struct kcov_info.
 *
 * When moving out of the RUNNING state any later stores to struct kcov_info
 * need to be observed after the state is set. As with entering this is to
 * present a consistent struct kcov_info to interrupts.
 */
typedef enum {
	KCOV_STATE_INVALID,
	KCOV_STATE_OPEN,	/* The device is open, but with no buffer */
	KCOV_STATE_READY,	/* The buffer has been allocated */
	KCOV_STATE_RUNNING,	/* Recording trace data */
	KCOV_STATE_DYING,	/* The fd was closed */
} kcov_state_t;

/*
 * Locking key for struct kcov_info members:
 * (l) Set while holding the kcov_lock mutex and not in the RUNNING state.
 * (o) Only set once while in the OPEN state. Cleaned up while in the DYING
 *     state, and with no thread associated with the struct kcov_info.
 * (s) Set atomically to enter or exit the RUNNING state, non-atomically
 *     otherwise. See above for a description of the other constraints while
 *     moving into or out of the RUNNING state.
 */
struct kcov_info {
	struct thread	*thread;	/* (l) */
	vm_object_t	bufobj;		/* (o) */
	vm_offset_t	kvaddr;		/* (o) */
	size_t		entries;	/* (o) */
	size_t		bufsize;	/* (o) */
	kcov_state_t	state;		/* (s) */
	int		mode;		/* (l) */
};

/* Prototypes */
static d_open_t		kcov_open;
static d_close_t	kcov_close;
static d_mmap_single_t	kcov_mmap_single;
static d_ioctl_t	kcov_ioctl;

static int  kcov_alloc(struct kcov_info *info, size_t entries);
static void kcov_free(struct kcov_info *info);
static void kcov_init(const void *unused);

static struct cdevsw kcov_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	kcov_open,
	.d_close =	kcov_close,
	.d_mmap_single = kcov_mmap_single,
	.d_ioctl =	kcov_ioctl,
	.d_name =	"kcov",
};

SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel coverage");

static u_int kcov_max_entries = KCOV_MAXENTRIES;
SYSCTL_UINT(_kern_kcov, OID_AUTO, max_entries, CTLFLAG_RW,
    &kcov_max_entries, 0,
    "Maximum number of entries in the kcov buffer");

static struct mtx kcov_lock;
/* Number of threads currently tracing; probes register only while > 0. */
static int active_count;

/*
 * Return the kcov_info to trace into for the given thread, or NULL when
 * tracing must not happen (no thread, interrupt context, or the info is
 * absent / not in the RUNNING state).  The acquire load on state pairs
 * with the release store made when entering RUNNING so the rest of the
 * struct is observed consistently.
 */
static struct kcov_info *
get_kinfo(struct thread *td)
{
	struct kcov_info *info;

	/* We might have a NULL thread when releasing the secondary CPUs */
	if (td == NULL)
		return (NULL);

	/*
	 * We are in an interrupt, stop tracing as it is not explicitly
	 * part of a syscall.
	 */
	if (td->td_intr_nesting_level > 0 || td->td_intr_frame != NULL)
		return (NULL);

	/*
	 * If info is NULL or the state is not running we are not tracing.
	 */
	info = td->td_kcov_info;
	if (info == NULL ||
	    atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING)
		return (NULL);

	return (info);
}

/*
 * PC-trace probe: append the caller's return address to the per-thread
 * buffer.  buf[0] is the record count; records start at buf[1].  Must not
 * itself be sanitized, hence the __nosanitize* attributes.
 */
static void __nosanitizeaddress __nosanitizememory
trace_pc(uintptr_t ret)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return;

	/*
	 * Check we are in the PC-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_PC)
		return;

	KASSERT(info->kvaddr != 0,
	    ("__sanitizer_cov_trace_pc: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];
	/* Drop the record if the buffer is full (header slot + new entry). */
	if (index + 2 > info->entries)
		return;

	buf[index + 1] = ret;
	buf[0] = index + 1;
}

/*
 * Comparison-trace probe: append a 4-word record (type, both operands,
 * return address) to the per-thread buffer.  Returns true when a record
 * was stored.  buf[0] holds the record count.
 */
static bool __nosanitizeaddress __nosanitizememory
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uint64_t ret)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return (false);

	/*
	 * Check we are in the comparison-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_CMP)
		return (false);

	KASSERT(info->kvaddr != 0,
	    ("__sanitizer_cov_trace_pc: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];

	/* Check we have space to store all elements */
	if (index * 4 + 4 + 1 > info->entries)
		return (false);

	while (1) {
		buf[index * 4 + 1] = type;
		buf[index * 4 + 2] = arg1;
		buf[index * 4 + 3] = arg2;
		buf[index * 4 + 4] = ret;

		/*
		 * Publish the record by advancing the count; if another
		 * writer raced us, reset the count to our snapshot and
		 * rewrite our record.
		 * NOTE(review): the reset discards the racing writer's
		 * record — presumably intentional (last writer wins);
		 * confirm against the kcov design notes.
		 */
		if (atomic_cmpset_64(&buf[0], index, index + 1))
			break;
		buf[0] = index;
	}

	return (true);
}
/*
 * The fd is being closed, cleanup everything we can.
 *
 * Registered as the cdevpriv destructor from kcov_open(); also invoked
 * directly on a failed devfs_set_cdevpriv().
 */
static void
kcov_mmap_cleanup(void *arg)
{
	struct kcov_info *info = arg;
	struct thread *thread;

	mtx_lock_spin(&kcov_lock);
	/*
	 * Move to KCOV_STATE_DYING to stop adding new entries.
	 *
	 * If the thread is running we need to wait until thread exit to
	 * clean up as it may currently be adding a new entry. If this is
	 * the case being in KCOV_STATE_DYING will signal that the buffer
	 * needs to be cleaned up.
	 */
	atomic_store_int(&info->state, KCOV_STATE_DYING);
	atomic_thread_fence_seq_cst();
	thread = info->thread;
	mtx_unlock_spin(&kcov_lock);

	/* A tracing thread still owns the buffer; kcov_thread_dtor frees. */
	if (thread != NULL)
		return;

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state with no thread associated.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * The lack of a thread means nothing is currently using the buffers.
	 */
	kcov_free(info);
}

/*
 * devfs open handler: allocate a per-fd kcov_info in the OPEN state and
 * attach it as cdevpriv with kcov_mmap_cleanup as its destructor.
 */
static int
kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK);
	info->state = KCOV_STATE_OPEN;
	info->thread = NULL;
	info->mode = -1;

	/* On failure free the info ourselves; the destructor never ran. */
	if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0)
		kcov_mmap_cleanup(info);

	return (error);
}

/*
 * devfs close handler: refuse to close while tracing is still enabled.
 * Actual teardown happens via the cdevpriv destructor.
 */
static int
kcov_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	KASSERT(info != NULL, ("kcov_close with no kcov_info structure"));

	/* Trying to close, but haven't disabled */
	if (info->state == KCOV_STATE_RUNNING)
		return (EBUSY);

	return (0);
}

/*
 * devfs mmap_single handler: hand userspace a reference to the buffer
 * object.  Only read+write (no exec) mappings of exactly the allocated
 * size are permitted.
 */
static int
kcov_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	struct kcov_info *info;
	int error;

	if ((nprot & (PROT_EXEC | PROT_READ | PROT_WRITE)) !=
	    (PROT_READ | PROT_WRITE))
		return (EINVAL);

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries)
		return (EINVAL);

	/* Extra reference handed to the mapping; dropped on unmap. */
	vm_object_reference(info->bufobj);
	*offset = 0;
	*object = info->bufobj;
	return (0);
}

/*
 * Allocate the trace buffer: a wired, zeroed OBJT_PHYS object mapped into
 * KVA so the probes can write it, and later mmap-able by userspace.
 * Returns 0 on success, EINVAL for a bad entry count, ENOMEM on KVA
 * exhaustion.
 */
static int
kcov_alloc(struct kcov_info *info, size_t entries)
{
	size_t n, pages;
	vm_page_t m;

	KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer"));
	KASSERT(info->state == KCOV_STATE_OPEN,
	    ("kcov_alloc: Not in open state (%x)", info->state));

	if (entries < 2 || entries > kcov_max_entries)
		return (EINVAL);

	/* Align to page size so mmap can't access other kernel memory */
	info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
	pages = info->bufsize / PAGE_SIZE;

	if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
		return (ENOMEM);

	info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
	    PROT_READ | PROT_WRITE, 0, curthread->td_ucred);

	VM_OBJECT_WLOCK(info->bufobj);
	for (n = 0; n < pages; n++) {
		m = vm_page_grab(info->bufobj, n,
		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		vm_page_valid(m);
		vm_page_xunbusy(m);
		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
	}
	VM_OBJECT_WUNLOCK(info->bufobj);

	info->entries = entries;

	return (0);
}

/*
 * Release everything kcov_alloc() and kcov_open() set up: KVA mapping,
 * page wirings, the buffer object reference, and the info struct itself.
 * Caller guarantees no thread can still be tracing into the buffer.
 */
static void
kcov_free(struct kcov_info *info)
{
	vm_page_t m;
	size_t i;

	if (info->kvaddr != 0) {
		pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
		kva_free(info->kvaddr, info->bufsize);
	}
	if (info->bufobj != NULL) {
		VM_OBJECT_WLOCK(info->bufobj);
		m = vm_page_lookup(info->bufobj, 0);
		for (i = 0; i < info->bufsize / PAGE_SIZE; i++) {
			vm_page_unwire_noq(m);
			m = vm_page_next(m);
		}
		VM_OBJECT_WUNLOCK(info->bufobj);
		vm_object_deallocate(info->bufobj);
	}
	free(info, M_KCOV_INFO);
}

/*
 * devfs ioctl handler implementing the kcov control interface:
 *  KIOSETBUFSIZE - allocate the buffer (OPEN -> READY),
 *  KIOENABLE     - bind tracing to the calling thread (READY -> RUNNING),
 *  KIODISABLE    - detach tracing from the calling thread
 *                  (RUNNING -> READY).
 */
static int
kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused,
    struct thread *td)
{
	struct kcov_info *info;
	int mode, error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (cmd == KIOSETBUFSIZE) {
		/*
		 * Set the size of the coverage buffer. Should be called
		 * before enabling coverage collection for that thread.
		 */
		if (info->state != KCOV_STATE_OPEN) {
			return (EBUSY);
		}
		error = kcov_alloc(info, *(u_int *)data);
		if (error == 0)
			info->state = KCOV_STATE_READY;
		return (error);
	}

	mtx_lock_spin(&kcov_lock);
	switch (cmd) {
	case KIOENABLE:
		if (info->state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		/* Each thread may drive at most one kcov instance. */
		if (td->td_kcov_info != NULL) {
			error = EINVAL;
			break;
		}
		mode = *(int *)data;
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}

		/* Lets hope nobody opens this 2 billion times */
		KASSERT(active_count < INT_MAX,
		    ("%s: Open too many times", __func__));
		active_count++;
		/* First tracer registers the coverage probes globally. */
		if (active_count == 1) {
			cov_register_pc(&trace_pc);
			cov_register_cmp(&trace_cmp);
		}

		KASSERT(info->thread == NULL,
		    ("Enabling kcov when already enabled"));
		info->thread = td;
		info->mode = mode;
		/*
		 * Ensure the mode has been set before starting coverage
		 * tracing.
		 */
		atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING);
		td->td_kcov_info = info;
		break;
	case KIODISABLE:
		/* Only the currently enabled thread may disable itself */
		if (info->state != KCOV_STATE_RUNNING ||
		    info != td->td_kcov_info) {
			error = EINVAL;
			break;
		}
		KASSERT(active_count > 0, ("%s: Open count is zero", __func__));
		active_count--;
		/* Last tracer unregisters the probes. */
		if (active_count == 0) {
			cov_unregister_pc();
			cov_unregister_cmp();
		}

		td->td_kcov_info = NULL;
		atomic_store_int(&info->state, KCOV_STATE_READY);
		/*
		 * Ensure we have exited the READY state before clearing the
		 * rest of the info struct.
		 */
		atomic_thread_fence_rel();
		info->mode = -1;
		info->thread = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	mtx_unlock_spin(&kcov_lock);

	return (error);
}

/*
 * thread_dtor event handler: a thread with tracing enabled is exiting.
 * Detach it from its kcov_info; free the info here only when the fd was
 * already closed (DYING), otherwise return it to READY for reuse.
 */
static void
kcov_thread_dtor(void *arg __unused, struct thread *td)
{
	struct kcov_info *info;

	info = td->td_kcov_info;
	if (info == NULL)
		return;

	mtx_lock_spin(&kcov_lock);
	KASSERT(active_count > 0, ("%s: Open count is zero", __func__));
	active_count--;
	if (active_count == 0) {
		cov_unregister_pc();
		cov_unregister_cmp();
	}
	td->td_kcov_info = NULL;
	if (info->state != KCOV_STATE_DYING) {
		/*
		 * The kcov file is still open. Mark it as unused and
		 * wait for it to be closed before cleaning up.
		 */
		atomic_store_int(&info->state, KCOV_STATE_READY);
		atomic_thread_fence_seq_cst();
		/* This info struct is unused */
		info->thread = NULL;
		mtx_unlock_spin(&kcov_lock);
		return;
	}
	mtx_unlock_spin(&kcov_lock);

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state where the info struct is associated with
	 * the current thread that's about to exit.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * It also stops the current thread from trying to use the info struct.
	 */
	kcov_free(info);
}

/*
 * SYSINIT hook: initialize the global lock, create /dev/kcov (root-only,
 * mode 0600), and hook thread destruction for tracer cleanup.
 */
static void
kcov_init(const void *unused)
{
	struct make_dev_args args;
	struct cdev *dev;

	mtx_init(&kcov_lock, "kcov lock", NULL, MTX_SPIN);

	make_dev_args_init(&args);
	args.mda_devsw = &kcov_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = 0600;
	if (make_dev_s(&args, &dev, "kcov") != 0) {
		printf("%s", "Failed to create kcov device");
		return;
	}

	EVENTHANDLER_REGISTER(thread_dtor, kcov_thread_dtor, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(kcovdev, SI_SUB_LAST, SI_ORDER_ANY, kcov_init, NULL);