// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kmsan-checks.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;
	local_lock_t		lock;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}
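
/*
 * As a concrete illustration of the descriptor state machine documented
 * above struct kcov, a minimal userspace sketch (not part of this file;
 * COVER_SIZE is an arbitrary example value and error handling is omitted):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	(issue the syscalls of interest, then read the buffer as shown
 *	 after __sanitizer_cov_trace_pc() below)
 *	ioctl(fd, KCOV_DISABLE, 0);
 *	munmap(cover, COVER_SIZE * sizeof(unsigned long));
 *	close(fd);
 */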

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
				 unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
	/*
	 * KMSAN doesn't instrument this file, so it may not know area->list
	 * is initialized. Unpoison it explicitly to avoid reports in
	 * kcov_remote_area_get().
	 */
	kmsan_unpoison_memory(&area->list, sizeof(area->list));
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		/*
		 * Previously we wrote the PC before updating pos. However,
		 * some early interrupt code could bypass the
		 * check_kcov_mode() check and invoke
		 * __sanitizer_cov_trace_pc(). If such an interrupt is raised
		 * between writing the PC and updating pos, the PC could be
		 * overwritten by the recursive __sanitizer_cov_trace_pc().
		 * Update pos before writing the PC to avoid such
		 * interleaving.
		 */
		WRITE_ONCE(area[0], pos);
		barrier();
		area[pos] = ip;
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
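
/*
 * To illustrate the counter protocol maintained above: userspace typically
 * zeroes cover[0] before the syscall under test and reads the collected PCs
 * afterwards. A sketch of the consuming side (not part of this file):
 *
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	syscall(...);
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 */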

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		/* See comment in __sanitizer_cov_trace_pc(). */
		WRITE_ONCE(area[0], count + 1);
		barrier();
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(kcov_u64 val, void *arg)
{
	u64 i;
	u64 *cases = arg;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
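
/*
 * Each record written by write_comp_data() above occupies KCOV_WORDS_PER_CMP
 * (4) consecutive u64 words: type, arg1, arg2, PC. A sketch of how userspace
 * might walk a KCOV_TRACE_CMP buffer (not part of this file; cover is
 * assumed to be mapped as a uint64_t array here):
 *
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (i = 0; i < n; i++) {
 *		uint64_t *rec = &cover[1 + i * 4];
 *
 *		printf("type 0x%llx: 0x%llx vs 0x%llx at 0x%llx\n",
 *		       rec[0], rec[1], rec[2], rec[3]);
 *	}
 */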

static void kcov_start(struct task_struct *t, struct kcov *kcov,
		       unsigned int size, void *area, enum kcov_mode mode,
		       int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	spin_unlock_irqrestore(&kcov->lock, flags);
	vm_flags_set(vma, VM_DONTEXPAND);
	for (off = 0; off < size; off += PAGE_SIZE) {
		page = vmalloc_to_page(kcov->area + off);
		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res) {
			pr_warn_once("kcov: vm_insert_page() failed\n");
			return res;
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				     bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}
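
/*
 * For reference, a handle packs the 1-byte subsystem id into the top byte
 * and the 4-byte instance id into the low bytes (KCOV_SUBSYSTEM_MASK and
 * KCOV_INSTANCE_MASK in include/uapi/linux/kcov.h). E.g. a handle built
 * with kcov_remote_handle(KCOV_SUBSYSTEM_USB, 1) passes the check above
 * when uncommon_valid is set, while a KCOV_SUBSYSTEM_COMMON handle with a
 * non-zero instance id requires common_valid.
 */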

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long flags, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * By this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily via KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if ((unsigned long)remote_arg->area_size >
		    LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
					       false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
					       true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
						 remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
						       flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long size, flags;
	void *area;

	kcov = filep->private_data;
	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 *
		 * First check the size argument - it must be at least 2
		 * to hold the current position and one PC.
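		 * For example, arg = 1 << 16 reserves one word for the
		 * position counter plus 65535 PC slots, i.e. 512 KiB on a
		 * 64-bit kernel.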
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		area = vmalloc_user(size * sizeof(unsigned long));
		if (area == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&kcov->lock, flags);
		if (kcov->mode != KCOV_MODE_DISABLED) {
			spin_unlock_irqrestore(&kcov->lock, flags);
			vfree(area);
			return -EBUSY;
		}
		kcov->area = area;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		spin_unlock_irqrestore(&kcov->lock, flags);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					      remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
		fallthrough;
	default:
		/*
		 * All other commands can be normally executed under a spin
		 * lock, so we obtain and release it here in order to
		 * simplify kcov_ioctl_locked().
		 */
		spin_lock_irqsave(&kcov->lock, flags);
		res = kcov_ioctl_locked(kcov, cmd, arg);
		spin_unlock_irqrestore(&kcov->lock, flags);
		kfree(remote_arg);
		return res;
	}
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
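
/*
 * A minimal sketch of the annotation side for the common handle case,
 * assuming a hypothetical background worker that services requests on
 * behalf of the task that issued KCOV_REMOTE_ENABLE (the handle would be
 * stashed into the request by that task via kcov_common_handle()):
 *
 *	static int example_worker(void *data)
 *	{
 *		struct example_request *req = data;
 *
 *		kcov_remote_start(req->kcov_handle);
 *		(process the request; coverage is collected here)
 *		kcov_remote_stop();
 *		return 0;
 *	}
 */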

static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
			   data->saved_area, data->saved_mode,
			   data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock(&kcov_remote_lock);
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
		   in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock(&kcov_remote_lock);

	/* Can only happen when in_task(). */
	if (!area) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
		local_lock_irqsave(&kcov_percpu_data.lock, flags);
	}

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
			   unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		   dst_area, dst_area_size, src_area,
		   *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As ARM can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
		       entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_lock_irqsave(&kcov_percpu_data.lock, flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	if (!in_task())
		return 0;
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long), cpu_to_node(cpu));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);