1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/sysent.h> 55 #include <sys/time.h> 56 #include <sys/user.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_object.h> 61 #include <vm/vm_page.h> 62 #include <vm/vm_pager.h> 63 64 #include <machine/stdarg.h> 65 66 #if defined(__i386__) || defined(__amd64__) 67 #include <machine/md_var.h> 68 #endif 69 70 #include <linux/kobject.h> 71 #include <linux/cpu.h> 72 #include <linux/device.h> 73 #include <linux/slab.h> 74 #include <linux/module.h> 75 #include <linux/moduleparam.h> 76 #include <linux/cdev.h> 77 #include <linux/file.h> 78 #include <linux/sysfs.h> 79 #include <linux/mm.h> 80 #include <linux/io.h> 81 #include <linux/vmalloc.h> 82 #include <linux/netdevice.h> 83 #include <linux/timer.h> 84 #include <linux/interrupt.h> 85 #include <linux/uaccess.h> 86 #include <linux/list.h> 87 #include <linux/kthread.h> 88 #include <linux/kernel.h> 89 #include <linux/compat.h> 90 #include <linux/poll.h> 91 #include <linux/smp.h> 92 #include <linux/wait_bit.h> 93 #include <linux/rcupdate.h> 94 95 #if defined(__i386__) || defined(__amd64__) 96 #include <asm/smp.h> 97 #endif 98 99 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 100 "LinuxKPI parameters"); 101 102 int linuxkpi_debug; 103 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 104 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 105 106 int linuxkpi_warn_dump_stack = 0; 107 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, 108 &linuxkpi_warn_dump_stack, 0, 109 "Set to enable stack traces from WARN_ON(). Clear to disable."); 110 111 static struct timeval lkpi_net_lastlog; 112 static int lkpi_net_curpps; 113 static int lkpi_net_maxpps = 99; 114 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 115 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 116 117 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 118 119 #include <linux/rbtree.h> 120 /* Undo Linux compat changes. */ 121 #undef RB_ROOT 122 #undef file 123 #undef cdev 124 #define RB_ROOT(head) (head)->rbh_root 125 126 static void linux_destroy_dev(struct linux_cdev *); 127 static void linux_cdev_deref(struct linux_cdev *ldev); 128 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 129 130 cpumask_t cpu_online_mask; 131 struct kobject linux_class_root; 132 struct device linux_root_device; 133 struct class linux_class_misc; 134 struct list_head pci_drivers; 135 struct list_head pci_devices; 136 spinlock_t pci_lock; 137 138 unsigned long linux_timer_hz_mask; 139 140 wait_queue_head_t linux_bit_waitq; 141 wait_queue_head_t linux_var_waitq; 142 143 int 144 panic_cmp(struct rb_node *one, struct rb_node *two) 145 { 146 panic("no cmp"); 147 } 148 149 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 150 151 int 152 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 153 { 154 va_list tmp_va; 155 int len; 156 char *old; 157 char *name; 158 char dummy; 159 160 old = kobj->name; 161 162 if (old && fmt == NULL) 163 return (0); 164 165 /* compute length of string */ 166 va_copy(tmp_va, args); 167 len = vsnprintf(&dummy, 0, fmt, tmp_va); 168 va_end(tmp_va); 169 170 /* account for zero termination */ 171 len++; 172 173 /* check for error */ 174 if (len < 1) 175 return (-EINVAL); 176 177 /* allocate memory for string */ 178 name = kzalloc(len, GFP_KERNEL); 179 if (name == NULL) 180 return (-ENOMEM); 181 vsnprintf(name, len, fmt, args); 182 kobj->name = name; 183 184 /* free old string */ 185 kfree(old); 186 187 /* filter new string */ 188 for (; *name != '\0'; name++) 189 if (*name == '/') 190 *name = '!'; 191 return (0); 192 } 193 194 int 195 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 196 { 197 va_list args; 198 int error; 199 200 va_start(args, fmt); 201 error = kobject_set_name_vargs(kobj, fmt, args); 202 va_end(args); 203 204 return (error); 205 } 206 207 static int 208 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 209 { 210 const struct kobj_type *t; 211 int error; 212 213 kobj->parent = parent; 214 error = sysfs_create_dir(kobj); 215 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 216 struct attribute **attr; 217 t = kobj->ktype; 218 219 for (attr = t->default_attrs; *attr != NULL; attr++) { 220 error = sysfs_create_file(kobj, *attr); 221 if (error) 222 break; 223 } 224 if (error) 225 sysfs_remove_dir(kobj); 226 } 227 return (error); 228 } 229 230 int 231 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 232 { 233 va_list args; 234 int error; 235 236 va_start(args, fmt); 237 error = kobject_set_name_vargs(kobj, fmt, args); 238 va_end(args); 239 if (error) 240 return (error); 241 242 return kobject_add_complete(kobj, parent); 243 } 244 245 void 246 linux_kobject_release(struct kref *kref) 247 { 248 struct kobject *kobj; 249 char *name; 250 251 kobj = container_of(kref, struct kobject, kref); 252 sysfs_remove_dir(kobj); 253 name = kobj->name; 254 if (kobj->ktype && kobj->ktype->release) 255 kobj->ktype->release(kobj); 256 kfree(name); 257 } 258 259 static void 260 linux_kobject_kfree(struct kobject *kobj) 261 { 262 kfree(kobj); 263 } 264 265 static void 266 linux_kobject_kfree_name(struct kobject *kobj) 267 { 268 if (kobj) { 269 kfree(kobj->name); 270 } 271 } 272 273 const struct kobj_type linux_kfree_type = { 274 .release = linux_kobject_kfree 275 }; 276 277 static void 278 linux_device_release(struct device *dev) 279 { 280 pr_debug("linux_device_release: %s\n", dev_name(dev)); 281 kfree(dev); 282 } 283 284 static ssize_t 285 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 286 { 287 struct class_attribute *dattr; 288 ssize_t error; 289 290 dattr = container_of(attr, struct class_attribute, attr); 291 error = -EIO; 292 if (dattr->show) 293 error = dattr->show(container_of(kobj, struct class, kobj), 294 dattr, buf); 295 return (error); 296 } 297 298 static ssize_t 299 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 300 size_t count) 301 { 302 struct class_attribute *dattr; 303 ssize_t error; 304 305 dattr = container_of(attr, struct class_attribute, attr); 306 error = -EIO; 307 if (dattr->store) 308 error = dattr->store(container_of(kobj, struct class, kobj), 309 dattr, buf, count); 310 return (error); 311 } 312 313 static void 314 linux_class_release(struct kobject *kobj) 315 { 316 struct class *class; 317 318 class = container_of(kobj, struct class, kobj); 319 if (class->class_release) 320 class->class_release(class); 321 } 322 323 static const struct sysfs_ops linux_class_sysfs = { 324 .show = linux_class_show, 325 .store = linux_class_store, 326 }; 327 328 const struct kobj_type linux_class_ktype = { 329 .release = linux_class_release, 330 .sysfs_ops = &linux_class_sysfs 331 }; 332 333 static void 334 linux_dev_release(struct kobject *kobj) 335 { 336 struct device *dev; 337 338 dev = container_of(kobj, struct device, kobj); 339 /* This is the precedence defined by linux. */ 340 if (dev->release) 341 dev->release(dev); 342 else if (dev->class && dev->class->dev_release) 343 dev->class->dev_release(dev); 344 } 345 346 static ssize_t 347 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 348 { 349 struct device_attribute *dattr; 350 ssize_t error; 351 352 dattr = container_of(attr, struct device_attribute, attr); 353 error = -EIO; 354 if (dattr->show) 355 error = dattr->show(container_of(kobj, struct device, kobj), 356 dattr, buf); 357 return (error); 358 } 359 360 static ssize_t 361 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 362 size_t count) 363 { 364 struct device_attribute *dattr; 365 ssize_t error; 366 367 dattr = container_of(attr, struct device_attribute, attr); 368 error = -EIO; 369 if (dattr->store) 370 error = dattr->store(container_of(kobj, struct device, kobj), 371 dattr, buf, count); 372 return (error); 373 } 374 375 static const struct sysfs_ops linux_dev_sysfs = { 376 .show = linux_dev_show, 377 .store = linux_dev_store, 378 }; 379 380 const struct kobj_type linux_dev_ktype = { 381 .release = linux_dev_release, 382 .sysfs_ops = &linux_dev_sysfs 383 }; 384 385 struct device * 386 device_create(struct class *class, struct device *parent, dev_t devt, 387 void *drvdata, const char *fmt, ...) 388 { 389 struct device *dev; 390 va_list args; 391 392 dev = kzalloc(sizeof(*dev), M_WAITOK); 393 dev->parent = parent; 394 dev->class = class; 395 dev->devt = devt; 396 dev->driver_data = drvdata; 397 dev->release = linux_device_release; 398 va_start(args, fmt); 399 kobject_set_name_vargs(&dev->kobj, fmt, args); 400 va_end(args); 401 device_register(dev); 402 403 return (dev); 404 } 405 406 int 407 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 408 struct kobject *parent, const char *fmt, ...) 409 { 410 va_list args; 411 int error; 412 413 kobject_init(kobj, ktype); 414 kobj->ktype = ktype; 415 kobj->parent = parent; 416 kobj->name = NULL; 417 418 va_start(args, fmt); 419 error = kobject_set_name_vargs(kobj, fmt, args); 420 va_end(args); 421 if (error) 422 return (error); 423 return kobject_add_complete(kobj, parent); 424 } 425 426 static void 427 linux_kq_lock(void *arg) 428 { 429 spinlock_t *s = arg; 430 431 spin_lock(s); 432 } 433 static void 434 linux_kq_unlock(void *arg) 435 { 436 spinlock_t *s = arg; 437 438 spin_unlock(s); 439 } 440 441 static void 442 linux_kq_assert_lock(void *arg, int what) 443 { 444 #ifdef INVARIANTS 445 spinlock_t *s = arg; 446 447 if (what == LA_LOCKED) 448 mtx_assert(&s->m, MA_OWNED); 449 else 450 mtx_assert(&s->m, MA_NOTOWNED); 451 #endif 452 } 453 454 static void 455 linux_file_kqfilter_poll(struct linux_file *, int); 456 457 struct linux_file * 458 linux_file_alloc(void) 459 { 460 struct linux_file *filp; 461 462 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 463 464 /* set initial refcount */ 465 filp->f_count = 1; 466 467 /* setup fields needed by kqueue support */ 468 spin_lock_init(&filp->f_kqlock); 469 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 470 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 471 472 return (filp); 473 } 474 475 void 476 linux_file_free(struct linux_file *filp) 477 { 478 if (filp->_file == NULL) { 479 if (filp->f_op != NULL && filp->f_op->release != NULL) 480 filp->f_op->release(filp->f_vnode, filp); 481 if (filp->f_shmem != NULL) 482 vm_object_deallocate(filp->f_shmem); 483 kfree_rcu(filp, rcu); 484 } else { 485 /* 486 * The close method of the character device or file 487 * will free the linux_file structure: 488 */ 489 _fdrop(filp->_file, curthread); 490 } 491 } 492 493 static int 494 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 495 vm_page_t *mres) 496 { 497 struct vm_area_struct *vmap; 498 499 vmap = linux_cdev_handle_find(vm_obj->handle); 500 501 MPASS(vmap != NULL); 502 MPASS(vmap->vm_private_data == vm_obj->handle); 503 504 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 505 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 506 vm_page_t page; 507 508 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 509 /* 510 * If the passed in result page is a fake 511 * page, update it with the new physical 512 * address. 513 */ 514 page = *mres; 515 vm_page_updatefake(page, paddr, vm_obj->memattr); 516 } else { 517 /* 518 * Replace the passed in "mres" page with our 519 * own fake page and free up the all of the 520 * original pages. 521 */ 522 VM_OBJECT_WUNLOCK(vm_obj); 523 page = vm_page_getfake(paddr, vm_obj->memattr); 524 VM_OBJECT_WLOCK(vm_obj); 525 526 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 527 *mres = page; 528 } 529 vm_page_valid(page); 530 return (VM_PAGER_OK); 531 } 532 return (VM_PAGER_FAIL); 533 } 534 535 static int 536 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 537 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 538 { 539 struct vm_area_struct *vmap; 540 int err; 541 542 /* get VM area structure */ 543 vmap = linux_cdev_handle_find(vm_obj->handle); 544 MPASS(vmap != NULL); 545 MPASS(vmap->vm_private_data == vm_obj->handle); 546 547 VM_OBJECT_WUNLOCK(vm_obj); 548 549 linux_set_current(curthread); 550 551 down_write(&vmap->vm_mm->mmap_sem); 552 if (unlikely(vmap->vm_ops == NULL)) { 553 err = VM_FAULT_SIGBUS; 554 } else { 555 struct vm_fault vmf; 556 557 /* fill out VM fault structure */ 558 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 559 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 560 vmf.pgoff = 0; 561 vmf.page = NULL; 562 vmf.vma = vmap; 563 564 vmap->vm_pfn_count = 0; 565 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 566 vmap->vm_obj = vm_obj; 567 568 err = vmap->vm_ops->fault(&vmf); 569 570 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 571 kern_yield(PRI_USER); 572 err = vmap->vm_ops->fault(&vmf); 573 } 574 } 575 576 /* translate return code */ 577 switch (err) { 578 case VM_FAULT_OOM: 579 err = VM_PAGER_AGAIN; 580 break; 581 case VM_FAULT_SIGBUS: 582 err = VM_PAGER_BAD; 583 break; 584 case VM_FAULT_NOPAGE: 585 /* 586 * By contract the fault handler will return having 587 * busied all the pages itself. If pidx is already 588 * found in the object, it will simply xbusy the first 589 * page and return with vm_pfn_count set to 1. 590 */ 591 *first = vmap->vm_pfn_first; 592 *last = *first + vmap->vm_pfn_count - 1; 593 err = VM_PAGER_OK; 594 break; 595 default: 596 err = VM_PAGER_ERROR; 597 break; 598 } 599 up_write(&vmap->vm_mm->mmap_sem); 600 VM_OBJECT_WLOCK(vm_obj); 601 return (err); 602 } 603 604 static struct rwlock linux_vma_lock; 605 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 606 TAILQ_HEAD_INITIALIZER(linux_vma_head); 607 608 static void 609 linux_cdev_handle_free(struct vm_area_struct *vmap) 610 { 611 /* Drop reference on vm_file */ 612 if (vmap->vm_file != NULL) 613 fput(vmap->vm_file); 614 615 /* Drop reference on mm_struct */ 616 mmput(vmap->vm_mm); 617 618 kfree(vmap); 619 } 620 621 static void 622 linux_cdev_handle_remove(struct vm_area_struct *vmap) 623 { 624 rw_wlock(&linux_vma_lock); 625 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 626 rw_wunlock(&linux_vma_lock); 627 } 628 629 static struct vm_area_struct * 630 linux_cdev_handle_find(void *handle) 631 { 632 struct vm_area_struct *vmap; 633 634 rw_rlock(&linux_vma_lock); 635 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 636 if (vmap->vm_private_data == handle) 637 break; 638 } 639 rw_runlock(&linux_vma_lock); 640 return (vmap); 641 } 642 643 static int 644 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 645 vm_ooffset_t foff, struct ucred *cred, u_short *color) 646 { 647 648 MPASS(linux_cdev_handle_find(handle) != NULL); 649 *color = 0; 650 return (0); 651 } 652 653 static void 654 linux_cdev_pager_dtor(void *handle) 655 { 656 const struct vm_operations_struct *vm_ops; 657 struct vm_area_struct *vmap; 658 659 vmap = linux_cdev_handle_find(handle); 660 MPASS(vmap != NULL); 661 662 /* 663 * Remove handle before calling close operation to prevent 664 * other threads from reusing the handle pointer. 665 */ 666 linux_cdev_handle_remove(vmap); 667 668 down_write(&vmap->vm_mm->mmap_sem); 669 vm_ops = vmap->vm_ops; 670 if (likely(vm_ops != NULL)) 671 vm_ops->close(vmap); 672 up_write(&vmap->vm_mm->mmap_sem); 673 674 linux_cdev_handle_free(vmap); 675 } 676 677 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 678 { 679 /* OBJT_MGTDEVICE */ 680 .cdev_pg_populate = linux_cdev_pager_populate, 681 .cdev_pg_ctor = linux_cdev_pager_ctor, 682 .cdev_pg_dtor = linux_cdev_pager_dtor 683 }, 684 { 685 /* OBJT_DEVICE */ 686 .cdev_pg_fault = linux_cdev_pager_fault, 687 .cdev_pg_ctor = linux_cdev_pager_ctor, 688 .cdev_pg_dtor = linux_cdev_pager_dtor 689 }, 690 }; 691 692 int 693 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 694 unsigned long size) 695 { 696 vm_object_t obj; 697 vm_page_t m; 698 699 obj = vma->vm_obj; 700 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 701 return (-ENOTSUP); 702 VM_OBJECT_RLOCK(obj); 703 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 704 m != NULL && m->pindex < OFF_TO_IDX(address + size); 705 m = TAILQ_NEXT(m, listq)) 706 pmap_remove_all(m); 707 VM_OBJECT_RUNLOCK(obj); 708 return (0); 709 } 710 711 static struct file_operations dummy_ldev_ops = { 712 /* XXXKIB */ 713 }; 714 715 static struct linux_cdev dummy_ldev = { 716 .ops = &dummy_ldev_ops, 717 }; 718 719 #define LDEV_SI_DTR 0x0001 720 #define LDEV_SI_REF 0x0002 721 722 static void 723 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 724 struct linux_cdev **dev) 725 { 726 struct linux_cdev *ldev; 727 u_int siref; 728 729 ldev = filp->f_cdev; 730 *fop = filp->f_op; 731 if (ldev != NULL) { 732 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 733 refcount_acquire(&ldev->refs); 734 } else { 735 for (siref = ldev->siref;;) { 736 if ((siref & LDEV_SI_DTR) != 0) { 737 ldev = &dummy_ldev; 738 *fop = ldev->ops; 739 siref = ldev->siref; 740 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 741 } else if (atomic_fcmpset_int(&ldev->siref, 742 &siref, siref + LDEV_SI_REF)) { 743 break; 744 } 745 } 746 } 747 } 748 *dev = ldev; 749 } 750 751 static void 752 linux_drop_fop(struct linux_cdev *ldev) 753 { 754 755 if (ldev == NULL) 756 return; 757 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 758 linux_cdev_deref(ldev); 759 } else { 760 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 761 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 762 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 763 } 764 } 765 766 #define OPW(fp,td,code) ({ \ 767 struct file *__fpop; \ 768 __typeof(code) __retval; \ 769 \ 770 __fpop = (td)->td_fpop; \ 771 (td)->td_fpop = (fp); \ 772 __retval = (code); \ 773 (td)->td_fpop = __fpop; \ 774 __retval; \ 775 }) 776 777 static int 778 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 779 struct file *file) 780 { 781 struct linux_cdev *ldev; 782 struct linux_file *filp; 783 const struct file_operations *fop; 784 int error; 785 786 ldev = dev->si_drv1; 787 788 filp = linux_file_alloc(); 789 filp->f_dentry = &filp->f_dentry_store; 790 filp->f_op = ldev->ops; 791 filp->f_mode = file->f_flag; 792 filp->f_flags = file->f_flag; 793 filp->f_vnode = file->f_vnode; 794 filp->_file = file; 795 refcount_acquire(&ldev->refs); 796 filp->f_cdev = ldev; 797 798 linux_set_current(td); 799 linux_get_fop(filp, &fop, &ldev); 800 801 if (fop->open != NULL) { 802 error = -fop->open(file->f_vnode, filp); 803 if (error != 0) { 804 linux_drop_fop(ldev); 805 linux_cdev_deref(filp->f_cdev); 806 kfree(filp); 807 return (error); 808 } 809 } 810 811 /* hold on to the vnode - used for fstat() */ 812 vhold(filp->f_vnode); 813 814 /* release the file from devfs */ 815 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 816 linux_drop_fop(ldev); 817 return (ENXIO); 818 } 819 820 #define LINUX_IOCTL_MIN_PTR 0x10000UL 821 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 822 823 static inline int 824 linux_remap_address(void **uaddr, size_t len) 825 { 826 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 827 828 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 829 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 830 struct task_struct *pts = current; 831 if (pts == NULL) { 832 *uaddr = NULL; 833 return (1); 834 } 835 836 /* compute data offset */ 837 uaddr_val -= LINUX_IOCTL_MIN_PTR; 838 839 /* check that length is within bounds */ 840 if ((len > IOCPARM_MAX) || 841 (uaddr_val + len) > pts->bsd_ioctl_len) { 842 *uaddr = NULL; 843 return (1); 844 } 845 846 /* re-add kernel buffer address */ 847 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 848 849 /* update address location */ 850 *uaddr = (void *)uaddr_val; 851 return (1); 852 } 853 return (0); 854 } 855 856 int 857 linux_copyin(const void *uaddr, void *kaddr, size_t len) 858 { 859 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 860 if (uaddr == NULL) 861 return (-EFAULT); 862 memcpy(kaddr, uaddr, len); 863 return (0); 864 } 865 return (-copyin(uaddr, kaddr, len)); 866 } 867 868 int 869 linux_copyout(const void *kaddr, void *uaddr, size_t len) 870 { 871 if (linux_remap_address(&uaddr, len)) { 872 if (uaddr == NULL) 873 return (-EFAULT); 874 memcpy(uaddr, kaddr, len); 875 return (0); 876 } 877 return (-copyout(kaddr, uaddr, len)); 878 } 879 880 size_t 881 linux_clear_user(void *_uaddr, size_t _len) 882 { 883 uint8_t *uaddr = _uaddr; 884 size_t len = _len; 885 886 /* make sure uaddr is aligned before going into the fast loop */ 887 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 888 if (subyte(uaddr, 0)) 889 return (_len); 890 uaddr++; 891 len--; 892 } 893 894 /* zero 8 bytes at a time */ 895 while (len > 7) { 896 #ifdef __LP64__ 897 if (suword64(uaddr, 0)) 898 return (_len); 899 #else 900 if (suword32(uaddr, 0)) 901 return (_len); 902 if (suword32(uaddr + 4, 0)) 903 return (_len); 904 #endif 905 uaddr += 8; 906 len -= 8; 907 } 908 909 /* zero fill end, if any */ 910 while (len > 0) { 911 if (subyte(uaddr, 0)) 912 return (_len); 913 uaddr++; 914 len--; 915 } 916 return (0); 917 } 918 919 int 920 linux_access_ok(const void *uaddr, size_t len) 921 { 922 uintptr_t saddr; 923 uintptr_t eaddr; 924 925 /* get start and end address */ 926 saddr = (uintptr_t)uaddr; 927 eaddr = (uintptr_t)uaddr + len; 928 929 /* verify addresses are valid for userspace */ 930 return ((saddr == eaddr) || 931 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 932 } 933 934 /* 935 * This function should return either EINTR or ERESTART depending on 936 * the signal type sent to this thread: 937 */ 938 static int 939 linux_get_error(struct task_struct *task, int error) 940 { 941 /* check for signal type interrupt code */ 942 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 943 error = -linux_schedule_get_interrupt_value(task); 944 if (error == 0) 945 error = EINTR; 946 } 947 return (error); 948 } 949 950 static int 951 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 952 const struct file_operations *fop, u_long cmd, caddr_t data, 953 struct thread *td) 954 { 955 struct task_struct *task = current; 956 unsigned size; 957 int error; 958 959 size = IOCPARM_LEN(cmd); 960 /* refer to logic in sys_ioctl() */ 961 if (size > 0) { 962 /* 963 * Setup hint for linux_copyin() and linux_copyout(). 964 * 965 * Background: Linux code expects a user-space address 966 * while FreeBSD supplies a kernel-space address. 967 */ 968 task->bsd_ioctl_data = data; 969 task->bsd_ioctl_len = size; 970 data = (void *)LINUX_IOCTL_MIN_PTR; 971 } else { 972 /* fetch user-space pointer */ 973 data = *(void **)data; 974 } 975 #ifdef COMPAT_FREEBSD32 976 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 977 /* try the compat IOCTL handler first */ 978 if (fop->compat_ioctl != NULL) { 979 error = -OPW(fp, td, fop->compat_ioctl(filp, 980 cmd, (u_long)data)); 981 } else { 982 error = ENOTTY; 983 } 984 985 /* fallback to the regular IOCTL handler, if any */ 986 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 987 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 988 cmd, (u_long)data)); 989 } 990 } else 991 #endif 992 { 993 if (fop->unlocked_ioctl != NULL) { 994 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 995 cmd, (u_long)data)); 996 } else { 997 error = ENOTTY; 998 } 999 } 1000 if (size > 0) { 1001 task->bsd_ioctl_data = NULL; 1002 task->bsd_ioctl_len = 0; 1003 } 1004 1005 if (error == EWOULDBLOCK) { 1006 /* update kqfilter status, if any */ 1007 linux_file_kqfilter_poll(filp, 1008 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1009 } else { 1010 error = linux_get_error(task, error); 1011 } 1012 return (error); 1013 } 1014 1015 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1016 1017 /* 1018 * This function atomically updates the poll wakeup state and returns 1019 * the previous state at the time of update. 1020 */ 1021 static uint8_t 1022 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1023 { 1024 int c, old; 1025 1026 c = v->counter; 1027 1028 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1029 c = old; 1030 1031 return (c); 1032 } 1033 1034 static int 1035 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1036 { 1037 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1038 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1039 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1040 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1041 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1042 }; 1043 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1044 1045 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1046 case LINUX_FWQ_STATE_QUEUED: 1047 linux_poll_wakeup(filp); 1048 return (1); 1049 default: 1050 return (0); 1051 } 1052 } 1053 1054 void 1055 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1056 { 1057 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1058 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1059 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1060 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1061 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1062 }; 1063 1064 /* check if we are called inside the select system call */ 1065 if (p == LINUX_POLL_TABLE_NORMAL) 1066 selrecord(curthread, &filp->f_selinfo); 1067 1068 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1069 case LINUX_FWQ_STATE_INIT: 1070 /* NOTE: file handles can only belong to one wait-queue */ 1071 filp->f_wait_queue.wqh = wqh; 1072 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1073 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1074 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1075 break; 1076 default: 1077 break; 1078 } 1079 } 1080 1081 static void 1082 linux_poll_wait_dequeue(struct linux_file *filp) 1083 { 1084 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1085 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1086 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1087 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1088 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1089 }; 1090 1091 seldrain(&filp->f_selinfo); 1092 1093 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1094 case LINUX_FWQ_STATE_NOT_READY: 1095 case LINUX_FWQ_STATE_QUEUED: 1096 case LINUX_FWQ_STATE_READY: 1097 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1098 break; 1099 default: 1100 break; 1101 } 1102 } 1103 1104 void 1105 linux_poll_wakeup(struct linux_file *filp) 1106 { 1107 /* this function should be NULL-safe */ 1108 if (filp == NULL) 1109 return; 1110 1111 selwakeup(&filp->f_selinfo); 1112 1113 spin_lock(&filp->f_kqlock); 1114 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1115 LINUX_KQ_FLAG_NEED_WRITE; 1116 1117 /* make sure the "knote" gets woken up */ 1118 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1119 spin_unlock(&filp->f_kqlock); 1120 } 1121 1122 static void 1123 linux_file_kqfilter_detach(struct knote *kn) 1124 { 1125 struct linux_file *filp = kn->kn_hook; 1126 1127 spin_lock(&filp->f_kqlock); 1128 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1129 spin_unlock(&filp->f_kqlock); 1130 } 1131 1132 static int 1133 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1134 { 1135 struct linux_file *filp = kn->kn_hook; 1136 1137 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1138 1139 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1140 } 1141 1142 static int 1143 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1144 { 1145 struct linux_file *filp = kn->kn_hook; 1146 1147 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1148 1149 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1150 } 1151 1152 static struct filterops linux_dev_kqfiltops_read = { 1153 .f_isfd = 1, 1154 .f_detach = linux_file_kqfilter_detach, 1155 .f_event = linux_file_kqfilter_read_event, 1156 }; 1157 1158 static struct filterops linux_dev_kqfiltops_write = { 1159 .f_isfd = 1, 1160 .f_detach = linux_file_kqfilter_detach, 1161 .f_event = linux_file_kqfilter_write_event, 1162 }; 1163 1164 static void 1165 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1166 { 1167 struct thread *td; 1168 const struct file_operations *fop; 1169 struct linux_cdev *ldev; 1170 int temp; 1171 1172 if ((filp->f_kqflags & kqflags) == 0) 1173 return; 1174 1175 td = curthread; 1176 1177 linux_get_fop(filp, &fop, &ldev); 1178 /* get the latest polling state */ 1179 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1180 linux_drop_fop(ldev); 1181 1182 spin_lock(&filp->f_kqlock); 1183 /* clear kqflags */ 1184 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1185 LINUX_KQ_FLAG_NEED_WRITE); 1186 /* update kqflags */ 1187 if ((temp & (POLLIN | POLLOUT)) != 0) { 1188 if ((temp & POLLIN) != 0) 1189 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1190 if ((temp & POLLOUT) != 0) 1191 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1192 1193 /* make sure the "knote" gets woken up */ 1194 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1195 } 1196 spin_unlock(&filp->f_kqlock); 1197 } 1198 1199 static int 1200 linux_file_kqfilter(struct file *file, struct knote *kn) 1201 { 1202 struct linux_file *filp; 1203 struct thread *td; 1204 int error; 1205 1206 td = curthread; 1207 filp = (struct linux_file *)file->f_data; 1208 filp->f_flags = file->f_flag; 1209 if (filp->f_op->poll == NULL) 1210 return (EINVAL); 1211 1212 spin_lock(&filp->f_kqlock); 1213 switch (kn->kn_filter) { 1214 case EVFILT_READ: 1215 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1216 kn->kn_fop = &linux_dev_kqfiltops_read; 1217 kn->kn_hook = filp; 1218 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1219 error = 0; 1220 break; 1221 case EVFILT_WRITE: 1222 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1223 kn->kn_fop = &linux_dev_kqfiltops_write; 1224 kn->kn_hook = filp; 1225 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1226 error = 0; 1227 break; 1228 default: 1229 error = EINVAL; 1230 break; 1231 } 1232 spin_unlock(&filp->f_kqlock); 1233 1234 if (error == 0) { 1235 linux_set_current(td); 1236 1237 /* update kqfilter status, if any */ 1238 linux_file_kqfilter_poll(filp, 1239 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1240 } 1241 return (error); 1242 } 1243 1244 static int 1245 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1246 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1247 int nprot, bool is_shared, struct thread *td) 1248 { 1249 struct task_struct *task; 1250 struct vm_area_struct *vmap; 1251 struct mm_struct *mm; 1252 struct linux_file *filp; 1253 vm_memattr_t attr; 1254 int error; 1255 1256 filp = (struct linux_file *)fp->f_data; 1257 filp->f_flags = fp->f_flag; 1258 1259 if (fop->mmap == NULL) 1260 return (EOPNOTSUPP); 1261 1262 linux_set_current(td); 1263 1264 /* 1265 * The same VM object might be shared by multiple processes 1266 * and the mm_struct is usually freed when a process exits. 1267 * 1268 * The atomic reference below makes sure the mm_struct is 1269 * available as long as the vmap is in the linux_vma_head. 1270 */ 1271 task = current; 1272 mm = task->mm; 1273 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1274 return (EINVAL); 1275 1276 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1277 vmap->vm_start = 0; 1278 vmap->vm_end = size; 1279 vmap->vm_pgoff = *offset / PAGE_SIZE; 1280 vmap->vm_pfn = 0; 1281 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1282 if (is_shared) 1283 vmap->vm_flags |= VM_SHARED; 1284 vmap->vm_ops = NULL; 1285 vmap->vm_file = get_file(filp); 1286 vmap->vm_mm = mm; 1287 1288 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1289 error = linux_get_error(task, EINTR); 1290 } else { 1291 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1292 error = linux_get_error(task, error); 1293 up_write(&vmap->vm_mm->mmap_sem); 1294 } 1295 1296 if (error != 0) { 1297 linux_cdev_handle_free(vmap); 1298 return (error); 1299 } 1300 1301 attr = pgprot2cachemode(vmap->vm_page_prot); 1302 1303 if (vmap->vm_ops != NULL) { 1304 struct vm_area_struct *ptr; 1305 void *vm_private_data; 1306 bool vm_no_fault; 1307 1308 if (vmap->vm_ops->open == NULL || 1309 vmap->vm_ops->close == NULL || 1310 vmap->vm_private_data == NULL) { 1311 /* free allocated VM area struct */ 1312 linux_cdev_handle_free(vmap); 1313 return (EINVAL); 1314 } 1315 1316 vm_private_data = vmap->vm_private_data; 1317 1318 rw_wlock(&linux_vma_lock); 1319 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1320 if (ptr->vm_private_data == vm_private_data) 1321 break; 1322 } 1323 /* check if there is an existing VM area struct */ 1324 if (ptr != NULL) { 1325 /* check if the VM area structure is invalid */ 1326 if (ptr->vm_ops == NULL || 1327 ptr->vm_ops->open == NULL || 1328 ptr->vm_ops->close == NULL) { 1329 error = ESTALE; 1330 vm_no_fault = 1; 1331 } else { 1332 error = EEXIST; 1333 vm_no_fault = (ptr->vm_ops->fault == NULL); 1334 } 1335 } else { 1336 /* insert VM area structure into list */ 1337 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1338 error = 0; 1339 vm_no_fault = (vmap->vm_ops->fault == NULL); 1340 } 1341 rw_wunlock(&linux_vma_lock); 1342 1343 if (error != 0) { 1344 /* free allocated VM area struct */ 1345 linux_cdev_handle_free(vmap); 1346 /* check for stale VM area struct */ 1347 if (error != EEXIST) 1348 return (error); 1349 } 1350 1351 /* check if there is no fault handler */ 1352 if (vm_no_fault) { 1353 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1354 &linux_cdev_pager_ops[1], size, nprot, *offset, 1355 td->td_ucred); 1356 } else { 1357 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1358 &linux_cdev_pager_ops[0], size, nprot, *offset, 1359 td->td_ucred); 1360 } 1361 1362 /* check if allocating the VM object failed */ 1363 if (*object == NULL) { 1364 if (error == 0) { 1365 /* remove VM area struct from list */ 1366 linux_cdev_handle_remove(vmap); 1367 /* free allocated VM area struct */ 1368 linux_cdev_handle_free(vmap); 1369 } 1370 return (EINVAL); 1371 } 1372 } else { 1373 struct sglist *sg; 1374 1375 sg = sglist_alloc(1, M_WAITOK); 1376 sglist_append_phys(sg, 1377 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1378 1379 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1380 nprot, 0, td->td_ucred); 1381 1382 linux_cdev_handle_free(vmap); 1383 1384 if (*object == NULL) { 1385 sglist_free(sg); 1386 return (EINVAL); 1387 } 1388 } 1389 1390 if (attr != VM_MEMATTR_DEFAULT) { 1391 VM_OBJECT_WLOCK(*object); 1392 vm_object_set_memattr(*object, attr); 1393 VM_OBJECT_WUNLOCK(*object); 1394 } 1395 *offset = 0; 1396 return (0); 1397 } 1398 1399 struct cdevsw linuxcdevsw = { 1400 .d_version = D_VERSION, 1401 .d_fdopen = linux_dev_fdopen, 1402 .d_name = "lkpidev", 1403 }; 1404 1405 static int 1406 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1407 int flags, struct thread *td) 1408 { 1409 struct linux_file *filp; 1410 const struct file_operations *fop; 1411 struct linux_cdev *ldev; 1412 ssize_t bytes; 1413 int error; 1414 1415 error = 0; 1416 filp = (struct linux_file *)file->f_data; 1417 filp->f_flags = file->f_flag; 1418 /* XXX no support for I/O vectors currently */ 1419 if (uio->uio_iovcnt != 1) 1420 return (EOPNOTSUPP); 1421 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1422 return (EINVAL); 1423 linux_set_current(td); 1424 linux_get_fop(filp, &fop, &ldev); 1425 if (fop->read != NULL) { 1426 bytes = OPW(file, td, fop->read(filp, 1427 uio->uio_iov->iov_base, 1428 uio->uio_iov->iov_len, &uio->uio_offset)); 1429 if (bytes >= 0) { 1430 uio->uio_iov->iov_base = 1431 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1432 uio->uio_iov->iov_len -= bytes; 1433 uio->uio_resid -= bytes; 1434 } else { 1435 error = linux_get_error(current, -bytes); 1436 } 1437 } else 1438 error = ENXIO; 1439 1440 /* update kqfilter status, if any */ 1441 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1442 linux_drop_fop(ldev); 1443 1444 return (error); 1445 } 1446 1447 static int 1448 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1449 int flags, struct thread *td) 1450 { 1451 struct linux_file *filp; 1452 const struct file_operations *fop; 1453 struct linux_cdev *ldev; 1454 ssize_t bytes; 1455 int error; 1456 1457 filp = (struct linux_file *)file->f_data; 1458 filp->f_flags = file->f_flag; 1459 /* XXX no support for I/O vectors currently */ 1460 if (uio->uio_iovcnt != 1) 1461 return (EOPNOTSUPP); 1462 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1463 return (EINVAL); 1464 linux_set_current(td); 1465 linux_get_fop(filp, &fop, &ldev); 1466 if (fop->write != NULL) { 1467 bytes = OPW(file, td, fop->write(filp, 1468 uio->uio_iov->iov_base, 1469 uio->uio_iov->iov_len, &uio->uio_offset)); 1470 if (bytes >= 0) { 1471 uio->uio_iov->iov_base = 1472 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1473 uio->uio_iov->iov_len -= bytes; 1474 uio->uio_resid -= bytes; 1475 error = 0; 1476 } else { 1477 error = linux_get_error(current, -bytes); 1478 } 1479 } else 1480 error = ENXIO; 1481 1482 /* update kqfilter status, if any */ 1483 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1484 1485 linux_drop_fop(ldev); 1486 1487 return (error); 1488 } 1489 1490 static int 1491 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1492 struct thread *td) 1493 { 1494 struct linux_file *filp; 1495 const struct file_operations *fop; 1496 struct linux_cdev *ldev; 1497 int revents; 1498 1499 filp = (struct linux_file *)file->f_data; 1500 filp->f_flags = file->f_flag; 1501 linux_set_current(td); 1502 linux_get_fop(filp, &fop, &ldev); 1503 if (fop->poll != NULL) { 1504 revents = OPW(file, td, fop->poll(filp, 1505 LINUX_POLL_TABLE_NORMAL)) & events; 1506 } else { 1507 revents = 0; 1508 } 1509 linux_drop_fop(ldev); 1510 return (revents); 1511 } 1512 1513 static int 1514 linux_file_close(struct file *file, struct thread *td) 1515 { 1516 struct linux_file *filp; 1517 int (*release)(struct inode *, struct linux_file *); 1518 const struct file_operations *fop; 1519 struct linux_cdev *ldev; 1520 int error; 1521 1522 filp = (struct linux_file *)file->f_data; 1523 1524 KASSERT(file_count(filp) == 0, 1525 ("File refcount(%d) is not zero", file_count(filp))); 1526 1527 if (td == NULL) 1528 td = curthread; 1529 1530 error = 0; 1531 filp->f_flags = file->f_flag; 1532 linux_set_current(td); 1533 linux_poll_wait_dequeue(filp); 1534 linux_get_fop(filp, &fop, &ldev); 1535 /* 1536 * Always use the real release function, if any, to avoid 1537 * leaking device resources: 1538 */ 1539 release = filp->f_op->release; 1540 if (release != NULL) 1541 error = -OPW(file, td, release(filp->f_vnode, filp)); 1542 funsetown(&filp->f_sigio); 1543 if (filp->f_vnode != NULL) 1544 vdrop(filp->f_vnode); 1545 linux_drop_fop(ldev); 1546 ldev = filp->f_cdev; 1547 if (ldev != NULL) 1548 linux_cdev_deref(ldev); 1549 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1550 kfree(filp); 1551 1552 return (error); 1553 } 1554 1555 static int 1556 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1557 struct thread *td) 1558 { 1559 struct linux_file *filp; 1560 const struct file_operations *fop; 1561 struct linux_cdev *ldev; 1562 struct fiodgname_arg *fgn; 1563 const char *p; 1564 int error, i; 1565 1566 error = 0; 1567 filp = (struct linux_file *)fp->f_data; 1568 filp->f_flags = fp->f_flag; 1569 linux_get_fop(filp, &fop, &ldev); 1570 1571 linux_set_current(td); 1572 switch (cmd) { 1573 case FIONBIO: 1574 break; 1575 case FIOASYNC: 1576 if (fop->fasync == NULL) 1577 break; 1578 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1579 break; 1580 case FIOSETOWN: 1581 error = fsetown(*(int *)data, &filp->f_sigio); 1582 if (error == 0) { 1583 if (fop->fasync == NULL) 1584 break; 1585 error = -OPW(fp, td, fop->fasync(0, filp, 1586 fp->f_flag & FASYNC)); 1587 } 1588 break; 1589 case FIOGETOWN: 1590 *(int *)data = fgetown(&filp->f_sigio); 1591 break; 1592 case FIODGNAME: 1593 #ifdef COMPAT_FREEBSD32 1594 case FIODGNAME_32: 1595 #endif 1596 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1597 error = ENXIO; 1598 break; 1599 } 1600 fgn = data; 1601 p = devtoname(filp->f_cdev->cdev); 1602 i = strlen(p) + 1; 1603 if (i > fgn->len) { 1604 error = EINVAL; 1605 break; 1606 } 1607 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1608 break; 1609 default: 1610 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1611 break; 1612 } 1613 linux_drop_fop(ldev); 1614 return (error); 1615 } 1616 1617 static int 1618 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1619 vm_prot_t maxprot, int flags, struct file *fp, 1620 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1621 { 1622 /* 1623 * Character devices do not provide private mappings 1624 * of any kind: 1625 */ 1626 if ((maxprot & VM_PROT_WRITE) == 0 && 1627 (prot & VM_PROT_WRITE) != 0) 1628 return (EACCES); 1629 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1630 return (EINVAL); 1631 1632 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1633 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1634 } 1635 1636 static int 1637 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1638 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1639 struct thread *td) 1640 { 1641 struct linux_file *filp; 1642 const struct file_operations *fop; 1643 struct linux_cdev *ldev; 1644 struct mount *mp; 1645 struct vnode *vp; 1646 vm_object_t object; 1647 vm_prot_t maxprot; 1648 int error; 1649 1650 filp = (struct linux_file *)fp->f_data; 1651 1652 vp = filp->f_vnode; 1653 if (vp == NULL) 1654 return (EOPNOTSUPP); 1655 1656 /* 1657 * Ensure that file and memory protections are 1658 * compatible. 1659 */ 1660 mp = vp->v_mount; 1661 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1662 maxprot = VM_PROT_NONE; 1663 if ((prot & VM_PROT_EXECUTE) != 0) 1664 return (EACCES); 1665 } else 1666 maxprot = VM_PROT_EXECUTE; 1667 if ((fp->f_flag & FREAD) != 0) 1668 maxprot |= VM_PROT_READ; 1669 else if ((prot & VM_PROT_READ) != 0) 1670 return (EACCES); 1671 1672 /* 1673 * If we are sharing potential changes via MAP_SHARED and we 1674 * are trying to get write permission although we opened it 1675 * without asking for it, bail out. 1676 * 1677 * Note that most character devices always share mappings. 1678 * 1679 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1680 * requests rather than doing it here. 1681 */ 1682 if ((flags & MAP_SHARED) != 0) { 1683 if ((fp->f_flag & FWRITE) != 0) 1684 maxprot |= VM_PROT_WRITE; 1685 else if ((prot & VM_PROT_WRITE) != 0) 1686 return (EACCES); 1687 } 1688 maxprot &= cap_maxprot; 1689 1690 linux_get_fop(filp, &fop, &ldev); 1691 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1692 &foff, fop, &object); 1693 if (error != 0) 1694 goto out; 1695 1696 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1697 foff, FALSE, td); 1698 if (error != 0) 1699 vm_object_deallocate(object); 1700 out: 1701 linux_drop_fop(ldev); 1702 return (error); 1703 } 1704 1705 static int 1706 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 1707 { 1708 struct linux_file *filp; 1709 struct vnode *vp; 1710 int error; 1711 1712 filp = (struct linux_file *)fp->f_data; 1713 if (filp->f_vnode == NULL) 1714 return (EOPNOTSUPP); 1715 1716 vp = filp->f_vnode; 1717 1718 vn_lock(vp, LK_SHARED | LK_RETRY); 1719 error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); 1720 VOP_UNLOCK(vp); 1721 1722 return (error); 1723 } 1724 1725 static int 1726 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1727 struct filedesc *fdp) 1728 { 1729 struct linux_file *filp; 1730 struct vnode *vp; 1731 int error; 1732 1733 filp = fp->f_data; 1734 vp = filp->f_vnode; 1735 if (vp == NULL) { 1736 error = 0; 1737 kif->kf_type = KF_TYPE_DEV; 1738 } else { 1739 vref(vp); 1740 FILEDESC_SUNLOCK(fdp); 1741 error = vn_fill_kinfo_vnode(vp, kif); 1742 vrele(vp); 1743 kif->kf_type = KF_TYPE_VNODE; 1744 FILEDESC_SLOCK(fdp); 1745 } 1746 return (error); 1747 } 1748 1749 unsigned int 1750 linux_iminor(struct inode *inode) 1751 { 1752 struct linux_cdev *ldev; 1753 1754 if (inode == NULL || inode->v_rdev == NULL || 1755 inode->v_rdev->si_devsw != &linuxcdevsw) 1756 return (-1U); 1757 ldev = inode->v_rdev->si_drv1; 1758 if (ldev == NULL) 1759 return (-1U); 1760 1761 return (minor(ldev->dev)); 1762 } 1763 1764 struct fileops linuxfileops = { 1765 .fo_read = linux_file_read, 1766 .fo_write = linux_file_write, 1767 .fo_truncate = invfo_truncate, 1768 .fo_kqfilter = linux_file_kqfilter, 1769 .fo_stat = linux_file_stat, 1770 .fo_fill_kinfo = linux_file_fill_kinfo, 1771 .fo_poll = linux_file_poll, 1772 .fo_close = linux_file_close, 1773 .fo_ioctl = linux_file_ioctl, 1774 .fo_mmap = linux_file_mmap, 1775 .fo_chmod = invfo_chmod, 1776 .fo_chown = invfo_chown, 1777 .fo_sendfile = invfo_sendfile, 1778 .fo_flags = DFLAG_PASSABLE, 1779 }; 1780 1781 /* 1782 * Hash of vmmap addresses. This is infrequently accessed and does not 1783 * need to be particularly large. This is done because we must store the 1784 * caller's idea of the map size to properly unmap. 1785 */ 1786 struct vmmap { 1787 LIST_ENTRY(vmmap) vm_next; 1788 void *vm_addr; 1789 unsigned long vm_size; 1790 }; 1791 1792 struct vmmaphd { 1793 struct vmmap *lh_first; 1794 }; 1795 #define VMMAP_HASH_SIZE 64 1796 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1797 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1798 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1799 static struct mtx vmmaplock; 1800 1801 static void 1802 vmmap_add(void *addr, unsigned long size) 1803 { 1804 struct vmmap *vmmap; 1805 1806 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1807 mtx_lock(&vmmaplock); 1808 vmmap->vm_size = size; 1809 vmmap->vm_addr = addr; 1810 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1811 mtx_unlock(&vmmaplock); 1812 } 1813 1814 static struct vmmap * 1815 vmmap_remove(void *addr) 1816 { 1817 struct vmmap *vmmap; 1818 1819 mtx_lock(&vmmaplock); 1820 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1821 if (vmmap->vm_addr == addr) 1822 break; 1823 if (vmmap) 1824 LIST_REMOVE(vmmap, vm_next); 1825 mtx_unlock(&vmmaplock); 1826 1827 return (vmmap); 1828 } 1829 1830 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1831 void * 1832 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1833 { 1834 void *addr; 1835 1836 addr = pmap_mapdev_attr(phys_addr, size, attr); 1837 if (addr == NULL) 1838 return (NULL); 1839 vmmap_add(addr, size); 1840 1841 return (addr); 1842 } 1843 #endif 1844 1845 void 1846 iounmap(void *addr) 1847 { 1848 struct vmmap *vmmap; 1849 1850 vmmap = vmmap_remove(addr); 1851 if (vmmap == NULL) 1852 return; 1853 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1854 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1855 #endif 1856 kfree(vmmap); 1857 } 1858 1859 void * 1860 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1861 { 1862 vm_offset_t off; 1863 size_t size; 1864 1865 size = count * PAGE_SIZE; 1866 off = kva_alloc(size); 1867 if (off == 0) 1868 return (NULL); 1869 vmmap_add((void *)off, size); 1870 pmap_qenter(off, pages, count); 1871 1872 return ((void *)off); 1873 } 1874 1875 void 1876 vunmap(void *addr) 1877 { 1878 struct vmmap *vmmap; 1879 1880 vmmap = vmmap_remove(addr); 1881 if (vmmap == NULL) 1882 return; 1883 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1884 kva_free((vm_offset_t)addr, vmmap->vm_size); 1885 kfree(vmmap); 1886 } 1887 1888 static char * 1889 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1890 { 1891 unsigned int len; 1892 char *p; 1893 va_list aq; 1894 1895 va_copy(aq, ap); 1896 len = vsnprintf(NULL, 0, fmt, aq); 1897 va_end(aq); 1898 1899 if (dev != NULL) 1900 p = devm_kmalloc(dev, len + 1, gfp); 1901 else 1902 p = kmalloc(len + 1, gfp); 1903 if (p != NULL) 1904 vsnprintf(p, len + 1, fmt, ap); 1905 1906 return (p); 1907 } 1908 1909 char * 1910 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1911 { 1912 1913 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1914 } 1915 1916 char * 1917 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1918 { 1919 va_list ap; 1920 char *p; 1921 1922 va_start(ap, fmt); 1923 p = devm_kvasprintf(dev, gfp, fmt, ap); 1924 va_end(ap); 1925 1926 return (p); 1927 } 1928 1929 char * 1930 kasprintf(gfp_t gfp, const char *fmt, ...) 1931 { 1932 va_list ap; 1933 char *p; 1934 1935 va_start(ap, fmt); 1936 p = kvasprintf(gfp, fmt, ap); 1937 va_end(ap); 1938 1939 return (p); 1940 } 1941 1942 static void 1943 linux_timer_callback_wrapper(void *context) 1944 { 1945 struct timer_list *timer; 1946 1947 timer = context; 1948 1949 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1950 /* try again later */ 1951 callout_reset(&timer->callout, 1, 1952 &linux_timer_callback_wrapper, timer); 1953 return; 1954 } 1955 1956 timer->function(timer->data); 1957 } 1958 1959 int 1960 mod_timer(struct timer_list *timer, int expires) 1961 { 1962 int ret; 1963 1964 timer->expires = expires; 1965 ret = callout_reset(&timer->callout, 1966 linux_timer_jiffies_until(expires), 1967 &linux_timer_callback_wrapper, timer); 1968 1969 MPASS(ret == 0 || ret == 1); 1970 1971 return (ret == 1); 1972 } 1973 1974 void 1975 add_timer(struct timer_list *timer) 1976 { 1977 1978 callout_reset(&timer->callout, 1979 linux_timer_jiffies_until(timer->expires), 1980 &linux_timer_callback_wrapper, timer); 1981 } 1982 1983 void 1984 add_timer_on(struct timer_list *timer, int cpu) 1985 { 1986 1987 callout_reset_on(&timer->callout, 1988 linux_timer_jiffies_until(timer->expires), 1989 &linux_timer_callback_wrapper, timer, cpu); 1990 } 1991 1992 int 1993 del_timer(struct timer_list *timer) 1994 { 1995 1996 if (callout_stop(&(timer)->callout) == -1) 1997 return (0); 1998 return (1); 1999 } 2000 2001 int 2002 del_timer_sync(struct timer_list *timer) 2003 { 2004 2005 if (callout_drain(&(timer)->callout) == -1) 2006 return (0); 2007 return (1); 2008 } 2009 2010 /* greatest common divisor, Euclid equation */ 2011 static uint64_t 2012 lkpi_gcd_64(uint64_t a, uint64_t b) 2013 { 2014 uint64_t an; 2015 uint64_t bn; 2016 2017 while (b != 0) { 2018 an = b; 2019 bn = a % b; 2020 a = an; 2021 b = bn; 2022 } 2023 return (a); 2024 } 2025 2026 uint64_t lkpi_nsec2hz_rem; 2027 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2028 uint64_t lkpi_nsec2hz_max; 2029 2030 uint64_t lkpi_usec2hz_rem; 2031 uint64_t lkpi_usec2hz_div = 1000000ULL; 2032 uint64_t lkpi_usec2hz_max; 2033 2034 uint64_t lkpi_msec2hz_rem; 2035 uint64_t lkpi_msec2hz_div = 1000ULL; 2036 uint64_t lkpi_msec2hz_max; 2037 2038 static void 2039 linux_timer_init(void *arg) 2040 { 2041 uint64_t gcd; 2042 2043 /* 2044 * Compute an internal HZ value which can divide 2**32 to 2045 * avoid timer rounding problems when the tick value wraps 2046 * around 2**32: 2047 */ 2048 linux_timer_hz_mask = 1; 2049 while (linux_timer_hz_mask < (unsigned long)hz) 2050 linux_timer_hz_mask *= 2; 2051 linux_timer_hz_mask--; 2052 2053 /* compute some internal constants */ 2054 2055 lkpi_nsec2hz_rem = hz; 2056 lkpi_usec2hz_rem = hz; 2057 lkpi_msec2hz_rem = hz; 2058 2059 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2060 lkpi_nsec2hz_rem /= gcd; 2061 lkpi_nsec2hz_div /= gcd; 2062 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2063 2064 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2065 lkpi_usec2hz_rem /= gcd; 2066 lkpi_usec2hz_div /= gcd; 2067 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2068 2069 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2070 lkpi_msec2hz_rem /= gcd; 2071 lkpi_msec2hz_div /= gcd; 2072 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2073 } 2074 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2075 2076 void 2077 linux_complete_common(struct completion *c, int all) 2078 { 2079 int wakeup_swapper; 2080 2081 sleepq_lock(c); 2082 if (all) { 2083 c->done = UINT_MAX; 2084 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2085 } else { 2086 if (c->done != UINT_MAX) 2087 c->done++; 2088 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2089 } 2090 sleepq_release(c); 2091 if (wakeup_swapper) 2092 kick_proc0(); 2093 } 2094 2095 /* 2096 * Indefinite wait for done != 0 with or without signals. 2097 */ 2098 int 2099 linux_wait_for_common(struct completion *c, int flags) 2100 { 2101 struct task_struct *task; 2102 int error; 2103 2104 if (SCHEDULER_STOPPED()) 2105 return (0); 2106 2107 task = current; 2108 2109 if (flags != 0) 2110 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2111 else 2112 flags = SLEEPQ_SLEEP; 2113 error = 0; 2114 for (;;) { 2115 sleepq_lock(c); 2116 if (c->done) 2117 break; 2118 sleepq_add(c, NULL, "completion", flags, 0); 2119 if (flags & SLEEPQ_INTERRUPTIBLE) { 2120 DROP_GIANT(); 2121 error = -sleepq_wait_sig(c, 0); 2122 PICKUP_GIANT(); 2123 if (error != 0) { 2124 linux_schedule_save_interrupt_value(task, error); 2125 error = -ERESTARTSYS; 2126 goto intr; 2127 } 2128 } else { 2129 DROP_GIANT(); 2130 sleepq_wait(c, 0); 2131 PICKUP_GIANT(); 2132 } 2133 } 2134 if (c->done != UINT_MAX) 2135 c->done--; 2136 sleepq_release(c); 2137 2138 intr: 2139 return (error); 2140 } 2141 2142 /* 2143 * Time limited wait for done != 0 with or without signals. 2144 */ 2145 int 2146 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2147 { 2148 struct task_struct *task; 2149 int end = jiffies + timeout; 2150 int error; 2151 2152 if (SCHEDULER_STOPPED()) 2153 return (0); 2154 2155 task = current; 2156 2157 if (flags != 0) 2158 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2159 else 2160 flags = SLEEPQ_SLEEP; 2161 2162 for (;;) { 2163 sleepq_lock(c); 2164 if (c->done) 2165 break; 2166 sleepq_add(c, NULL, "completion", flags, 0); 2167 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2168 2169 DROP_GIANT(); 2170 if (flags & SLEEPQ_INTERRUPTIBLE) 2171 error = -sleepq_timedwait_sig(c, 0); 2172 else 2173 error = -sleepq_timedwait(c, 0); 2174 PICKUP_GIANT(); 2175 2176 if (error != 0) { 2177 /* check for timeout */ 2178 if (error == -EWOULDBLOCK) { 2179 error = 0; /* timeout */ 2180 } else { 2181 /* signal happened */ 2182 linux_schedule_save_interrupt_value(task, error); 2183 error = -ERESTARTSYS; 2184 } 2185 goto done; 2186 } 2187 } 2188 if (c->done != UINT_MAX) 2189 c->done--; 2190 sleepq_release(c); 2191 2192 /* return how many jiffies are left */ 2193 error = linux_timer_jiffies_until(end); 2194 done: 2195 return (error); 2196 } 2197 2198 int 2199 linux_try_wait_for_completion(struct completion *c) 2200 { 2201 int isdone; 2202 2203 sleepq_lock(c); 2204 isdone = (c->done != 0); 2205 if (c->done != 0 && c->done != UINT_MAX) 2206 c->done--; 2207 sleepq_release(c); 2208 return (isdone); 2209 } 2210 2211 int 2212 linux_completion_done(struct completion *c) 2213 { 2214 int isdone; 2215 2216 sleepq_lock(c); 2217 isdone = (c->done != 0); 2218 sleepq_release(c); 2219 return (isdone); 2220 } 2221 2222 static void 2223 linux_cdev_deref(struct linux_cdev *ldev) 2224 { 2225 if (refcount_release(&ldev->refs) && 2226 ldev->kobj.ktype == &linux_cdev_ktype) 2227 kfree(ldev); 2228 } 2229 2230 static void 2231 linux_cdev_release(struct kobject *kobj) 2232 { 2233 struct linux_cdev *cdev; 2234 struct kobject *parent; 2235 2236 cdev = container_of(kobj, struct linux_cdev, kobj); 2237 parent = kobj->parent; 2238 linux_destroy_dev(cdev); 2239 linux_cdev_deref(cdev); 2240 kobject_put(parent); 2241 } 2242 2243 static void 2244 linux_cdev_static_release(struct kobject *kobj) 2245 { 2246 struct cdev *cdev; 2247 struct linux_cdev *ldev; 2248 2249 ldev = container_of(kobj, struct linux_cdev, kobj); 2250 cdev = ldev->cdev; 2251 if (cdev != NULL) { 2252 destroy_dev(cdev); 2253 ldev->cdev = NULL; 2254 } 2255 kobject_put(kobj->parent); 2256 } 2257 2258 int 2259 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2260 { 2261 int ret; 2262 2263 if (dev->devt != 0) { 2264 /* Set parent kernel object. */ 2265 ldev->kobj.parent = &dev->kobj; 2266 2267 /* 2268 * Unlike Linux we require the kobject of the 2269 * character device structure to have a valid name 2270 * before calling this function: 2271 */ 2272 if (ldev->kobj.name == NULL) 2273 return (-EINVAL); 2274 2275 ret = cdev_add(ldev, dev->devt, 1); 2276 if (ret) 2277 return (ret); 2278 } 2279 ret = device_add(dev); 2280 if (ret != 0 && dev->devt != 0) 2281 cdev_del(ldev); 2282 return (ret); 2283 } 2284 2285 void 2286 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) 2287 { 2288 device_del(dev); 2289 2290 if (dev->devt != 0) 2291 cdev_del(ldev); 2292 } 2293 2294 static void 2295 linux_destroy_dev(struct linux_cdev *ldev) 2296 { 2297 2298 if (ldev->cdev == NULL) 2299 return; 2300 2301 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2302 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 2303 2304 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2305 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2306 pause("ldevdtr", hz / 4); 2307 2308 destroy_dev(ldev->cdev); 2309 ldev->cdev = NULL; 2310 } 2311 2312 const struct kobj_type linux_cdev_ktype = { 2313 .release = linux_cdev_release, 2314 }; 2315 2316 const struct kobj_type linux_cdev_static_ktype = { 2317 .release = linux_cdev_static_release, 2318 }; 2319 2320 static void 2321 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2322 { 2323 struct notifier_block *nb; 2324 struct netdev_notifier_info ni; 2325 2326 nb = arg; 2327 ni.ifp = ifp; 2328 ni.dev = (struct net_device *)ifp; 2329 if (linkstate == LINK_STATE_UP) 2330 nb->notifier_call(nb, NETDEV_UP, &ni); 2331 else 2332 nb->notifier_call(nb, NETDEV_DOWN, &ni); 2333 } 2334 2335 static void 2336 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2337 { 2338 struct notifier_block *nb; 2339 struct netdev_notifier_info ni; 2340 2341 nb = arg; 2342 ni.ifp = ifp; 2343 ni.dev = (struct net_device *)ifp; 2344 nb->notifier_call(nb, NETDEV_REGISTER, &ni); 2345 } 2346 2347 static void 2348 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2349 { 2350 struct notifier_block *nb; 2351 struct netdev_notifier_info ni; 2352 2353 nb = arg; 2354 ni.ifp = ifp; 2355 ni.dev = (struct net_device *)ifp; 2356 nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); 2357 } 2358 2359 static void 2360 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2361 { 2362 struct notifier_block *nb; 2363 struct netdev_notifier_info ni; 2364 2365 nb = arg; 2366 ni.ifp = ifp; 2367 ni.dev = (struct net_device *)ifp; 2368 nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); 2369 } 2370 2371 static void 2372 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2373 { 2374 struct notifier_block *nb; 2375 struct netdev_notifier_info ni; 2376 2377 nb = arg; 2378 ni.ifp = ifp; 2379 ni.dev = (struct net_device *)ifp; 2380 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); 2381 } 2382 2383 int 2384 register_netdevice_notifier(struct notifier_block *nb) 2385 { 2386 2387 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2388 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2389 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2390 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2391 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2392 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2393 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2394 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2395 2396 return (0); 2397 } 2398 2399 int 2400 register_inetaddr_notifier(struct notifier_block *nb) 2401 { 2402 2403 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2404 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2405 return (0); 2406 } 2407 2408 int 2409 unregister_netdevice_notifier(struct notifier_block *nb) 2410 { 2411 2412 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2413 nb->tags[NETDEV_UP]); 2414 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2415 nb->tags[NETDEV_REGISTER]); 2416 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2417 nb->tags[NETDEV_UNREGISTER]); 2418 EVENTHANDLER_DEREGISTER(iflladdr_event, 2419 nb->tags[NETDEV_CHANGEADDR]); 2420 2421 return (0); 2422 } 2423 2424 int 2425 unregister_inetaddr_notifier(struct notifier_block *nb) 2426 { 2427 2428 EVENTHANDLER_DEREGISTER(ifaddr_event, 2429 nb->tags[NETDEV_CHANGEIFADDR]); 2430 2431 return (0); 2432 } 2433 2434 struct list_sort_thunk { 2435 int (*cmp)(void *, struct list_head *, struct list_head *); 2436 void *priv; 2437 }; 2438 2439 static inline int 2440 linux_le_cmp(void *priv, const void *d1, const void *d2) 2441 { 2442 struct list_head *le1, *le2; 2443 struct list_sort_thunk *thunk; 2444 2445 thunk = priv; 2446 le1 = *(__DECONST(struct list_head **, d1)); 2447 le2 = *(__DECONST(struct list_head **, d2)); 2448 return ((thunk->cmp)(thunk->priv, le1, le2)); 2449 } 2450 2451 void 2452 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2453 struct list_head *a, struct list_head *b)) 2454 { 2455 struct list_sort_thunk thunk; 2456 struct list_head **ar, *le; 2457 size_t count, i; 2458 2459 count = 0; 2460 list_for_each(le, head) 2461 count++; 2462 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2463 i = 0; 2464 list_for_each(le, head) 2465 ar[i++] = le; 2466 thunk.cmp = cmp; 2467 thunk.priv = priv; 2468 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2469 INIT_LIST_HEAD(head); 2470 for (i = 0; i < count; i++) 2471 list_add_tail(ar[i], head); 2472 free(ar, M_KMALLOC); 2473 } 2474 2475 #if defined(__i386__) || defined(__amd64__) 2476 int 2477 linux_wbinvd_on_all_cpus(void) 2478 { 2479 2480 pmap_invalidate_cache(); 2481 return (0); 2482 } 2483 #endif 2484 2485 int 2486 linux_on_each_cpu(void callback(void *), void *data) 2487 { 2488 2489 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2490 smp_no_rendezvous_barrier, data); 2491 return (0); 2492 } 2493 2494 int 2495 linux_in_atomic(void) 2496 { 2497 2498 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2499 } 2500 2501 struct linux_cdev * 2502 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2503 { 2504 dev_t dev = MKDEV(major, minor); 2505 struct cdev *cdev; 2506 2507 dev_lock(); 2508 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2509 struct linux_cdev *ldev = cdev->si_drv1; 2510 if (ldev->dev == dev && 2511 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2512 break; 2513 } 2514 } 2515 dev_unlock(); 2516 2517 return (cdev != NULL ? cdev->si_drv1 : NULL); 2518 } 2519 2520 int 2521 __register_chrdev(unsigned int major, unsigned int baseminor, 2522 unsigned int count, const char *name, 2523 const struct file_operations *fops) 2524 { 2525 struct linux_cdev *cdev; 2526 int ret = 0; 2527 int i; 2528 2529 for (i = baseminor; i < baseminor + count; i++) { 2530 cdev = cdev_alloc(); 2531 cdev->ops = fops; 2532 kobject_set_name(&cdev->kobj, name); 2533 2534 ret = cdev_add(cdev, makedev(major, i), 1); 2535 if (ret != 0) 2536 break; 2537 } 2538 return (ret); 2539 } 2540 2541 int 2542 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2543 unsigned int count, const char *name, 2544 const struct file_operations *fops, uid_t uid, 2545 gid_t gid, int mode) 2546 { 2547 struct linux_cdev *cdev; 2548 int ret = 0; 2549 int i; 2550 2551 for (i = baseminor; i < baseminor + count; i++) { 2552 cdev = cdev_alloc(); 2553 cdev->ops = fops; 2554 kobject_set_name(&cdev->kobj, name); 2555 2556 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2557 if (ret != 0) 2558 break; 2559 } 2560 return (ret); 2561 } 2562 2563 void 2564 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2565 unsigned int count, const char *name) 2566 { 2567 struct linux_cdev *cdevp; 2568 int i; 2569 2570 for (i = baseminor; i < baseminor + count; i++) { 2571 cdevp = linux_find_cdev(name, major, i); 2572 if (cdevp != NULL) 2573 cdev_del(cdevp); 2574 } 2575 } 2576 2577 void 2578 linux_dump_stack(void) 2579 { 2580 #ifdef STACK 2581 struct stack st; 2582 2583 stack_zero(&st); 2584 stack_save(&st); 2585 stack_print(&st); 2586 #endif 2587 } 2588 2589 int 2590 linuxkpi_net_ratelimit(void) 2591 { 2592 2593 return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, 2594 lkpi_net_maxpps)); 2595 } 2596 2597 #if defined(__i386__) || defined(__amd64__) 2598 bool linux_cpu_has_clflush; 2599 #endif 2600 2601 static void 2602 linux_compat_init(void *arg) 2603 { 2604 struct sysctl_oid *rootoid; 2605 int i; 2606 2607 #if defined(__i386__) || defined(__amd64__) 2608 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2609 #endif 2610 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2611 2612 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2613 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2614 kobject_init(&linux_class_root, &linux_class_ktype); 2615 kobject_set_name(&linux_class_root, "class"); 2616 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2617 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2618 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2619 kobject_set_name(&linux_root_device.kobj, "device"); 2620 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2621 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2622 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2623 linux_root_device.bsddev = root_bus; 2624 linux_class_misc.name = "misc"; 2625 class_register(&linux_class_misc); 2626 INIT_LIST_HEAD(&pci_drivers); 2627 INIT_LIST_HEAD(&pci_devices); 2628 spin_lock_init(&pci_lock); 2629 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2630 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2631 LIST_INIT(&vmmaphead[i]); 2632 init_waitqueue_head(&linux_bit_waitq); 2633 init_waitqueue_head(&linux_var_waitq); 2634 2635 CPU_COPY(&all_cpus, &cpu_online_mask); 2636 } 2637 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2638 2639 static void 2640 linux_compat_uninit(void *arg) 2641 { 2642 linux_kobject_kfree_name(&linux_class_root); 2643 linux_kobject_kfree_name(&linux_root_device.kobj); 2644 linux_kobject_kfree_name(&linux_class_misc.kobj); 2645 2646 mtx_destroy(&vmmaplock); 2647 spin_lock_destroy(&pci_lock); 2648 rw_destroy(&linux_vma_lock); 2649 } 2650 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2651 2652 /* 2653 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2654 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2655 * used. Assert these types have the same size, else some parts of the 2656 * LinuxKPI may not work like expected: 2657 */ 2658 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2659