1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/sysent.h> 55 #include <sys/time.h> 56 #include <sys/user.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_object.h> 61 #include <vm/vm_page.h> 62 #include <vm/vm_pager.h> 63 64 #include <machine/stdarg.h> 65 66 #if defined(__i386__) || defined(__amd64__) 67 #include <machine/md_var.h> 68 #endif 69 70 #include <linux/kobject.h> 71 #include <linux/cpu.h> 72 #include <linux/device.h> 73 #include <linux/slab.h> 74 #include <linux/module.h> 75 #include <linux/moduleparam.h> 76 #include <linux/cdev.h> 77 #include <linux/file.h> 78 #include <linux/sysfs.h> 79 #include <linux/mm.h> 80 #include <linux/io.h> 81 #include <linux/vmalloc.h> 82 #include <linux/netdevice.h> 83 #include <linux/timer.h> 84 #include <linux/interrupt.h> 85 #include <linux/uaccess.h> 86 #include <linux/list.h> 87 #include <linux/kthread.h> 88 #include <linux/kernel.h> 89 #include <linux/compat.h> 90 #include <linux/io-mapping.h> 91 #include <linux/poll.h> 92 #include <linux/smp.h> 93 #include <linux/wait_bit.h> 94 #include <linux/rcupdate.h> 95 #include <linux/interval_tree.h> 96 #include <linux/interval_tree_generic.h> 97 98 #if defined(__i386__) || defined(__amd64__) 99 #include <asm/smp.h> 100 #endif 101 102 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 103 "LinuxKPI parameters"); 104 105 int linuxkpi_debug; 106 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 107 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 108 109 int linuxkpi_warn_dump_stack = 0; 110 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, 111 &linuxkpi_warn_dump_stack, 0, 112 "Set to enable stack traces from WARN_ON(). Clear to disable."); 113 114 static struct timeval lkpi_net_lastlog; 115 static int lkpi_net_curpps; 116 static int lkpi_net_maxpps = 99; 117 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 118 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 119 120 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 121 122 #include <linux/rbtree.h> 123 /* Undo Linux compat changes. */ 124 #undef RB_ROOT 125 #undef file 126 #undef cdev 127 #define RB_ROOT(head) (head)->rbh_root 128 129 static void linux_destroy_dev(struct linux_cdev *); 130 static void linux_cdev_deref(struct linux_cdev *ldev); 131 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 132 133 cpumask_t cpu_online_mask; 134 struct kobject linux_class_root; 135 struct device linux_root_device; 136 struct class linux_class_misc; 137 struct list_head pci_drivers; 138 struct list_head pci_devices; 139 spinlock_t pci_lock; 140 141 unsigned long linux_timer_hz_mask; 142 143 wait_queue_head_t linux_bit_waitq; 144 wait_queue_head_t linux_var_waitq; 145 146 int 147 panic_cmp(struct rb_node *one, struct rb_node *two) 148 { 149 panic("no cmp"); 150 } 151 152 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 153 154 #define START(node) ((node)->start) 155 #define LAST(node) ((node)->last) 156 157 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, 158 LAST,, lkpi_interval_tree) 159 160 struct kobject * 161 kobject_create(void) 162 { 163 struct kobject *kobj; 164 165 kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); 166 if (kobj == NULL) 167 return (NULL); 168 kobject_init(kobj, &linux_kfree_type); 169 170 return (kobj); 171 } 172 173 174 int 175 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 176 { 177 va_list tmp_va; 178 int len; 179 char *old; 180 char *name; 181 char dummy; 182 183 old = kobj->name; 184 185 if (old && fmt == NULL) 186 return (0); 187 188 /* compute length of string */ 189 va_copy(tmp_va, args); 190 len = vsnprintf(&dummy, 0, fmt, tmp_va); 191 va_end(tmp_va); 192 193 /* account for zero termination */ 194 len++; 195 196 /* check for error */ 197 if (len < 1) 198 return (-EINVAL); 199 200 /* allocate memory for string */ 201 name = kzalloc(len, GFP_KERNEL); 202 if (name == NULL) 203 return (-ENOMEM); 204 vsnprintf(name, len, fmt, args); 205 kobj->name = name; 206 207 /* free old string */ 208 kfree(old); 209 210 /* filter new string */ 211 for (; *name != '\0'; name++) 212 if (*name == '/') 213 *name = '!'; 214 return (0); 215 } 216 217 int 218 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 219 { 220 va_list args; 221 int error; 222 223 va_start(args, fmt); 224 error = kobject_set_name_vargs(kobj, fmt, args); 225 va_end(args); 226 227 return (error); 228 } 229 230 static int 231 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 232 { 233 const struct kobj_type *t; 234 int error; 235 236 kobj->parent = parent; 237 error = sysfs_create_dir(kobj); 238 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 239 struct attribute **attr; 240 t = kobj->ktype; 241 242 for (attr = t->default_attrs; *attr != NULL; attr++) { 243 error = sysfs_create_file(kobj, *attr); 244 if (error) 245 break; 246 } 247 if (error) 248 sysfs_remove_dir(kobj); 249 } 250 return (error); 251 } 252 253 int 254 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 255 { 256 va_list args; 257 int error; 258 259 va_start(args, fmt); 260 error = kobject_set_name_vargs(kobj, fmt, args); 261 va_end(args); 262 if (error) 263 return (error); 264 265 return kobject_add_complete(kobj, parent); 266 } 267 268 void 269 linux_kobject_release(struct kref *kref) 270 { 271 struct kobject *kobj; 272 char *name; 273 274 kobj = container_of(kref, struct kobject, kref); 275 sysfs_remove_dir(kobj); 276 name = kobj->name; 277 if (kobj->ktype && kobj->ktype->release) 278 kobj->ktype->release(kobj); 279 kfree(name); 280 } 281 282 static void 283 linux_kobject_kfree(struct kobject *kobj) 284 { 285 kfree(kobj); 286 } 287 288 static void 289 linux_kobject_kfree_name(struct kobject *kobj) 290 { 291 if (kobj) { 292 kfree(kobj->name); 293 } 294 } 295 296 const struct kobj_type linux_kfree_type = { 297 .release = linux_kobject_kfree 298 }; 299 300 static ssize_t 301 lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) 302 { 303 struct kobj_attribute *ka = 304 container_of(attr, struct kobj_attribute, attr); 305 306 if (ka->show == NULL) 307 return (-EIO); 308 309 return (ka->show(kobj, ka, buf)); 310 } 311 312 static ssize_t 313 lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr, 314 const char *buf, size_t count) 315 { 316 struct kobj_attribute *ka = 317 container_of(attr, struct kobj_attribute, attr); 318 319 if (ka->store == NULL) 320 return (-EIO); 321 322 return (ka->store(kobj, ka, buf, count)); 323 } 324 325 const struct sysfs_ops kobj_sysfs_ops = { 326 .show = lkpi_kobj_attr_show, 327 .store = lkpi_kobj_attr_store, 328 }; 329 330 static void 331 linux_device_release(struct device *dev) 332 { 333 pr_debug("linux_device_release: %s\n", dev_name(dev)); 334 kfree(dev); 335 } 336 337 static ssize_t 338 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 339 { 340 struct class_attribute *dattr; 341 ssize_t error; 342 343 dattr = container_of(attr, struct class_attribute, attr); 344 error = -EIO; 345 if (dattr->show) 346 error = dattr->show(container_of(kobj, struct class, kobj), 347 dattr, buf); 348 return (error); 349 } 350 351 static ssize_t 352 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 353 size_t count) 354 { 355 struct class_attribute *dattr; 356 ssize_t error; 357 358 dattr = container_of(attr, struct class_attribute, attr); 359 error = -EIO; 360 if (dattr->store) 361 error = dattr->store(container_of(kobj, struct class, kobj), 362 dattr, buf, count); 363 return (error); 364 } 365 366 static void 367 linux_class_release(struct kobject *kobj) 368 { 369 struct class *class; 370 371 class = container_of(kobj, struct class, kobj); 372 if (class->class_release) 373 class->class_release(class); 374 } 375 376 static const struct sysfs_ops linux_class_sysfs = { 377 .show = linux_class_show, 378 .store = linux_class_store, 379 }; 380 381 const struct kobj_type linux_class_ktype = { 382 .release = linux_class_release, 383 .sysfs_ops = &linux_class_sysfs 384 }; 385 386 static void 387 linux_dev_release(struct kobject *kobj) 388 { 389 struct device *dev; 390 391 dev = container_of(kobj, struct device, kobj); 392 /* This is the precedence defined by linux. */ 393 if (dev->release) 394 dev->release(dev); 395 else if (dev->class && dev->class->dev_release) 396 dev->class->dev_release(dev); 397 } 398 399 static ssize_t 400 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 401 { 402 struct device_attribute *dattr; 403 ssize_t error; 404 405 dattr = container_of(attr, struct device_attribute, attr); 406 error = -EIO; 407 if (dattr->show) 408 error = dattr->show(container_of(kobj, struct device, kobj), 409 dattr, buf); 410 return (error); 411 } 412 413 static ssize_t 414 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 415 size_t count) 416 { 417 struct device_attribute *dattr; 418 ssize_t error; 419 420 dattr = container_of(attr, struct device_attribute, attr); 421 error = -EIO; 422 if (dattr->store) 423 error = dattr->store(container_of(kobj, struct device, kobj), 424 dattr, buf, count); 425 return (error); 426 } 427 428 static const struct sysfs_ops linux_dev_sysfs = { 429 .show = linux_dev_show, 430 .store = linux_dev_store, 431 }; 432 433 const struct kobj_type linux_dev_ktype = { 434 .release = linux_dev_release, 435 .sysfs_ops = &linux_dev_sysfs 436 }; 437 438 struct device * 439 device_create(struct class *class, struct device *parent, dev_t devt, 440 void *drvdata, const char *fmt, ...) 441 { 442 struct device *dev; 443 va_list args; 444 445 dev = kzalloc(sizeof(*dev), M_WAITOK); 446 dev->parent = parent; 447 dev->class = class; 448 dev->devt = devt; 449 dev->driver_data = drvdata; 450 dev->release = linux_device_release; 451 va_start(args, fmt); 452 kobject_set_name_vargs(&dev->kobj, fmt, args); 453 va_end(args); 454 device_register(dev); 455 456 return (dev); 457 } 458 459 struct device * 460 device_create_groups_vargs(struct class *class, struct device *parent, 461 dev_t devt, void *drvdata, const struct attribute_group **groups, 462 const char *fmt, va_list args) 463 { 464 struct device *dev = NULL; 465 int retval = -ENODEV; 466 467 if (class == NULL || IS_ERR(class)) 468 goto error; 469 470 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 471 if (!dev) { 472 retval = -ENOMEM; 473 goto error; 474 } 475 476 dev->devt = devt; 477 dev->class = class; 478 dev->parent = parent; 479 dev->groups = groups; 480 dev->release = device_create_release; 481 /* device_initialize() needs the class and parent to be set */ 482 device_initialize(dev); 483 dev_set_drvdata(dev, drvdata); 484 485 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 486 if (retval) 487 goto error; 488 489 retval = device_add(dev); 490 if (retval) 491 goto error; 492 493 return dev; 494 495 error: 496 put_device(dev); 497 return ERR_PTR(retval); 498 } 499 500 struct class * 501 class_create(struct module *owner, const char *name) 502 { 503 struct class *class; 504 int error; 505 506 class = kzalloc(sizeof(*class), M_WAITOK); 507 class->owner = owner; 508 class->name = name; 509 class->class_release = linux_class_kfree; 510 error = class_register(class); 511 if (error) { 512 kfree(class); 513 return (NULL); 514 } 515 516 return (class); 517 } 518 519 int 520 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 521 struct kobject *parent, const char *fmt, ...) 522 { 523 va_list args; 524 int error; 525 526 kobject_init(kobj, ktype); 527 kobj->ktype = ktype; 528 kobj->parent = parent; 529 kobj->name = NULL; 530 531 va_start(args, fmt); 532 error = kobject_set_name_vargs(kobj, fmt, args); 533 va_end(args); 534 if (error) 535 return (error); 536 return kobject_add_complete(kobj, parent); 537 } 538 539 static void 540 linux_kq_lock(void *arg) 541 { 542 spinlock_t *s = arg; 543 544 spin_lock(s); 545 } 546 static void 547 linux_kq_unlock(void *arg) 548 { 549 spinlock_t *s = arg; 550 551 spin_unlock(s); 552 } 553 554 static void 555 linux_kq_assert_lock(void *arg, int what) 556 { 557 #ifdef INVARIANTS 558 spinlock_t *s = arg; 559 560 if (what == LA_LOCKED) 561 mtx_assert(&s->m, MA_OWNED); 562 else 563 mtx_assert(&s->m, MA_NOTOWNED); 564 #endif 565 } 566 567 static void 568 linux_file_kqfilter_poll(struct linux_file *, int); 569 570 struct linux_file * 571 linux_file_alloc(void) 572 { 573 struct linux_file *filp; 574 575 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 576 577 /* set initial refcount */ 578 filp->f_count = 1; 579 580 /* setup fields needed by kqueue support */ 581 spin_lock_init(&filp->f_kqlock); 582 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 583 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 584 585 return (filp); 586 } 587 588 void 589 linux_file_free(struct linux_file *filp) 590 { 591 if (filp->_file == NULL) { 592 if (filp->f_op != NULL && filp->f_op->release != NULL) 593 filp->f_op->release(filp->f_vnode, filp); 594 if (filp->f_shmem != NULL) 595 vm_object_deallocate(filp->f_shmem); 596 kfree_rcu(filp, rcu); 597 } else { 598 /* 599 * The close method of the character device or file 600 * will free the linux_file structure: 601 */ 602 _fdrop(filp->_file, curthread); 603 } 604 } 605 606 struct linux_cdev * 607 cdev_alloc(void) 608 { 609 struct linux_cdev *cdev; 610 611 cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); 612 kobject_init(&cdev->kobj, &linux_cdev_ktype); 613 cdev->refs = 1; 614 return (cdev); 615 } 616 617 static int 618 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 619 vm_page_t *mres) 620 { 621 struct vm_area_struct *vmap; 622 623 vmap = linux_cdev_handle_find(vm_obj->handle); 624 625 MPASS(vmap != NULL); 626 MPASS(vmap->vm_private_data == vm_obj->handle); 627 628 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 629 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 630 vm_page_t page; 631 632 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 633 /* 634 * If the passed in result page is a fake 635 * page, update it with the new physical 636 * address. 637 */ 638 page = *mres; 639 vm_page_updatefake(page, paddr, vm_obj->memattr); 640 } else { 641 /* 642 * Replace the passed in "mres" page with our 643 * own fake page and free up the all of the 644 * original pages. 645 */ 646 VM_OBJECT_WUNLOCK(vm_obj); 647 page = vm_page_getfake(paddr, vm_obj->memattr); 648 VM_OBJECT_WLOCK(vm_obj); 649 650 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 651 *mres = page; 652 } 653 vm_page_valid(page); 654 return (VM_PAGER_OK); 655 } 656 return (VM_PAGER_FAIL); 657 } 658 659 static int 660 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 661 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 662 { 663 struct vm_area_struct *vmap; 664 int err; 665 666 /* get VM area structure */ 667 vmap = linux_cdev_handle_find(vm_obj->handle); 668 MPASS(vmap != NULL); 669 MPASS(vmap->vm_private_data == vm_obj->handle); 670 671 VM_OBJECT_WUNLOCK(vm_obj); 672 673 linux_set_current(curthread); 674 675 down_write(&vmap->vm_mm->mmap_sem); 676 if (unlikely(vmap->vm_ops == NULL)) { 677 err = VM_FAULT_SIGBUS; 678 } else { 679 struct vm_fault vmf; 680 681 /* fill out VM fault structure */ 682 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 683 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 684 vmf.pgoff = 0; 685 vmf.page = NULL; 686 vmf.vma = vmap; 687 688 vmap->vm_pfn_count = 0; 689 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 690 vmap->vm_obj = vm_obj; 691 692 err = vmap->vm_ops->fault(&vmf); 693 694 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 695 kern_yield(PRI_USER); 696 err = vmap->vm_ops->fault(&vmf); 697 } 698 } 699 700 /* translate return code */ 701 switch (err) { 702 case VM_FAULT_OOM: 703 err = VM_PAGER_AGAIN; 704 break; 705 case VM_FAULT_SIGBUS: 706 err = VM_PAGER_BAD; 707 break; 708 case VM_FAULT_NOPAGE: 709 /* 710 * By contract the fault handler will return having 711 * busied all the pages itself. If pidx is already 712 * found in the object, it will simply xbusy the first 713 * page and return with vm_pfn_count set to 1. 714 */ 715 *first = vmap->vm_pfn_first; 716 *last = *first + vmap->vm_pfn_count - 1; 717 err = VM_PAGER_OK; 718 break; 719 default: 720 err = VM_PAGER_ERROR; 721 break; 722 } 723 up_write(&vmap->vm_mm->mmap_sem); 724 VM_OBJECT_WLOCK(vm_obj); 725 return (err); 726 } 727 728 static struct rwlock linux_vma_lock; 729 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 730 TAILQ_HEAD_INITIALIZER(linux_vma_head); 731 732 static void 733 linux_cdev_handle_free(struct vm_area_struct *vmap) 734 { 735 /* Drop reference on vm_file */ 736 if (vmap->vm_file != NULL) 737 fput(vmap->vm_file); 738 739 /* Drop reference on mm_struct */ 740 mmput(vmap->vm_mm); 741 742 kfree(vmap); 743 } 744 745 static void 746 linux_cdev_handle_remove(struct vm_area_struct *vmap) 747 { 748 rw_wlock(&linux_vma_lock); 749 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 750 rw_wunlock(&linux_vma_lock); 751 } 752 753 static struct vm_area_struct * 754 linux_cdev_handle_find(void *handle) 755 { 756 struct vm_area_struct *vmap; 757 758 rw_rlock(&linux_vma_lock); 759 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 760 if (vmap->vm_private_data == handle) 761 break; 762 } 763 rw_runlock(&linux_vma_lock); 764 return (vmap); 765 } 766 767 static int 768 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 769 vm_ooffset_t foff, struct ucred *cred, u_short *color) 770 { 771 772 MPASS(linux_cdev_handle_find(handle) != NULL); 773 *color = 0; 774 return (0); 775 } 776 777 static void 778 linux_cdev_pager_dtor(void *handle) 779 { 780 const struct vm_operations_struct *vm_ops; 781 struct vm_area_struct *vmap; 782 783 vmap = linux_cdev_handle_find(handle); 784 MPASS(vmap != NULL); 785 786 /* 787 * Remove handle before calling close operation to prevent 788 * other threads from reusing the handle pointer. 789 */ 790 linux_cdev_handle_remove(vmap); 791 792 down_write(&vmap->vm_mm->mmap_sem); 793 vm_ops = vmap->vm_ops; 794 if (likely(vm_ops != NULL)) 795 vm_ops->close(vmap); 796 up_write(&vmap->vm_mm->mmap_sem); 797 798 linux_cdev_handle_free(vmap); 799 } 800 801 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 802 { 803 /* OBJT_MGTDEVICE */ 804 .cdev_pg_populate = linux_cdev_pager_populate, 805 .cdev_pg_ctor = linux_cdev_pager_ctor, 806 .cdev_pg_dtor = linux_cdev_pager_dtor 807 }, 808 { 809 /* OBJT_DEVICE */ 810 .cdev_pg_fault = linux_cdev_pager_fault, 811 .cdev_pg_ctor = linux_cdev_pager_ctor, 812 .cdev_pg_dtor = linux_cdev_pager_dtor 813 }, 814 }; 815 816 int 817 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 818 unsigned long size) 819 { 820 vm_object_t obj; 821 vm_page_t m; 822 823 obj = vma->vm_obj; 824 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 825 return (-ENOTSUP); 826 VM_OBJECT_RLOCK(obj); 827 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 828 m != NULL && m->pindex < OFF_TO_IDX(address + size); 829 m = TAILQ_NEXT(m, listq)) 830 pmap_remove_all(m); 831 VM_OBJECT_RUNLOCK(obj); 832 return (0); 833 } 834 835 static struct file_operations dummy_ldev_ops = { 836 /* XXXKIB */ 837 }; 838 839 static struct linux_cdev dummy_ldev = { 840 .ops = &dummy_ldev_ops, 841 }; 842 843 #define LDEV_SI_DTR 0x0001 844 #define LDEV_SI_REF 0x0002 845 846 static void 847 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 848 struct linux_cdev **dev) 849 { 850 struct linux_cdev *ldev; 851 u_int siref; 852 853 ldev = filp->f_cdev; 854 *fop = filp->f_op; 855 if (ldev != NULL) { 856 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 857 refcount_acquire(&ldev->refs); 858 } else { 859 for (siref = ldev->siref;;) { 860 if ((siref & LDEV_SI_DTR) != 0) { 861 ldev = &dummy_ldev; 862 *fop = ldev->ops; 863 siref = ldev->siref; 864 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 865 } else if (atomic_fcmpset_int(&ldev->siref, 866 &siref, siref + LDEV_SI_REF)) { 867 break; 868 } 869 } 870 } 871 } 872 *dev = ldev; 873 } 874 875 static void 876 linux_drop_fop(struct linux_cdev *ldev) 877 { 878 879 if (ldev == NULL) 880 return; 881 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 882 linux_cdev_deref(ldev); 883 } else { 884 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 885 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 886 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 887 } 888 } 889 890 #define OPW(fp,td,code) ({ \ 891 struct file *__fpop; \ 892 __typeof(code) __retval; \ 893 \ 894 __fpop = (td)->td_fpop; \ 895 (td)->td_fpop = (fp); \ 896 __retval = (code); \ 897 (td)->td_fpop = __fpop; \ 898 __retval; \ 899 }) 900 901 static int 902 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 903 struct file *file) 904 { 905 struct linux_cdev *ldev; 906 struct linux_file *filp; 907 const struct file_operations *fop; 908 int error; 909 910 ldev = dev->si_drv1; 911 912 filp = linux_file_alloc(); 913 filp->f_dentry = &filp->f_dentry_store; 914 filp->f_op = ldev->ops; 915 filp->f_mode = file->f_flag; 916 filp->f_flags = file->f_flag; 917 filp->f_vnode = file->f_vnode; 918 filp->_file = file; 919 refcount_acquire(&ldev->refs); 920 filp->f_cdev = ldev; 921 922 linux_set_current(td); 923 linux_get_fop(filp, &fop, &ldev); 924 925 if (fop->open != NULL) { 926 error = -fop->open(file->f_vnode, filp); 927 if (error != 0) { 928 linux_drop_fop(ldev); 929 linux_cdev_deref(filp->f_cdev); 930 kfree(filp); 931 return (error); 932 } 933 } 934 935 /* hold on to the vnode - used for fstat() */ 936 vhold(filp->f_vnode); 937 938 /* release the file from devfs */ 939 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 940 linux_drop_fop(ldev); 941 return (ENXIO); 942 } 943 944 #define LINUX_IOCTL_MIN_PTR 0x10000UL 945 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 946 947 static inline int 948 linux_remap_address(void **uaddr, size_t len) 949 { 950 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 951 952 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 953 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 954 struct task_struct *pts = current; 955 if (pts == NULL) { 956 *uaddr = NULL; 957 return (1); 958 } 959 960 /* compute data offset */ 961 uaddr_val -= LINUX_IOCTL_MIN_PTR; 962 963 /* check that length is within bounds */ 964 if ((len > IOCPARM_MAX) || 965 (uaddr_val + len) > pts->bsd_ioctl_len) { 966 *uaddr = NULL; 967 return (1); 968 } 969 970 /* re-add kernel buffer address */ 971 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 972 973 /* update address location */ 974 *uaddr = (void *)uaddr_val; 975 return (1); 976 } 977 return (0); 978 } 979 980 int 981 linux_copyin(const void *uaddr, void *kaddr, size_t len) 982 { 983 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 984 if (uaddr == NULL) 985 return (-EFAULT); 986 memcpy(kaddr, uaddr, len); 987 return (0); 988 } 989 return (-copyin(uaddr, kaddr, len)); 990 } 991 992 int 993 linux_copyout(const void *kaddr, void *uaddr, size_t len) 994 { 995 if (linux_remap_address(&uaddr, len)) { 996 if (uaddr == NULL) 997 return (-EFAULT); 998 memcpy(uaddr, kaddr, len); 999 return (0); 1000 } 1001 return (-copyout(kaddr, uaddr, len)); 1002 } 1003 1004 size_t 1005 linux_clear_user(void *_uaddr, size_t _len) 1006 { 1007 uint8_t *uaddr = _uaddr; 1008 size_t len = _len; 1009 1010 /* make sure uaddr is aligned before going into the fast loop */ 1011 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 1012 if (subyte(uaddr, 0)) 1013 return (_len); 1014 uaddr++; 1015 len--; 1016 } 1017 1018 /* zero 8 bytes at a time */ 1019 while (len > 7) { 1020 #ifdef __LP64__ 1021 if (suword64(uaddr, 0)) 1022 return (_len); 1023 #else 1024 if (suword32(uaddr, 0)) 1025 return (_len); 1026 if (suword32(uaddr + 4, 0)) 1027 return (_len); 1028 #endif 1029 uaddr += 8; 1030 len -= 8; 1031 } 1032 1033 /* zero fill end, if any */ 1034 while (len > 0) { 1035 if (subyte(uaddr, 0)) 1036 return (_len); 1037 uaddr++; 1038 len--; 1039 } 1040 return (0); 1041 } 1042 1043 int 1044 linux_access_ok(const void *uaddr, size_t len) 1045 { 1046 uintptr_t saddr; 1047 uintptr_t eaddr; 1048 1049 /* get start and end address */ 1050 saddr = (uintptr_t)uaddr; 1051 eaddr = (uintptr_t)uaddr + len; 1052 1053 /* verify addresses are valid for userspace */ 1054 return ((saddr == eaddr) || 1055 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 1056 } 1057 1058 /* 1059 * This function should return either EINTR or ERESTART depending on 1060 * the signal type sent to this thread: 1061 */ 1062 static int 1063 linux_get_error(struct task_struct *task, int error) 1064 { 1065 /* check for signal type interrupt code */ 1066 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 1067 error = -linux_schedule_get_interrupt_value(task); 1068 if (error == 0) 1069 error = EINTR; 1070 } 1071 return (error); 1072 } 1073 1074 static int 1075 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 1076 const struct file_operations *fop, u_long cmd, caddr_t data, 1077 struct thread *td) 1078 { 1079 struct task_struct *task = current; 1080 unsigned size; 1081 int error; 1082 1083 size = IOCPARM_LEN(cmd); 1084 /* refer to logic in sys_ioctl() */ 1085 if (size > 0) { 1086 /* 1087 * Setup hint for linux_copyin() and linux_copyout(). 1088 * 1089 * Background: Linux code expects a user-space address 1090 * while FreeBSD supplies a kernel-space address. 1091 */ 1092 task->bsd_ioctl_data = data; 1093 task->bsd_ioctl_len = size; 1094 data = (void *)LINUX_IOCTL_MIN_PTR; 1095 } else { 1096 /* fetch user-space pointer */ 1097 data = *(void **)data; 1098 } 1099 #ifdef COMPAT_FREEBSD32 1100 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 1101 /* try the compat IOCTL handler first */ 1102 if (fop->compat_ioctl != NULL) { 1103 error = -OPW(fp, td, fop->compat_ioctl(filp, 1104 cmd, (u_long)data)); 1105 } else { 1106 error = ENOTTY; 1107 } 1108 1109 /* fallback to the regular IOCTL handler, if any */ 1110 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 1111 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 1112 cmd, (u_long)data)); 1113 } 1114 } else 1115 #endif 1116 { 1117 if (fop->unlocked_ioctl != NULL) { 1118 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 1119 cmd, (u_long)data)); 1120 } else { 1121 error = ENOTTY; 1122 } 1123 } 1124 if (size > 0) { 1125 task->bsd_ioctl_data = NULL; 1126 task->bsd_ioctl_len = 0; 1127 } 1128 1129 if (error == EWOULDBLOCK) { 1130 /* update kqfilter status, if any */ 1131 linux_file_kqfilter_poll(filp, 1132 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1133 } else { 1134 error = linux_get_error(task, error); 1135 } 1136 return (error); 1137 } 1138 1139 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1140 1141 /* 1142 * This function atomically updates the poll wakeup state and returns 1143 * the previous state at the time of update. 1144 */ 1145 static uint8_t 1146 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1147 { 1148 int c, old; 1149 1150 c = v->counter; 1151 1152 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1153 c = old; 1154 1155 return (c); 1156 } 1157 1158 static int 1159 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1160 { 1161 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1162 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1163 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1164 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1165 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1166 }; 1167 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1168 1169 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1170 case LINUX_FWQ_STATE_QUEUED: 1171 linux_poll_wakeup(filp); 1172 return (1); 1173 default: 1174 return (0); 1175 } 1176 } 1177 1178 void 1179 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1180 { 1181 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1182 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1183 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1184 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1185 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1186 }; 1187 1188 /* check if we are called inside the select system call */ 1189 if (p == LINUX_POLL_TABLE_NORMAL) 1190 selrecord(curthread, &filp->f_selinfo); 1191 1192 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1193 case LINUX_FWQ_STATE_INIT: 1194 /* NOTE: file handles can only belong to one wait-queue */ 1195 filp->f_wait_queue.wqh = wqh; 1196 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1197 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1198 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1199 break; 1200 default: 1201 break; 1202 } 1203 } 1204 1205 static void 1206 linux_poll_wait_dequeue(struct linux_file *filp) 1207 { 1208 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1209 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1210 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1211 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1212 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1213 }; 1214 1215 seldrain(&filp->f_selinfo); 1216 1217 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1218 case LINUX_FWQ_STATE_NOT_READY: 1219 case LINUX_FWQ_STATE_QUEUED: 1220 case LINUX_FWQ_STATE_READY: 1221 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1222 break; 1223 default: 1224 break; 1225 } 1226 } 1227 1228 void 1229 linux_poll_wakeup(struct linux_file *filp) 1230 { 1231 /* this function should be NULL-safe */ 1232 if (filp == NULL) 1233 return; 1234 1235 selwakeup(&filp->f_selinfo); 1236 1237 spin_lock(&filp->f_kqlock); 1238 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1239 LINUX_KQ_FLAG_NEED_WRITE; 1240 1241 /* make sure the "knote" gets woken up */ 1242 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1243 spin_unlock(&filp->f_kqlock); 1244 } 1245 1246 static void 1247 linux_file_kqfilter_detach(struct knote *kn) 1248 { 1249 struct linux_file *filp = kn->kn_hook; 1250 1251 spin_lock(&filp->f_kqlock); 1252 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1253 spin_unlock(&filp->f_kqlock); 1254 } 1255 1256 static int 1257 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1258 { 1259 struct linux_file *filp = kn->kn_hook; 1260 1261 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1262 1263 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1264 } 1265 1266 static int 1267 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1268 { 1269 struct linux_file *filp = kn->kn_hook; 1270 1271 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1272 1273 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1274 } 1275 1276 static struct filterops linux_dev_kqfiltops_read = { 1277 .f_isfd = 1, 1278 .f_detach = linux_file_kqfilter_detach, 1279 .f_event = linux_file_kqfilter_read_event, 1280 }; 1281 1282 static struct filterops linux_dev_kqfiltops_write = { 1283 .f_isfd = 1, 1284 .f_detach = linux_file_kqfilter_detach, 1285 .f_event = linux_file_kqfilter_write_event, 1286 }; 1287 1288 static void 1289 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1290 { 1291 struct thread *td; 1292 const struct file_operations *fop; 1293 struct linux_cdev *ldev; 1294 int temp; 1295 1296 if ((filp->f_kqflags & kqflags) == 0) 1297 return; 1298 1299 td = curthread; 1300 1301 linux_get_fop(filp, &fop, &ldev); 1302 /* get the latest polling state */ 1303 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1304 linux_drop_fop(ldev); 1305 1306 spin_lock(&filp->f_kqlock); 1307 /* clear kqflags */ 1308 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1309 LINUX_KQ_FLAG_NEED_WRITE); 1310 /* update kqflags */ 1311 if ((temp & (POLLIN | POLLOUT)) != 0) { 1312 if ((temp & POLLIN) != 0) 1313 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1314 if ((temp & POLLOUT) != 0) 1315 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1316 1317 /* make sure the "knote" gets woken up */ 1318 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1319 } 1320 spin_unlock(&filp->f_kqlock); 1321 } 1322 1323 static int 1324 linux_file_kqfilter(struct file *file, struct knote *kn) 1325 { 1326 struct linux_file *filp; 1327 struct thread *td; 1328 int error; 1329 1330 td = curthread; 1331 filp = (struct linux_file *)file->f_data; 1332 filp->f_flags = file->f_flag; 1333 if (filp->f_op->poll == NULL) 1334 return (EINVAL); 1335 1336 spin_lock(&filp->f_kqlock); 1337 switch (kn->kn_filter) { 1338 case EVFILT_READ: 1339 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1340 kn->kn_fop = &linux_dev_kqfiltops_read; 1341 kn->kn_hook = filp; 1342 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1343 error = 0; 1344 break; 1345 case EVFILT_WRITE: 1346 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1347 kn->kn_fop = &linux_dev_kqfiltops_write; 1348 kn->kn_hook = filp; 1349 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1350 error = 0; 1351 break; 1352 default: 1353 error = EINVAL; 1354 break; 1355 } 1356 spin_unlock(&filp->f_kqlock); 1357 1358 if (error == 0) { 1359 linux_set_current(td); 1360 1361 /* update kqfilter status, if any */ 1362 linux_file_kqfilter_poll(filp, 1363 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1364 } 1365 return (error); 1366 } 1367 1368 static int 1369 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1370 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1371 int nprot, bool is_shared, struct thread *td) 1372 { 1373 struct task_struct *task; 1374 struct vm_area_struct *vmap; 1375 struct mm_struct *mm; 1376 struct linux_file *filp; 1377 vm_memattr_t attr; 1378 int error; 1379 1380 filp = (struct linux_file *)fp->f_data; 1381 filp->f_flags = fp->f_flag; 1382 1383 if (fop->mmap == NULL) 1384 return (EOPNOTSUPP); 1385 1386 linux_set_current(td); 1387 1388 /* 1389 * The same VM object might be shared by multiple processes 1390 * and the mm_struct is usually freed when a process exits. 1391 * 1392 * The atomic reference below makes sure the mm_struct is 1393 * available as long as the vmap is in the linux_vma_head. 1394 */ 1395 task = current; 1396 mm = task->mm; 1397 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1398 return (EINVAL); 1399 1400 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1401 vmap->vm_start = 0; 1402 vmap->vm_end = size; 1403 vmap->vm_pgoff = *offset / PAGE_SIZE; 1404 vmap->vm_pfn = 0; 1405 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1406 if (is_shared) 1407 vmap->vm_flags |= VM_SHARED; 1408 vmap->vm_ops = NULL; 1409 vmap->vm_file = get_file(filp); 1410 vmap->vm_mm = mm; 1411 1412 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1413 error = linux_get_error(task, EINTR); 1414 } else { 1415 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1416 error = linux_get_error(task, error); 1417 up_write(&vmap->vm_mm->mmap_sem); 1418 } 1419 1420 if (error != 0) { 1421 linux_cdev_handle_free(vmap); 1422 return (error); 1423 } 1424 1425 attr = pgprot2cachemode(vmap->vm_page_prot); 1426 1427 if (vmap->vm_ops != NULL) { 1428 struct vm_area_struct *ptr; 1429 void *vm_private_data; 1430 bool vm_no_fault; 1431 1432 if (vmap->vm_ops->open == NULL || 1433 vmap->vm_ops->close == NULL || 1434 vmap->vm_private_data == NULL) { 1435 /* free allocated VM area struct */ 1436 linux_cdev_handle_free(vmap); 1437 return (EINVAL); 1438 } 1439 1440 vm_private_data = vmap->vm_private_data; 1441 1442 rw_wlock(&linux_vma_lock); 1443 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1444 if (ptr->vm_private_data == vm_private_data) 1445 break; 1446 } 1447 /* check if there is an existing VM area struct */ 1448 if (ptr != NULL) { 1449 /* check if the VM area structure is invalid */ 1450 if (ptr->vm_ops == NULL || 1451 ptr->vm_ops->open == NULL || 1452 ptr->vm_ops->close == NULL) { 1453 error = ESTALE; 1454 vm_no_fault = 1; 1455 } else { 1456 error = EEXIST; 1457 vm_no_fault = (ptr->vm_ops->fault == NULL); 1458 } 1459 } else { 1460 /* insert VM area structure into list */ 1461 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1462 error = 0; 1463 vm_no_fault = (vmap->vm_ops->fault == NULL); 1464 } 1465 rw_wunlock(&linux_vma_lock); 1466 1467 if (error != 0) { 1468 /* free allocated VM area struct */ 1469 linux_cdev_handle_free(vmap); 1470 /* check for stale VM area struct */ 1471 if (error != EEXIST) 1472 return (error); 1473 } 1474 1475 /* check if there is no fault handler */ 1476 if (vm_no_fault) { 1477 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1478 &linux_cdev_pager_ops[1], size, nprot, *offset, 1479 td->td_ucred); 1480 } else { 1481 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1482 &linux_cdev_pager_ops[0], size, nprot, *offset, 1483 td->td_ucred); 1484 } 1485 1486 /* check if allocating the VM object failed */ 1487 if (*object == NULL) { 1488 if (error == 0) { 1489 /* remove VM area struct from list */ 1490 linux_cdev_handle_remove(vmap); 1491 /* free allocated VM area struct */ 1492 linux_cdev_handle_free(vmap); 1493 } 1494 return (EINVAL); 1495 } 1496 } else { 1497 struct sglist *sg; 1498 1499 sg = sglist_alloc(1, M_WAITOK); 1500 sglist_append_phys(sg, 1501 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1502 1503 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1504 nprot, 0, td->td_ucred); 1505 1506 linux_cdev_handle_free(vmap); 1507 1508 if (*object == NULL) { 1509 sglist_free(sg); 1510 return (EINVAL); 1511 } 1512 } 1513 1514 if (attr != VM_MEMATTR_DEFAULT) { 1515 VM_OBJECT_WLOCK(*object); 1516 vm_object_set_memattr(*object, attr); 1517 VM_OBJECT_WUNLOCK(*object); 1518 } 1519 *offset = 0; 1520 return (0); 1521 } 1522 1523 struct cdevsw linuxcdevsw = { 1524 .d_version = D_VERSION, 1525 .d_fdopen = linux_dev_fdopen, 1526 .d_name = "lkpidev", 1527 }; 1528 1529 static int 1530 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1531 int flags, struct thread *td) 1532 { 1533 struct linux_file *filp; 1534 const struct file_operations *fop; 1535 struct linux_cdev *ldev; 1536 ssize_t bytes; 1537 int error; 1538 1539 error = 0; 1540 filp = (struct linux_file *)file->f_data; 1541 filp->f_flags = file->f_flag; 1542 /* XXX no support for I/O vectors currently */ 1543 if (uio->uio_iovcnt != 1) 1544 return (EOPNOTSUPP); 1545 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1546 return (EINVAL); 1547 linux_set_current(td); 1548 linux_get_fop(filp, &fop, &ldev); 1549 if (fop->read != NULL) { 1550 bytes = OPW(file, td, fop->read(filp, 1551 uio->uio_iov->iov_base, 1552 uio->uio_iov->iov_len, &uio->uio_offset)); 1553 if (bytes >= 0) { 1554 uio->uio_iov->iov_base = 1555 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1556 uio->uio_iov->iov_len -= bytes; 1557 uio->uio_resid -= bytes; 1558 } else { 1559 error = linux_get_error(current, -bytes); 1560 } 1561 } else 1562 error = ENXIO; 1563 1564 /* update kqfilter status, if any */ 1565 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1566 linux_drop_fop(ldev); 1567 1568 return (error); 1569 } 1570 1571 static int 1572 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1573 int flags, struct thread *td) 1574 { 1575 struct linux_file *filp; 1576 const struct file_operations *fop; 1577 struct linux_cdev *ldev; 1578 ssize_t bytes; 1579 int error; 1580 1581 filp = (struct linux_file *)file->f_data; 1582 filp->f_flags = file->f_flag; 1583 /* XXX no support for I/O vectors currently */ 1584 if (uio->uio_iovcnt != 1) 1585 return (EOPNOTSUPP); 1586 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1587 return (EINVAL); 1588 linux_set_current(td); 1589 linux_get_fop(filp, &fop, &ldev); 1590 if (fop->write != NULL) { 1591 bytes = OPW(file, td, fop->write(filp, 1592 uio->uio_iov->iov_base, 1593 uio->uio_iov->iov_len, &uio->uio_offset)); 1594 if (bytes >= 0) { 1595 uio->uio_iov->iov_base = 1596 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1597 uio->uio_iov->iov_len -= bytes; 1598 uio->uio_resid -= bytes; 1599 error = 0; 1600 } else { 1601 error = linux_get_error(current, -bytes); 1602 } 1603 } else 1604 error = ENXIO; 1605 1606 /* update kqfilter status, if any */ 1607 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1608 1609 linux_drop_fop(ldev); 1610 1611 return (error); 1612 } 1613 1614 static int 1615 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1616 struct thread *td) 1617 { 1618 struct linux_file *filp; 1619 const struct file_operations *fop; 1620 struct linux_cdev *ldev; 1621 int revents; 1622 1623 filp = (struct linux_file *)file->f_data; 1624 filp->f_flags = file->f_flag; 1625 linux_set_current(td); 1626 linux_get_fop(filp, &fop, &ldev); 1627 if (fop->poll != NULL) { 1628 revents = OPW(file, td, fop->poll(filp, 1629 LINUX_POLL_TABLE_NORMAL)) & events; 1630 } else { 1631 revents = 0; 1632 } 1633 linux_drop_fop(ldev); 1634 return (revents); 1635 } 1636 1637 static int 1638 linux_file_close(struct file *file, struct thread *td) 1639 { 1640 struct linux_file *filp; 1641 int (*release)(struct inode *, struct linux_file *); 1642 const struct file_operations *fop; 1643 struct linux_cdev *ldev; 1644 int error; 1645 1646 filp = (struct linux_file *)file->f_data; 1647 1648 KASSERT(file_count(filp) == 0, 1649 ("File refcount(%d) is not zero", file_count(filp))); 1650 1651 if (td == NULL) 1652 td = curthread; 1653 1654 error = 0; 1655 filp->f_flags = file->f_flag; 1656 linux_set_current(td); 1657 linux_poll_wait_dequeue(filp); 1658 linux_get_fop(filp, &fop, &ldev); 1659 /* 1660 * Always use the real release function, if any, to avoid 1661 * leaking device resources: 1662 */ 1663 release = filp->f_op->release; 1664 if (release != NULL) 1665 error = -OPW(file, td, release(filp->f_vnode, filp)); 1666 funsetown(&filp->f_sigio); 1667 if (filp->f_vnode != NULL) 1668 vdrop(filp->f_vnode); 1669 linux_drop_fop(ldev); 1670 ldev = filp->f_cdev; 1671 if (ldev != NULL) 1672 linux_cdev_deref(ldev); 1673 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1674 kfree(filp); 1675 1676 return (error); 1677 } 1678 1679 static int 1680 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1681 struct thread *td) 1682 { 1683 struct linux_file *filp; 1684 const struct file_operations *fop; 1685 struct linux_cdev *ldev; 1686 struct fiodgname_arg *fgn; 1687 const char *p; 1688 int error, i; 1689 1690 error = 0; 1691 filp = (struct linux_file *)fp->f_data; 1692 filp->f_flags = fp->f_flag; 1693 linux_get_fop(filp, &fop, &ldev); 1694 1695 linux_set_current(td); 1696 switch (cmd) { 1697 case FIONBIO: 1698 break; 1699 case FIOASYNC: 1700 if (fop->fasync == NULL) 1701 break; 1702 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1703 break; 1704 case FIOSETOWN: 1705 error = fsetown(*(int *)data, &filp->f_sigio); 1706 if (error == 0) { 1707 if (fop->fasync == NULL) 1708 break; 1709 error = -OPW(fp, td, fop->fasync(0, filp, 1710 fp->f_flag & FASYNC)); 1711 } 1712 break; 1713 case FIOGETOWN: 1714 *(int *)data = fgetown(&filp->f_sigio); 1715 break; 1716 case FIODGNAME: 1717 #ifdef COMPAT_FREEBSD32 1718 case FIODGNAME_32: 1719 #endif 1720 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1721 error = ENXIO; 1722 break; 1723 } 1724 fgn = data; 1725 p = devtoname(filp->f_cdev->cdev); 1726 i = strlen(p) + 1; 1727 if (i > fgn->len) { 1728 error = EINVAL; 1729 break; 1730 } 1731 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1732 break; 1733 default: 1734 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1735 break; 1736 } 1737 linux_drop_fop(ldev); 1738 return (error); 1739 } 1740 1741 static int 1742 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1743 vm_prot_t maxprot, int flags, struct file *fp, 1744 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1745 { 1746 /* 1747 * Character devices do not provide private mappings 1748 * of any kind: 1749 */ 1750 if ((maxprot & VM_PROT_WRITE) == 0 && 1751 (prot & VM_PROT_WRITE) != 0) 1752 return (EACCES); 1753 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1754 return (EINVAL); 1755 1756 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1757 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1758 } 1759 1760 static int 1761 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1762 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1763 struct thread *td) 1764 { 1765 struct linux_file *filp; 1766 const struct file_operations *fop; 1767 struct linux_cdev *ldev; 1768 struct mount *mp; 1769 struct vnode *vp; 1770 vm_object_t object; 1771 vm_prot_t maxprot; 1772 int error; 1773 1774 filp = (struct linux_file *)fp->f_data; 1775 1776 vp = filp->f_vnode; 1777 if (vp == NULL) 1778 return (EOPNOTSUPP); 1779 1780 /* 1781 * Ensure that file and memory protections are 1782 * compatible. 1783 */ 1784 mp = vp->v_mount; 1785 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1786 maxprot = VM_PROT_NONE; 1787 if ((prot & VM_PROT_EXECUTE) != 0) 1788 return (EACCES); 1789 } else 1790 maxprot = VM_PROT_EXECUTE; 1791 if ((fp->f_flag & FREAD) != 0) 1792 maxprot |= VM_PROT_READ; 1793 else if ((prot & VM_PROT_READ) != 0) 1794 return (EACCES); 1795 1796 /* 1797 * If we are sharing potential changes via MAP_SHARED and we 1798 * are trying to get write permission although we opened it 1799 * without asking for it, bail out. 1800 * 1801 * Note that most character devices always share mappings. 1802 * 1803 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1804 * requests rather than doing it here. 1805 */ 1806 if ((flags & MAP_SHARED) != 0) { 1807 if ((fp->f_flag & FWRITE) != 0) 1808 maxprot |= VM_PROT_WRITE; 1809 else if ((prot & VM_PROT_WRITE) != 0) 1810 return (EACCES); 1811 } 1812 maxprot &= cap_maxprot; 1813 1814 linux_get_fop(filp, &fop, &ldev); 1815 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1816 &foff, fop, &object); 1817 if (error != 0) 1818 goto out; 1819 1820 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1821 foff, FALSE, td); 1822 if (error != 0) 1823 vm_object_deallocate(object); 1824 out: 1825 linux_drop_fop(ldev); 1826 return (error); 1827 } 1828 1829 static int 1830 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 1831 { 1832 struct linux_file *filp; 1833 struct vnode *vp; 1834 int error; 1835 1836 filp = (struct linux_file *)fp->f_data; 1837 if (filp->f_vnode == NULL) 1838 return (EOPNOTSUPP); 1839 1840 vp = filp->f_vnode; 1841 1842 vn_lock(vp, LK_SHARED | LK_RETRY); 1843 error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); 1844 VOP_UNLOCK(vp); 1845 1846 return (error); 1847 } 1848 1849 static int 1850 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1851 struct filedesc *fdp) 1852 { 1853 struct linux_file *filp; 1854 struct vnode *vp; 1855 int error; 1856 1857 filp = fp->f_data; 1858 vp = filp->f_vnode; 1859 if (vp == NULL) { 1860 error = 0; 1861 kif->kf_type = KF_TYPE_DEV; 1862 } else { 1863 vref(vp); 1864 FILEDESC_SUNLOCK(fdp); 1865 error = vn_fill_kinfo_vnode(vp, kif); 1866 vrele(vp); 1867 kif->kf_type = KF_TYPE_VNODE; 1868 FILEDESC_SLOCK(fdp); 1869 } 1870 return (error); 1871 } 1872 1873 unsigned int 1874 linux_iminor(struct inode *inode) 1875 { 1876 struct linux_cdev *ldev; 1877 1878 if (inode == NULL || inode->v_rdev == NULL || 1879 inode->v_rdev->si_devsw != &linuxcdevsw) 1880 return (-1U); 1881 ldev = inode->v_rdev->si_drv1; 1882 if (ldev == NULL) 1883 return (-1U); 1884 1885 return (minor(ldev->dev)); 1886 } 1887 1888 struct fileops linuxfileops = { 1889 .fo_read = linux_file_read, 1890 .fo_write = linux_file_write, 1891 .fo_truncate = invfo_truncate, 1892 .fo_kqfilter = linux_file_kqfilter, 1893 .fo_stat = linux_file_stat, 1894 .fo_fill_kinfo = linux_file_fill_kinfo, 1895 .fo_poll = linux_file_poll, 1896 .fo_close = linux_file_close, 1897 .fo_ioctl = linux_file_ioctl, 1898 .fo_mmap = linux_file_mmap, 1899 .fo_chmod = invfo_chmod, 1900 .fo_chown = invfo_chown, 1901 .fo_sendfile = invfo_sendfile, 1902 .fo_flags = DFLAG_PASSABLE, 1903 }; 1904 1905 /* 1906 * Hash of vmmap addresses. This is infrequently accessed and does not 1907 * need to be particularly large. This is done because we must store the 1908 * caller's idea of the map size to properly unmap. 1909 */ 1910 struct vmmap { 1911 LIST_ENTRY(vmmap) vm_next; 1912 void *vm_addr; 1913 unsigned long vm_size; 1914 }; 1915 1916 struct vmmaphd { 1917 struct vmmap *lh_first; 1918 }; 1919 #define VMMAP_HASH_SIZE 64 1920 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1921 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1922 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1923 static struct mtx vmmaplock; 1924 1925 static void 1926 vmmap_add(void *addr, unsigned long size) 1927 { 1928 struct vmmap *vmmap; 1929 1930 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1931 mtx_lock(&vmmaplock); 1932 vmmap->vm_size = size; 1933 vmmap->vm_addr = addr; 1934 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1935 mtx_unlock(&vmmaplock); 1936 } 1937 1938 static struct vmmap * 1939 vmmap_remove(void *addr) 1940 { 1941 struct vmmap *vmmap; 1942 1943 mtx_lock(&vmmaplock); 1944 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1945 if (vmmap->vm_addr == addr) 1946 break; 1947 if (vmmap) 1948 LIST_REMOVE(vmmap, vm_next); 1949 mtx_unlock(&vmmaplock); 1950 1951 return (vmmap); 1952 } 1953 1954 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1955 void * 1956 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1957 { 1958 void *addr; 1959 1960 addr = pmap_mapdev_attr(phys_addr, size, attr); 1961 if (addr == NULL) 1962 return (NULL); 1963 vmmap_add(addr, size); 1964 1965 return (addr); 1966 } 1967 #endif 1968 1969 void 1970 iounmap(void *addr) 1971 { 1972 struct vmmap *vmmap; 1973 1974 vmmap = vmmap_remove(addr); 1975 if (vmmap == NULL) 1976 return; 1977 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1978 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1979 #endif 1980 kfree(vmmap); 1981 } 1982 1983 void * 1984 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1985 { 1986 vm_offset_t off; 1987 size_t size; 1988 1989 size = count * PAGE_SIZE; 1990 off = kva_alloc(size); 1991 if (off == 0) 1992 return (NULL); 1993 vmmap_add((void *)off, size); 1994 pmap_qenter(off, pages, count); 1995 1996 return ((void *)off); 1997 } 1998 1999 void 2000 vunmap(void *addr) 2001 { 2002 struct vmmap *vmmap; 2003 2004 vmmap = vmmap_remove(addr); 2005 if (vmmap == NULL) 2006 return; 2007 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 2008 kva_free((vm_offset_t)addr, vmmap->vm_size); 2009 kfree(vmmap); 2010 } 2011 2012 static char * 2013 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 2014 { 2015 unsigned int len; 2016 char *p; 2017 va_list aq; 2018 2019 va_copy(aq, ap); 2020 len = vsnprintf(NULL, 0, fmt, aq); 2021 va_end(aq); 2022 2023 if (dev != NULL) 2024 p = devm_kmalloc(dev, len + 1, gfp); 2025 else 2026 p = kmalloc(len + 1, gfp); 2027 if (p != NULL) 2028 vsnprintf(p, len + 1, fmt, ap); 2029 2030 return (p); 2031 } 2032 2033 char * 2034 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 2035 { 2036 2037 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 2038 } 2039 2040 char * 2041 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 2042 { 2043 va_list ap; 2044 char *p; 2045 2046 va_start(ap, fmt); 2047 p = devm_kvasprintf(dev, gfp, fmt, ap); 2048 va_end(ap); 2049 2050 return (p); 2051 } 2052 2053 char * 2054 kasprintf(gfp_t gfp, const char *fmt, ...) 2055 { 2056 va_list ap; 2057 char *p; 2058 2059 va_start(ap, fmt); 2060 p = kvasprintf(gfp, fmt, ap); 2061 va_end(ap); 2062 2063 return (p); 2064 } 2065 2066 static void 2067 linux_timer_callback_wrapper(void *context) 2068 { 2069 struct timer_list *timer; 2070 2071 timer = context; 2072 2073 if (linux_set_current_flags(curthread, M_NOWAIT)) { 2074 /* try again later */ 2075 callout_reset(&timer->callout, 1, 2076 &linux_timer_callback_wrapper, timer); 2077 return; 2078 } 2079 2080 timer->function(timer->data); 2081 } 2082 2083 int 2084 mod_timer(struct timer_list *timer, int expires) 2085 { 2086 int ret; 2087 2088 timer->expires = expires; 2089 ret = callout_reset(&timer->callout, 2090 linux_timer_jiffies_until(expires), 2091 &linux_timer_callback_wrapper, timer); 2092 2093 MPASS(ret == 0 || ret == 1); 2094 2095 return (ret == 1); 2096 } 2097 2098 void 2099 add_timer(struct timer_list *timer) 2100 { 2101 2102 callout_reset(&timer->callout, 2103 linux_timer_jiffies_until(timer->expires), 2104 &linux_timer_callback_wrapper, timer); 2105 } 2106 2107 void 2108 add_timer_on(struct timer_list *timer, int cpu) 2109 { 2110 2111 callout_reset_on(&timer->callout, 2112 linux_timer_jiffies_until(timer->expires), 2113 &linux_timer_callback_wrapper, timer, cpu); 2114 } 2115 2116 int 2117 del_timer(struct timer_list *timer) 2118 { 2119 2120 if (callout_stop(&(timer)->callout) == -1) 2121 return (0); 2122 return (1); 2123 } 2124 2125 int 2126 del_timer_sync(struct timer_list *timer) 2127 { 2128 2129 if (callout_drain(&(timer)->callout) == -1) 2130 return (0); 2131 return (1); 2132 } 2133 2134 /* greatest common divisor, Euclid equation */ 2135 static uint64_t 2136 lkpi_gcd_64(uint64_t a, uint64_t b) 2137 { 2138 uint64_t an; 2139 uint64_t bn; 2140 2141 while (b != 0) { 2142 an = b; 2143 bn = a % b; 2144 a = an; 2145 b = bn; 2146 } 2147 return (a); 2148 } 2149 2150 uint64_t lkpi_nsec2hz_rem; 2151 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2152 uint64_t lkpi_nsec2hz_max; 2153 2154 uint64_t lkpi_usec2hz_rem; 2155 uint64_t lkpi_usec2hz_div = 1000000ULL; 2156 uint64_t lkpi_usec2hz_max; 2157 2158 uint64_t lkpi_msec2hz_rem; 2159 uint64_t lkpi_msec2hz_div = 1000ULL; 2160 uint64_t lkpi_msec2hz_max; 2161 2162 static void 2163 linux_timer_init(void *arg) 2164 { 2165 uint64_t gcd; 2166 2167 /* 2168 * Compute an internal HZ value which can divide 2**32 to 2169 * avoid timer rounding problems when the tick value wraps 2170 * around 2**32: 2171 */ 2172 linux_timer_hz_mask = 1; 2173 while (linux_timer_hz_mask < (unsigned long)hz) 2174 linux_timer_hz_mask *= 2; 2175 linux_timer_hz_mask--; 2176 2177 /* compute some internal constants */ 2178 2179 lkpi_nsec2hz_rem = hz; 2180 lkpi_usec2hz_rem = hz; 2181 lkpi_msec2hz_rem = hz; 2182 2183 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2184 lkpi_nsec2hz_rem /= gcd; 2185 lkpi_nsec2hz_div /= gcd; 2186 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2187 2188 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2189 lkpi_usec2hz_rem /= gcd; 2190 lkpi_usec2hz_div /= gcd; 2191 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2192 2193 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2194 lkpi_msec2hz_rem /= gcd; 2195 lkpi_msec2hz_div /= gcd; 2196 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2197 } 2198 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2199 2200 void 2201 linux_complete_common(struct completion *c, int all) 2202 { 2203 int wakeup_swapper; 2204 2205 sleepq_lock(c); 2206 if (all) { 2207 c->done = UINT_MAX; 2208 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2209 } else { 2210 if (c->done != UINT_MAX) 2211 c->done++; 2212 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2213 } 2214 sleepq_release(c); 2215 if (wakeup_swapper) 2216 kick_proc0(); 2217 } 2218 2219 /* 2220 * Indefinite wait for done != 0 with or without signals. 2221 */ 2222 int 2223 linux_wait_for_common(struct completion *c, int flags) 2224 { 2225 struct task_struct *task; 2226 int error; 2227 2228 if (SCHEDULER_STOPPED()) 2229 return (0); 2230 2231 task = current; 2232 2233 if (flags != 0) 2234 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2235 else 2236 flags = SLEEPQ_SLEEP; 2237 error = 0; 2238 for (;;) { 2239 sleepq_lock(c); 2240 if (c->done) 2241 break; 2242 sleepq_add(c, NULL, "completion", flags, 0); 2243 if (flags & SLEEPQ_INTERRUPTIBLE) { 2244 DROP_GIANT(); 2245 error = -sleepq_wait_sig(c, 0); 2246 PICKUP_GIANT(); 2247 if (error != 0) { 2248 linux_schedule_save_interrupt_value(task, error); 2249 error = -ERESTARTSYS; 2250 goto intr; 2251 } 2252 } else { 2253 DROP_GIANT(); 2254 sleepq_wait(c, 0); 2255 PICKUP_GIANT(); 2256 } 2257 } 2258 if (c->done != UINT_MAX) 2259 c->done--; 2260 sleepq_release(c); 2261 2262 intr: 2263 return (error); 2264 } 2265 2266 /* 2267 * Time limited wait for done != 0 with or without signals. 2268 */ 2269 int 2270 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2271 { 2272 struct task_struct *task; 2273 int end = jiffies + timeout; 2274 int error; 2275 2276 if (SCHEDULER_STOPPED()) 2277 return (0); 2278 2279 task = current; 2280 2281 if (flags != 0) 2282 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2283 else 2284 flags = SLEEPQ_SLEEP; 2285 2286 for (;;) { 2287 sleepq_lock(c); 2288 if (c->done) 2289 break; 2290 sleepq_add(c, NULL, "completion", flags, 0); 2291 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2292 2293 DROP_GIANT(); 2294 if (flags & SLEEPQ_INTERRUPTIBLE) 2295 error = -sleepq_timedwait_sig(c, 0); 2296 else 2297 error = -sleepq_timedwait(c, 0); 2298 PICKUP_GIANT(); 2299 2300 if (error != 0) { 2301 /* check for timeout */ 2302 if (error == -EWOULDBLOCK) { 2303 error = 0; /* timeout */ 2304 } else { 2305 /* signal happened */ 2306 linux_schedule_save_interrupt_value(task, error); 2307 error = -ERESTARTSYS; 2308 } 2309 goto done; 2310 } 2311 } 2312 if (c->done != UINT_MAX) 2313 c->done--; 2314 sleepq_release(c); 2315 2316 /* return how many jiffies are left */ 2317 error = linux_timer_jiffies_until(end); 2318 done: 2319 return (error); 2320 } 2321 2322 int 2323 linux_try_wait_for_completion(struct completion *c) 2324 { 2325 int isdone; 2326 2327 sleepq_lock(c); 2328 isdone = (c->done != 0); 2329 if (c->done != 0 && c->done != UINT_MAX) 2330 c->done--; 2331 sleepq_release(c); 2332 return (isdone); 2333 } 2334 2335 int 2336 linux_completion_done(struct completion *c) 2337 { 2338 int isdone; 2339 2340 sleepq_lock(c); 2341 isdone = (c->done != 0); 2342 sleepq_release(c); 2343 return (isdone); 2344 } 2345 2346 static void 2347 linux_cdev_deref(struct linux_cdev *ldev) 2348 { 2349 if (refcount_release(&ldev->refs) && 2350 ldev->kobj.ktype == &linux_cdev_ktype) 2351 kfree(ldev); 2352 } 2353 2354 static void 2355 linux_cdev_release(struct kobject *kobj) 2356 { 2357 struct linux_cdev *cdev; 2358 struct kobject *parent; 2359 2360 cdev = container_of(kobj, struct linux_cdev, kobj); 2361 parent = kobj->parent; 2362 linux_destroy_dev(cdev); 2363 linux_cdev_deref(cdev); 2364 kobject_put(parent); 2365 } 2366 2367 static void 2368 linux_cdev_static_release(struct kobject *kobj) 2369 { 2370 struct cdev *cdev; 2371 struct linux_cdev *ldev; 2372 2373 ldev = container_of(kobj, struct linux_cdev, kobj); 2374 cdev = ldev->cdev; 2375 if (cdev != NULL) { 2376 destroy_dev(cdev); 2377 ldev->cdev = NULL; 2378 } 2379 kobject_put(kobj->parent); 2380 } 2381 2382 int 2383 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2384 { 2385 int ret; 2386 2387 if (dev->devt != 0) { 2388 /* Set parent kernel object. */ 2389 ldev->kobj.parent = &dev->kobj; 2390 2391 /* 2392 * Unlike Linux we require the kobject of the 2393 * character device structure to have a valid name 2394 * before calling this function: 2395 */ 2396 if (ldev->kobj.name == NULL) 2397 return (-EINVAL); 2398 2399 ret = cdev_add(ldev, dev->devt, 1); 2400 if (ret) 2401 return (ret); 2402 } 2403 ret = device_add(dev); 2404 if (ret != 0 && dev->devt != 0) 2405 cdev_del(ldev); 2406 return (ret); 2407 } 2408 2409 void 2410 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) 2411 { 2412 device_del(dev); 2413 2414 if (dev->devt != 0) 2415 cdev_del(ldev); 2416 } 2417 2418 static void 2419 linux_destroy_dev(struct linux_cdev *ldev) 2420 { 2421 2422 if (ldev->cdev == NULL) 2423 return; 2424 2425 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2426 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 2427 2428 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2429 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2430 pause("ldevdtr", hz / 4); 2431 2432 destroy_dev(ldev->cdev); 2433 ldev->cdev = NULL; 2434 } 2435 2436 const struct kobj_type linux_cdev_ktype = { 2437 .release = linux_cdev_release, 2438 }; 2439 2440 const struct kobj_type linux_cdev_static_ktype = { 2441 .release = linux_cdev_static_release, 2442 }; 2443 2444 static void 2445 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2446 { 2447 struct notifier_block *nb; 2448 struct netdev_notifier_info ni; 2449 2450 nb = arg; 2451 ni.ifp = ifp; 2452 ni.dev = (struct net_device *)ifp; 2453 if (linkstate == LINK_STATE_UP) 2454 nb->notifier_call(nb, NETDEV_UP, &ni); 2455 else 2456 nb->notifier_call(nb, NETDEV_DOWN, &ni); 2457 } 2458 2459 static void 2460 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2461 { 2462 struct notifier_block *nb; 2463 struct netdev_notifier_info ni; 2464 2465 nb = arg; 2466 ni.ifp = ifp; 2467 ni.dev = (struct net_device *)ifp; 2468 nb->notifier_call(nb, NETDEV_REGISTER, &ni); 2469 } 2470 2471 static void 2472 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2473 { 2474 struct notifier_block *nb; 2475 struct netdev_notifier_info ni; 2476 2477 nb = arg; 2478 ni.ifp = ifp; 2479 ni.dev = (struct net_device *)ifp; 2480 nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); 2481 } 2482 2483 static void 2484 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2485 { 2486 struct notifier_block *nb; 2487 struct netdev_notifier_info ni; 2488 2489 nb = arg; 2490 ni.ifp = ifp; 2491 ni.dev = (struct net_device *)ifp; 2492 nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); 2493 } 2494 2495 static void 2496 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2497 { 2498 struct notifier_block *nb; 2499 struct netdev_notifier_info ni; 2500 2501 nb = arg; 2502 ni.ifp = ifp; 2503 ni.dev = (struct net_device *)ifp; 2504 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); 2505 } 2506 2507 int 2508 register_netdevice_notifier(struct notifier_block *nb) 2509 { 2510 2511 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2512 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2513 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2514 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2515 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2516 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2517 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2518 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2519 2520 return (0); 2521 } 2522 2523 int 2524 register_inetaddr_notifier(struct notifier_block *nb) 2525 { 2526 2527 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2528 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2529 return (0); 2530 } 2531 2532 int 2533 unregister_netdevice_notifier(struct notifier_block *nb) 2534 { 2535 2536 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2537 nb->tags[NETDEV_UP]); 2538 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2539 nb->tags[NETDEV_REGISTER]); 2540 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2541 nb->tags[NETDEV_UNREGISTER]); 2542 EVENTHANDLER_DEREGISTER(iflladdr_event, 2543 nb->tags[NETDEV_CHANGEADDR]); 2544 2545 return (0); 2546 } 2547 2548 int 2549 unregister_inetaddr_notifier(struct notifier_block *nb) 2550 { 2551 2552 EVENTHANDLER_DEREGISTER(ifaddr_event, 2553 nb->tags[NETDEV_CHANGEIFADDR]); 2554 2555 return (0); 2556 } 2557 2558 struct list_sort_thunk { 2559 int (*cmp)(void *, struct list_head *, struct list_head *); 2560 void *priv; 2561 }; 2562 2563 static inline int 2564 linux_le_cmp(void *priv, const void *d1, const void *d2) 2565 { 2566 struct list_head *le1, *le2; 2567 struct list_sort_thunk *thunk; 2568 2569 thunk = priv; 2570 le1 = *(__DECONST(struct list_head **, d1)); 2571 le2 = *(__DECONST(struct list_head **, d2)); 2572 return ((thunk->cmp)(thunk->priv, le1, le2)); 2573 } 2574 2575 void 2576 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2577 struct list_head *a, struct list_head *b)) 2578 { 2579 struct list_sort_thunk thunk; 2580 struct list_head **ar, *le; 2581 size_t count, i; 2582 2583 count = 0; 2584 list_for_each(le, head) 2585 count++; 2586 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2587 i = 0; 2588 list_for_each(le, head) 2589 ar[i++] = le; 2590 thunk.cmp = cmp; 2591 thunk.priv = priv; 2592 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2593 INIT_LIST_HEAD(head); 2594 for (i = 0; i < count; i++) 2595 list_add_tail(ar[i], head); 2596 free(ar, M_KMALLOC); 2597 } 2598 2599 #if defined(__i386__) || defined(__amd64__) 2600 int 2601 linux_wbinvd_on_all_cpus(void) 2602 { 2603 2604 pmap_invalidate_cache(); 2605 return (0); 2606 } 2607 #endif 2608 2609 int 2610 linux_on_each_cpu(void callback(void *), void *data) 2611 { 2612 2613 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2614 smp_no_rendezvous_barrier, data); 2615 return (0); 2616 } 2617 2618 int 2619 linux_in_atomic(void) 2620 { 2621 2622 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2623 } 2624 2625 struct linux_cdev * 2626 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2627 { 2628 dev_t dev = MKDEV(major, minor); 2629 struct cdev *cdev; 2630 2631 dev_lock(); 2632 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2633 struct linux_cdev *ldev = cdev->si_drv1; 2634 if (ldev->dev == dev && 2635 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2636 break; 2637 } 2638 } 2639 dev_unlock(); 2640 2641 return (cdev != NULL ? cdev->si_drv1 : NULL); 2642 } 2643 2644 int 2645 __register_chrdev(unsigned int major, unsigned int baseminor, 2646 unsigned int count, const char *name, 2647 const struct file_operations *fops) 2648 { 2649 struct linux_cdev *cdev; 2650 int ret = 0; 2651 int i; 2652 2653 for (i = baseminor; i < baseminor + count; i++) { 2654 cdev = cdev_alloc(); 2655 cdev->ops = fops; 2656 kobject_set_name(&cdev->kobj, name); 2657 2658 ret = cdev_add(cdev, makedev(major, i), 1); 2659 if (ret != 0) 2660 break; 2661 } 2662 return (ret); 2663 } 2664 2665 int 2666 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2667 unsigned int count, const char *name, 2668 const struct file_operations *fops, uid_t uid, 2669 gid_t gid, int mode) 2670 { 2671 struct linux_cdev *cdev; 2672 int ret = 0; 2673 int i; 2674 2675 for (i = baseminor; i < baseminor + count; i++) { 2676 cdev = cdev_alloc(); 2677 cdev->ops = fops; 2678 kobject_set_name(&cdev->kobj, name); 2679 2680 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2681 if (ret != 0) 2682 break; 2683 } 2684 return (ret); 2685 } 2686 2687 void 2688 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2689 unsigned int count, const char *name) 2690 { 2691 struct linux_cdev *cdevp; 2692 int i; 2693 2694 for (i = baseminor; i < baseminor + count; i++) { 2695 cdevp = linux_find_cdev(name, major, i); 2696 if (cdevp != NULL) 2697 cdev_del(cdevp); 2698 } 2699 } 2700 2701 void 2702 linux_dump_stack(void) 2703 { 2704 #ifdef STACK 2705 struct stack st; 2706 2707 stack_save(&st); 2708 stack_print(&st); 2709 #endif 2710 } 2711 2712 int 2713 linuxkpi_net_ratelimit(void) 2714 { 2715 2716 return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, 2717 lkpi_net_maxpps)); 2718 } 2719 2720 struct io_mapping * 2721 io_mapping_create_wc(resource_size_t base, unsigned long size) 2722 { 2723 struct io_mapping *mapping; 2724 2725 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 2726 if (mapping == NULL) 2727 return (NULL); 2728 return (io_mapping_init_wc(mapping, base, size)); 2729 } 2730 2731 #if defined(__i386__) || defined(__amd64__) 2732 bool linux_cpu_has_clflush; 2733 #endif 2734 2735 static void 2736 linux_compat_init(void *arg) 2737 { 2738 struct sysctl_oid *rootoid; 2739 int i; 2740 2741 #if defined(__i386__) || defined(__amd64__) 2742 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2743 #endif 2744 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2745 2746 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2747 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2748 kobject_init(&linux_class_root, &linux_class_ktype); 2749 kobject_set_name(&linux_class_root, "class"); 2750 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2751 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2752 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2753 kobject_set_name(&linux_root_device.kobj, "device"); 2754 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2755 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2756 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2757 linux_root_device.bsddev = root_bus; 2758 linux_class_misc.name = "misc"; 2759 class_register(&linux_class_misc); 2760 INIT_LIST_HEAD(&pci_drivers); 2761 INIT_LIST_HEAD(&pci_devices); 2762 spin_lock_init(&pci_lock); 2763 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2764 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2765 LIST_INIT(&vmmaphead[i]); 2766 init_waitqueue_head(&linux_bit_waitq); 2767 init_waitqueue_head(&linux_var_waitq); 2768 2769 CPU_COPY(&all_cpus, &cpu_online_mask); 2770 } 2771 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2772 2773 static void 2774 linux_compat_uninit(void *arg) 2775 { 2776 linux_kobject_kfree_name(&linux_class_root); 2777 linux_kobject_kfree_name(&linux_root_device.kobj); 2778 linux_kobject_kfree_name(&linux_class_misc.kobj); 2779 2780 mtx_destroy(&vmmaplock); 2781 spin_lock_destroy(&pci_lock); 2782 rw_destroy(&linux_vma_lock); 2783 } 2784 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2785 2786 /* 2787 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2788 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2789 * used. Assert these types have the same size, else some parts of the 2790 * LinuxKPI may not work like expected: 2791 */ 2792 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2793