1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/sysent.h> 55 #include <sys/time.h> 56 #include <sys/user.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_object.h> 61 #include <vm/vm_page.h> 62 #include <vm/vm_pager.h> 63 64 #include <machine/stdarg.h> 65 66 #if defined(__i386__) || defined(__amd64__) 67 #include <machine/md_var.h> 68 #endif 69 70 #include <linux/kobject.h> 71 #include <linux/cpu.h> 72 #include <linux/device.h> 73 #include <linux/slab.h> 74 #include <linux/module.h> 75 #include <linux/moduleparam.h> 76 #include <linux/cdev.h> 77 #include <linux/file.h> 78 #include <linux/sysfs.h> 79 #include <linux/mm.h> 80 #include <linux/io.h> 81 #include <linux/vmalloc.h> 82 #include <linux/netdevice.h> 83 #include <linux/timer.h> 84 #include <linux/interrupt.h> 85 #include <linux/uaccess.h> 86 #include <linux/list.h> 87 #include <linux/kthread.h> 88 #include <linux/kernel.h> 89 #include <linux/compat.h> 90 #include <linux/io-mapping.h> 91 #include <linux/poll.h> 92 #include <linux/smp.h> 93 #include <linux/wait_bit.h> 94 #include <linux/rcupdate.h> 95 #include <linux/interval_tree.h> 96 #include <linux/interval_tree_generic.h> 97 98 #if defined(__i386__) || defined(__amd64__) 99 #include <asm/smp.h> 100 #include <asm/processor.h> 101 #endif 102 103 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 104 "LinuxKPI parameters"); 105 106 int linuxkpi_debug; 107 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 108 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 109 110 int linuxkpi_warn_dump_stack = 0; 111 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, 112 &linuxkpi_warn_dump_stack, 0, 113 "Set to enable stack traces from WARN_ON(). Clear to disable."); 114 115 static struct timeval lkpi_net_lastlog; 116 static int lkpi_net_curpps; 117 static int lkpi_net_maxpps = 99; 118 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 119 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 120 121 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 122 123 #include <linux/rbtree.h> 124 /* Undo Linux compat changes. */ 125 #undef RB_ROOT 126 #undef file 127 #undef cdev 128 #define RB_ROOT(head) (head)->rbh_root 129 130 static void linux_destroy_dev(struct linux_cdev *); 131 static void linux_cdev_deref(struct linux_cdev *ldev); 132 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 133 134 cpumask_t cpu_online_mask; 135 struct kobject linux_class_root; 136 struct device linux_root_device; 137 struct class linux_class_misc; 138 struct list_head pci_drivers; 139 struct list_head pci_devices; 140 spinlock_t pci_lock; 141 142 unsigned long linux_timer_hz_mask; 143 144 wait_queue_head_t linux_bit_waitq; 145 wait_queue_head_t linux_var_waitq; 146 147 int 148 panic_cmp(struct rb_node *one, struct rb_node *two) 149 { 150 panic("no cmp"); 151 } 152 153 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 154 155 #define START(node) ((node)->start) 156 #define LAST(node) ((node)->last) 157 158 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, 159 LAST,, lkpi_interval_tree) 160 161 struct kobject * 162 kobject_create(void) 163 { 164 struct kobject *kobj; 165 166 kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); 167 if (kobj == NULL) 168 return (NULL); 169 kobject_init(kobj, &linux_kfree_type); 170 171 return (kobj); 172 } 173 174 175 int 176 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 177 { 178 va_list tmp_va; 179 int len; 180 char *old; 181 char *name; 182 char dummy; 183 184 old = kobj->name; 185 186 if (old && fmt == NULL) 187 return (0); 188 189 /* compute length of string */ 190 va_copy(tmp_va, args); 191 len = vsnprintf(&dummy, 0, fmt, tmp_va); 192 va_end(tmp_va); 193 194 /* account for zero termination */ 195 len++; 196 197 /* check for error */ 198 if (len < 1) 199 return (-EINVAL); 200 201 /* allocate memory for string */ 202 name = kzalloc(len, GFP_KERNEL); 203 if (name == NULL) 204 return (-ENOMEM); 205 vsnprintf(name, len, fmt, args); 206 kobj->name = name; 207 208 /* free old string */ 209 kfree(old); 210 211 /* filter new string */ 212 for (; *name != '\0'; name++) 213 if (*name == '/') 214 *name = '!'; 215 return (0); 216 } 217 218 int 219 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 220 { 221 va_list args; 222 int error; 223 224 va_start(args, fmt); 225 error = kobject_set_name_vargs(kobj, fmt, args); 226 va_end(args); 227 228 return (error); 229 } 230 231 static int 232 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 233 { 234 const struct kobj_type *t; 235 int error; 236 237 kobj->parent = parent; 238 error = sysfs_create_dir(kobj); 239 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 240 struct attribute **attr; 241 t = kobj->ktype; 242 243 for (attr = t->default_attrs; *attr != NULL; attr++) { 244 error = sysfs_create_file(kobj, *attr); 245 if (error) 246 break; 247 } 248 if (error) 249 sysfs_remove_dir(kobj); 250 } 251 return (error); 252 } 253 254 int 255 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 256 { 257 va_list args; 258 int error; 259 260 va_start(args, fmt); 261 error = kobject_set_name_vargs(kobj, fmt, args); 262 va_end(args); 263 if (error) 264 return (error); 265 266 return kobject_add_complete(kobj, parent); 267 } 268 269 void 270 linux_kobject_release(struct kref *kref) 271 { 272 struct kobject *kobj; 273 char *name; 274 275 kobj = container_of(kref, struct kobject, kref); 276 sysfs_remove_dir(kobj); 277 name = kobj->name; 278 if (kobj->ktype && kobj->ktype->release) 279 kobj->ktype->release(kobj); 280 kfree(name); 281 } 282 283 static void 284 linux_kobject_kfree(struct kobject *kobj) 285 { 286 kfree(kobj); 287 } 288 289 static void 290 linux_kobject_kfree_name(struct kobject *kobj) 291 { 292 if (kobj) { 293 kfree(kobj->name); 294 } 295 } 296 297 const struct kobj_type linux_kfree_type = { 298 .release = linux_kobject_kfree 299 }; 300 301 static ssize_t 302 lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) 303 { 304 struct kobj_attribute *ka = 305 container_of(attr, struct kobj_attribute, attr); 306 307 if (ka->show == NULL) 308 return (-EIO); 309 310 return (ka->show(kobj, ka, buf)); 311 } 312 313 static ssize_t 314 lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr, 315 const char *buf, size_t count) 316 { 317 struct kobj_attribute *ka = 318 container_of(attr, struct kobj_attribute, attr); 319 320 if (ka->store == NULL) 321 return (-EIO); 322 323 return (ka->store(kobj, ka, buf, count)); 324 } 325 326 const struct sysfs_ops kobj_sysfs_ops = { 327 .show = lkpi_kobj_attr_show, 328 .store = lkpi_kobj_attr_store, 329 }; 330 331 static void 332 linux_device_release(struct device *dev) 333 { 334 pr_debug("linux_device_release: %s\n", dev_name(dev)); 335 kfree(dev); 336 } 337 338 static ssize_t 339 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 340 { 341 struct class_attribute *dattr; 342 ssize_t error; 343 344 dattr = container_of(attr, struct class_attribute, attr); 345 error = -EIO; 346 if (dattr->show) 347 error = dattr->show(container_of(kobj, struct class, kobj), 348 dattr, buf); 349 return (error); 350 } 351 352 static ssize_t 353 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 354 size_t count) 355 { 356 struct class_attribute *dattr; 357 ssize_t error; 358 359 dattr = container_of(attr, struct class_attribute, attr); 360 error = -EIO; 361 if (dattr->store) 362 error = dattr->store(container_of(kobj, struct class, kobj), 363 dattr, buf, count); 364 return (error); 365 } 366 367 static void 368 linux_class_release(struct kobject *kobj) 369 { 370 struct class *class; 371 372 class = container_of(kobj, struct class, kobj); 373 if (class->class_release) 374 class->class_release(class); 375 } 376 377 static const struct sysfs_ops linux_class_sysfs = { 378 .show = linux_class_show, 379 .store = linux_class_store, 380 }; 381 382 const struct kobj_type linux_class_ktype = { 383 .release = linux_class_release, 384 .sysfs_ops = &linux_class_sysfs 385 }; 386 387 static void 388 linux_dev_release(struct kobject *kobj) 389 { 390 struct device *dev; 391 392 dev = container_of(kobj, struct device, kobj); 393 /* This is the precedence defined by linux. */ 394 if (dev->release) 395 dev->release(dev); 396 else if (dev->class && dev->class->dev_release) 397 dev->class->dev_release(dev); 398 } 399 400 static ssize_t 401 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 402 { 403 struct device_attribute *dattr; 404 ssize_t error; 405 406 dattr = container_of(attr, struct device_attribute, attr); 407 error = -EIO; 408 if (dattr->show) 409 error = dattr->show(container_of(kobj, struct device, kobj), 410 dattr, buf); 411 return (error); 412 } 413 414 static ssize_t 415 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 416 size_t count) 417 { 418 struct device_attribute *dattr; 419 ssize_t error; 420 421 dattr = container_of(attr, struct device_attribute, attr); 422 error = -EIO; 423 if (dattr->store) 424 error = dattr->store(container_of(kobj, struct device, kobj), 425 dattr, buf, count); 426 return (error); 427 } 428 429 static const struct sysfs_ops linux_dev_sysfs = { 430 .show = linux_dev_show, 431 .store = linux_dev_store, 432 }; 433 434 const struct kobj_type linux_dev_ktype = { 435 .release = linux_dev_release, 436 .sysfs_ops = &linux_dev_sysfs 437 }; 438 439 struct device * 440 device_create(struct class *class, struct device *parent, dev_t devt, 441 void *drvdata, const char *fmt, ...) 442 { 443 struct device *dev; 444 va_list args; 445 446 dev = kzalloc(sizeof(*dev), M_WAITOK); 447 dev->parent = parent; 448 dev->class = class; 449 dev->devt = devt; 450 dev->driver_data = drvdata; 451 dev->release = linux_device_release; 452 va_start(args, fmt); 453 kobject_set_name_vargs(&dev->kobj, fmt, args); 454 va_end(args); 455 device_register(dev); 456 457 return (dev); 458 } 459 460 struct device * 461 device_create_groups_vargs(struct class *class, struct device *parent, 462 dev_t devt, void *drvdata, const struct attribute_group **groups, 463 const char *fmt, va_list args) 464 { 465 struct device *dev = NULL; 466 int retval = -ENODEV; 467 468 if (class == NULL || IS_ERR(class)) 469 goto error; 470 471 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 472 if (!dev) { 473 retval = -ENOMEM; 474 goto error; 475 } 476 477 dev->devt = devt; 478 dev->class = class; 479 dev->parent = parent; 480 dev->groups = groups; 481 dev->release = device_create_release; 482 /* device_initialize() needs the class and parent to be set */ 483 device_initialize(dev); 484 dev_set_drvdata(dev, drvdata); 485 486 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 487 if (retval) 488 goto error; 489 490 retval = device_add(dev); 491 if (retval) 492 goto error; 493 494 return dev; 495 496 error: 497 put_device(dev); 498 return ERR_PTR(retval); 499 } 500 501 struct class * 502 class_create(struct module *owner, const char *name) 503 { 504 struct class *class; 505 int error; 506 507 class = kzalloc(sizeof(*class), M_WAITOK); 508 class->owner = owner; 509 class->name = name; 510 class->class_release = linux_class_kfree; 511 error = class_register(class); 512 if (error) { 513 kfree(class); 514 return (NULL); 515 } 516 517 return (class); 518 } 519 520 int 521 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 522 struct kobject *parent, const char *fmt, ...) 523 { 524 va_list args; 525 int error; 526 527 kobject_init(kobj, ktype); 528 kobj->ktype = ktype; 529 kobj->parent = parent; 530 kobj->name = NULL; 531 532 va_start(args, fmt); 533 error = kobject_set_name_vargs(kobj, fmt, args); 534 va_end(args); 535 if (error) 536 return (error); 537 return kobject_add_complete(kobj, parent); 538 } 539 540 static void 541 linux_kq_lock(void *arg) 542 { 543 spinlock_t *s = arg; 544 545 spin_lock(s); 546 } 547 static void 548 linux_kq_unlock(void *arg) 549 { 550 spinlock_t *s = arg; 551 552 spin_unlock(s); 553 } 554 555 static void 556 linux_kq_assert_lock(void *arg, int what) 557 { 558 #ifdef INVARIANTS 559 spinlock_t *s = arg; 560 561 if (what == LA_LOCKED) 562 mtx_assert(&s->m, MA_OWNED); 563 else 564 mtx_assert(&s->m, MA_NOTOWNED); 565 #endif 566 } 567 568 static void 569 linux_file_kqfilter_poll(struct linux_file *, int); 570 571 struct linux_file * 572 linux_file_alloc(void) 573 { 574 struct linux_file *filp; 575 576 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 577 578 /* set initial refcount */ 579 filp->f_count = 1; 580 581 /* setup fields needed by kqueue support */ 582 spin_lock_init(&filp->f_kqlock); 583 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 584 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 585 586 return (filp); 587 } 588 589 void 590 linux_file_free(struct linux_file *filp) 591 { 592 if (filp->_file == NULL) { 593 if (filp->f_op != NULL && filp->f_op->release != NULL) 594 filp->f_op->release(filp->f_vnode, filp); 595 if (filp->f_shmem != NULL) 596 vm_object_deallocate(filp->f_shmem); 597 kfree_rcu(filp, rcu); 598 } else { 599 /* 600 * The close method of the character device or file 601 * will free the linux_file structure: 602 */ 603 _fdrop(filp->_file, curthread); 604 } 605 } 606 607 struct linux_cdev * 608 cdev_alloc(void) 609 { 610 struct linux_cdev *cdev; 611 612 cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); 613 kobject_init(&cdev->kobj, &linux_cdev_ktype); 614 cdev->refs = 1; 615 return (cdev); 616 } 617 618 static int 619 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 620 vm_page_t *mres) 621 { 622 struct vm_area_struct *vmap; 623 624 vmap = linux_cdev_handle_find(vm_obj->handle); 625 626 MPASS(vmap != NULL); 627 MPASS(vmap->vm_private_data == vm_obj->handle); 628 629 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 630 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 631 vm_page_t page; 632 633 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 634 /* 635 * If the passed in result page is a fake 636 * page, update it with the new physical 637 * address. 638 */ 639 page = *mres; 640 vm_page_updatefake(page, paddr, vm_obj->memattr); 641 } else { 642 /* 643 * Replace the passed in "mres" page with our 644 * own fake page and free up the all of the 645 * original pages. 646 */ 647 VM_OBJECT_WUNLOCK(vm_obj); 648 page = vm_page_getfake(paddr, vm_obj->memattr); 649 VM_OBJECT_WLOCK(vm_obj); 650 651 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 652 *mres = page; 653 } 654 vm_page_valid(page); 655 return (VM_PAGER_OK); 656 } 657 return (VM_PAGER_FAIL); 658 } 659 660 static int 661 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 662 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 663 { 664 struct vm_area_struct *vmap; 665 int err; 666 667 /* get VM area structure */ 668 vmap = linux_cdev_handle_find(vm_obj->handle); 669 MPASS(vmap != NULL); 670 MPASS(vmap->vm_private_data == vm_obj->handle); 671 672 VM_OBJECT_WUNLOCK(vm_obj); 673 674 linux_set_current(curthread); 675 676 down_write(&vmap->vm_mm->mmap_sem); 677 if (unlikely(vmap->vm_ops == NULL)) { 678 err = VM_FAULT_SIGBUS; 679 } else { 680 struct vm_fault vmf; 681 682 /* fill out VM fault structure */ 683 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 684 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 685 vmf.pgoff = 0; 686 vmf.page = NULL; 687 vmf.vma = vmap; 688 689 vmap->vm_pfn_count = 0; 690 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 691 vmap->vm_obj = vm_obj; 692 693 err = vmap->vm_ops->fault(&vmf); 694 695 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 696 kern_yield(PRI_USER); 697 err = vmap->vm_ops->fault(&vmf); 698 } 699 } 700 701 /* translate return code */ 702 switch (err) { 703 case VM_FAULT_OOM: 704 err = VM_PAGER_AGAIN; 705 break; 706 case VM_FAULT_SIGBUS: 707 err = VM_PAGER_BAD; 708 break; 709 case VM_FAULT_NOPAGE: 710 /* 711 * By contract the fault handler will return having 712 * busied all the pages itself. If pidx is already 713 * found in the object, it will simply xbusy the first 714 * page and return with vm_pfn_count set to 1. 715 */ 716 *first = vmap->vm_pfn_first; 717 *last = *first + vmap->vm_pfn_count - 1; 718 err = VM_PAGER_OK; 719 break; 720 default: 721 err = VM_PAGER_ERROR; 722 break; 723 } 724 up_write(&vmap->vm_mm->mmap_sem); 725 VM_OBJECT_WLOCK(vm_obj); 726 return (err); 727 } 728 729 static struct rwlock linux_vma_lock; 730 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 731 TAILQ_HEAD_INITIALIZER(linux_vma_head); 732 733 static void 734 linux_cdev_handle_free(struct vm_area_struct *vmap) 735 { 736 /* Drop reference on vm_file */ 737 if (vmap->vm_file != NULL) 738 fput(vmap->vm_file); 739 740 /* Drop reference on mm_struct */ 741 mmput(vmap->vm_mm); 742 743 kfree(vmap); 744 } 745 746 static void 747 linux_cdev_handle_remove(struct vm_area_struct *vmap) 748 { 749 rw_wlock(&linux_vma_lock); 750 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 751 rw_wunlock(&linux_vma_lock); 752 } 753 754 static struct vm_area_struct * 755 linux_cdev_handle_find(void *handle) 756 { 757 struct vm_area_struct *vmap; 758 759 rw_rlock(&linux_vma_lock); 760 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 761 if (vmap->vm_private_data == handle) 762 break; 763 } 764 rw_runlock(&linux_vma_lock); 765 return (vmap); 766 } 767 768 static int 769 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 770 vm_ooffset_t foff, struct ucred *cred, u_short *color) 771 { 772 773 MPASS(linux_cdev_handle_find(handle) != NULL); 774 *color = 0; 775 return (0); 776 } 777 778 static void 779 linux_cdev_pager_dtor(void *handle) 780 { 781 const struct vm_operations_struct *vm_ops; 782 struct vm_area_struct *vmap; 783 784 vmap = linux_cdev_handle_find(handle); 785 MPASS(vmap != NULL); 786 787 /* 788 * Remove handle before calling close operation to prevent 789 * other threads from reusing the handle pointer. 790 */ 791 linux_cdev_handle_remove(vmap); 792 793 down_write(&vmap->vm_mm->mmap_sem); 794 vm_ops = vmap->vm_ops; 795 if (likely(vm_ops != NULL)) 796 vm_ops->close(vmap); 797 up_write(&vmap->vm_mm->mmap_sem); 798 799 linux_cdev_handle_free(vmap); 800 } 801 802 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 803 { 804 /* OBJT_MGTDEVICE */ 805 .cdev_pg_populate = linux_cdev_pager_populate, 806 .cdev_pg_ctor = linux_cdev_pager_ctor, 807 .cdev_pg_dtor = linux_cdev_pager_dtor 808 }, 809 { 810 /* OBJT_DEVICE */ 811 .cdev_pg_fault = linux_cdev_pager_fault, 812 .cdev_pg_ctor = linux_cdev_pager_ctor, 813 .cdev_pg_dtor = linux_cdev_pager_dtor 814 }, 815 }; 816 817 int 818 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 819 unsigned long size) 820 { 821 vm_object_t obj; 822 vm_page_t m; 823 824 obj = vma->vm_obj; 825 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 826 return (-ENOTSUP); 827 VM_OBJECT_RLOCK(obj); 828 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 829 m != NULL && m->pindex < OFF_TO_IDX(address + size); 830 m = TAILQ_NEXT(m, listq)) 831 pmap_remove_all(m); 832 VM_OBJECT_RUNLOCK(obj); 833 return (0); 834 } 835 836 static struct file_operations dummy_ldev_ops = { 837 /* XXXKIB */ 838 }; 839 840 static struct linux_cdev dummy_ldev = { 841 .ops = &dummy_ldev_ops, 842 }; 843 844 #define LDEV_SI_DTR 0x0001 845 #define LDEV_SI_REF 0x0002 846 847 static void 848 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 849 struct linux_cdev **dev) 850 { 851 struct linux_cdev *ldev; 852 u_int siref; 853 854 ldev = filp->f_cdev; 855 *fop = filp->f_op; 856 if (ldev != NULL) { 857 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 858 refcount_acquire(&ldev->refs); 859 } else { 860 for (siref = ldev->siref;;) { 861 if ((siref & LDEV_SI_DTR) != 0) { 862 ldev = &dummy_ldev; 863 *fop = ldev->ops; 864 siref = ldev->siref; 865 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 866 } else if (atomic_fcmpset_int(&ldev->siref, 867 &siref, siref + LDEV_SI_REF)) { 868 break; 869 } 870 } 871 } 872 } 873 *dev = ldev; 874 } 875 876 static void 877 linux_drop_fop(struct linux_cdev *ldev) 878 { 879 880 if (ldev == NULL) 881 return; 882 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 883 linux_cdev_deref(ldev); 884 } else { 885 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 886 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 887 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 888 } 889 } 890 891 #define OPW(fp,td,code) ({ \ 892 struct file *__fpop; \ 893 __typeof(code) __retval; \ 894 \ 895 __fpop = (td)->td_fpop; \ 896 (td)->td_fpop = (fp); \ 897 __retval = (code); \ 898 (td)->td_fpop = __fpop; \ 899 __retval; \ 900 }) 901 902 static int 903 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 904 struct file *file) 905 { 906 struct linux_cdev *ldev; 907 struct linux_file *filp; 908 const struct file_operations *fop; 909 int error; 910 911 ldev = dev->si_drv1; 912 913 filp = linux_file_alloc(); 914 filp->f_dentry = &filp->f_dentry_store; 915 filp->f_op = ldev->ops; 916 filp->f_mode = file->f_flag; 917 filp->f_flags = file->f_flag; 918 filp->f_vnode = file->f_vnode; 919 filp->_file = file; 920 refcount_acquire(&ldev->refs); 921 filp->f_cdev = ldev; 922 923 linux_set_current(td); 924 linux_get_fop(filp, &fop, &ldev); 925 926 if (fop->open != NULL) { 927 error = -fop->open(file->f_vnode, filp); 928 if (error != 0) { 929 linux_drop_fop(ldev); 930 linux_cdev_deref(filp->f_cdev); 931 kfree(filp); 932 return (error); 933 } 934 } 935 936 /* hold on to the vnode - used for fstat() */ 937 vhold(filp->f_vnode); 938 939 /* release the file from devfs */ 940 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 941 linux_drop_fop(ldev); 942 return (ENXIO); 943 } 944 945 #define LINUX_IOCTL_MIN_PTR 0x10000UL 946 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 947 948 static inline int 949 linux_remap_address(void **uaddr, size_t len) 950 { 951 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 952 953 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 954 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 955 struct task_struct *pts = current; 956 if (pts == NULL) { 957 *uaddr = NULL; 958 return (1); 959 } 960 961 /* compute data offset */ 962 uaddr_val -= LINUX_IOCTL_MIN_PTR; 963 964 /* check that length is within bounds */ 965 if ((len > IOCPARM_MAX) || 966 (uaddr_val + len) > pts->bsd_ioctl_len) { 967 *uaddr = NULL; 968 return (1); 969 } 970 971 /* re-add kernel buffer address */ 972 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 973 974 /* update address location */ 975 *uaddr = (void *)uaddr_val; 976 return (1); 977 } 978 return (0); 979 } 980 981 int 982 linux_copyin(const void *uaddr, void *kaddr, size_t len) 983 { 984 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 985 if (uaddr == NULL) 986 return (-EFAULT); 987 memcpy(kaddr, uaddr, len); 988 return (0); 989 } 990 return (-copyin(uaddr, kaddr, len)); 991 } 992 993 int 994 linux_copyout(const void *kaddr, void *uaddr, size_t len) 995 { 996 if (linux_remap_address(&uaddr, len)) { 997 if (uaddr == NULL) 998 return (-EFAULT); 999 memcpy(uaddr, kaddr, len); 1000 return (0); 1001 } 1002 return (-copyout(kaddr, uaddr, len)); 1003 } 1004 1005 size_t 1006 linux_clear_user(void *_uaddr, size_t _len) 1007 { 1008 uint8_t *uaddr = _uaddr; 1009 size_t len = _len; 1010 1011 /* make sure uaddr is aligned before going into the fast loop */ 1012 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 1013 if (subyte(uaddr, 0)) 1014 return (_len); 1015 uaddr++; 1016 len--; 1017 } 1018 1019 /* zero 8 bytes at a time */ 1020 while (len > 7) { 1021 #ifdef __LP64__ 1022 if (suword64(uaddr, 0)) 1023 return (_len); 1024 #else 1025 if (suword32(uaddr, 0)) 1026 return (_len); 1027 if (suword32(uaddr + 4, 0)) 1028 return (_len); 1029 #endif 1030 uaddr += 8; 1031 len -= 8; 1032 } 1033 1034 /* zero fill end, if any */ 1035 while (len > 0) { 1036 if (subyte(uaddr, 0)) 1037 return (_len); 1038 uaddr++; 1039 len--; 1040 } 1041 return (0); 1042 } 1043 1044 int 1045 linux_access_ok(const void *uaddr, size_t len) 1046 { 1047 uintptr_t saddr; 1048 uintptr_t eaddr; 1049 1050 /* get start and end address */ 1051 saddr = (uintptr_t)uaddr; 1052 eaddr = (uintptr_t)uaddr + len; 1053 1054 /* verify addresses are valid for userspace */ 1055 return ((saddr == eaddr) || 1056 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 1057 } 1058 1059 /* 1060 * This function should return either EINTR or ERESTART depending on 1061 * the signal type sent to this thread: 1062 */ 1063 static int 1064 linux_get_error(struct task_struct *task, int error) 1065 { 1066 /* check for signal type interrupt code */ 1067 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 1068 error = -linux_schedule_get_interrupt_value(task); 1069 if (error == 0) 1070 error = EINTR; 1071 } 1072 return (error); 1073 } 1074 1075 static int 1076 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 1077 const struct file_operations *fop, u_long cmd, caddr_t data, 1078 struct thread *td) 1079 { 1080 struct task_struct *task = current; 1081 unsigned size; 1082 int error; 1083 1084 size = IOCPARM_LEN(cmd); 1085 /* refer to logic in sys_ioctl() */ 1086 if (size > 0) { 1087 /* 1088 * Setup hint for linux_copyin() and linux_copyout(). 1089 * 1090 * Background: Linux code expects a user-space address 1091 * while FreeBSD supplies a kernel-space address. 1092 */ 1093 task->bsd_ioctl_data = data; 1094 task->bsd_ioctl_len = size; 1095 data = (void *)LINUX_IOCTL_MIN_PTR; 1096 } else { 1097 /* fetch user-space pointer */ 1098 data = *(void **)data; 1099 } 1100 #ifdef COMPAT_FREEBSD32 1101 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 1102 /* try the compat IOCTL handler first */ 1103 if (fop->compat_ioctl != NULL) { 1104 error = -OPW(fp, td, fop->compat_ioctl(filp, 1105 cmd, (u_long)data)); 1106 } else { 1107 error = ENOTTY; 1108 } 1109 1110 /* fallback to the regular IOCTL handler, if any */ 1111 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 1112 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 1113 cmd, (u_long)data)); 1114 } 1115 } else 1116 #endif 1117 { 1118 if (fop->unlocked_ioctl != NULL) { 1119 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 1120 cmd, (u_long)data)); 1121 } else { 1122 error = ENOTTY; 1123 } 1124 } 1125 if (size > 0) { 1126 task->bsd_ioctl_data = NULL; 1127 task->bsd_ioctl_len = 0; 1128 } 1129 1130 if (error == EWOULDBLOCK) { 1131 /* update kqfilter status, if any */ 1132 linux_file_kqfilter_poll(filp, 1133 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1134 } else { 1135 error = linux_get_error(task, error); 1136 } 1137 return (error); 1138 } 1139 1140 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1141 1142 /* 1143 * This function atomically updates the poll wakeup state and returns 1144 * the previous state at the time of update. 1145 */ 1146 static uint8_t 1147 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1148 { 1149 int c, old; 1150 1151 c = v->counter; 1152 1153 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1154 c = old; 1155 1156 return (c); 1157 } 1158 1159 static int 1160 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1161 { 1162 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1163 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1164 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1165 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1166 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1167 }; 1168 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1169 1170 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1171 case LINUX_FWQ_STATE_QUEUED: 1172 linux_poll_wakeup(filp); 1173 return (1); 1174 default: 1175 return (0); 1176 } 1177 } 1178 1179 void 1180 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1181 { 1182 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1183 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1184 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1185 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1186 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1187 }; 1188 1189 /* check if we are called inside the select system call */ 1190 if (p == LINUX_POLL_TABLE_NORMAL) 1191 selrecord(curthread, &filp->f_selinfo); 1192 1193 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1194 case LINUX_FWQ_STATE_INIT: 1195 /* NOTE: file handles can only belong to one wait-queue */ 1196 filp->f_wait_queue.wqh = wqh; 1197 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1198 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1199 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1200 break; 1201 default: 1202 break; 1203 } 1204 } 1205 1206 static void 1207 linux_poll_wait_dequeue(struct linux_file *filp) 1208 { 1209 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1210 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1211 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1212 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1213 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1214 }; 1215 1216 seldrain(&filp->f_selinfo); 1217 1218 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1219 case LINUX_FWQ_STATE_NOT_READY: 1220 case LINUX_FWQ_STATE_QUEUED: 1221 case LINUX_FWQ_STATE_READY: 1222 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1223 break; 1224 default: 1225 break; 1226 } 1227 } 1228 1229 void 1230 linux_poll_wakeup(struct linux_file *filp) 1231 { 1232 /* this function should be NULL-safe */ 1233 if (filp == NULL) 1234 return; 1235 1236 selwakeup(&filp->f_selinfo); 1237 1238 spin_lock(&filp->f_kqlock); 1239 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1240 LINUX_KQ_FLAG_NEED_WRITE; 1241 1242 /* make sure the "knote" gets woken up */ 1243 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1244 spin_unlock(&filp->f_kqlock); 1245 } 1246 1247 static void 1248 linux_file_kqfilter_detach(struct knote *kn) 1249 { 1250 struct linux_file *filp = kn->kn_hook; 1251 1252 spin_lock(&filp->f_kqlock); 1253 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1254 spin_unlock(&filp->f_kqlock); 1255 } 1256 1257 static int 1258 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1259 { 1260 struct linux_file *filp = kn->kn_hook; 1261 1262 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1263 1264 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1265 } 1266 1267 static int 1268 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1269 { 1270 struct linux_file *filp = kn->kn_hook; 1271 1272 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1273 1274 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1275 } 1276 1277 static struct filterops linux_dev_kqfiltops_read = { 1278 .f_isfd = 1, 1279 .f_detach = linux_file_kqfilter_detach, 1280 .f_event = linux_file_kqfilter_read_event, 1281 }; 1282 1283 static struct filterops linux_dev_kqfiltops_write = { 1284 .f_isfd = 1, 1285 .f_detach = linux_file_kqfilter_detach, 1286 .f_event = linux_file_kqfilter_write_event, 1287 }; 1288 1289 static void 1290 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1291 { 1292 struct thread *td; 1293 const struct file_operations *fop; 1294 struct linux_cdev *ldev; 1295 int temp; 1296 1297 if ((filp->f_kqflags & kqflags) == 0) 1298 return; 1299 1300 td = curthread; 1301 1302 linux_get_fop(filp, &fop, &ldev); 1303 /* get the latest polling state */ 1304 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1305 linux_drop_fop(ldev); 1306 1307 spin_lock(&filp->f_kqlock); 1308 /* clear kqflags */ 1309 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1310 LINUX_KQ_FLAG_NEED_WRITE); 1311 /* update kqflags */ 1312 if ((temp & (POLLIN | POLLOUT)) != 0) { 1313 if ((temp & POLLIN) != 0) 1314 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1315 if ((temp & POLLOUT) != 0) 1316 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1317 1318 /* make sure the "knote" gets woken up */ 1319 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1320 } 1321 spin_unlock(&filp->f_kqlock); 1322 } 1323 1324 static int 1325 linux_file_kqfilter(struct file *file, struct knote *kn) 1326 { 1327 struct linux_file *filp; 1328 struct thread *td; 1329 int error; 1330 1331 td = curthread; 1332 filp = (struct linux_file *)file->f_data; 1333 filp->f_flags = file->f_flag; 1334 if (filp->f_op->poll == NULL) 1335 return (EINVAL); 1336 1337 spin_lock(&filp->f_kqlock); 1338 switch (kn->kn_filter) { 1339 case EVFILT_READ: 1340 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1341 kn->kn_fop = &linux_dev_kqfiltops_read; 1342 kn->kn_hook = filp; 1343 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1344 error = 0; 1345 break; 1346 case EVFILT_WRITE: 1347 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1348 kn->kn_fop = &linux_dev_kqfiltops_write; 1349 kn->kn_hook = filp; 1350 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1351 error = 0; 1352 break; 1353 default: 1354 error = EINVAL; 1355 break; 1356 } 1357 spin_unlock(&filp->f_kqlock); 1358 1359 if (error == 0) { 1360 linux_set_current(td); 1361 1362 /* update kqfilter status, if any */ 1363 linux_file_kqfilter_poll(filp, 1364 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1365 } 1366 return (error); 1367 } 1368 1369 static int 1370 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1371 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1372 int nprot, bool is_shared, struct thread *td) 1373 { 1374 struct task_struct *task; 1375 struct vm_area_struct *vmap; 1376 struct mm_struct *mm; 1377 struct linux_file *filp; 1378 vm_memattr_t attr; 1379 int error; 1380 1381 filp = (struct linux_file *)fp->f_data; 1382 filp->f_flags = fp->f_flag; 1383 1384 if (fop->mmap == NULL) 1385 return (EOPNOTSUPP); 1386 1387 linux_set_current(td); 1388 1389 /* 1390 * The same VM object might be shared by multiple processes 1391 * and the mm_struct is usually freed when a process exits. 1392 * 1393 * The atomic reference below makes sure the mm_struct is 1394 * available as long as the vmap is in the linux_vma_head. 1395 */ 1396 task = current; 1397 mm = task->mm; 1398 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1399 return (EINVAL); 1400 1401 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1402 vmap->vm_start = 0; 1403 vmap->vm_end = size; 1404 vmap->vm_pgoff = *offset / PAGE_SIZE; 1405 vmap->vm_pfn = 0; 1406 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1407 if (is_shared) 1408 vmap->vm_flags |= VM_SHARED; 1409 vmap->vm_ops = NULL; 1410 vmap->vm_file = get_file(filp); 1411 vmap->vm_mm = mm; 1412 1413 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1414 error = linux_get_error(task, EINTR); 1415 } else { 1416 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1417 error = linux_get_error(task, error); 1418 up_write(&vmap->vm_mm->mmap_sem); 1419 } 1420 1421 if (error != 0) { 1422 linux_cdev_handle_free(vmap); 1423 return (error); 1424 } 1425 1426 attr = pgprot2cachemode(vmap->vm_page_prot); 1427 1428 if (vmap->vm_ops != NULL) { 1429 struct vm_area_struct *ptr; 1430 void *vm_private_data; 1431 bool vm_no_fault; 1432 1433 if (vmap->vm_ops->open == NULL || 1434 vmap->vm_ops->close == NULL || 1435 vmap->vm_private_data == NULL) { 1436 /* free allocated VM area struct */ 1437 linux_cdev_handle_free(vmap); 1438 return (EINVAL); 1439 } 1440 1441 vm_private_data = vmap->vm_private_data; 1442 1443 rw_wlock(&linux_vma_lock); 1444 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1445 if (ptr->vm_private_data == vm_private_data) 1446 break; 1447 } 1448 /* check if there is an existing VM area struct */ 1449 if (ptr != NULL) { 1450 /* check if the VM area structure is invalid */ 1451 if (ptr->vm_ops == NULL || 1452 ptr->vm_ops->open == NULL || 1453 ptr->vm_ops->close == NULL) { 1454 error = ESTALE; 1455 vm_no_fault = 1; 1456 } else { 1457 error = EEXIST; 1458 vm_no_fault = (ptr->vm_ops->fault == NULL); 1459 } 1460 } else { 1461 /* insert VM area structure into list */ 1462 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1463 error = 0; 1464 vm_no_fault = (vmap->vm_ops->fault == NULL); 1465 } 1466 rw_wunlock(&linux_vma_lock); 1467 1468 if (error != 0) { 1469 /* free allocated VM area struct */ 1470 linux_cdev_handle_free(vmap); 1471 /* check for stale VM area struct */ 1472 if (error != EEXIST) 1473 return (error); 1474 } 1475 1476 /* check if there is no fault handler */ 1477 if (vm_no_fault) { 1478 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1479 &linux_cdev_pager_ops[1], size, nprot, *offset, 1480 td->td_ucred); 1481 } else { 1482 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1483 &linux_cdev_pager_ops[0], size, nprot, *offset, 1484 td->td_ucred); 1485 } 1486 1487 /* check if allocating the VM object failed */ 1488 if (*object == NULL) { 1489 if (error == 0) { 1490 /* remove VM area struct from list */ 1491 linux_cdev_handle_remove(vmap); 1492 /* free allocated VM area struct */ 1493 linux_cdev_handle_free(vmap); 1494 } 1495 return (EINVAL); 1496 } 1497 } else { 1498 struct sglist *sg; 1499 1500 sg = sglist_alloc(1, M_WAITOK); 1501 sglist_append_phys(sg, 1502 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1503 1504 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1505 nprot, 0, td->td_ucred); 1506 1507 linux_cdev_handle_free(vmap); 1508 1509 if (*object == NULL) { 1510 sglist_free(sg); 1511 return (EINVAL); 1512 } 1513 } 1514 1515 if (attr != VM_MEMATTR_DEFAULT) { 1516 VM_OBJECT_WLOCK(*object); 1517 vm_object_set_memattr(*object, attr); 1518 VM_OBJECT_WUNLOCK(*object); 1519 } 1520 *offset = 0; 1521 return (0); 1522 } 1523 1524 struct cdevsw linuxcdevsw = { 1525 .d_version = D_VERSION, 1526 .d_fdopen = linux_dev_fdopen, 1527 .d_name = "lkpidev", 1528 }; 1529 1530 static int 1531 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1532 int flags, struct thread *td) 1533 { 1534 struct linux_file *filp; 1535 const struct file_operations *fop; 1536 struct linux_cdev *ldev; 1537 ssize_t bytes; 1538 int error; 1539 1540 error = 0; 1541 filp = (struct linux_file *)file->f_data; 1542 filp->f_flags = file->f_flag; 1543 /* XXX no support for I/O vectors currently */ 1544 if (uio->uio_iovcnt != 1) 1545 return (EOPNOTSUPP); 1546 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1547 return (EINVAL); 1548 linux_set_current(td); 1549 linux_get_fop(filp, &fop, &ldev); 1550 if (fop->read != NULL) { 1551 bytes = OPW(file, td, fop->read(filp, 1552 uio->uio_iov->iov_base, 1553 uio->uio_iov->iov_len, &uio->uio_offset)); 1554 if (bytes >= 0) { 1555 uio->uio_iov->iov_base = 1556 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1557 uio->uio_iov->iov_len -= bytes; 1558 uio->uio_resid -= bytes; 1559 } else { 1560 error = linux_get_error(current, -bytes); 1561 } 1562 } else 1563 error = ENXIO; 1564 1565 /* update kqfilter status, if any */ 1566 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1567 linux_drop_fop(ldev); 1568 1569 return (error); 1570 } 1571 1572 static int 1573 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1574 int flags, struct thread *td) 1575 { 1576 struct linux_file *filp; 1577 const struct file_operations *fop; 1578 struct linux_cdev *ldev; 1579 ssize_t bytes; 1580 int error; 1581 1582 filp = (struct linux_file *)file->f_data; 1583 filp->f_flags = file->f_flag; 1584 /* XXX no support for I/O vectors currently */ 1585 if (uio->uio_iovcnt != 1) 1586 return (EOPNOTSUPP); 1587 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1588 return (EINVAL); 1589 linux_set_current(td); 1590 linux_get_fop(filp, &fop, &ldev); 1591 if (fop->write != NULL) { 1592 bytes = OPW(file, td, fop->write(filp, 1593 uio->uio_iov->iov_base, 1594 uio->uio_iov->iov_len, &uio->uio_offset)); 1595 if (bytes >= 0) { 1596 uio->uio_iov->iov_base = 1597 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1598 uio->uio_iov->iov_len -= bytes; 1599 uio->uio_resid -= bytes; 1600 error = 0; 1601 } else { 1602 error = linux_get_error(current, -bytes); 1603 } 1604 } else 1605 error = ENXIO; 1606 1607 /* update kqfilter status, if any */ 1608 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1609 1610 linux_drop_fop(ldev); 1611 1612 return (error); 1613 } 1614 1615 static int 1616 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1617 struct thread *td) 1618 { 1619 struct linux_file *filp; 1620 const struct file_operations *fop; 1621 struct linux_cdev *ldev; 1622 int revents; 1623 1624 filp = (struct linux_file *)file->f_data; 1625 filp->f_flags = file->f_flag; 1626 linux_set_current(td); 1627 linux_get_fop(filp, &fop, &ldev); 1628 if (fop->poll != NULL) { 1629 revents = OPW(file, td, fop->poll(filp, 1630 LINUX_POLL_TABLE_NORMAL)) & events; 1631 } else { 1632 revents = 0; 1633 } 1634 linux_drop_fop(ldev); 1635 return (revents); 1636 } 1637 1638 static int 1639 linux_file_close(struct file *file, struct thread *td) 1640 { 1641 struct linux_file *filp; 1642 int (*release)(struct inode *, struct linux_file *); 1643 const struct file_operations *fop; 1644 struct linux_cdev *ldev; 1645 int error; 1646 1647 filp = (struct linux_file *)file->f_data; 1648 1649 KASSERT(file_count(filp) == 0, 1650 ("File refcount(%d) is not zero", file_count(filp))); 1651 1652 if (td == NULL) 1653 td = curthread; 1654 1655 error = 0; 1656 filp->f_flags = file->f_flag; 1657 linux_set_current(td); 1658 linux_poll_wait_dequeue(filp); 1659 linux_get_fop(filp, &fop, &ldev); 1660 /* 1661 * Always use the real release function, if any, to avoid 1662 * leaking device resources: 1663 */ 1664 release = filp->f_op->release; 1665 if (release != NULL) 1666 error = -OPW(file, td, release(filp->f_vnode, filp)); 1667 funsetown(&filp->f_sigio); 1668 if (filp->f_vnode != NULL) 1669 vdrop(filp->f_vnode); 1670 linux_drop_fop(ldev); 1671 ldev = filp->f_cdev; 1672 if (ldev != NULL) 1673 linux_cdev_deref(ldev); 1674 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1675 kfree(filp); 1676 1677 return (error); 1678 } 1679 1680 static int 1681 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1682 struct thread *td) 1683 { 1684 struct linux_file *filp; 1685 const struct file_operations *fop; 1686 struct linux_cdev *ldev; 1687 struct fiodgname_arg *fgn; 1688 const char *p; 1689 int error, i; 1690 1691 error = 0; 1692 filp = (struct linux_file *)fp->f_data; 1693 filp->f_flags = fp->f_flag; 1694 linux_get_fop(filp, &fop, &ldev); 1695 1696 linux_set_current(td); 1697 switch (cmd) { 1698 case FIONBIO: 1699 break; 1700 case FIOASYNC: 1701 if (fop->fasync == NULL) 1702 break; 1703 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1704 break; 1705 case FIOSETOWN: 1706 error = fsetown(*(int *)data, &filp->f_sigio); 1707 if (error == 0) { 1708 if (fop->fasync == NULL) 1709 break; 1710 error = -OPW(fp, td, fop->fasync(0, filp, 1711 fp->f_flag & FASYNC)); 1712 } 1713 break; 1714 case FIOGETOWN: 1715 *(int *)data = fgetown(&filp->f_sigio); 1716 break; 1717 case FIODGNAME: 1718 #ifdef COMPAT_FREEBSD32 1719 case FIODGNAME_32: 1720 #endif 1721 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1722 error = ENXIO; 1723 break; 1724 } 1725 fgn = data; 1726 p = devtoname(filp->f_cdev->cdev); 1727 i = strlen(p) + 1; 1728 if (i > fgn->len) { 1729 error = EINVAL; 1730 break; 1731 } 1732 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1733 break; 1734 default: 1735 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1736 break; 1737 } 1738 linux_drop_fop(ldev); 1739 return (error); 1740 } 1741 1742 static int 1743 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1744 vm_prot_t maxprot, int flags, struct file *fp, 1745 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1746 { 1747 /* 1748 * Character devices do not provide private mappings 1749 * of any kind: 1750 */ 1751 if ((maxprot & VM_PROT_WRITE) == 0 && 1752 (prot & VM_PROT_WRITE) != 0) 1753 return (EACCES); 1754 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1755 return (EINVAL); 1756 1757 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1758 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1759 } 1760 1761 static int 1762 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1763 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1764 struct thread *td) 1765 { 1766 struct linux_file *filp; 1767 const struct file_operations *fop; 1768 struct linux_cdev *ldev; 1769 struct mount *mp; 1770 struct vnode *vp; 1771 vm_object_t object; 1772 vm_prot_t maxprot; 1773 int error; 1774 1775 filp = (struct linux_file *)fp->f_data; 1776 1777 vp = filp->f_vnode; 1778 if (vp == NULL) 1779 return (EOPNOTSUPP); 1780 1781 /* 1782 * Ensure that file and memory protections are 1783 * compatible. 1784 */ 1785 mp = vp->v_mount; 1786 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1787 maxprot = VM_PROT_NONE; 1788 if ((prot & VM_PROT_EXECUTE) != 0) 1789 return (EACCES); 1790 } else 1791 maxprot = VM_PROT_EXECUTE; 1792 if ((fp->f_flag & FREAD) != 0) 1793 maxprot |= VM_PROT_READ; 1794 else if ((prot & VM_PROT_READ) != 0) 1795 return (EACCES); 1796 1797 /* 1798 * If we are sharing potential changes via MAP_SHARED and we 1799 * are trying to get write permission although we opened it 1800 * without asking for it, bail out. 1801 * 1802 * Note that most character devices always share mappings. 1803 * 1804 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1805 * requests rather than doing it here. 1806 */ 1807 if ((flags & MAP_SHARED) != 0) { 1808 if ((fp->f_flag & FWRITE) != 0) 1809 maxprot |= VM_PROT_WRITE; 1810 else if ((prot & VM_PROT_WRITE) != 0) 1811 return (EACCES); 1812 } 1813 maxprot &= cap_maxprot; 1814 1815 linux_get_fop(filp, &fop, &ldev); 1816 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1817 &foff, fop, &object); 1818 if (error != 0) 1819 goto out; 1820 1821 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1822 foff, FALSE, td); 1823 if (error != 0) 1824 vm_object_deallocate(object); 1825 out: 1826 linux_drop_fop(ldev); 1827 return (error); 1828 } 1829 1830 static int 1831 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 1832 { 1833 struct linux_file *filp; 1834 struct vnode *vp; 1835 int error; 1836 1837 filp = (struct linux_file *)fp->f_data; 1838 if (filp->f_vnode == NULL) 1839 return (EOPNOTSUPP); 1840 1841 vp = filp->f_vnode; 1842 1843 vn_lock(vp, LK_SHARED | LK_RETRY); 1844 error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); 1845 VOP_UNLOCK(vp); 1846 1847 return (error); 1848 } 1849 1850 static int 1851 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1852 struct filedesc *fdp) 1853 { 1854 struct linux_file *filp; 1855 struct vnode *vp; 1856 int error; 1857 1858 filp = fp->f_data; 1859 vp = filp->f_vnode; 1860 if (vp == NULL) { 1861 error = 0; 1862 kif->kf_type = KF_TYPE_DEV; 1863 } else { 1864 vref(vp); 1865 FILEDESC_SUNLOCK(fdp); 1866 error = vn_fill_kinfo_vnode(vp, kif); 1867 vrele(vp); 1868 kif->kf_type = KF_TYPE_VNODE; 1869 FILEDESC_SLOCK(fdp); 1870 } 1871 return (error); 1872 } 1873 1874 unsigned int 1875 linux_iminor(struct inode *inode) 1876 { 1877 struct linux_cdev *ldev; 1878 1879 if (inode == NULL || inode->v_rdev == NULL || 1880 inode->v_rdev->si_devsw != &linuxcdevsw) 1881 return (-1U); 1882 ldev = inode->v_rdev->si_drv1; 1883 if (ldev == NULL) 1884 return (-1U); 1885 1886 return (minor(ldev->dev)); 1887 } 1888 1889 struct fileops linuxfileops = { 1890 .fo_read = linux_file_read, 1891 .fo_write = linux_file_write, 1892 .fo_truncate = invfo_truncate, 1893 .fo_kqfilter = linux_file_kqfilter, 1894 .fo_stat = linux_file_stat, 1895 .fo_fill_kinfo = linux_file_fill_kinfo, 1896 .fo_poll = linux_file_poll, 1897 .fo_close = linux_file_close, 1898 .fo_ioctl = linux_file_ioctl, 1899 .fo_mmap = linux_file_mmap, 1900 .fo_chmod = invfo_chmod, 1901 .fo_chown = invfo_chown, 1902 .fo_sendfile = invfo_sendfile, 1903 .fo_flags = DFLAG_PASSABLE, 1904 }; 1905 1906 /* 1907 * Hash of vmmap addresses. This is infrequently accessed and does not 1908 * need to be particularly large. This is done because we must store the 1909 * caller's idea of the map size to properly unmap. 1910 */ 1911 struct vmmap { 1912 LIST_ENTRY(vmmap) vm_next; 1913 void *vm_addr; 1914 unsigned long vm_size; 1915 }; 1916 1917 struct vmmaphd { 1918 struct vmmap *lh_first; 1919 }; 1920 #define VMMAP_HASH_SIZE 64 1921 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1922 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1923 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1924 static struct mtx vmmaplock; 1925 1926 static void 1927 vmmap_add(void *addr, unsigned long size) 1928 { 1929 struct vmmap *vmmap; 1930 1931 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1932 mtx_lock(&vmmaplock); 1933 vmmap->vm_size = size; 1934 vmmap->vm_addr = addr; 1935 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1936 mtx_unlock(&vmmaplock); 1937 } 1938 1939 static struct vmmap * 1940 vmmap_remove(void *addr) 1941 { 1942 struct vmmap *vmmap; 1943 1944 mtx_lock(&vmmaplock); 1945 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1946 if (vmmap->vm_addr == addr) 1947 break; 1948 if (vmmap) 1949 LIST_REMOVE(vmmap, vm_next); 1950 mtx_unlock(&vmmaplock); 1951 1952 return (vmmap); 1953 } 1954 1955 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1956 void * 1957 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1958 { 1959 void *addr; 1960 1961 addr = pmap_mapdev_attr(phys_addr, size, attr); 1962 if (addr == NULL) 1963 return (NULL); 1964 vmmap_add(addr, size); 1965 1966 return (addr); 1967 } 1968 #endif 1969 1970 void 1971 iounmap(void *addr) 1972 { 1973 struct vmmap *vmmap; 1974 1975 vmmap = vmmap_remove(addr); 1976 if (vmmap == NULL) 1977 return; 1978 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1979 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1980 #endif 1981 kfree(vmmap); 1982 } 1983 1984 void * 1985 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1986 { 1987 vm_offset_t off; 1988 size_t size; 1989 1990 size = count * PAGE_SIZE; 1991 off = kva_alloc(size); 1992 if (off == 0) 1993 return (NULL); 1994 vmmap_add((void *)off, size); 1995 pmap_qenter(off, pages, count); 1996 1997 return ((void *)off); 1998 } 1999 2000 void 2001 vunmap(void *addr) 2002 { 2003 struct vmmap *vmmap; 2004 2005 vmmap = vmmap_remove(addr); 2006 if (vmmap == NULL) 2007 return; 2008 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 2009 kva_free((vm_offset_t)addr, vmmap->vm_size); 2010 kfree(vmmap); 2011 } 2012 2013 static char * 2014 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 2015 { 2016 unsigned int len; 2017 char *p; 2018 va_list aq; 2019 2020 va_copy(aq, ap); 2021 len = vsnprintf(NULL, 0, fmt, aq); 2022 va_end(aq); 2023 2024 if (dev != NULL) 2025 p = devm_kmalloc(dev, len + 1, gfp); 2026 else 2027 p = kmalloc(len + 1, gfp); 2028 if (p != NULL) 2029 vsnprintf(p, len + 1, fmt, ap); 2030 2031 return (p); 2032 } 2033 2034 char * 2035 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 2036 { 2037 2038 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 2039 } 2040 2041 char * 2042 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 2043 { 2044 va_list ap; 2045 char *p; 2046 2047 va_start(ap, fmt); 2048 p = devm_kvasprintf(dev, gfp, fmt, ap); 2049 va_end(ap); 2050 2051 return (p); 2052 } 2053 2054 char * 2055 kasprintf(gfp_t gfp, const char *fmt, ...) 2056 { 2057 va_list ap; 2058 char *p; 2059 2060 va_start(ap, fmt); 2061 p = kvasprintf(gfp, fmt, ap); 2062 va_end(ap); 2063 2064 return (p); 2065 } 2066 2067 static void 2068 linux_timer_callback_wrapper(void *context) 2069 { 2070 struct timer_list *timer; 2071 2072 timer = context; 2073 2074 if (linux_set_current_flags(curthread, M_NOWAIT)) { 2075 /* try again later */ 2076 callout_reset(&timer->callout, 1, 2077 &linux_timer_callback_wrapper, timer); 2078 return; 2079 } 2080 2081 timer->function(timer->data); 2082 } 2083 2084 int 2085 mod_timer(struct timer_list *timer, int expires) 2086 { 2087 int ret; 2088 2089 timer->expires = expires; 2090 ret = callout_reset(&timer->callout, 2091 linux_timer_jiffies_until(expires), 2092 &linux_timer_callback_wrapper, timer); 2093 2094 MPASS(ret == 0 || ret == 1); 2095 2096 return (ret == 1); 2097 } 2098 2099 void 2100 add_timer(struct timer_list *timer) 2101 { 2102 2103 callout_reset(&timer->callout, 2104 linux_timer_jiffies_until(timer->expires), 2105 &linux_timer_callback_wrapper, timer); 2106 } 2107 2108 void 2109 add_timer_on(struct timer_list *timer, int cpu) 2110 { 2111 2112 callout_reset_on(&timer->callout, 2113 linux_timer_jiffies_until(timer->expires), 2114 &linux_timer_callback_wrapper, timer, cpu); 2115 } 2116 2117 int 2118 del_timer(struct timer_list *timer) 2119 { 2120 2121 if (callout_stop(&(timer)->callout) == -1) 2122 return (0); 2123 return (1); 2124 } 2125 2126 int 2127 del_timer_sync(struct timer_list *timer) 2128 { 2129 2130 if (callout_drain(&(timer)->callout) == -1) 2131 return (0); 2132 return (1); 2133 } 2134 2135 /* greatest common divisor, Euclid equation */ 2136 static uint64_t 2137 lkpi_gcd_64(uint64_t a, uint64_t b) 2138 { 2139 uint64_t an; 2140 uint64_t bn; 2141 2142 while (b != 0) { 2143 an = b; 2144 bn = a % b; 2145 a = an; 2146 b = bn; 2147 } 2148 return (a); 2149 } 2150 2151 uint64_t lkpi_nsec2hz_rem; 2152 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2153 uint64_t lkpi_nsec2hz_max; 2154 2155 uint64_t lkpi_usec2hz_rem; 2156 uint64_t lkpi_usec2hz_div = 1000000ULL; 2157 uint64_t lkpi_usec2hz_max; 2158 2159 uint64_t lkpi_msec2hz_rem; 2160 uint64_t lkpi_msec2hz_div = 1000ULL; 2161 uint64_t lkpi_msec2hz_max; 2162 2163 static void 2164 linux_timer_init(void *arg) 2165 { 2166 uint64_t gcd; 2167 2168 /* 2169 * Compute an internal HZ value which can divide 2**32 to 2170 * avoid timer rounding problems when the tick value wraps 2171 * around 2**32: 2172 */ 2173 linux_timer_hz_mask = 1; 2174 while (linux_timer_hz_mask < (unsigned long)hz) 2175 linux_timer_hz_mask *= 2; 2176 linux_timer_hz_mask--; 2177 2178 /* compute some internal constants */ 2179 2180 lkpi_nsec2hz_rem = hz; 2181 lkpi_usec2hz_rem = hz; 2182 lkpi_msec2hz_rem = hz; 2183 2184 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2185 lkpi_nsec2hz_rem /= gcd; 2186 lkpi_nsec2hz_div /= gcd; 2187 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2188 2189 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2190 lkpi_usec2hz_rem /= gcd; 2191 lkpi_usec2hz_div /= gcd; 2192 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2193 2194 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2195 lkpi_msec2hz_rem /= gcd; 2196 lkpi_msec2hz_div /= gcd; 2197 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2198 } 2199 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2200 2201 void 2202 linux_complete_common(struct completion *c, int all) 2203 { 2204 int wakeup_swapper; 2205 2206 sleepq_lock(c); 2207 if (all) { 2208 c->done = UINT_MAX; 2209 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2210 } else { 2211 if (c->done != UINT_MAX) 2212 c->done++; 2213 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2214 } 2215 sleepq_release(c); 2216 if (wakeup_swapper) 2217 kick_proc0(); 2218 } 2219 2220 /* 2221 * Indefinite wait for done != 0 with or without signals. 2222 */ 2223 int 2224 linux_wait_for_common(struct completion *c, int flags) 2225 { 2226 struct task_struct *task; 2227 int error; 2228 2229 if (SCHEDULER_STOPPED()) 2230 return (0); 2231 2232 task = current; 2233 2234 if (flags != 0) 2235 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2236 else 2237 flags = SLEEPQ_SLEEP; 2238 error = 0; 2239 for (;;) { 2240 sleepq_lock(c); 2241 if (c->done) 2242 break; 2243 sleepq_add(c, NULL, "completion", flags, 0); 2244 if (flags & SLEEPQ_INTERRUPTIBLE) { 2245 DROP_GIANT(); 2246 error = -sleepq_wait_sig(c, 0); 2247 PICKUP_GIANT(); 2248 if (error != 0) { 2249 linux_schedule_save_interrupt_value(task, error); 2250 error = -ERESTARTSYS; 2251 goto intr; 2252 } 2253 } else { 2254 DROP_GIANT(); 2255 sleepq_wait(c, 0); 2256 PICKUP_GIANT(); 2257 } 2258 } 2259 if (c->done != UINT_MAX) 2260 c->done--; 2261 sleepq_release(c); 2262 2263 intr: 2264 return (error); 2265 } 2266 2267 /* 2268 * Time limited wait for done != 0 with or without signals. 2269 */ 2270 int 2271 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2272 { 2273 struct task_struct *task; 2274 int end = jiffies + timeout; 2275 int error; 2276 2277 if (SCHEDULER_STOPPED()) 2278 return (0); 2279 2280 task = current; 2281 2282 if (flags != 0) 2283 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2284 else 2285 flags = SLEEPQ_SLEEP; 2286 2287 for (;;) { 2288 sleepq_lock(c); 2289 if (c->done) 2290 break; 2291 sleepq_add(c, NULL, "completion", flags, 0); 2292 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2293 2294 DROP_GIANT(); 2295 if (flags & SLEEPQ_INTERRUPTIBLE) 2296 error = -sleepq_timedwait_sig(c, 0); 2297 else 2298 error = -sleepq_timedwait(c, 0); 2299 PICKUP_GIANT(); 2300 2301 if (error != 0) { 2302 /* check for timeout */ 2303 if (error == -EWOULDBLOCK) { 2304 error = 0; /* timeout */ 2305 } else { 2306 /* signal happened */ 2307 linux_schedule_save_interrupt_value(task, error); 2308 error = -ERESTARTSYS; 2309 } 2310 goto done; 2311 } 2312 } 2313 if (c->done != UINT_MAX) 2314 c->done--; 2315 sleepq_release(c); 2316 2317 /* return how many jiffies are left */ 2318 error = linux_timer_jiffies_until(end); 2319 done: 2320 return (error); 2321 } 2322 2323 int 2324 linux_try_wait_for_completion(struct completion *c) 2325 { 2326 int isdone; 2327 2328 sleepq_lock(c); 2329 isdone = (c->done != 0); 2330 if (c->done != 0 && c->done != UINT_MAX) 2331 c->done--; 2332 sleepq_release(c); 2333 return (isdone); 2334 } 2335 2336 int 2337 linux_completion_done(struct completion *c) 2338 { 2339 int isdone; 2340 2341 sleepq_lock(c); 2342 isdone = (c->done != 0); 2343 sleepq_release(c); 2344 return (isdone); 2345 } 2346 2347 static void 2348 linux_cdev_deref(struct linux_cdev *ldev) 2349 { 2350 if (refcount_release(&ldev->refs) && 2351 ldev->kobj.ktype == &linux_cdev_ktype) 2352 kfree(ldev); 2353 } 2354 2355 static void 2356 linux_cdev_release(struct kobject *kobj) 2357 { 2358 struct linux_cdev *cdev; 2359 struct kobject *parent; 2360 2361 cdev = container_of(kobj, struct linux_cdev, kobj); 2362 parent = kobj->parent; 2363 linux_destroy_dev(cdev); 2364 linux_cdev_deref(cdev); 2365 kobject_put(parent); 2366 } 2367 2368 static void 2369 linux_cdev_static_release(struct kobject *kobj) 2370 { 2371 struct cdev *cdev; 2372 struct linux_cdev *ldev; 2373 2374 ldev = container_of(kobj, struct linux_cdev, kobj); 2375 cdev = ldev->cdev; 2376 if (cdev != NULL) { 2377 destroy_dev(cdev); 2378 ldev->cdev = NULL; 2379 } 2380 kobject_put(kobj->parent); 2381 } 2382 2383 int 2384 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2385 { 2386 int ret; 2387 2388 if (dev->devt != 0) { 2389 /* Set parent kernel object. */ 2390 ldev->kobj.parent = &dev->kobj; 2391 2392 /* 2393 * Unlike Linux we require the kobject of the 2394 * character device structure to have a valid name 2395 * before calling this function: 2396 */ 2397 if (ldev->kobj.name == NULL) 2398 return (-EINVAL); 2399 2400 ret = cdev_add(ldev, dev->devt, 1); 2401 if (ret) 2402 return (ret); 2403 } 2404 ret = device_add(dev); 2405 if (ret != 0 && dev->devt != 0) 2406 cdev_del(ldev); 2407 return (ret); 2408 } 2409 2410 void 2411 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) 2412 { 2413 device_del(dev); 2414 2415 if (dev->devt != 0) 2416 cdev_del(ldev); 2417 } 2418 2419 static void 2420 linux_destroy_dev(struct linux_cdev *ldev) 2421 { 2422 2423 if (ldev->cdev == NULL) 2424 return; 2425 2426 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2427 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 2428 2429 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2430 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2431 pause("ldevdtr", hz / 4); 2432 2433 destroy_dev(ldev->cdev); 2434 ldev->cdev = NULL; 2435 } 2436 2437 const struct kobj_type linux_cdev_ktype = { 2438 .release = linux_cdev_release, 2439 }; 2440 2441 const struct kobj_type linux_cdev_static_ktype = { 2442 .release = linux_cdev_static_release, 2443 }; 2444 2445 static void 2446 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2447 { 2448 struct notifier_block *nb; 2449 struct netdev_notifier_info ni; 2450 2451 nb = arg; 2452 ni.ifp = ifp; 2453 ni.dev = (struct net_device *)ifp; 2454 if (linkstate == LINK_STATE_UP) 2455 nb->notifier_call(nb, NETDEV_UP, &ni); 2456 else 2457 nb->notifier_call(nb, NETDEV_DOWN, &ni); 2458 } 2459 2460 static void 2461 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2462 { 2463 struct notifier_block *nb; 2464 struct netdev_notifier_info ni; 2465 2466 nb = arg; 2467 ni.ifp = ifp; 2468 ni.dev = (struct net_device *)ifp; 2469 nb->notifier_call(nb, NETDEV_REGISTER, &ni); 2470 } 2471 2472 static void 2473 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2474 { 2475 struct notifier_block *nb; 2476 struct netdev_notifier_info ni; 2477 2478 nb = arg; 2479 ni.ifp = ifp; 2480 ni.dev = (struct net_device *)ifp; 2481 nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); 2482 } 2483 2484 static void 2485 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2486 { 2487 struct notifier_block *nb; 2488 struct netdev_notifier_info ni; 2489 2490 nb = arg; 2491 ni.ifp = ifp; 2492 ni.dev = (struct net_device *)ifp; 2493 nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); 2494 } 2495 2496 static void 2497 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2498 { 2499 struct notifier_block *nb; 2500 struct netdev_notifier_info ni; 2501 2502 nb = arg; 2503 ni.ifp = ifp; 2504 ni.dev = (struct net_device *)ifp; 2505 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); 2506 } 2507 2508 int 2509 register_netdevice_notifier(struct notifier_block *nb) 2510 { 2511 2512 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2513 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2514 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2515 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2516 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2517 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2518 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2519 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2520 2521 return (0); 2522 } 2523 2524 int 2525 register_inetaddr_notifier(struct notifier_block *nb) 2526 { 2527 2528 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2529 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2530 return (0); 2531 } 2532 2533 int 2534 unregister_netdevice_notifier(struct notifier_block *nb) 2535 { 2536 2537 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2538 nb->tags[NETDEV_UP]); 2539 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2540 nb->tags[NETDEV_REGISTER]); 2541 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2542 nb->tags[NETDEV_UNREGISTER]); 2543 EVENTHANDLER_DEREGISTER(iflladdr_event, 2544 nb->tags[NETDEV_CHANGEADDR]); 2545 2546 return (0); 2547 } 2548 2549 int 2550 unregister_inetaddr_notifier(struct notifier_block *nb) 2551 { 2552 2553 EVENTHANDLER_DEREGISTER(ifaddr_event, 2554 nb->tags[NETDEV_CHANGEIFADDR]); 2555 2556 return (0); 2557 } 2558 2559 struct list_sort_thunk { 2560 int (*cmp)(void *, struct list_head *, struct list_head *); 2561 void *priv; 2562 }; 2563 2564 static inline int 2565 linux_le_cmp(void *priv, const void *d1, const void *d2) 2566 { 2567 struct list_head *le1, *le2; 2568 struct list_sort_thunk *thunk; 2569 2570 thunk = priv; 2571 le1 = *(__DECONST(struct list_head **, d1)); 2572 le2 = *(__DECONST(struct list_head **, d2)); 2573 return ((thunk->cmp)(thunk->priv, le1, le2)); 2574 } 2575 2576 void 2577 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2578 struct list_head *a, struct list_head *b)) 2579 { 2580 struct list_sort_thunk thunk; 2581 struct list_head **ar, *le; 2582 size_t count, i; 2583 2584 count = 0; 2585 list_for_each(le, head) 2586 count++; 2587 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2588 i = 0; 2589 list_for_each(le, head) 2590 ar[i++] = le; 2591 thunk.cmp = cmp; 2592 thunk.priv = priv; 2593 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2594 INIT_LIST_HEAD(head); 2595 for (i = 0; i < count; i++) 2596 list_add_tail(ar[i], head); 2597 free(ar, M_KMALLOC); 2598 } 2599 2600 #if defined(__i386__) || defined(__amd64__) 2601 int 2602 linux_wbinvd_on_all_cpus(void) 2603 { 2604 2605 pmap_invalidate_cache(); 2606 return (0); 2607 } 2608 #endif 2609 2610 int 2611 linux_on_each_cpu(void callback(void *), void *data) 2612 { 2613 2614 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2615 smp_no_rendezvous_barrier, data); 2616 return (0); 2617 } 2618 2619 int 2620 linux_in_atomic(void) 2621 { 2622 2623 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2624 } 2625 2626 struct linux_cdev * 2627 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2628 { 2629 dev_t dev = MKDEV(major, minor); 2630 struct cdev *cdev; 2631 2632 dev_lock(); 2633 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2634 struct linux_cdev *ldev = cdev->si_drv1; 2635 if (ldev->dev == dev && 2636 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2637 break; 2638 } 2639 } 2640 dev_unlock(); 2641 2642 return (cdev != NULL ? cdev->si_drv1 : NULL); 2643 } 2644 2645 int 2646 __register_chrdev(unsigned int major, unsigned int baseminor, 2647 unsigned int count, const char *name, 2648 const struct file_operations *fops) 2649 { 2650 struct linux_cdev *cdev; 2651 int ret = 0; 2652 int i; 2653 2654 for (i = baseminor; i < baseminor + count; i++) { 2655 cdev = cdev_alloc(); 2656 cdev->ops = fops; 2657 kobject_set_name(&cdev->kobj, name); 2658 2659 ret = cdev_add(cdev, makedev(major, i), 1); 2660 if (ret != 0) 2661 break; 2662 } 2663 return (ret); 2664 } 2665 2666 int 2667 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2668 unsigned int count, const char *name, 2669 const struct file_operations *fops, uid_t uid, 2670 gid_t gid, int mode) 2671 { 2672 struct linux_cdev *cdev; 2673 int ret = 0; 2674 int i; 2675 2676 for (i = baseminor; i < baseminor + count; i++) { 2677 cdev = cdev_alloc(); 2678 cdev->ops = fops; 2679 kobject_set_name(&cdev->kobj, name); 2680 2681 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2682 if (ret != 0) 2683 break; 2684 } 2685 return (ret); 2686 } 2687 2688 void 2689 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2690 unsigned int count, const char *name) 2691 { 2692 struct linux_cdev *cdevp; 2693 int i; 2694 2695 for (i = baseminor; i < baseminor + count; i++) { 2696 cdevp = linux_find_cdev(name, major, i); 2697 if (cdevp != NULL) 2698 cdev_del(cdevp); 2699 } 2700 } 2701 2702 void 2703 linux_dump_stack(void) 2704 { 2705 #ifdef STACK 2706 struct stack st; 2707 2708 stack_save(&st); 2709 stack_print(&st); 2710 #endif 2711 } 2712 2713 int 2714 linuxkpi_net_ratelimit(void) 2715 { 2716 2717 return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, 2718 lkpi_net_maxpps)); 2719 } 2720 2721 struct io_mapping * 2722 io_mapping_create_wc(resource_size_t base, unsigned long size) 2723 { 2724 struct io_mapping *mapping; 2725 2726 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 2727 if (mapping == NULL) 2728 return (NULL); 2729 return (io_mapping_init_wc(mapping, base, size)); 2730 } 2731 2732 #if defined(__i386__) || defined(__amd64__) 2733 bool linux_cpu_has_clflush; 2734 struct cpuinfo_x86 boot_cpu_data; 2735 #endif 2736 2737 static void 2738 linux_compat_init(void *arg) 2739 { 2740 struct sysctl_oid *rootoid; 2741 int i; 2742 2743 #if defined(__i386__) || defined(__amd64__) 2744 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2745 boot_cpu_data.x86_clflush_size = cpu_clflush_line_size; 2746 boot_cpu_data.x86 = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4); 2747 #endif 2748 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2749 2750 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2751 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2752 kobject_init(&linux_class_root, &linux_class_ktype); 2753 kobject_set_name(&linux_class_root, "class"); 2754 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2755 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2756 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2757 kobject_set_name(&linux_root_device.kobj, "device"); 2758 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2759 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2760 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2761 linux_root_device.bsddev = root_bus; 2762 linux_class_misc.name = "misc"; 2763 class_register(&linux_class_misc); 2764 INIT_LIST_HEAD(&pci_drivers); 2765 INIT_LIST_HEAD(&pci_devices); 2766 spin_lock_init(&pci_lock); 2767 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2768 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2769 LIST_INIT(&vmmaphead[i]); 2770 init_waitqueue_head(&linux_bit_waitq); 2771 init_waitqueue_head(&linux_var_waitq); 2772 2773 CPU_COPY(&all_cpus, &cpu_online_mask); 2774 } 2775 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2776 2777 static void 2778 linux_compat_uninit(void *arg) 2779 { 2780 linux_kobject_kfree_name(&linux_class_root); 2781 linux_kobject_kfree_name(&linux_root_device.kobj); 2782 linux_kobject_kfree_name(&linux_class_misc.kobj); 2783 2784 mtx_destroy(&vmmaplock); 2785 spin_lock_destroy(&pci_lock); 2786 rw_destroy(&linux_vma_lock); 2787 } 2788 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2789 2790 /* 2791 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2792 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2793 * used. Assert these types have the same size, else some parts of the 2794 * LinuxKPI may not work like expected: 2795 */ 2796 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2797