/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/io-mapping.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>
#include <linux/rcupdate.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#include <asm/processor.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

int linuxkpi_warn_dump_stack = 0;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
    &linuxkpi_warn_dump_stack, 0,
    "Set to enable stack traces from WARN_ON(). Clear to disable.");

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
static cpumask_t static_single_cpu_mask[MAXCPU];
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;
struct uts_namespace init_uts_ns;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

#define	START(node)	((node)->start)
#define	LAST(node)	((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
    LAST,, lkpi_interval_tree)

struct kobject *
kobject_create(void)
{
	struct kobject *kobj;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (kobj == NULL)
		return (NULL);
	kobject_init(kobj, &linux_kfree_type);

	return (kobj);
}

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}
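/*
 * Informational example (not compiled): because of the filter loop
 * above, a caller such as kobject_set_name(kobj, "fb%d/state", 0)
 * ends up with the name "fb0!state"; every '/' is rewritten to '!'
 * so the name remains a single sysfs path component.
 */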
int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static ssize_t
lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct kobj_attribute *ka =
	    container_of(attr, struct kobj_attribute, attr);

	if (ka->show == NULL)
		return (-EIO);

	return (ka->show(kobj, ka, buf));
}

static ssize_t
lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
    const char *buf, size_t count)
{
	struct kobj_attribute *ka =
	    container_of(attr, struct kobj_attribute, attr);

	if (ka->store == NULL)
		return (-EIO);

	return (ka->store(kobj, ka, buf, count));
}

const struct sysfs_ops kobj_sysfs_ops = {
	.show = lkpi_kobj_attr_show,
	.store = lkpi_kobj_attr_store,
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
    dev_t devt, void *drvdata, const struct attribute_group **groups,
    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	/* device_initialize() needs the class and parent to be set */
	device_initialize(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct class *
class_create(struct module *owner, const char *name)
{
	struct class *class;
	int error;

	class = kzalloc(sizeof(*class), M_WAITOK);
	class->owner = owner;
	class->name = name;
	class->class_release = linux_class_kfree;
	error = class_register(class);
	if (error) {
		kfree(class);
		return (NULL);
	}

	return (class);
}
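/*
 * Usage sketch (informational only, names are placeholders): a
 * LinuxKPI consumer would typically pair the helpers above roughly
 * as follows, with the matching teardown calls:
 *
 *	struct class *cls = class_create(THIS_MODULE, "mydrv");
 *	struct device *d = device_create(cls, NULL, MKDEV(0, 0), sc,
 *	    "mydrv%d", unit);
 *	...
 *	device_destroy(cls, MKDEV(0, 0));
 *	class_destroy(cls);
 */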
int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(&s->m, MA_OWNED);
	else
		mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), M_WAITOK);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_op != NULL && filp->f_op->release != NULL)
			filp->f_op->release(filp->f_vnode, filp);
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree_rcu(filp, rcu);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

struct linux_cdev *
cdev_alloc(void)
{
	struct linux_cdev *cdev;

	cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
	kobject_init(&cdev->kobj, &linux_cdev_ktype);
	cdev->refs = 1;
	return (cdev);
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}
static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(&vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(&vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  {
	/* OBJT_MGTDEVICE */
	.cdev_pg_populate = linux_cdev_pager_populate,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
  {
	/* OBJT_DEVICE */
	.cdev_pg_fault = linux_cdev_pager_fault,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

void
vma_set_file(struct vm_area_struct *vma, struct linux_file *file)
{
	struct linux_file *tmp;

	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	tmp = vma->vm_file;
	vma->vm_file = file;
	fput(tmp);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})
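/*
 * OPW() ("operation wrapper") temporarily publishes the FreeBSD file
 * pointer in td->td_fpop around a single Linux file_operations call
 * and then restores the previous value, yielding the call's result,
 * for example:
 *
 *	error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, arg));
 *
 * This way code reached from the Linux method (for instance the
 * devfs cdevpriv helpers, which look at curthread->td_fpop) can find
 * the struct file that triggered the call.
 */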
static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}
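/*
 * Illustration of the ioctl pointer window above (addresses are
 * examples only): linux_file_ioctl_sub() stores the kernel ioctl
 * buffer in current->bsd_ioctl_data and hands the Linux handler the
 * fake "user" pointer LINUX_IOCTL_MIN_PTR (0x10000).  If the handler
 * later passes, say, 0x10008 to linux_copyin() or linux_copyout(),
 * linux_remap_address() rewrites it to bsd_ioctl_data + 8 and the
 * transfer degenerates to a plain memcpy().  Pointers outside the
 * window fall through to the regular copyin()/copyout() path.
 */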
/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}

static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}
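/*
 * Rough summary of the kqueue/poll bridge above: EVFILT_READ and
 * EVFILT_WRITE knotes record interest via LINUX_KQ_FLAG_HAS_*, the
 * driver's Linux poll() method is re-polled by
 * linux_file_kqfilter_poll() after reads, writes and ioctls, and any
 * POLLIN/POLLOUT bits it reports are latched into
 * LINUX_KQ_FLAG_NEED_* so the filter callbacks can answer without
 * calling back into the driver while f_kqlock is held.
 */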
static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, bool is_shared, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	if (vmap == NULL)
		return (ENOMEM);

	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	if (is_shared)
		vmap->vm_flags |= VM_SHARED;
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	ldev = filp->f_cdev;
	if (ldev != NULL)
		linux_cdev_deref(ldev);
	linux_synchronize_rcu(RCU_TYPE_REGULAR);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct fiodgname_arg *fgn;
	const char *p;
	int error, i;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	case FIODGNAME:
#ifdef COMPAT_FREEBSD32
	case FIODGNAME_32:
#endif
		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
			error = ENXIO;
			break;
		}
		fgn = data;
		p = devtoname(filp->f_cdev->cdev);
		i = strlen(p) + 1;
		if (i > fgn->len) {
			error = EINVAL;
			break;
		}
		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t maxprot, int flags, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, (flags & MAP_SHARED) ? true : false, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}
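/*
 * linuxfileops below is the glue installed by linux_dev_fdopen(): it
 * routes every FreeBSD fileop on the descriptor (read, write, ioctl,
 * poll, kqueue, mmap, close) to the corresponding Linux
 * file_operations method of the driver, with linux_get_fop() and
 * linux_drop_fop() guarding against the character device being
 * destroyed underneath the call.
 */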
struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_flags = DFLAG_PASSABLE,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), M_WAITOK);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

static char *
devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	if (dev != NULL)
		p = devm_kmalloc(dev, len + 1, gfp);
	else
		p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{

	return (devm_kvasprintf(NULL, gfp, fmt, ap));
}

char *
lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;

	if (linux_set_current_flags(curthread, M_NOWAIT)) {
		/* try again later */
		callout_reset(&timer->callout, 1,
		    &linux_timer_callback_wrapper, timer);
		return;
	}

	timer->function(timer->data);
}

int
mod_timer(struct timer_list *timer, int expires)
{
	int ret;

	timer->expires = expires;
	ret = callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);

	MPASS(ret == 0 || ret == 1);

	return (ret == 1);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
del_timer_sync(struct timer_list *timer)
{

	if (callout_drain(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
timer_delete_sync(struct timer_list *timer)
{

	return (del_timer_sync(timer));
}

int
timer_shutdown_sync(struct timer_list *timer)
{

	return (del_timer_sync(timer));
}

/* greatest common divisor, Euclid's algorithm */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;
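/*
 * The constants above encode the ratio hz / 10^9 (and 10^6, 10^3)
 * with the common factor removed, so a conversion such as
 * nanoseconds-to-ticks can presumably be done by the corresponding
 * *_to_jiffies helpers roughly as:
 *
 *	if (n >= lkpi_nsec2hz_max)
 *		ticks = ULONG_MAX;		(clamp, multiply would overflow)
 *	else
 *		ticks = (n * lkpi_nsec2hz_rem) / lkpi_nsec2hz_div;
 *
 * lkpi_nsec2hz_max is -1ULL / lkpi_nsec2hz_rem, i.e. the largest n
 * for which the 64-bit multiplication cannot wrap.
 */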
/* greatest common divisor, Euclid's algorithm */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;

static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
	lkpi_usec2hz_rem /= gcd;
	lkpi_usec2hz_div /= gcd;
	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
	lkpi_msec2hz_rem /= gcd;
	lkpi_msec2hz_div /= gcd;
	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}
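/*
 * Illustrative sketch, not part of the build: linux_complete_common() and the
 * wait helpers around it back the usual Linux completion idiom, roughly:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	// waiter:
 *	wait_for_completion(&done);	// ends up in linux_wait_for_common()
 *	// signaller:
 *	complete(&done);		// linux_complete_common(c, 0): done++
 *	complete_all(&done);		// linux_complete_common(c, 1): done = UINT_MAX
 */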
/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
	struct task_struct *task;
	int end = jiffies + timeout;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));

		DROP_GIANT();
		if (flags & SLEEPQ_INTERRUPTIBLE)
			error = -sleepq_timedwait_sig(c, 0);
		else
			error = -sleepq_timedwait(c, 0);
		PICKUP_GIANT();

		if (error != 0) {
			/* check for timeout */
			if (error == -EWOULDBLOCK) {
				error = 0;	/* timeout */
			} else {
				/* signal happened */
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
			}
			goto done;
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	error = linux_timer_jiffies_until(end);
done:
	return (error);
}

int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	if (c->done != 0 && c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	sleepq_release(c);
	return (isdone);
}

static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}
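/*
 * Pair a LinuxKPI character device with its struct device.  Unlike Linux,
 * the embedded kobject must already carry a valid name (normally set via
 * kobject_set_name()) before this is called; see the check below.
 */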
int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}
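/*
 * Illustrative sketch, not part of the build: the registration above wires a
 * Linux-style notifier into the FreeBSD eventhandlers, so a consumer supplies
 * a callback of the usual shape (my_netdev_event and my_nb are hypothetical):
 *
 *	static int
 *	my_netdev_event(struct notifier_block *nb, unsigned long action, void *data)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(data);
 *
 *		// action is one of NETDEV_UP, NETDEV_DOWN, NETDEV_REGISTER, ...
 *		return (NOTIFY_DONE);
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */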
int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(const void *d1, const void *d2, void *priv)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}
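/*
 * Illustrative sketch, not part of the build: list_sort() above snapshots the
 * list into an array, sorts it with qsort_r() and relinks the nodes, so the
 * comparison callback keeps the Linux signature (my_entry, my_cmp and my_list
 * are hypothetical):
 *
 *	static int
 *	my_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct my_entry *ea = list_entry(a, struct my_entry, node);
 *		struct my_entry *eb = list_entry(b, struct my_entry, node);
 *
 *		return ((ea->key > eb->key) - (ea->key < eb->key));
 *	}
 *
 *	list_sort(NULL, &my_list, my_cmp);
 */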
struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
	struct io_mapping *mapping;

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (mapping == NULL)
		return (NULL);
	return (io_mapping_init_wc(mapping, base, size));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
struct cpuinfo_x86 boot_cpu_data;
struct cpuinfo_x86 __cpu_data[MAXCPU];
#endif

cpumask_t *
lkpi_get_static_single_cpu_mask(int cpuid)
{

	KASSERT((cpuid >= 0 && cpuid < MAXCPU), ("%s: invalid cpuid %d\n",
	    __func__, cpuid));

	return (&static_single_cpu_mask[cpuid]);
}
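/*
 * Illustrative sketch, not part of the build: the per-CPU masks returned
 * above back cpumask_of(), which drivers typically feed into affinity hints
 * (irq and my_cpu are hypothetical):
 *
 *	irq_set_affinity_hint(irq, cpumask_of(my_cpu));
 */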
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
	boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
	boot_cpu_data.x86_max_cores = mp_ncpus;
	boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
	boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);

	for (i = 0; i < MAXCPU; i++) {
		__cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
		__cpu_data[i].x86_max_cores = mp_ncpus;
		__cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
		__cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
	}
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);

	CPU_COPY(&all_cpus, &cpu_online_mask);
	/*
	 * Generate a single-CPU cpumask_t for each CPU (possibly) in the
	 * system.  CPUs are indexed from 0..(MAXCPU-1).  The entry for
	 * cpuid 0 will only have itself in the cpumask, cpuid 1 only itself
	 * on entry 1, and so on.  This is used by cpumask_of() (and possibly
	 * others in the future) for, e.g., drivers to pass hints to
	 * irq_set_affinity_hint().
	 */
	for (i = 0; i < MAXCPU; i++)
		CPU_SET(i, &static_single_cpu_mask[i]);

	strlcpy(init_uts_ns.name.release, osrelease,
	    sizeof(init_uts_ns.name.release));
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
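/*
 * Illustrative sketch, not part of the build: the assertion above is what
 * keeps the common Linux idiom of round-tripping a pointer through
 * "unsigned long" lossless on all supported platforms (p is hypothetical):
 *
 *	unsigned long cookie = (unsigned long)p;
 *	void *back = (void *)cookie;	// identical to p
 */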