/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/io-mapping.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>
#include <linux/rcupdate.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#include <asm/processor.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

int linuxkpi_warn_dump_stack = 0;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
    &linuxkpi_warn_dump_stack, 0,
    "Set to enable stack traces from WARN_ON(). Clear to disable.");

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
static cpumask_t static_single_cpu_mask[MAXCPU];
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;
struct uts_namespace init_uts_ns;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

#define	START(node)	((node)->start)
#define	LAST(node)	((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
    LAST,, lkpi_interval_tree)

struct kobject *
kobject_create(void)
{
	struct kobject *kobj;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (kobj == NULL)
		return (NULL);
	kobject_init(kobj, &linux_kfree_type);

	return (kobj);
}


int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static ssize_t
lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct kobj_attribute *ka =
	    container_of(attr, struct kobj_attribute, attr);

	if (ka->show == NULL)
		return (-EIO);

	return (ka->show(kobj, ka, buf));
}

static ssize_t
lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
    const char *buf, size_t count)
{
	struct kobj_attribute *ka =
	    container_of(attr, struct kobj_attribute, attr);

	if (ka->store == NULL)
		return (-EIO);

	return (ka->store(kobj, ka, buf, count));
}

const struct sysfs_ops kobj_sysfs_ops = {
	.show = lkpi_kobj_attr_show,
	.store = lkpi_kobj_attr_store,
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
    dev_t devt, void *drvdata, const struct attribute_group **groups,
    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	/* device_initialize() needs the class and parent to be set */
	device_initialize(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct class *
class_create(struct module *owner, const char *name)
{
	struct class *class;
	int error;

	class = kzalloc(sizeof(*class), M_WAITOK);
	class->owner = owner;
	class->name = name;
	class->class_release = linux_class_kfree;
	error = class_register(class);
	if (error) {
		kfree(class);
		return (NULL);
	}

	return (class);
}

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(&s->m, MA_OWNED);
	else
		mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_op != NULL && filp->f_op->release != NULL)
			filp->f_op->release(filp->f_vnode, filp);
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree_rcu(filp, rcu);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

struct linux_cdev *
cdev_alloc(void)
{
	struct linux_cdev *cdev;

	cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
	kobject_init(&cdev->kobj, &linux_cdev_ktype);
	cdev->refs = 1;
	return (cdev);
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}

static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(&vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(&vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  {
	/* OBJT_MGTDEVICE */
	.cdev_pg_populate = linux_cdev_pager_populate,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
  {
	/* OBJT_DEVICE */
	.cdev_pg_fault = linux_cdev_pager_fault,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

void
vma_set_file(struct vm_area_struct *vma, struct linux_file *file)
{
	struct linux_file *tmp;

	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	tmp = vma->vm_file;
	vma->vm_file = file;
	fput(tmp);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}

static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, bool is_shared, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	if (is_shared)
		vmap->vm_flags |= VM_SHARED;
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	ldev = filp->f_cdev;
	if (ldev != NULL)
		linux_cdev_deref(ldev);
	linux_synchronize_rcu(RCU_TYPE_REGULAR);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct fiodgname_arg *fgn;
	const char *p;
	int error, i;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	case FIODGNAME:
#ifdef COMPAT_FREEBSD32
	case FIODGNAME_32:
#endif
		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
			error = ENXIO;
			break;
		}
		fgn = data;
		p = devtoname(filp->f_cdev->cdev);
		i = strlen(p) + 1;
		if (i > fgn->len) {
			error = EINVAL;
			break;
		}
		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t maxprot, int flags, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, (flags & MAP_SHARED) ? true : false, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_flags = DFLAG_PASSABLE,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

static char *
devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	if (dev != NULL)
		p = devm_kmalloc(dev, len + 1, gfp);
	else
		p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{

	return (devm_kvasprintf(NULL, gfp, fmt, ap));
}

char *
lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;

	if (linux_set_current_flags(curthread, M_NOWAIT)) {
		/* try again later */
		callout_reset(&timer->callout, 1,
		    &linux_timer_callback_wrapper, timer);
		return;
	}

	timer->function(timer->data);
}

int
mod_timer(struct timer_list *timer, int expires)
{
	int ret;

	timer->expires = expires;
	ret = callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);

	MPASS(ret == 0 || ret == 1);

	return (ret == 1);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
del_timer_sync(struct timer_list *timer)
{

	if (callout_drain(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
timer_delete_sync(struct timer_list *timer)
{

	return (del_timer_sync(timer));
}

int
timer_shutdown_sync(struct timer_list *timer)
{

	return (del_timer_sync(timer));
}

/* greatest common divisor, Euclid equation */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;

static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
/* greatest common divisor, Euclid's algorithm */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;

static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
	lkpi_usec2hz_rem /= gcd;
	lkpi_usec2hz_div /= gcd;
	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
	lkpi_msec2hz_rem /= gcd;
	lkpi_msec2hz_div /= gcd;
	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}

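/*
 * Illustrative sketch only (kept under #if 0, not compiled into this file):
 * the completion API that linux_complete_common() and linux_wait_for_common()
 * back.  The object and call sites are hypothetical; the wrappers are assumed
 * to come from <linux/completion.h>.
 */
#if 0
static struct completion example_done;

static void
example_producer(void)
{
	/* Bumps c->done and wakes one sleeper on the sleepqueue. */
	complete(&example_done);
}

static int
example_consumer(void)
{
	init_completion(&example_done);

	/* Returns 0 once complete() has run, -ERESTARTSYS on a signal. */
	return (wait_for_completion_interruptible(&example_done));
}
#endif
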
/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
	struct task_struct *task;
	int end = jiffies + timeout;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));

		DROP_GIANT();
		if (flags & SLEEPQ_INTERRUPTIBLE)
			error = -sleepq_timedwait_sig(c, 0);
		else
			error = -sleepq_timedwait(c, 0);
		PICKUP_GIANT();

		if (error != 0) {
			/* check for timeout */
			if (error == -EWOULDBLOCK) {
				error = 0;	/* timeout */
			} else {
				/* signal happened */
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
			}
			goto done;
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	error = linux_timer_jiffies_until(end);
done:
	return (error);
}

int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	if (c->done != 0 && c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	sleepq_release(c);
	return (isdone);
}

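/*
 * Illustrative sketch only (kept under #if 0, not compiled into this file):
 * a bounded wait built on linux_wait_for_timeout_common() above.  The
 * completion pointer and the 100 ms budget are hypothetical example values;
 * wait_for_completion_timeout() and msecs_to_jiffies() are assumed to be the
 * LinuxKPI wrappers.
 */
#if 0
static int
example_wait_with_deadline(struct completion *c)
{
	unsigned long left;

	/* Returns the jiffies remaining on success, 0 on timeout. */
	left = wait_for_completion_timeout(c, msecs_to_jiffies(100));
	if (left == 0)
		return (-ETIMEDOUT);
	return (0);
}
#endif
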
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

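/*
 * Illustrative sketch only (kept under #if 0, not compiled into this file):
 * a consumer of register_netdevice_notifier() above.  The callback and
 * notifier_block are hypothetical; the struct netdev_notifier_info layout is
 * the one filled in by the ifnet event handlers in this file, and NOTIFY_DONE
 * is assumed to come from <linux/notifier.h>.
 */
#if 0
static int
example_netdev_event(struct notifier_block *nb, unsigned long event,
    void *data)
{
	struct netdev_notifier_info *ni = data;

	if (event == NETDEV_UP || event == NETDEV_DOWN)
		pr_info("link change on %p\n", ni->dev);
	return (NOTIFY_DONE);
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/*
 * register_netdevice_notifier(&example_nb) hooks the EVENTHANDLER(9) events
 * above; unregister_netdevice_notifier(&example_nb) detaches them again.
 */
#endif
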
int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(const void *d1, const void *d2, void *priv)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

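/*
 * Illustrative sketch only (kept under #if 0, not compiled into this file):
 * sorting an intrusive list with the list_sort() shim above.  The element
 * type and comparison function are hypothetical.
 */
#if 0
struct example_elem {
	struct list_head	entry;
	int			key;
};

static int
example_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct example_elem *ea = list_entry(a, struct example_elem, entry);
	struct example_elem *eb = list_entry(b, struct example_elem, entry);

	return (ea->key - eb->key);
}

static void
example_sort(struct list_head *head)
{
	/*
	 * list_sort() snapshots the entries into a temporary array, sorts
	 * it with qsort_r() through the thunk above, and relinks the list.
	 */
	list_sort(NULL, head, example_cmp);
}
#endif
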
#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
	struct io_mapping *mapping;

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (mapping == NULL)
		return (NULL);
	return (io_mapping_init_wc(mapping, base, size));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
struct cpuinfo_x86 boot_cpu_data;
struct cpuinfo_x86 __cpu_data[MAXCPU];
#endif

cpumask_t *
lkpi_get_static_single_cpu_mask(int cpuid)
{

	KASSERT((cpuid >= 0 && cpuid < MAXCPU), ("%s: invalid cpuid %d\n",
	    __func__, cpuid));

	return (&static_single_cpu_mask[cpuid]);
}

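/*
 * Illustrative sketch only (kept under #if 0, not compiled into this file):
 * the intended consumer of the per-CPU masks returned above.  The function
 * name and irq number are hypothetical; cpumask_of() is the accessor
 * mentioned in the comment in linux_compat_init() below, and
 * irq_set_affinity_hint() is assumed to be the LinuxKPI wrapper from
 * <linux/interrupt.h>.
 */
#if 0
static void
example_pin_irq(int irq, int cpu)
{
	/*
	 * cpumask_of(cpu) yields a mask with only "cpu" set, which drivers
	 * pass as an affinity hint for their interrupt.
	 */
	irq_set_affinity_hint(irq, cpumask_of(cpu));
}
#endif
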
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
	boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
	boot_cpu_data.x86_max_cores = mp_ncpus;
	boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
	boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);

	for (i = 0; i < MAXCPU; i++) {
		__cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
		__cpu_data[i].x86_max_cores = mp_ncpus;
		__cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
		__cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
	}
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);

	CPU_COPY(&all_cpus, &cpu_online_mask);
	/*
	 * Generate a single-CPU cpumask_t for each CPU (possibly) in the
	 * system.  CPUs are indexed from 0..(MAXCPU-1).  The entry for
	 * cpuid 0 will only have itself in the cpumask, cpuid 1 only itself
	 * on entry 1, and so on.  This is used by cpumask_of() (and possibly
	 * others in the future) for, e.g., drivers to pass hints to
	 * irq_set_affinity_hint().
	 */
	for (i = 0; i < MAXCPU; i++)
		CPU_SET(i, &static_single_cpu_mask[i]);

	strlcpy(init_uts_ns.name.release, osrelease,
	    sizeof(init_uts_ns.name.release));
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
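
/*
 * Illustrative sketch only (kept under #if 0, not compiled into this file):
 * the pointer/integer round-trip that the assertion above keeps safe.  Linux
 * code routinely stores pointers in "unsigned long"; this only works because
 * that type is as wide as uintptr_t on the platforms FreeBSD supports.  The
 * helper names are hypothetical.
 */
#if 0
static unsigned long
example_ptr_to_ulong(void *p)
{
	return ((unsigned long)(uintptr_t)p);
}

static void *
example_ulong_to_ptr(unsigned long v)
{
	return ((void *)(uintptr_t)v);
}
#endif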