/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>
#include <linux/rcupdate.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");
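
/*
 * Example: pr_debug() output from LinuxKPI consumers can be toggled at
 * runtime with
 *
 *	sysctl compat.linuxkpi.debug=1
 *
 * and, because the OID above is declared CTLFLAG_RWTUN, it can also be
 * preset as a loader tunable.
 */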

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
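
/*
 * Sketch of a typical device_create() call as issued by LinuxKPI
 * consumers (the class pointer, softc and minor number below are
 * hypothetical):
 *
 *	dev = device_create(my_class, NULL, MKDEV(0, minor), sc,
 *	    "mydev%d", minor);
 *
 * The device is registered before returning; callers typically pair
 * this with device_destroy() on teardown.
 */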

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(&s->m, MA_OWNED);
	else
		mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_op != NULL && filp->f_op->release != NULL)
			filp->f_op->release(filp->f_vnode, filp);
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree_rcu(filp, rcu);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}

static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(&vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(&vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
	{
		/* OBJT_MGTDEVICE */
		.cdev_pg_populate = linux_cdev_pager_populate,
		.cdev_pg_ctor = linux_cdev_pager_ctor,
		.cdev_pg_dtor = linux_cdev_pager_dtor
	},
	{
		/* OBJT_DEVICE */
		.cdev_pg_fault = linux_cdev_pager_fault,
		.cdev_pg_ctor = linux_cdev_pager_ctor,
		.cdev_pg_dtor = linux_cdev_pager_dtor
	},
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
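	/*
	 * The ioctl argument buffer already lives in kernel space, but
	 * Linux driver code expects a user-space pointer that it can
	 * feed to copy_from_user()/copy_to_user().  Publishing the
	 * buffer as a fake address inside the window
	 * [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR) lets
	 * linux_remap_address() recognize it later and turn the copy
	 * into a plain memcpy().  Illustrative driver-side sketch (the
	 * "args" structure is hypothetical):
	 *
	 *	struct my_args args;
	 *	if (copy_from_user(&args, (void *)arg, sizeof(args)))
	 *		return (-EFAULT);
	 */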
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}

static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, bool is_shared, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	if (is_shared)
		vmap->vm_flags |= VM_SHARED;
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	ldev = filp->f_cdev;
	if (ldev != NULL)
		linux_cdev_deref(ldev);
	linux_synchronize_rcu(RCU_TYPE_REGULAR);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct fiodgname_arg *fgn;
	const char *p;
	int error, i;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	case FIODGNAME:
#ifdef	COMPAT_FREEBSD32
	case FIODGNAME_32:
#endif
		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
			error = ENXIO;
			break;
		}
		fgn = data;
		p = devtoname(filp->f_cdev->cdev);
		i = strlen(p) + 1;
		if (i > fgn->len) {
			error = EINVAL;
			break;
		}
		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t maxprot, int flags, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, (flags & MAP_SHARED) ? true : false, td));
}
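
/*
 * linux_file_mmap() below broadly mirrors the protection checks made
 * for regular vnode mappings: the maximum protection is derived from
 * the open mode (FREAD/FWRITE) and the mount's MNT_NOEXEC flag before
 * the request is handed to linux_file_mmap_sub().
 */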
static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_flags = DFLAG_PASSABLE,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

static char *
devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	if (dev != NULL)
		p = devm_kmalloc(dev, len + 1, gfp);
	else
		p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{

	return (devm_kvasprintf(NULL, gfp, fmt, ap));
}

char *
lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;

	if (linux_set_current_flags(curthread, M_NOWAIT)) {
		/* try again later */
		callout_reset(&timer->callout, 1,
		    &linux_timer_callback_wrapper, timer);
		return;
	}

	timer->function(timer->data);
}

int
mod_timer(struct timer_list *timer, int expires)
{
	int ret;

	timer->expires = expires;
	ret = callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);

	MPASS(ret == 0 || ret == 1);

	return (ret == 1);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
del_timer_sync(struct timer_list *timer)
{

	if (callout_drain(&(timer)->callout) == -1)
		return (0);
	return (1);
}

/* greatest common divisor, Euclid equation */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;

static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
	lkpi_usec2hz_rem /= gcd;
	lkpi_usec2hz_div /= gcd;
	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
	lkpi_msec2hz_rem /= gcd;
	lkpi_msec2hz_div /= gcd;
	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}

/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
	struct task_struct *task;
	int end = jiffies + timeout;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));

		DROP_GIANT();
		if (flags & SLEEPQ_INTERRUPTIBLE)
			error = -sleepq_timedwait_sig(c, 0);
		else
			error = -sleepq_timedwait(c, 0);
		PICKUP_GIANT();

		if (error != 0) {
			/* check for timeout */
			if (error == -EWOULDBLOCK) {
				error = 0;	/* timeout */
			} else {
				/* signal happened */
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
			}
			goto done;
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	error = linux_timer_jiffies_until(end);
done:
	return (error);
}

int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	if (c->done != 0 && c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	sleepq_release(c);
	return (isdone);
}

static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}
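
/*
 * The Linux netdevice and inetaddr notifier chains are backed by
 * FreeBSD's eventhandler(9): registering a notifier hooks the handlers
 * above into the corresponding ifnet events, and each event is then
 * forwarded to the notifier callback as the matching NETDEV_* action.
 */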

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}
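
/*
 * list_sort() is implemented by copying the list entries into a
 * temporary array, sorting that array with qsort_r() through the thunk
 * below, and then rebuilding the list in sorted order.
 */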

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);

	CPU_COPY(&all_cpus, &cpu_online_mask);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));