1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/user.h> 55 56 #include <vm/vm.h> 57 #include <vm/pmap.h> 58 #include <vm/vm_object.h> 59 #include <vm/vm_page.h> 60 #include <vm/vm_pager.h> 61 62 #include <machine/stdarg.h> 63 64 #if defined(__i386__) || defined(__amd64__) 65 #include <machine/md_var.h> 66 #endif 67 68 #include <linux/kobject.h> 69 #include <linux/device.h> 70 #include <linux/slab.h> 71 #include <linux/module.h> 72 #include <linux/moduleparam.h> 73 #include <linux/cdev.h> 74 #include <linux/file.h> 75 #include <linux/sysfs.h> 76 #include <linux/mm.h> 77 #include <linux/io.h> 78 #include <linux/vmalloc.h> 79 #include <linux/netdevice.h> 80 #include <linux/timer.h> 81 #include <linux/interrupt.h> 82 #include <linux/uaccess.h> 83 #include <linux/list.h> 84 #include <linux/kthread.h> 85 #include <linux/kernel.h> 86 #include <linux/compat.h> 87 #include <linux/poll.h> 88 #include <linux/smp.h> 89 #include <linux/wait_bit.h> 90 91 #if defined(__i386__) || defined(__amd64__) 92 #include <asm/smp.h> 93 #endif 94 95 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 96 "LinuxKPI parameters"); 97 98 int linuxkpi_debug; 99 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 100 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 101 102 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 103 104 #include <linux/rbtree.h> 105 /* Undo Linux compat changes. 
*/ 106 #undef RB_ROOT 107 #undef file 108 #undef cdev 109 #define RB_ROOT(head) (head)->rbh_root 110 111 static void linux_cdev_deref(struct linux_cdev *ldev); 112 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 113 114 struct kobject linux_class_root; 115 struct device linux_root_device; 116 struct class linux_class_misc; 117 struct list_head pci_drivers; 118 struct list_head pci_devices; 119 spinlock_t pci_lock; 120 121 unsigned long linux_timer_hz_mask; 122 123 wait_queue_head_t linux_bit_waitq; 124 wait_queue_head_t linux_var_waitq; 125 126 int 127 panic_cmp(struct rb_node *one, struct rb_node *two) 128 { 129 panic("no cmp"); 130 } 131 132 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 133 134 int 135 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 136 { 137 va_list tmp_va; 138 int len; 139 char *old; 140 char *name; 141 char dummy; 142 143 old = kobj->name; 144 145 if (old && fmt == NULL) 146 return (0); 147 148 /* compute length of string */ 149 va_copy(tmp_va, args); 150 len = vsnprintf(&dummy, 0, fmt, tmp_va); 151 va_end(tmp_va); 152 153 /* account for zero termination */ 154 len++; 155 156 /* check for error */ 157 if (len < 1) 158 return (-EINVAL); 159 160 /* allocate memory for string */ 161 name = kzalloc(len, GFP_KERNEL); 162 if (name == NULL) 163 return (-ENOMEM); 164 vsnprintf(name, len, fmt, args); 165 kobj->name = name; 166 167 /* free old string */ 168 kfree(old); 169 170 /* filter new string */ 171 for (; *name != '\0'; name++) 172 if (*name == '/') 173 *name = '!'; 174 return (0); 175 } 176 177 int 178 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 179 { 180 va_list args; 181 int error; 182 183 va_start(args, fmt); 184 error = kobject_set_name_vargs(kobj, fmt, args); 185 va_end(args); 186 187 return (error); 188 } 189 190 static int 191 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 192 { 193 const struct kobj_type *t; 194 int error; 195 196 kobj->parent = parent; 197 error = sysfs_create_dir(kobj); 198 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 199 struct attribute **attr; 200 t = kobj->ktype; 201 202 for (attr = t->default_attrs; *attr != NULL; attr++) { 203 error = sysfs_create_file(kobj, *attr); 204 if (error) 205 break; 206 } 207 if (error) 208 sysfs_remove_dir(kobj); 209 } 210 return (error); 211 } 212 213 int 214 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
215 { 216 va_list args; 217 int error; 218 219 va_start(args, fmt); 220 error = kobject_set_name_vargs(kobj, fmt, args); 221 va_end(args); 222 if (error) 223 return (error); 224 225 return kobject_add_complete(kobj, parent); 226 } 227 228 void 229 linux_kobject_release(struct kref *kref) 230 { 231 struct kobject *kobj; 232 char *name; 233 234 kobj = container_of(kref, struct kobject, kref); 235 sysfs_remove_dir(kobj); 236 name = kobj->name; 237 if (kobj->ktype && kobj->ktype->release) 238 kobj->ktype->release(kobj); 239 kfree(name); 240 } 241 242 static void 243 linux_kobject_kfree(struct kobject *kobj) 244 { 245 kfree(kobj); 246 } 247 248 static void 249 linux_kobject_kfree_name(struct kobject *kobj) 250 { 251 if (kobj) { 252 kfree(kobj->name); 253 } 254 } 255 256 const struct kobj_type linux_kfree_type = { 257 .release = linux_kobject_kfree 258 }; 259 260 static void 261 linux_device_release(struct device *dev) 262 { 263 pr_debug("linux_device_release: %s\n", dev_name(dev)); 264 kfree(dev); 265 } 266 267 static ssize_t 268 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 269 { 270 struct class_attribute *dattr; 271 ssize_t error; 272 273 dattr = container_of(attr, struct class_attribute, attr); 274 error = -EIO; 275 if (dattr->show) 276 error = dattr->show(container_of(kobj, struct class, kobj), 277 dattr, buf); 278 return (error); 279 } 280 281 static ssize_t 282 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 283 size_t count) 284 { 285 struct class_attribute *dattr; 286 ssize_t error; 287 288 dattr = container_of(attr, struct class_attribute, attr); 289 error = -EIO; 290 if (dattr->store) 291 error = dattr->store(container_of(kobj, struct class, kobj), 292 dattr, buf, count); 293 return (error); 294 } 295 296 static void 297 linux_class_release(struct kobject *kobj) 298 { 299 struct class *class; 300 301 class = container_of(kobj, struct class, kobj); 302 if (class->class_release) 303 class->class_release(class); 304 } 305 306 static const struct sysfs_ops linux_class_sysfs = { 307 .show = linux_class_show, 308 .store = linux_class_store, 309 }; 310 311 const struct kobj_type linux_class_ktype = { 312 .release = linux_class_release, 313 .sysfs_ops = &linux_class_sysfs 314 }; 315 316 static void 317 linux_dev_release(struct kobject *kobj) 318 { 319 struct device *dev; 320 321 dev = container_of(kobj, struct device, kobj); 322 /* This is the precedence defined by linux. 
*/ 323 if (dev->release) 324 dev->release(dev); 325 else if (dev->class && dev->class->dev_release) 326 dev->class->dev_release(dev); 327 } 328 329 static ssize_t 330 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 331 { 332 struct device_attribute *dattr; 333 ssize_t error; 334 335 dattr = container_of(attr, struct device_attribute, attr); 336 error = -EIO; 337 if (dattr->show) 338 error = dattr->show(container_of(kobj, struct device, kobj), 339 dattr, buf); 340 return (error); 341 } 342 343 static ssize_t 344 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 345 size_t count) 346 { 347 struct device_attribute *dattr; 348 ssize_t error; 349 350 dattr = container_of(attr, struct device_attribute, attr); 351 error = -EIO; 352 if (dattr->store) 353 error = dattr->store(container_of(kobj, struct device, kobj), 354 dattr, buf, count); 355 return (error); 356 } 357 358 static const struct sysfs_ops linux_dev_sysfs = { 359 .show = linux_dev_show, 360 .store = linux_dev_store, 361 }; 362 363 const struct kobj_type linux_dev_ktype = { 364 .release = linux_dev_release, 365 .sysfs_ops = &linux_dev_sysfs 366 }; 367 368 struct device * 369 device_create(struct class *class, struct device *parent, dev_t devt, 370 void *drvdata, const char *fmt, ...) 371 { 372 struct device *dev; 373 va_list args; 374 375 dev = kzalloc(sizeof(*dev), M_WAITOK); 376 dev->parent = parent; 377 dev->class = class; 378 dev->devt = devt; 379 dev->driver_data = drvdata; 380 dev->release = linux_device_release; 381 va_start(args, fmt); 382 kobject_set_name_vargs(&dev->kobj, fmt, args); 383 va_end(args); 384 device_register(dev); 385 386 return (dev); 387 } 388 389 int 390 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 391 struct kobject *parent, const char *fmt, ...) 
392 { 393 va_list args; 394 int error; 395 396 kobject_init(kobj, ktype); 397 kobj->ktype = ktype; 398 kobj->parent = parent; 399 kobj->name = NULL; 400 401 va_start(args, fmt); 402 error = kobject_set_name_vargs(kobj, fmt, args); 403 va_end(args); 404 if (error) 405 return (error); 406 return kobject_add_complete(kobj, parent); 407 } 408 409 static void 410 linux_kq_lock(void *arg) 411 { 412 spinlock_t *s = arg; 413 414 spin_lock(s); 415 } 416 static void 417 linux_kq_unlock(void *arg) 418 { 419 spinlock_t *s = arg; 420 421 spin_unlock(s); 422 } 423 424 static void 425 linux_kq_assert_lock(void *arg, int what) 426 { 427 #ifdef INVARIANTS 428 spinlock_t *s = arg; 429 430 if (what == LA_LOCKED) 431 mtx_assert(&s->m, MA_OWNED); 432 else 433 mtx_assert(&s->m, MA_NOTOWNED); 434 #endif 435 } 436 437 static void 438 linux_file_kqfilter_poll(struct linux_file *, int); 439 440 struct linux_file * 441 linux_file_alloc(void) 442 { 443 struct linux_file *filp; 444 445 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 446 447 /* set initial refcount */ 448 filp->f_count = 1; 449 450 /* setup fields needed by kqueue support */ 451 spin_lock_init(&filp->f_kqlock); 452 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 453 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 454 455 return (filp); 456 } 457 458 void 459 linux_file_free(struct linux_file *filp) 460 { 461 if (filp->_file == NULL) { 462 if (filp->f_shmem != NULL) 463 vm_object_deallocate(filp->f_shmem); 464 kfree(filp); 465 } else { 466 /* 467 * The close method of the character device or file 468 * will free the linux_file structure: 469 */ 470 _fdrop(filp->_file, curthread); 471 } 472 } 473 474 static int 475 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 476 vm_page_t *mres) 477 { 478 struct vm_area_struct *vmap; 479 480 vmap = linux_cdev_handle_find(vm_obj->handle); 481 482 MPASS(vmap != NULL); 483 MPASS(vmap->vm_private_data == vm_obj->handle); 484 485 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 486 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 487 vm_page_t page; 488 489 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 490 /* 491 * If the passed in result page is a fake 492 * page, update it with the new physical 493 * address. 494 */ 495 page = *mres; 496 vm_page_updatefake(page, paddr, vm_obj->memattr); 497 } else { 498 /* 499 * Replace the passed in "mres" page with our 500 * own fake page and free up the all of the 501 * original pages. 502 */ 503 VM_OBJECT_WUNLOCK(vm_obj); 504 page = vm_page_getfake(paddr, vm_obj->memattr); 505 VM_OBJECT_WLOCK(vm_obj); 506 507 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 508 *mres = page; 509 } 510 vm_page_valid(page); 511 return (VM_PAGER_OK); 512 } 513 return (VM_PAGER_FAIL); 514 } 515 516 static int 517 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 518 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 519 { 520 struct vm_area_struct *vmap; 521 int err; 522 523 /* get VM area structure */ 524 vmap = linux_cdev_handle_find(vm_obj->handle); 525 MPASS(vmap != NULL); 526 MPASS(vmap->vm_private_data == vm_obj->handle); 527 528 VM_OBJECT_WUNLOCK(vm_obj); 529 530 linux_set_current(curthread); 531 532 down_write(&vmap->vm_mm->mmap_sem); 533 if (unlikely(vmap->vm_ops == NULL)) { 534 err = VM_FAULT_SIGBUS; 535 } else { 536 struct vm_fault vmf; 537 538 /* fill out VM fault structure */ 539 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 540 vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; 541 vmf.pgoff = 0; 542 vmf.page = NULL; 543 vmf.vma = vmap; 544 545 vmap->vm_pfn_count = 0; 546 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 547 vmap->vm_obj = vm_obj; 548 549 err = vmap->vm_ops->fault(vmap, &vmf); 550 551 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 552 kern_yield(PRI_USER); 553 err = vmap->vm_ops->fault(vmap, &vmf); 554 } 555 } 556 557 /* translate return code */ 558 switch (err) { 559 case VM_FAULT_OOM: 560 err = VM_PAGER_AGAIN; 561 break; 562 case VM_FAULT_SIGBUS: 563 err = VM_PAGER_BAD; 564 break; 565 case VM_FAULT_NOPAGE: 566 /* 567 * By contract the fault handler will return having 568 * busied all the pages itself. If pidx is already 569 * found in the object, it will simply xbusy the first 570 * page and return with vm_pfn_count set to 1. 571 */ 572 *first = vmap->vm_pfn_first; 573 *last = *first + vmap->vm_pfn_count - 1; 574 err = VM_PAGER_OK; 575 break; 576 default: 577 err = VM_PAGER_ERROR; 578 break; 579 } 580 up_write(&vmap->vm_mm->mmap_sem); 581 VM_OBJECT_WLOCK(vm_obj); 582 return (err); 583 } 584 585 static struct rwlock linux_vma_lock; 586 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 587 TAILQ_HEAD_INITIALIZER(linux_vma_head); 588 589 static void 590 linux_cdev_handle_free(struct vm_area_struct *vmap) 591 { 592 /* Drop reference on vm_file */ 593 if (vmap->vm_file != NULL) 594 fput(vmap->vm_file); 595 596 /* Drop reference on mm_struct */ 597 mmput(vmap->vm_mm); 598 599 kfree(vmap); 600 } 601 602 static void 603 linux_cdev_handle_remove(struct vm_area_struct *vmap) 604 { 605 rw_wlock(&linux_vma_lock); 606 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 607 rw_wunlock(&linux_vma_lock); 608 } 609 610 static struct vm_area_struct * 611 linux_cdev_handle_find(void *handle) 612 { 613 struct vm_area_struct *vmap; 614 615 rw_rlock(&linux_vma_lock); 616 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 617 if (vmap->vm_private_data == handle) 618 break; 619 } 620 rw_runlock(&linux_vma_lock); 621 return (vmap); 622 } 623 624 static int 625 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 626 vm_ooffset_t foff, struct ucred *cred, u_short *color) 627 { 628 629 MPASS(linux_cdev_handle_find(handle) != NULL); 630 *color = 0; 631 return (0); 632 } 633 634 static void 635 linux_cdev_pager_dtor(void *handle) 636 { 637 const struct vm_operations_struct *vm_ops; 638 struct vm_area_struct *vmap; 639 640 vmap = linux_cdev_handle_find(handle); 641 MPASS(vmap != NULL); 642 643 /* 644 * Remove handle before calling close operation to prevent 645 * other threads from reusing the handle pointer. 
646 */ 647 linux_cdev_handle_remove(vmap); 648 649 down_write(&vmap->vm_mm->mmap_sem); 650 vm_ops = vmap->vm_ops; 651 if (likely(vm_ops != NULL)) 652 vm_ops->close(vmap); 653 up_write(&vmap->vm_mm->mmap_sem); 654 655 linux_cdev_handle_free(vmap); 656 } 657 658 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 659 { 660 /* OBJT_MGTDEVICE */ 661 .cdev_pg_populate = linux_cdev_pager_populate, 662 .cdev_pg_ctor = linux_cdev_pager_ctor, 663 .cdev_pg_dtor = linux_cdev_pager_dtor 664 }, 665 { 666 /* OBJT_DEVICE */ 667 .cdev_pg_fault = linux_cdev_pager_fault, 668 .cdev_pg_ctor = linux_cdev_pager_ctor, 669 .cdev_pg_dtor = linux_cdev_pager_dtor 670 }, 671 }; 672 673 int 674 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 675 unsigned long size) 676 { 677 vm_object_t obj; 678 vm_page_t m; 679 680 obj = vma->vm_obj; 681 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 682 return (-ENOTSUP); 683 VM_OBJECT_RLOCK(obj); 684 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 685 m != NULL && m->pindex < OFF_TO_IDX(address + size); 686 m = TAILQ_NEXT(m, listq)) 687 pmap_remove_all(m); 688 VM_OBJECT_RUNLOCK(obj); 689 return (0); 690 } 691 692 static struct file_operations dummy_ldev_ops = { 693 /* XXXKIB */ 694 }; 695 696 static struct linux_cdev dummy_ldev = { 697 .ops = &dummy_ldev_ops, 698 }; 699 700 #define LDEV_SI_DTR 0x0001 701 #define LDEV_SI_REF 0x0002 702 703 static void 704 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 705 struct linux_cdev **dev) 706 { 707 struct linux_cdev *ldev; 708 u_int siref; 709 710 ldev = filp->f_cdev; 711 *fop = filp->f_op; 712 if (ldev != NULL) { 713 for (siref = ldev->siref;;) { 714 if ((siref & LDEV_SI_DTR) != 0) { 715 ldev = &dummy_ldev; 716 siref = ldev->siref; 717 *fop = ldev->ops; 718 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 719 } else if (atomic_fcmpset_int(&ldev->siref, &siref, 720 siref + LDEV_SI_REF)) { 721 break; 722 } 723 } 724 } 725 *dev = ldev; 726 } 727 728 static void 729 linux_drop_fop(struct linux_cdev *ldev) 730 { 731 732 if (ldev == NULL) 733 return; 734 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 735 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 736 } 737 738 #define OPW(fp,td,code) ({ \ 739 struct file *__fpop; \ 740 __typeof(code) __retval; \ 741 \ 742 __fpop = (td)->td_fpop; \ 743 (td)->td_fpop = (fp); \ 744 __retval = (code); \ 745 (td)->td_fpop = __fpop; \ 746 __retval; \ 747 }) 748 749 static int 750 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 751 struct file *file) 752 { 753 struct linux_cdev *ldev; 754 struct linux_file *filp; 755 const struct file_operations *fop; 756 int error; 757 758 ldev = dev->si_drv1; 759 760 filp = linux_file_alloc(); 761 filp->f_dentry = &filp->f_dentry_store; 762 filp->f_op = ldev->ops; 763 filp->f_mode = file->f_flag; 764 filp->f_flags = file->f_flag; 765 filp->f_vnode = file->f_vnode; 766 filp->_file = file; 767 refcount_acquire(&ldev->refs); 768 filp->f_cdev = ldev; 769 770 linux_set_current(td); 771 linux_get_fop(filp, &fop, &ldev); 772 773 if (fop->open != NULL) { 774 error = -fop->open(file->f_vnode, filp); 775 if (error != 0) { 776 linux_drop_fop(ldev); 777 linux_cdev_deref(filp->f_cdev); 778 kfree(filp); 779 return (error); 780 } 781 } 782 783 /* hold on to the vnode - used for fstat() */ 784 vhold(filp->f_vnode); 785 786 /* release the file from devfs */ 787 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 788 linux_drop_fop(ldev); 789 return (ENXIO); 790 } 791 792 #define LINUX_IOCTL_MIN_PTR 0x10000UL 793 #define 
LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 794 795 static inline int 796 linux_remap_address(void **uaddr, size_t len) 797 { 798 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 799 800 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 801 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 802 struct task_struct *pts = current; 803 if (pts == NULL) { 804 *uaddr = NULL; 805 return (1); 806 } 807 808 /* compute data offset */ 809 uaddr_val -= LINUX_IOCTL_MIN_PTR; 810 811 /* check that length is within bounds */ 812 if ((len > IOCPARM_MAX) || 813 (uaddr_val + len) > pts->bsd_ioctl_len) { 814 *uaddr = NULL; 815 return (1); 816 } 817 818 /* re-add kernel buffer address */ 819 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 820 821 /* update address location */ 822 *uaddr = (void *)uaddr_val; 823 return (1); 824 } 825 return (0); 826 } 827 828 int 829 linux_copyin(const void *uaddr, void *kaddr, size_t len) 830 { 831 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 832 if (uaddr == NULL) 833 return (-EFAULT); 834 memcpy(kaddr, uaddr, len); 835 return (0); 836 } 837 return (-copyin(uaddr, kaddr, len)); 838 } 839 840 int 841 linux_copyout(const void *kaddr, void *uaddr, size_t len) 842 { 843 if (linux_remap_address(&uaddr, len)) { 844 if (uaddr == NULL) 845 return (-EFAULT); 846 memcpy(uaddr, kaddr, len); 847 return (0); 848 } 849 return (-copyout(kaddr, uaddr, len)); 850 } 851 852 size_t 853 linux_clear_user(void *_uaddr, size_t _len) 854 { 855 uint8_t *uaddr = _uaddr; 856 size_t len = _len; 857 858 /* make sure uaddr is aligned before going into the fast loop */ 859 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 860 if (subyte(uaddr, 0)) 861 return (_len); 862 uaddr++; 863 len--; 864 } 865 866 /* zero 8 bytes at a time */ 867 while (len > 7) { 868 #ifdef __LP64__ 869 if (suword64(uaddr, 0)) 870 return (_len); 871 #else 872 if (suword32(uaddr, 0)) 873 return (_len); 874 if (suword32(uaddr + 4, 0)) 875 return (_len); 876 #endif 877 uaddr += 8; 878 len -= 8; 879 } 880 881 /* zero fill end, if any */ 882 while (len > 0) { 883 if (subyte(uaddr, 0)) 884 return (_len); 885 uaddr++; 886 len--; 887 } 888 return (0); 889 } 890 891 int 892 linux_access_ok(const void *uaddr, size_t len) 893 { 894 uintptr_t saddr; 895 uintptr_t eaddr; 896 897 /* get start and end address */ 898 saddr = (uintptr_t)uaddr; 899 eaddr = (uintptr_t)uaddr + len; 900 901 /* verify addresses are valid for userspace */ 902 return ((saddr == eaddr) || 903 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 904 } 905 906 /* 907 * This function should return either EINTR or ERESTART depending on 908 * the signal type sent to this thread: 909 */ 910 static int 911 linux_get_error(struct task_struct *task, int error) 912 { 913 /* check for signal type interrupt code */ 914 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 915 error = -linux_schedule_get_interrupt_value(task); 916 if (error == 0) 917 error = EINTR; 918 } 919 return (error); 920 } 921 922 static int 923 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 924 const struct file_operations *fop, u_long cmd, caddr_t data, 925 struct thread *td) 926 { 927 struct task_struct *task = current; 928 unsigned size; 929 int error; 930 931 size = IOCPARM_LEN(cmd); 932 /* refer to logic in sys_ioctl() */ 933 if (size > 0) { 934 /* 935 * Setup hint for linux_copyin() and linux_copyout(). 936 * 937 * Background: Linux code expects a user-space address 938 * while FreeBSD supplies a kernel-space address. 
939 */ 940 task->bsd_ioctl_data = data; 941 task->bsd_ioctl_len = size; 942 data = (void *)LINUX_IOCTL_MIN_PTR; 943 } else { 944 /* fetch user-space pointer */ 945 data = *(void **)data; 946 } 947 #if defined(__amd64__) 948 if (td->td_proc->p_elf_machine == EM_386) { 949 /* try the compat IOCTL handler first */ 950 if (fop->compat_ioctl != NULL) { 951 error = -OPW(fp, td, fop->compat_ioctl(filp, 952 cmd, (u_long)data)); 953 } else { 954 error = ENOTTY; 955 } 956 957 /* fallback to the regular IOCTL handler, if any */ 958 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 959 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 960 cmd, (u_long)data)); 961 } 962 } else 963 #endif 964 { 965 if (fop->unlocked_ioctl != NULL) { 966 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 967 cmd, (u_long)data)); 968 } else { 969 error = ENOTTY; 970 } 971 } 972 if (size > 0) { 973 task->bsd_ioctl_data = NULL; 974 task->bsd_ioctl_len = 0; 975 } 976 977 if (error == EWOULDBLOCK) { 978 /* update kqfilter status, if any */ 979 linux_file_kqfilter_poll(filp, 980 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 981 } else { 982 error = linux_get_error(task, error); 983 } 984 return (error); 985 } 986 987 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 988 989 /* 990 * This function atomically updates the poll wakeup state and returns 991 * the previous state at the time of update. 992 */ 993 static uint8_t 994 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 995 { 996 int c, old; 997 998 c = v->counter; 999 1000 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1001 c = old; 1002 1003 return (c); 1004 } 1005 1006 static int 1007 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1008 { 1009 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1010 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1011 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1012 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1013 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1014 }; 1015 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1016 1017 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1018 case LINUX_FWQ_STATE_QUEUED: 1019 linux_poll_wakeup(filp); 1020 return (1); 1021 default: 1022 return (0); 1023 } 1024 } 1025 1026 void 1027 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1028 { 1029 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1030 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1031 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1032 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1033 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1034 }; 1035 1036 /* check if we are called inside the select system call */ 1037 if (p == LINUX_POLL_TABLE_NORMAL) 1038 selrecord(curthread, &filp->f_selinfo); 1039 1040 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1041 case LINUX_FWQ_STATE_INIT: 1042 /* NOTE: file handles can only belong to one wait-queue */ 1043 filp->f_wait_queue.wqh = wqh; 1044 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1045 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1046 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1047 break; 1048 default: 1049 break; 1050 } 1051 } 1052 1053 static void 1054 linux_poll_wait_dequeue(struct linux_file *filp) 1055 { 1056 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1057 [LINUX_FWQ_STATE_INIT] = 
LINUX_FWQ_STATE_INIT, /* NOP */ 1058 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1059 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1060 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1061 }; 1062 1063 seldrain(&filp->f_selinfo); 1064 1065 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1066 case LINUX_FWQ_STATE_NOT_READY: 1067 case LINUX_FWQ_STATE_QUEUED: 1068 case LINUX_FWQ_STATE_READY: 1069 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1070 break; 1071 default: 1072 break; 1073 } 1074 } 1075 1076 void 1077 linux_poll_wakeup(struct linux_file *filp) 1078 { 1079 /* this function should be NULL-safe */ 1080 if (filp == NULL) 1081 return; 1082 1083 selwakeup(&filp->f_selinfo); 1084 1085 spin_lock(&filp->f_kqlock); 1086 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1087 LINUX_KQ_FLAG_NEED_WRITE; 1088 1089 /* make sure the "knote" gets woken up */ 1090 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1091 spin_unlock(&filp->f_kqlock); 1092 } 1093 1094 static void 1095 linux_file_kqfilter_detach(struct knote *kn) 1096 { 1097 struct linux_file *filp = kn->kn_hook; 1098 1099 spin_lock(&filp->f_kqlock); 1100 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1101 spin_unlock(&filp->f_kqlock); 1102 } 1103 1104 static int 1105 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1106 { 1107 struct linux_file *filp = kn->kn_hook; 1108 1109 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1110 1111 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1112 } 1113 1114 static int 1115 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1116 { 1117 struct linux_file *filp = kn->kn_hook; 1118 1119 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1120 1121 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1122 } 1123 1124 static struct filterops linux_dev_kqfiltops_read = { 1125 .f_isfd = 1, 1126 .f_detach = linux_file_kqfilter_detach, 1127 .f_event = linux_file_kqfilter_read_event, 1128 }; 1129 1130 static struct filterops linux_dev_kqfiltops_write = { 1131 .f_isfd = 1, 1132 .f_detach = linux_file_kqfilter_detach, 1133 .f_event = linux_file_kqfilter_write_event, 1134 }; 1135 1136 static void 1137 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1138 { 1139 struct thread *td; 1140 const struct file_operations *fop; 1141 struct linux_cdev *ldev; 1142 int temp; 1143 1144 if ((filp->f_kqflags & kqflags) == 0) 1145 return; 1146 1147 td = curthread; 1148 1149 linux_get_fop(filp, &fop, &ldev); 1150 /* get the latest polling state */ 1151 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1152 linux_drop_fop(ldev); 1153 1154 spin_lock(&filp->f_kqlock); 1155 /* clear kqflags */ 1156 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1157 LINUX_KQ_FLAG_NEED_WRITE); 1158 /* update kqflags */ 1159 if ((temp & (POLLIN | POLLOUT)) != 0) { 1160 if ((temp & POLLIN) != 0) 1161 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1162 if ((temp & POLLOUT) != 0) 1163 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1164 1165 /* make sure the "knote" gets woken up */ 1166 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1167 } 1168 spin_unlock(&filp->f_kqlock); 1169 } 1170 1171 static int 1172 linux_file_kqfilter(struct file *file, struct knote *kn) 1173 { 1174 struct linux_file *filp; 1175 struct thread *td; 1176 int error; 1177 1178 td = curthread; 1179 filp = (struct linux_file *)file->f_data; 1180 filp->f_flags = file->f_flag; 1181 if (filp->f_op->poll == NULL) 1182 return (EINVAL); 1183 1184 spin_lock(&filp->f_kqlock); 1185 switch (kn->kn_filter) { 1186 case 
EVFILT_READ: 1187 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1188 kn->kn_fop = &linux_dev_kqfiltops_read; 1189 kn->kn_hook = filp; 1190 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1191 error = 0; 1192 break; 1193 case EVFILT_WRITE: 1194 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1195 kn->kn_fop = &linux_dev_kqfiltops_write; 1196 kn->kn_hook = filp; 1197 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1198 error = 0; 1199 break; 1200 default: 1201 error = EINVAL; 1202 break; 1203 } 1204 spin_unlock(&filp->f_kqlock); 1205 1206 if (error == 0) { 1207 linux_set_current(td); 1208 1209 /* update kqfilter status, if any */ 1210 linux_file_kqfilter_poll(filp, 1211 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1212 } 1213 return (error); 1214 } 1215 1216 static int 1217 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1218 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1219 int nprot, struct thread *td) 1220 { 1221 struct task_struct *task; 1222 struct vm_area_struct *vmap; 1223 struct mm_struct *mm; 1224 struct linux_file *filp; 1225 vm_memattr_t attr; 1226 int error; 1227 1228 filp = (struct linux_file *)fp->f_data; 1229 filp->f_flags = fp->f_flag; 1230 1231 if (fop->mmap == NULL) 1232 return (EOPNOTSUPP); 1233 1234 linux_set_current(td); 1235 1236 /* 1237 * The same VM object might be shared by multiple processes 1238 * and the mm_struct is usually freed when a process exits. 1239 * 1240 * The atomic reference below makes sure the mm_struct is 1241 * available as long as the vmap is in the linux_vma_head. 1242 */ 1243 task = current; 1244 mm = task->mm; 1245 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1246 return (EINVAL); 1247 1248 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1249 vmap->vm_start = 0; 1250 vmap->vm_end = size; 1251 vmap->vm_pgoff = *offset / PAGE_SIZE; 1252 vmap->vm_pfn = 0; 1253 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1254 vmap->vm_ops = NULL; 1255 vmap->vm_file = get_file(filp); 1256 vmap->vm_mm = mm; 1257 1258 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1259 error = linux_get_error(task, EINTR); 1260 } else { 1261 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1262 error = linux_get_error(task, error); 1263 up_write(&vmap->vm_mm->mmap_sem); 1264 } 1265 1266 if (error != 0) { 1267 linux_cdev_handle_free(vmap); 1268 return (error); 1269 } 1270 1271 attr = pgprot2cachemode(vmap->vm_page_prot); 1272 1273 if (vmap->vm_ops != NULL) { 1274 struct vm_area_struct *ptr; 1275 void *vm_private_data; 1276 bool vm_no_fault; 1277 1278 if (vmap->vm_ops->open == NULL || 1279 vmap->vm_ops->close == NULL || 1280 vmap->vm_private_data == NULL) { 1281 /* free allocated VM area struct */ 1282 linux_cdev_handle_free(vmap); 1283 return (EINVAL); 1284 } 1285 1286 vm_private_data = vmap->vm_private_data; 1287 1288 rw_wlock(&linux_vma_lock); 1289 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1290 if (ptr->vm_private_data == vm_private_data) 1291 break; 1292 } 1293 /* check if there is an existing VM area struct */ 1294 if (ptr != NULL) { 1295 /* check if the VM area structure is invalid */ 1296 if (ptr->vm_ops == NULL || 1297 ptr->vm_ops->open == NULL || 1298 ptr->vm_ops->close == NULL) { 1299 error = ESTALE; 1300 vm_no_fault = 1; 1301 } else { 1302 error = EEXIST; 1303 vm_no_fault = (ptr->vm_ops->fault == NULL); 1304 } 1305 } else { 1306 /* insert VM area structure into list */ 1307 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1308 error = 0; 1309 vm_no_fault = (vmap->vm_ops->fault == NULL); 1310 } 1311 
rw_wunlock(&linux_vma_lock); 1312 1313 if (error != 0) { 1314 /* free allocated VM area struct */ 1315 linux_cdev_handle_free(vmap); 1316 /* check for stale VM area struct */ 1317 if (error != EEXIST) 1318 return (error); 1319 } 1320 1321 /* check if there is no fault handler */ 1322 if (vm_no_fault) { 1323 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1324 &linux_cdev_pager_ops[1], size, nprot, *offset, 1325 td->td_ucred); 1326 } else { 1327 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1328 &linux_cdev_pager_ops[0], size, nprot, *offset, 1329 td->td_ucred); 1330 } 1331 1332 /* check if allocating the VM object failed */ 1333 if (*object == NULL) { 1334 if (error == 0) { 1335 /* remove VM area struct from list */ 1336 linux_cdev_handle_remove(vmap); 1337 /* free allocated VM area struct */ 1338 linux_cdev_handle_free(vmap); 1339 } 1340 return (EINVAL); 1341 } 1342 } else { 1343 struct sglist *sg; 1344 1345 sg = sglist_alloc(1, M_WAITOK); 1346 sglist_append_phys(sg, 1347 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1348 1349 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1350 nprot, 0, td->td_ucred); 1351 1352 linux_cdev_handle_free(vmap); 1353 1354 if (*object == NULL) { 1355 sglist_free(sg); 1356 return (EINVAL); 1357 } 1358 } 1359 1360 if (attr != VM_MEMATTR_DEFAULT) { 1361 VM_OBJECT_WLOCK(*object); 1362 vm_object_set_memattr(*object, attr); 1363 VM_OBJECT_WUNLOCK(*object); 1364 } 1365 *offset = 0; 1366 return (0); 1367 } 1368 1369 struct cdevsw linuxcdevsw = { 1370 .d_version = D_VERSION, 1371 .d_fdopen = linux_dev_fdopen, 1372 .d_name = "lkpidev", 1373 }; 1374 1375 static int 1376 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1377 int flags, struct thread *td) 1378 { 1379 struct linux_file *filp; 1380 const struct file_operations *fop; 1381 struct linux_cdev *ldev; 1382 ssize_t bytes; 1383 int error; 1384 1385 error = 0; 1386 filp = (struct linux_file *)file->f_data; 1387 filp->f_flags = file->f_flag; 1388 /* XXX no support for I/O vectors currently */ 1389 if (uio->uio_iovcnt != 1) 1390 return (EOPNOTSUPP); 1391 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1392 return (EINVAL); 1393 linux_set_current(td); 1394 linux_get_fop(filp, &fop, &ldev); 1395 if (fop->read != NULL) { 1396 bytes = OPW(file, td, fop->read(filp, 1397 uio->uio_iov->iov_base, 1398 uio->uio_iov->iov_len, &uio->uio_offset)); 1399 if (bytes >= 0) { 1400 uio->uio_iov->iov_base = 1401 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1402 uio->uio_iov->iov_len -= bytes; 1403 uio->uio_resid -= bytes; 1404 } else { 1405 error = linux_get_error(current, -bytes); 1406 } 1407 } else 1408 error = ENXIO; 1409 1410 /* update kqfilter status, if any */ 1411 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1412 linux_drop_fop(ldev); 1413 1414 return (error); 1415 } 1416 1417 static int 1418 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1419 int flags, struct thread *td) 1420 { 1421 struct linux_file *filp; 1422 const struct file_operations *fop; 1423 struct linux_cdev *ldev; 1424 ssize_t bytes; 1425 int error; 1426 1427 filp = (struct linux_file *)file->f_data; 1428 filp->f_flags = file->f_flag; 1429 /* XXX no support for I/O vectors currently */ 1430 if (uio->uio_iovcnt != 1) 1431 return (EOPNOTSUPP); 1432 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1433 return (EINVAL); 1434 linux_set_current(td); 1435 linux_get_fop(filp, &fop, &ldev); 1436 if (fop->write != NULL) { 1437 bytes = OPW(file, td, fop->write(filp, 1438 
uio->uio_iov->iov_base, 1439 uio->uio_iov->iov_len, &uio->uio_offset)); 1440 if (bytes >= 0) { 1441 uio->uio_iov->iov_base = 1442 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1443 uio->uio_iov->iov_len -= bytes; 1444 uio->uio_resid -= bytes; 1445 error = 0; 1446 } else { 1447 error = linux_get_error(current, -bytes); 1448 } 1449 } else 1450 error = ENXIO; 1451 1452 /* update kqfilter status, if any */ 1453 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1454 1455 linux_drop_fop(ldev); 1456 1457 return (error); 1458 } 1459 1460 static int 1461 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1462 struct thread *td) 1463 { 1464 struct linux_file *filp; 1465 const struct file_operations *fop; 1466 struct linux_cdev *ldev; 1467 int revents; 1468 1469 filp = (struct linux_file *)file->f_data; 1470 filp->f_flags = file->f_flag; 1471 linux_set_current(td); 1472 linux_get_fop(filp, &fop, &ldev); 1473 if (fop->poll != NULL) { 1474 revents = OPW(file, td, fop->poll(filp, 1475 LINUX_POLL_TABLE_NORMAL)) & events; 1476 } else { 1477 revents = 0; 1478 } 1479 linux_drop_fop(ldev); 1480 return (revents); 1481 } 1482 1483 static int 1484 linux_file_close(struct file *file, struct thread *td) 1485 { 1486 struct linux_file *filp; 1487 int (*release)(struct inode *, struct linux_file *); 1488 const struct file_operations *fop; 1489 struct linux_cdev *ldev; 1490 int error; 1491 1492 filp = (struct linux_file *)file->f_data; 1493 1494 KASSERT(file_count(filp) == 0, 1495 ("File refcount(%d) is not zero", file_count(filp))); 1496 1497 if (td == NULL) 1498 td = curthread; 1499 1500 error = 0; 1501 filp->f_flags = file->f_flag; 1502 linux_set_current(td); 1503 linux_poll_wait_dequeue(filp); 1504 linux_get_fop(filp, &fop, &ldev); 1505 /* 1506 * Always use the real release function, if any, to avoid 1507 * leaking device resources: 1508 */ 1509 release = filp->f_op->release; 1510 if (release != NULL) 1511 error = -OPW(file, td, release(filp->f_vnode, filp)); 1512 funsetown(&filp->f_sigio); 1513 if (filp->f_vnode != NULL) 1514 vdrop(filp->f_vnode); 1515 linux_drop_fop(ldev); 1516 if (filp->f_cdev != NULL) 1517 linux_cdev_deref(filp->f_cdev); 1518 kfree(filp); 1519 1520 return (error); 1521 } 1522 1523 static int 1524 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1525 struct thread *td) 1526 { 1527 struct linux_file *filp; 1528 const struct file_operations *fop; 1529 struct linux_cdev *ldev; 1530 struct fiodgname_arg *fgn; 1531 const char *p; 1532 int error, i; 1533 1534 error = 0; 1535 filp = (struct linux_file *)fp->f_data; 1536 filp->f_flags = fp->f_flag; 1537 linux_get_fop(filp, &fop, &ldev); 1538 1539 linux_set_current(td); 1540 switch (cmd) { 1541 case FIONBIO: 1542 break; 1543 case FIOASYNC: 1544 if (fop->fasync == NULL) 1545 break; 1546 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1547 break; 1548 case FIOSETOWN: 1549 error = fsetown(*(int *)data, &filp->f_sigio); 1550 if (error == 0) { 1551 if (fop->fasync == NULL) 1552 break; 1553 error = -OPW(fp, td, fop->fasync(0, filp, 1554 fp->f_flag & FASYNC)); 1555 } 1556 break; 1557 case FIOGETOWN: 1558 *(int *)data = fgetown(&filp->f_sigio); 1559 break; 1560 case FIODGNAME: 1561 #ifdef COMPAT_FREEBSD32 1562 case FIODGNAME_32: 1563 #endif 1564 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1565 error = ENXIO; 1566 break; 1567 } 1568 fgn = data; 1569 p = devtoname(filp->f_cdev->cdev); 1570 i = strlen(p) + 1; 1571 if (i > fgn->len) { 1572 error = EINVAL; 1573 break; 1574 } 
1575 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1576 break; 1577 default: 1578 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1579 break; 1580 } 1581 linux_drop_fop(ldev); 1582 return (error); 1583 } 1584 1585 static int 1586 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1587 vm_prot_t *maxprotp, int *flagsp, struct file *fp, 1588 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1589 { 1590 /* 1591 * Character devices do not provide private mappings 1592 * of any kind: 1593 */ 1594 if ((*maxprotp & VM_PROT_WRITE) == 0 && 1595 (prot & VM_PROT_WRITE) != 0) 1596 return (EACCES); 1597 if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) 1598 return (EINVAL); 1599 1600 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1601 (int)prot, td)); 1602 } 1603 1604 static int 1605 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1606 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1607 struct thread *td) 1608 { 1609 struct linux_file *filp; 1610 const struct file_operations *fop; 1611 struct linux_cdev *ldev; 1612 struct mount *mp; 1613 struct vnode *vp; 1614 vm_object_t object; 1615 vm_prot_t maxprot; 1616 int error; 1617 1618 filp = (struct linux_file *)fp->f_data; 1619 1620 vp = filp->f_vnode; 1621 if (vp == NULL) 1622 return (EOPNOTSUPP); 1623 1624 /* 1625 * Ensure that file and memory protections are 1626 * compatible. 1627 */ 1628 mp = vp->v_mount; 1629 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1630 maxprot = VM_PROT_NONE; 1631 if ((prot & VM_PROT_EXECUTE) != 0) 1632 return (EACCES); 1633 } else 1634 maxprot = VM_PROT_EXECUTE; 1635 if ((fp->f_flag & FREAD) != 0) 1636 maxprot |= VM_PROT_READ; 1637 else if ((prot & VM_PROT_READ) != 0) 1638 return (EACCES); 1639 1640 /* 1641 * If we are sharing potential changes via MAP_SHARED and we 1642 * are trying to get write permission although we opened it 1643 * without asking for it, bail out. 1644 * 1645 * Note that most character devices always share mappings. 1646 * 1647 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1648 * requests rather than doing it here. 
1649 */ 1650 if ((flags & MAP_SHARED) != 0) { 1651 if ((fp->f_flag & FWRITE) != 0) 1652 maxprot |= VM_PROT_WRITE; 1653 else if ((prot & VM_PROT_WRITE) != 0) 1654 return (EACCES); 1655 } 1656 maxprot &= cap_maxprot; 1657 1658 linux_get_fop(filp, &fop, &ldev); 1659 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, 1660 &foff, fop, &object); 1661 if (error != 0) 1662 goto out; 1663 1664 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1665 foff, FALSE, td); 1666 if (error != 0) 1667 vm_object_deallocate(object); 1668 out: 1669 linux_drop_fop(ldev); 1670 return (error); 1671 } 1672 1673 static int 1674 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1675 struct thread *td) 1676 { 1677 struct linux_file *filp; 1678 struct vnode *vp; 1679 int error; 1680 1681 filp = (struct linux_file *)fp->f_data; 1682 if (filp->f_vnode == NULL) 1683 return (EOPNOTSUPP); 1684 1685 vp = filp->f_vnode; 1686 1687 vn_lock(vp, LK_SHARED | LK_RETRY); 1688 error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td); 1689 VOP_UNLOCK(vp); 1690 1691 return (error); 1692 } 1693 1694 static int 1695 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1696 struct filedesc *fdp) 1697 { 1698 struct linux_file *filp; 1699 struct vnode *vp; 1700 int error; 1701 1702 filp = fp->f_data; 1703 vp = filp->f_vnode; 1704 if (vp == NULL) { 1705 error = 0; 1706 kif->kf_type = KF_TYPE_DEV; 1707 } else { 1708 vref(vp); 1709 FILEDESC_SUNLOCK(fdp); 1710 error = vn_fill_kinfo_vnode(vp, kif); 1711 vrele(vp); 1712 kif->kf_type = KF_TYPE_VNODE; 1713 FILEDESC_SLOCK(fdp); 1714 } 1715 return (error); 1716 } 1717 1718 unsigned int 1719 linux_iminor(struct inode *inode) 1720 { 1721 struct linux_cdev *ldev; 1722 1723 if (inode == NULL || inode->v_rdev == NULL || 1724 inode->v_rdev->si_devsw != &linuxcdevsw) 1725 return (-1U); 1726 ldev = inode->v_rdev->si_drv1; 1727 if (ldev == NULL) 1728 return (-1U); 1729 1730 return (minor(ldev->dev)); 1731 } 1732 1733 struct fileops linuxfileops = { 1734 .fo_read = linux_file_read, 1735 .fo_write = linux_file_write, 1736 .fo_truncate = invfo_truncate, 1737 .fo_kqfilter = linux_file_kqfilter, 1738 .fo_stat = linux_file_stat, 1739 .fo_fill_kinfo = linux_file_fill_kinfo, 1740 .fo_poll = linux_file_poll, 1741 .fo_close = linux_file_close, 1742 .fo_ioctl = linux_file_ioctl, 1743 .fo_mmap = linux_file_mmap, 1744 .fo_chmod = invfo_chmod, 1745 .fo_chown = invfo_chown, 1746 .fo_sendfile = invfo_sendfile, 1747 .fo_flags = DFLAG_PASSABLE, 1748 }; 1749 1750 /* 1751 * Hash of vmmap addresses. This is infrequently accessed and does not 1752 * need to be particularly large. This is done because we must store the 1753 * caller's idea of the map size to properly unmap. 
1754 */ 1755 struct vmmap { 1756 LIST_ENTRY(vmmap) vm_next; 1757 void *vm_addr; 1758 unsigned long vm_size; 1759 }; 1760 1761 struct vmmaphd { 1762 struct vmmap *lh_first; 1763 }; 1764 #define VMMAP_HASH_SIZE 64 1765 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1766 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1767 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1768 static struct mtx vmmaplock; 1769 1770 static void 1771 vmmap_add(void *addr, unsigned long size) 1772 { 1773 struct vmmap *vmmap; 1774 1775 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1776 mtx_lock(&vmmaplock); 1777 vmmap->vm_size = size; 1778 vmmap->vm_addr = addr; 1779 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1780 mtx_unlock(&vmmaplock); 1781 } 1782 1783 static struct vmmap * 1784 vmmap_remove(void *addr) 1785 { 1786 struct vmmap *vmmap; 1787 1788 mtx_lock(&vmmaplock); 1789 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1790 if (vmmap->vm_addr == addr) 1791 break; 1792 if (vmmap) 1793 LIST_REMOVE(vmmap, vm_next); 1794 mtx_unlock(&vmmaplock); 1795 1796 return (vmmap); 1797 } 1798 1799 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1800 void * 1801 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1802 { 1803 void *addr; 1804 1805 addr = pmap_mapdev_attr(phys_addr, size, attr); 1806 if (addr == NULL) 1807 return (NULL); 1808 vmmap_add(addr, size); 1809 1810 return (addr); 1811 } 1812 #endif 1813 1814 void 1815 iounmap(void *addr) 1816 { 1817 struct vmmap *vmmap; 1818 1819 vmmap = vmmap_remove(addr); 1820 if (vmmap == NULL) 1821 return; 1822 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1823 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1824 #endif 1825 kfree(vmmap); 1826 } 1827 1828 void * 1829 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1830 { 1831 vm_offset_t off; 1832 size_t size; 1833 1834 size = count * PAGE_SIZE; 1835 off = kva_alloc(size); 1836 if (off == 0) 1837 return (NULL); 1838 vmmap_add((void *)off, size); 1839 pmap_qenter(off, pages, count); 1840 1841 return ((void *)off); 1842 } 1843 1844 void 1845 vunmap(void *addr) 1846 { 1847 struct vmmap *vmmap; 1848 1849 vmmap = vmmap_remove(addr); 1850 if (vmmap == NULL) 1851 return; 1852 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1853 kva_free((vm_offset_t)addr, vmmap->vm_size); 1854 kfree(vmmap); 1855 } 1856 1857 static char * 1858 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1859 { 1860 unsigned int len; 1861 char *p; 1862 va_list aq; 1863 1864 va_copy(aq, ap); 1865 len = vsnprintf(NULL, 0, fmt, aq); 1866 va_end(aq); 1867 1868 if (dev != NULL) 1869 p = devm_kmalloc(dev, len + 1, gfp); 1870 else 1871 p = kmalloc(len + 1, gfp); 1872 if (p != NULL) 1873 vsnprintf(p, len + 1, fmt, ap); 1874 1875 return (p); 1876 } 1877 1878 char * 1879 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1880 { 1881 1882 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1883 } 1884 1885 char * 1886 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1887 { 1888 va_list ap; 1889 char *p; 1890 1891 va_start(ap, fmt); 1892 p = devm_kvasprintf(dev, gfp, fmt, ap); 1893 va_end(ap); 1894 1895 return (p); 1896 } 1897 1898 char * 1899 kasprintf(gfp_t gfp, const char *fmt, ...) 
1900 { 1901 va_list ap; 1902 char *p; 1903 1904 va_start(ap, fmt); 1905 p = kvasprintf(gfp, fmt, ap); 1906 va_end(ap); 1907 1908 return (p); 1909 } 1910 1911 static void 1912 linux_timer_callback_wrapper(void *context) 1913 { 1914 struct timer_list *timer; 1915 1916 linux_set_current(curthread); 1917 1918 timer = context; 1919 timer->function(timer->data); 1920 } 1921 1922 int 1923 mod_timer(struct timer_list *timer, int expires) 1924 { 1925 int ret; 1926 1927 timer->expires = expires; 1928 ret = callout_reset(&timer->callout, 1929 linux_timer_jiffies_until(expires), 1930 &linux_timer_callback_wrapper, timer); 1931 1932 MPASS(ret == 0 || ret == 1); 1933 1934 return (ret == 1); 1935 } 1936 1937 void 1938 add_timer(struct timer_list *timer) 1939 { 1940 1941 callout_reset(&timer->callout, 1942 linux_timer_jiffies_until(timer->expires), 1943 &linux_timer_callback_wrapper, timer); 1944 } 1945 1946 void 1947 add_timer_on(struct timer_list *timer, int cpu) 1948 { 1949 1950 callout_reset_on(&timer->callout, 1951 linux_timer_jiffies_until(timer->expires), 1952 &linux_timer_callback_wrapper, timer, cpu); 1953 } 1954 1955 int 1956 del_timer(struct timer_list *timer) 1957 { 1958 1959 if (callout_stop(&(timer)->callout) == -1) 1960 return (0); 1961 return (1); 1962 } 1963 1964 int 1965 del_timer_sync(struct timer_list *timer) 1966 { 1967 1968 if (callout_drain(&(timer)->callout) == -1) 1969 return (0); 1970 return (1); 1971 } 1972 1973 /* greatest common divisor, Euclid equation */ 1974 static uint64_t 1975 lkpi_gcd_64(uint64_t a, uint64_t b) 1976 { 1977 uint64_t an; 1978 uint64_t bn; 1979 1980 while (b != 0) { 1981 an = b; 1982 bn = a % b; 1983 a = an; 1984 b = bn; 1985 } 1986 return (a); 1987 } 1988 1989 uint64_t lkpi_nsec2hz_rem; 1990 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 1991 uint64_t lkpi_nsec2hz_max; 1992 1993 uint64_t lkpi_usec2hz_rem; 1994 uint64_t lkpi_usec2hz_div = 1000000ULL; 1995 uint64_t lkpi_usec2hz_max; 1996 1997 uint64_t lkpi_msec2hz_rem; 1998 uint64_t lkpi_msec2hz_div = 1000ULL; 1999 uint64_t lkpi_msec2hz_max; 2000 2001 static void 2002 linux_timer_init(void *arg) 2003 { 2004 uint64_t gcd; 2005 2006 /* 2007 * Compute an internal HZ value which can divide 2**32 to 2008 * avoid timer rounding problems when the tick value wraps 2009 * around 2**32: 2010 */ 2011 linux_timer_hz_mask = 1; 2012 while (linux_timer_hz_mask < (unsigned long)hz) 2013 linux_timer_hz_mask *= 2; 2014 linux_timer_hz_mask--; 2015 2016 /* compute some internal constants */ 2017 2018 lkpi_nsec2hz_rem = hz; 2019 lkpi_usec2hz_rem = hz; 2020 lkpi_msec2hz_rem = hz; 2021 2022 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2023 lkpi_nsec2hz_rem /= gcd; 2024 lkpi_nsec2hz_div /= gcd; 2025 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2026 2027 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2028 lkpi_usec2hz_rem /= gcd; 2029 lkpi_usec2hz_div /= gcd; 2030 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2031 2032 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2033 lkpi_msec2hz_rem /= gcd; 2034 lkpi_msec2hz_div /= gcd; 2035 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2036 } 2037 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2038 2039 void 2040 linux_complete_common(struct completion *c, int all) 2041 { 2042 int wakeup_swapper; 2043 2044 sleepq_lock(c); 2045 if (all) { 2046 c->done = UINT_MAX; 2047 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2048 } else { 2049 if (c->done != UINT_MAX) 2050 c->done++; 2051 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 
2052 } 2053 sleepq_release(c); 2054 if (wakeup_swapper) 2055 kick_proc0(); 2056 } 2057 2058 /* 2059 * Indefinite wait for done != 0 with or without signals. 2060 */ 2061 int 2062 linux_wait_for_common(struct completion *c, int flags) 2063 { 2064 struct task_struct *task; 2065 int error; 2066 2067 if (SCHEDULER_STOPPED()) 2068 return (0); 2069 2070 task = current; 2071 2072 if (flags != 0) 2073 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2074 else 2075 flags = SLEEPQ_SLEEP; 2076 error = 0; 2077 for (;;) { 2078 sleepq_lock(c); 2079 if (c->done) 2080 break; 2081 sleepq_add(c, NULL, "completion", flags, 0); 2082 if (flags & SLEEPQ_INTERRUPTIBLE) { 2083 DROP_GIANT(); 2084 error = -sleepq_wait_sig(c, 0); 2085 PICKUP_GIANT(); 2086 if (error != 0) { 2087 linux_schedule_save_interrupt_value(task, error); 2088 error = -ERESTARTSYS; 2089 goto intr; 2090 } 2091 } else { 2092 DROP_GIANT(); 2093 sleepq_wait(c, 0); 2094 PICKUP_GIANT(); 2095 } 2096 } 2097 if (c->done != UINT_MAX) 2098 c->done--; 2099 sleepq_release(c); 2100 2101 intr: 2102 return (error); 2103 } 2104 2105 /* 2106 * Time limited wait for done != 0 with or without signals. 2107 */ 2108 int 2109 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2110 { 2111 struct task_struct *task; 2112 int end = jiffies + timeout; 2113 int error; 2114 2115 if (SCHEDULER_STOPPED()) 2116 return (0); 2117 2118 task = current; 2119 2120 if (flags != 0) 2121 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2122 else 2123 flags = SLEEPQ_SLEEP; 2124 2125 for (;;) { 2126 sleepq_lock(c); 2127 if (c->done) 2128 break; 2129 sleepq_add(c, NULL, "completion", flags, 0); 2130 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2131 2132 DROP_GIANT(); 2133 if (flags & SLEEPQ_INTERRUPTIBLE) 2134 error = -sleepq_timedwait_sig(c, 0); 2135 else 2136 error = -sleepq_timedwait(c, 0); 2137 PICKUP_GIANT(); 2138 2139 if (error != 0) { 2140 /* check for timeout */ 2141 if (error == -EWOULDBLOCK) { 2142 error = 0; /* timeout */ 2143 } else { 2144 /* signal happened */ 2145 linux_schedule_save_interrupt_value(task, error); 2146 error = -ERESTARTSYS; 2147 } 2148 goto done; 2149 } 2150 } 2151 if (c->done != UINT_MAX) 2152 c->done--; 2153 sleepq_release(c); 2154 2155 /* return how many jiffies are left */ 2156 error = linux_timer_jiffies_until(end); 2157 done: 2158 return (error); 2159 } 2160 2161 int 2162 linux_try_wait_for_completion(struct completion *c) 2163 { 2164 int isdone; 2165 2166 sleepq_lock(c); 2167 isdone = (c->done != 0); 2168 if (c->done != 0 && c->done != UINT_MAX) 2169 c->done--; 2170 sleepq_release(c); 2171 return (isdone); 2172 } 2173 2174 int 2175 linux_completion_done(struct completion *c) 2176 { 2177 int isdone; 2178 2179 sleepq_lock(c); 2180 isdone = (c->done != 0); 2181 sleepq_release(c); 2182 return (isdone); 2183 } 2184 2185 static void 2186 linux_cdev_deref(struct linux_cdev *ldev) 2187 { 2188 2189 if (refcount_release(&ldev->refs)) 2190 kfree(ldev); 2191 } 2192 2193 static void 2194 linux_cdev_release(struct kobject *kobj) 2195 { 2196 struct linux_cdev *cdev; 2197 struct kobject *parent; 2198 2199 cdev = container_of(kobj, struct linux_cdev, kobj); 2200 parent = kobj->parent; 2201 linux_destroy_dev(cdev); 2202 linux_cdev_deref(cdev); 2203 kobject_put(parent); 2204 } 2205 2206 static void 2207 linux_cdev_static_release(struct kobject *kobj) 2208 { 2209 struct linux_cdev *cdev; 2210 struct kobject *parent; 2211 2212 cdev = container_of(kobj, struct linux_cdev, kobj); 2213 parent = kobj->parent; 2214 linux_destroy_dev(cdev); 2215 
kobject_put(parent); 2216 } 2217 2218 void 2219 linux_destroy_dev(struct linux_cdev *ldev) 2220 { 2221 2222 if (ldev->cdev == NULL) 2223 return; 2224 2225 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2226 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2227 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2228 pause("ldevdtr", hz / 4); 2229 2230 destroy_dev(ldev->cdev); 2231 ldev->cdev = NULL; 2232 } 2233 2234 const struct kobj_type linux_cdev_ktype = { 2235 .release = linux_cdev_release, 2236 }; 2237 2238 const struct kobj_type linux_cdev_static_ktype = { 2239 .release = linux_cdev_static_release, 2240 }; 2241 2242 static void 2243 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2244 { 2245 struct notifier_block *nb; 2246 2247 nb = arg; 2248 if (linkstate == LINK_STATE_UP) 2249 nb->notifier_call(nb, NETDEV_UP, ifp); 2250 else 2251 nb->notifier_call(nb, NETDEV_DOWN, ifp); 2252 } 2253 2254 static void 2255 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2256 { 2257 struct notifier_block *nb; 2258 2259 nb = arg; 2260 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 2261 } 2262 2263 static void 2264 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2265 { 2266 struct notifier_block *nb; 2267 2268 nb = arg; 2269 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 2270 } 2271 2272 static void 2273 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2274 { 2275 struct notifier_block *nb; 2276 2277 nb = arg; 2278 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 2279 } 2280 2281 static void 2282 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2283 { 2284 struct notifier_block *nb; 2285 2286 nb = arg; 2287 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 2288 } 2289 2290 int 2291 register_netdevice_notifier(struct notifier_block *nb) 2292 { 2293 2294 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2295 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2296 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2297 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2298 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2299 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2300 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2301 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2302 2303 return (0); 2304 } 2305 2306 int 2307 register_inetaddr_notifier(struct notifier_block *nb) 2308 { 2309 2310 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2311 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2312 return (0); 2313 } 2314 2315 int 2316 unregister_netdevice_notifier(struct notifier_block *nb) 2317 { 2318 2319 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2320 nb->tags[NETDEV_UP]); 2321 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2322 nb->tags[NETDEV_REGISTER]); 2323 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2324 nb->tags[NETDEV_UNREGISTER]); 2325 EVENTHANDLER_DEREGISTER(iflladdr_event, 2326 nb->tags[NETDEV_CHANGEADDR]); 2327 2328 return (0); 2329 } 2330 2331 int 2332 unregister_inetaddr_notifier(struct notifier_block *nb) 2333 { 2334 2335 EVENTHANDLER_DEREGISTER(ifaddr_event, 2336 nb->tags[NETDEV_CHANGEIFADDR]); 2337 2338 return (0); 2339 } 2340 2341 struct list_sort_thunk { 2342 int (*cmp)(void *, struct list_head *, struct list_head *); 2343 void *priv; 2344 }; 2345 2346 static inline int 2347 linux_le_cmp(void *priv, const void *d1, const void *d2) 2348 { 2349 struct list_head *le1, *le2; 2350 struct list_sort_thunk *thunk; 2351 2352 thunk = priv; 2353 le1 = *(__DECONST(struct 

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}
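
/*
 * Example (illustrative sketch only, not part of this file): list_sort()
 * flattens the list into an array, sorts it with qsort_r() and relinks
 * the nodes, so the caller only supplies a three-way comparison over the
 * embedding structure.  The structure and function names are hypothetical:
 *
 *	struct mydrv_node {
 *		struct list_head entry;
 *		int key;
 *	};
 *
 *	static int
 *	mydrv_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct mydrv_node *na = list_entry(a, struct mydrv_node, entry);
 *		struct mydrv_node *nb = list_entry(b, struct mydrv_node, entry);
 *
 *		return ((na->key > nb->key) - (na->key < nb->key));
 *	}
 *
 *	list_sort(NULL, &mydrv_list, mydrv_cmp);
 */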

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
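
/*
 * For example (illustrative only), ported Linux code routinely round-trips
 * a pointer through "unsigned long"; the assertion above guarantees such a
 * conversion does not truncate on any supported platform:
 *
 *	void *p = ...;
 *	unsigned long cookie = (unsigned long)p;
 *	void *q = (void *)cookie;	// q == p only if no bits were lost
 */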