/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}
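/*
 * Example (illustrative sketch, not part of this file): kobject names
 * accept printf-style formatting, and kobject_set_name_vargs() above
 * rewrites '/' to '!' because the name later becomes a sysctl-backed
 * sysfs node:
 *
 *	kobject_set_name(kobj, "queue/%d", 3);
 *
 * after which kobj->name reads "queue!3".
 */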
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};
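/*
 * Example (sketch, hypothetical attribute): linux_class_show() and
 * linux_class_store() dispatch sysfs I/O to the show/store callbacks of
 * a consumer's class_attribute, resolved via container_of().  A driver
 * might declare:
 *
 *	static ssize_t
 *	version_show(struct class *class, struct class_attribute *attr,
 *	    char *buf)
 *	{
 *		return (snprintf(buf, PAGE_SIZE, "1.0\n"));
 *	}
 *
 *	static struct class_attribute version_attr = {
 *		.attr = { .name = "version", .mode = 0444 },
 *		.show = version_show,
 *	};
 *
 * Reading the attribute then lands in linux_class_show() above.
 */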
static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
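/*
 * Example (sketch): device_create() above is typically paired with
 * device_destroy(); the class pointer, device numbers and unit below
 * are hypothetical:
 *
 *	struct device *dev;
 *
 *	dev = device_create(&linux_class_misc, NULL, MKDEV(10, 63),
 *	    softc, "mydev%d", unit);
 *	...
 *	device_destroy(&linux_class_misc, MKDEV(10, 63));
 *
 * Because dev->release is set to linux_device_release(), the struct
 * device is kfree()'d when its last kobject reference is dropped.
 */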
int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock,
	    linux_kq_lock_owned, linux_kq_lock_unowned);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}
static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}
static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(vmap, &vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(vmap, &vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}
static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
	{
		/* OBJT_MGTDEVICE */
		.cdev_pg_populate = linux_cdev_pager_populate,
		.cdev_pg_ctor = linux_cdev_pager_ctor,
		.cdev_pg_dtor = linux_cdev_pager_dtor
	},
	{
		/* OBJT_DEVICE */
		.cdev_pg_fault = linux_cdev_pager_fault,
		.cdev_pg_ctor = linux_cdev_pager_ctor,
		.cdev_pg_dtor = linux_cdev_pager_dtor
	},
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		for (siref = ldev->siref;;) {
			if ((siref & LDEV_SI_DTR) != 0) {
				ldev = &dummy_ldev;
				siref = ldev->siref;
				*fop = ldev->ops;
				MPASS((ldev->siref & LDEV_SI_DTR) == 0);
			} else if (atomic_fcmpset_int(&ldev->siref, &siref,
			    siref + LDEV_SI_REF)) {
				break;
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
	atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}
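/*
 * Example (from this file's own call sites): every entry into a Linux
 * file_operations method is wrapped in OPW() so that td_fpop points at
 * the FreeBSD file while driver code runs, e.g.:
 *
 *	error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, arg));
 *
 * OPW() saves td->td_fpop, installs fp, evaluates the expression, and
 * restores the previous value before yielding the expression's result.
 */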
#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}
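/*
 * Example (sketch): linux_file_ioctl_sub() below hides the fact that
 * FreeBSD has already copied the ioctl argument into a kernel buffer.
 * It stores that buffer in task->bsd_ioctl_data and passes the driver
 * the fake pointer LINUX_IOCTL_MIN_PTR, so when driver code does
 * something like (my_args is hypothetical):
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, argp, sizeof(args)) != 0)
 *		return (-EFAULT);
 *
 * the access reaches linux_copyin(), where linux_remap_address()
 * recognizes the fake pointer window and redirects the transfer to the
 * kernel buffer using a plain memcpy().
 */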
static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#if defined(__amd64__)
	if (td->td_proc->p_elf_machine == EM_386) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}
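/*
 * Example (sketch, hypothetical driver): linux_poll_wait() above is
 * reached through the LinuxKPI poll_wait() wrapper from a driver's
 * poll() method:
 *
 *	static unsigned int
 *	mydev_poll(struct linux_file *filp, poll_table *wait)
 *	{
 *		unsigned int mask = 0;
 *
 *		poll_wait(filp, &sc_waitq, wait);
 *		if (sc_data_ready)
 *			mask |= POLLIN | POLLRDNORM;
 *		return (mask);
 *	}
 *
 * The first call queues the file on the wait queue; a later wake_up()
 * on that queue runs linux_poll_wakeup_callback(), which in turn calls
 * linux_poll_wakeup() to notify select/poll and kqueue consumers.
 */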
static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}
static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}
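/*
 * Example (sketch, hypothetical driver): linux_file_mmap_single() below
 * supports two styles of driver mmap methods.  A driver that installs
 * vm_ops (with open, close and usually fault) gets a cdev pager object
 * backed by linux_cdev_pager_populate() or linux_cdev_pager_fault().
 * A driver that leaves vm_ops NULL and only records the physical page
 * frame, roughly:
 *
 *	static int
 *	mydev_mmap(struct linux_file *filp, struct vm_area_struct *vma)
 *	{
 *		vma->vm_pfn = sc_phys >> PAGE_SHIFT;
 *		return (0);
 *	}
 *
 * is mapped through a one-entry sglist object instead (the "else"
 * branch near the end of the function).
 */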
static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}
static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	if (filp->f_cdev != NULL)
		linux_cdev_deref(filp->f_cdev);
	kfree(filp);

	return (error);
}
static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct fiodgname_arg *fgn;
	const char *p;
	int error, i;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	case FIODGNAME:
#ifdef COMPAT_FREEBSD32
	case FIODGNAME_32:
#endif
		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
			error = ENXIO;
			break;
		}
		fgn = data;
		p = devtoname(filp->f_cdev->cdev);
		i = strlen(p) + 1;
		if (i > fgn->len) {
			error = EINVAL;
			break;
		}
		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, td));
}
static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_flags = DFLAG_PASSABLE,
};
/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}
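/*
 * Example (sketch): kvasprintf() above and kasprintf() below size the
 * buffer with a first vsnprintf() pass and allocate exactly what the
 * formatted string needs; the caller frees with kfree().  The name and
 * unit variables are hypothetical:
 *
 *	char *label;
 *
 *	label = kasprintf(GFP_KERNEL, "%s-%u", name, unit);
 *	if (label == NULL)
 *		return (-ENOMEM);
 *	...
 *	kfree(label);
 */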
1885 { 1886 va_list ap; 1887 char *p; 1888 1889 va_start(ap, fmt); 1890 p = kvasprintf(gfp, fmt, ap); 1891 va_end(ap); 1892 1893 return (p); 1894 } 1895 1896 static void 1897 linux_timer_callback_wrapper(void *context) 1898 { 1899 struct timer_list *timer; 1900 1901 linux_set_current(curthread); 1902 1903 timer = context; 1904 timer->function(timer->data); 1905 } 1906 1907 int 1908 mod_timer(struct timer_list *timer, int expires) 1909 { 1910 int ret; 1911 1912 timer->expires = expires; 1913 ret = callout_reset(&timer->callout, 1914 linux_timer_jiffies_until(expires), 1915 &linux_timer_callback_wrapper, timer); 1916 1917 MPASS(ret == 0 || ret == 1); 1918 1919 return (ret == 1); 1920 } 1921 1922 void 1923 add_timer(struct timer_list *timer) 1924 { 1925 1926 callout_reset(&timer->callout, 1927 linux_timer_jiffies_until(timer->expires), 1928 &linux_timer_callback_wrapper, timer); 1929 } 1930 1931 void 1932 add_timer_on(struct timer_list *timer, int cpu) 1933 { 1934 1935 callout_reset_on(&timer->callout, 1936 linux_timer_jiffies_until(timer->expires), 1937 &linux_timer_callback_wrapper, timer, cpu); 1938 } 1939 1940 int 1941 del_timer(struct timer_list *timer) 1942 { 1943 1944 if (callout_stop(&(timer)->callout) == -1) 1945 return (0); 1946 return (1); 1947 } 1948 1949 int 1950 del_timer_sync(struct timer_list *timer) 1951 { 1952 1953 if (callout_drain(&(timer)->callout) == -1) 1954 return (0); 1955 return (1); 1956 } 1957 1958 /* greatest common divisor, Euclid equation */ 1959 static uint64_t 1960 lkpi_gcd_64(uint64_t a, uint64_t b) 1961 { 1962 uint64_t an; 1963 uint64_t bn; 1964 1965 while (b != 0) { 1966 an = b; 1967 bn = a % b; 1968 a = an; 1969 b = bn; 1970 } 1971 return (a); 1972 } 1973 1974 uint64_t lkpi_nsec2hz_rem; 1975 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 1976 uint64_t lkpi_nsec2hz_max; 1977 1978 uint64_t lkpi_usec2hz_rem; 1979 uint64_t lkpi_usec2hz_div = 1000000ULL; 1980 uint64_t lkpi_usec2hz_max; 1981 1982 uint64_t lkpi_msec2hz_rem; 1983 uint64_t lkpi_msec2hz_div = 1000ULL; 1984 uint64_t lkpi_msec2hz_max; 1985 1986 static void 1987 linux_timer_init(void *arg) 1988 { 1989 uint64_t gcd; 1990 1991 /* 1992 * Compute an internal HZ value which can divide 2**32 to 1993 * avoid timer rounding problems when the tick value wraps 1994 * around 2**32: 1995 */ 1996 linux_timer_hz_mask = 1; 1997 while (linux_timer_hz_mask < (unsigned long)hz) 1998 linux_timer_hz_mask *= 2; 1999 linux_timer_hz_mask--; 2000 2001 /* compute some internal constants */ 2002 2003 lkpi_nsec2hz_rem = hz; 2004 lkpi_usec2hz_rem = hz; 2005 lkpi_msec2hz_rem = hz; 2006 2007 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2008 lkpi_nsec2hz_rem /= gcd; 2009 lkpi_nsec2hz_div /= gcd; 2010 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2011 2012 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2013 lkpi_usec2hz_rem /= gcd; 2014 lkpi_usec2hz_div /= gcd; 2015 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2016 2017 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2018 lkpi_msec2hz_rem /= gcd; 2019 lkpi_msec2hz_div /= gcd; 2020 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2021 } 2022 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2023 2024 void 2025 linux_complete_common(struct completion *c, int all) 2026 { 2027 int wakeup_swapper; 2028 2029 sleepq_lock(c); 2030 if (all) { 2031 c->done = UINT_MAX; 2032 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2033 } else { 2034 if (c->done != UINT_MAX) 2035 c->done++; 2036 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 
2037 } 2038 sleepq_release(c); 2039 if (wakeup_swapper) 2040 kick_proc0(); 2041 } 2042 2043 /* 2044 * Indefinite wait for done != 0 with or without signals. 2045 */ 2046 int 2047 linux_wait_for_common(struct completion *c, int flags) 2048 { 2049 struct task_struct *task; 2050 int error; 2051 2052 if (SCHEDULER_STOPPED()) 2053 return (0); 2054 2055 task = current; 2056 2057 if (flags != 0) 2058 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2059 else 2060 flags = SLEEPQ_SLEEP; 2061 error = 0; 2062 for (;;) { 2063 sleepq_lock(c); 2064 if (c->done) 2065 break; 2066 sleepq_add(c, NULL, "completion", flags, 0); 2067 if (flags & SLEEPQ_INTERRUPTIBLE) { 2068 DROP_GIANT(); 2069 error = -sleepq_wait_sig(c, 0); 2070 PICKUP_GIANT(); 2071 if (error != 0) { 2072 linux_schedule_save_interrupt_value(task, error); 2073 error = -ERESTARTSYS; 2074 goto intr; 2075 } 2076 } else { 2077 DROP_GIANT(); 2078 sleepq_wait(c, 0); 2079 PICKUP_GIANT(); 2080 } 2081 } 2082 if (c->done != UINT_MAX) 2083 c->done--; 2084 sleepq_release(c); 2085 2086 intr: 2087 return (error); 2088 } 2089 2090 /* 2091 * Time limited wait for done != 0 with or without signals. 2092 */ 2093 int 2094 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2095 { 2096 struct task_struct *task; 2097 int end = jiffies + timeout; 2098 int error; 2099 2100 if (SCHEDULER_STOPPED()) 2101 return (0); 2102 2103 task = current; 2104 2105 if (flags != 0) 2106 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2107 else 2108 flags = SLEEPQ_SLEEP; 2109 2110 for (;;) { 2111 sleepq_lock(c); 2112 if (c->done) 2113 break; 2114 sleepq_add(c, NULL, "completion", flags, 0); 2115 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2116 2117 DROP_GIANT(); 2118 if (flags & SLEEPQ_INTERRUPTIBLE) 2119 error = -sleepq_timedwait_sig(c, 0); 2120 else 2121 error = -sleepq_timedwait(c, 0); 2122 PICKUP_GIANT(); 2123 2124 if (error != 0) { 2125 /* check for timeout */ 2126 if (error == -EWOULDBLOCK) { 2127 error = 0; /* timeout */ 2128 } else { 2129 /* signal happened */ 2130 linux_schedule_save_interrupt_value(task, error); 2131 error = -ERESTARTSYS; 2132 } 2133 goto done; 2134 } 2135 } 2136 if (c->done != UINT_MAX) 2137 c->done--; 2138 sleepq_release(c); 2139 2140 /* return how many jiffies are left */ 2141 error = linux_timer_jiffies_until(end); 2142 done: 2143 return (error); 2144 } 2145 2146 int 2147 linux_try_wait_for_completion(struct completion *c) 2148 { 2149 int isdone; 2150 2151 sleepq_lock(c); 2152 isdone = (c->done != 0); 2153 if (c->done != 0 && c->done != UINT_MAX) 2154 c->done--; 2155 sleepq_release(c); 2156 return (isdone); 2157 } 2158 2159 int 2160 linux_completion_done(struct completion *c) 2161 { 2162 int isdone; 2163 2164 sleepq_lock(c); 2165 isdone = (c->done != 0); 2166 sleepq_release(c); 2167 return (isdone); 2168 } 2169 2170 static void 2171 linux_cdev_deref(struct linux_cdev *ldev) 2172 { 2173 2174 if (refcount_release(&ldev->refs)) 2175 kfree(ldev); 2176 } 2177 2178 static void 2179 linux_cdev_release(struct kobject *kobj) 2180 { 2181 struct linux_cdev *cdev; 2182 struct kobject *parent; 2183 2184 cdev = container_of(kobj, struct linux_cdev, kobj); 2185 parent = kobj->parent; 2186 linux_destroy_dev(cdev); 2187 linux_cdev_deref(cdev); 2188 kobject_put(parent); 2189 } 2190 2191 static void 2192 linux_cdev_static_release(struct kobject *kobj) 2193 { 2194 struct linux_cdev *cdev; 2195 struct kobject *parent; 2196 2197 cdev = container_of(kobj, struct linux_cdev, kobj); 2198 parent = kobj->parent; 2199 linux_destroy_dev(cdev); 2200 
static void
linux_cdev_deref(struct linux_cdev *ldev)
{

	if (refcount_release(&ldev->refs))
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	kobject_put(parent);
}

void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;

	nb = arg;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, ifp);
	else
		nb->notifier_call(nb, NETDEV_DOWN, ifp);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_REGISTER, ifp);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}
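/*
 * Example (sketch, hypothetical callback): a consumer subscribes to
 * interface events with a notifier_block; each Linux event maps to the
 * FreeBSD EVENTHANDLER registered above:
 *
 *	static int
 *	mydrv_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *ptr)
 *	{
 *		if (event == NETDEV_UP)
 *			mydrv_link_up(ptr);
 *		return (NOTIFY_DONE);
 *	}
 *
 *	static struct notifier_block mydrv_nb = {
 *		.notifier_call = mydrv_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&mydrv_nb);
 */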
struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}
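/*
 * Example (illustrative only; "struct foo" and its key are made up):
 * list_sort() keeps the usual Linux comparison-callback idiom.  Note
 * that unlike Linux's allocation-free merge sort, this implementation
 * malloc()s a temporary array with M_WAITOK and thus may sleep:
 *
 *	struct foo {
 *		int key;
 *		struct list_head entry;
 *	};
 *
 *	static int
 *	foo_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct foo *fa = list_entry(a, struct foo, entry);
 *		struct foo *fb = list_entry(b, struct foo, entry);
 *
 *		return (fa->key - fb->key);
 *	}
 *
 *	list_sort(NULL, &foo_list, foo_cmp);	// foo_list: hypothetical
 */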
void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}
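/*
 * The chrdev registration helpers below create one linux_cdev per
 * minor.  Example (sketch; FOO_MAJOR, the fops handlers and the count
 * of four minors are hypothetical):
 *
 *	static const struct file_operations foo_fops = {
 *		.owner = THIS_MODULE,
 *		.open = foo_open,
 *		.release = foo_release,
 *	};
 *
 *	error = __register_chrdev(FOO_MAJOR, 0, 4, "foo", &foo_fops);
 *	...
 *	__unregister_chrdev(FOO_MAJOR, 0, 4, "foo");
 */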
int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit,
    NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
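/*
 * Example (illustrative): the assertion above is what makes the common
 * Linux cookie round trip below lossless on all supported platforms:
 *
 *	unsigned long cookie = (unsigned long)some_pointer;
 *	void *p = (void *)cookie;	// must equal some_pointer exactly
 */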