1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/user.h> 55 56 #include <vm/vm.h> 57 #include <vm/pmap.h> 58 #include <vm/vm_object.h> 59 #include <vm/vm_page.h> 60 #include <vm/vm_pager.h> 61 62 #include <machine/stdarg.h> 63 64 #if defined(__i386__) || defined(__amd64__) 65 #include <machine/md_var.h> 66 #endif 67 68 #include <linux/kobject.h> 69 #include <linux/device.h> 70 #include <linux/slab.h> 71 #include <linux/module.h> 72 #include <linux/moduleparam.h> 73 #include <linux/cdev.h> 74 #include <linux/file.h> 75 #include <linux/sysfs.h> 76 #include <linux/mm.h> 77 #include <linux/io.h> 78 #include <linux/vmalloc.h> 79 #include <linux/netdevice.h> 80 #include <linux/timer.h> 81 #include <linux/interrupt.h> 82 #include <linux/uaccess.h> 83 #include <linux/list.h> 84 #include <linux/kthread.h> 85 #include <linux/kernel.h> 86 #include <linux/compat.h> 87 #include <linux/poll.h> 88 #include <linux/smp.h> 89 90 #if defined(__i386__) || defined(__amd64__) 91 #include <asm/smp.h> 92 #endif 93 94 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 95 "LinuxKPI parameters"); 96 97 int linuxkpi_debug; 98 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 99 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 100 101 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 102 103 #include <linux/rbtree.h> 104 /* Undo Linux compat changes. 
*/ 105 #undef RB_ROOT 106 #undef file 107 #undef cdev 108 #define RB_ROOT(head) (head)->rbh_root 109 110 static void linux_cdev_deref(struct linux_cdev *ldev); 111 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 112 113 struct kobject linux_class_root; 114 struct device linux_root_device; 115 struct class linux_class_misc; 116 struct list_head pci_drivers; 117 struct list_head pci_devices; 118 spinlock_t pci_lock; 119 120 unsigned long linux_timer_hz_mask; 121 122 int 123 panic_cmp(struct rb_node *one, struct rb_node *two) 124 { 125 panic("no cmp"); 126 } 127 128 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 129 130 int 131 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 132 { 133 va_list tmp_va; 134 int len; 135 char *old; 136 char *name; 137 char dummy; 138 139 old = kobj->name; 140 141 if (old && fmt == NULL) 142 return (0); 143 144 /* compute length of string */ 145 va_copy(tmp_va, args); 146 len = vsnprintf(&dummy, 0, fmt, tmp_va); 147 va_end(tmp_va); 148 149 /* account for zero termination */ 150 len++; 151 152 /* check for error */ 153 if (len < 1) 154 return (-EINVAL); 155 156 /* allocate memory for string */ 157 name = kzalloc(len, GFP_KERNEL); 158 if (name == NULL) 159 return (-ENOMEM); 160 vsnprintf(name, len, fmt, args); 161 kobj->name = name; 162 163 /* free old string */ 164 kfree(old); 165 166 /* filter new string */ 167 for (; *name != '\0'; name++) 168 if (*name == '/') 169 *name = '!'; 170 return (0); 171 } 172 173 int 174 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 175 { 176 va_list args; 177 int error; 178 179 va_start(args, fmt); 180 error = kobject_set_name_vargs(kobj, fmt, args); 181 va_end(args); 182 183 return (error); 184 } 185 186 static int 187 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 188 { 189 const struct kobj_type *t; 190 int error; 191 192 kobj->parent = parent; 193 error = sysfs_create_dir(kobj); 194 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 195 struct attribute **attr; 196 t = kobj->ktype; 197 198 for (attr = t->default_attrs; *attr != NULL; attr++) { 199 error = sysfs_create_file(kobj, *attr); 200 if (error) 201 break; 202 } 203 if (error) 204 sysfs_remove_dir(kobj); 205 206 } 207 return (error); 208 } 209 210 int 211 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
212 { 213 va_list args; 214 int error; 215 216 va_start(args, fmt); 217 error = kobject_set_name_vargs(kobj, fmt, args); 218 va_end(args); 219 if (error) 220 return (error); 221 222 return kobject_add_complete(kobj, parent); 223 } 224 225 void 226 linux_kobject_release(struct kref *kref) 227 { 228 struct kobject *kobj; 229 char *name; 230 231 kobj = container_of(kref, struct kobject, kref); 232 sysfs_remove_dir(kobj); 233 name = kobj->name; 234 if (kobj->ktype && kobj->ktype->release) 235 kobj->ktype->release(kobj); 236 kfree(name); 237 } 238 239 static void 240 linux_kobject_kfree(struct kobject *kobj) 241 { 242 kfree(kobj); 243 } 244 245 static void 246 linux_kobject_kfree_name(struct kobject *kobj) 247 { 248 if (kobj) { 249 kfree(kobj->name); 250 } 251 } 252 253 const struct kobj_type linux_kfree_type = { 254 .release = linux_kobject_kfree 255 }; 256 257 static void 258 linux_device_release(struct device *dev) 259 { 260 pr_debug("linux_device_release: %s\n", dev_name(dev)); 261 kfree(dev); 262 } 263 264 static ssize_t 265 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 266 { 267 struct class_attribute *dattr; 268 ssize_t error; 269 270 dattr = container_of(attr, struct class_attribute, attr); 271 error = -EIO; 272 if (dattr->show) 273 error = dattr->show(container_of(kobj, struct class, kobj), 274 dattr, buf); 275 return (error); 276 } 277 278 static ssize_t 279 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 280 size_t count) 281 { 282 struct class_attribute *dattr; 283 ssize_t error; 284 285 dattr = container_of(attr, struct class_attribute, attr); 286 error = -EIO; 287 if (dattr->store) 288 error = dattr->store(container_of(kobj, struct class, kobj), 289 dattr, buf, count); 290 return (error); 291 } 292 293 static void 294 linux_class_release(struct kobject *kobj) 295 { 296 struct class *class; 297 298 class = container_of(kobj, struct class, kobj); 299 if (class->class_release) 300 class->class_release(class); 301 } 302 303 static const struct sysfs_ops linux_class_sysfs = { 304 .show = linux_class_show, 305 .store = linux_class_store, 306 }; 307 308 const struct kobj_type linux_class_ktype = { 309 .release = linux_class_release, 310 .sysfs_ops = &linux_class_sysfs 311 }; 312 313 static void 314 linux_dev_release(struct kobject *kobj) 315 { 316 struct device *dev; 317 318 dev = container_of(kobj, struct device, kobj); 319 /* This is the precedence defined by linux. 
*/ 320 if (dev->release) 321 dev->release(dev); 322 else if (dev->class && dev->class->dev_release) 323 dev->class->dev_release(dev); 324 } 325 326 static ssize_t 327 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 328 { 329 struct device_attribute *dattr; 330 ssize_t error; 331 332 dattr = container_of(attr, struct device_attribute, attr); 333 error = -EIO; 334 if (dattr->show) 335 error = dattr->show(container_of(kobj, struct device, kobj), 336 dattr, buf); 337 return (error); 338 } 339 340 static ssize_t 341 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 342 size_t count) 343 { 344 struct device_attribute *dattr; 345 ssize_t error; 346 347 dattr = container_of(attr, struct device_attribute, attr); 348 error = -EIO; 349 if (dattr->store) 350 error = dattr->store(container_of(kobj, struct device, kobj), 351 dattr, buf, count); 352 return (error); 353 } 354 355 static const struct sysfs_ops linux_dev_sysfs = { 356 .show = linux_dev_show, 357 .store = linux_dev_store, 358 }; 359 360 const struct kobj_type linux_dev_ktype = { 361 .release = linux_dev_release, 362 .sysfs_ops = &linux_dev_sysfs 363 }; 364 365 struct device * 366 device_create(struct class *class, struct device *parent, dev_t devt, 367 void *drvdata, const char *fmt, ...) 368 { 369 struct device *dev; 370 va_list args; 371 372 dev = kzalloc(sizeof(*dev), M_WAITOK); 373 dev->parent = parent; 374 dev->class = class; 375 dev->devt = devt; 376 dev->driver_data = drvdata; 377 dev->release = linux_device_release; 378 va_start(args, fmt); 379 kobject_set_name_vargs(&dev->kobj, fmt, args); 380 va_end(args); 381 device_register(dev); 382 383 return (dev); 384 } 385 386 int 387 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 388 struct kobject *parent, const char *fmt, ...) 
389 { 390 va_list args; 391 int error; 392 393 kobject_init(kobj, ktype); 394 kobj->ktype = ktype; 395 kobj->parent = parent; 396 kobj->name = NULL; 397 398 va_start(args, fmt); 399 error = kobject_set_name_vargs(kobj, fmt, args); 400 va_end(args); 401 if (error) 402 return (error); 403 return kobject_add_complete(kobj, parent); 404 } 405 406 static void 407 linux_kq_lock(void *arg) 408 { 409 spinlock_t *s = arg; 410 411 spin_lock(s); 412 } 413 static void 414 linux_kq_unlock(void *arg) 415 { 416 spinlock_t *s = arg; 417 418 spin_unlock(s); 419 } 420 421 static void 422 linux_kq_lock_owned(void *arg) 423 { 424 #ifdef INVARIANTS 425 spinlock_t *s = arg; 426 427 mtx_assert(&s->m, MA_OWNED); 428 #endif 429 } 430 431 static void 432 linux_kq_lock_unowned(void *arg) 433 { 434 #ifdef INVARIANTS 435 spinlock_t *s = arg; 436 437 mtx_assert(&s->m, MA_NOTOWNED); 438 #endif 439 } 440 441 static void 442 linux_file_kqfilter_poll(struct linux_file *, int); 443 444 struct linux_file * 445 linux_file_alloc(void) 446 { 447 struct linux_file *filp; 448 449 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 450 451 /* set initial refcount */ 452 filp->f_count = 1; 453 454 /* setup fields needed by kqueue support */ 455 spin_lock_init(&filp->f_kqlock); 456 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 457 linux_kq_lock, linux_kq_unlock, 458 linux_kq_lock_owned, linux_kq_lock_unowned); 459 460 return (filp); 461 } 462 463 void 464 linux_file_free(struct linux_file *filp) 465 { 466 if (filp->_file == NULL) { 467 if (filp->f_shmem != NULL) 468 vm_object_deallocate(filp->f_shmem); 469 kfree(filp); 470 } else { 471 /* 472 * The close method of the character device or file 473 * will free the linux_file structure: 474 */ 475 _fdrop(filp->_file, curthread); 476 } 477 } 478 479 static int 480 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 481 vm_page_t *mres) 482 { 483 struct vm_area_struct *vmap; 484 485 vmap = linux_cdev_handle_find(vm_obj->handle); 486 487 MPASS(vmap != NULL); 488 MPASS(vmap->vm_private_data == vm_obj->handle); 489 490 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 491 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 492 vm_page_t page; 493 494 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 495 /* 496 * If the passed in result page is a fake 497 * page, update it with the new physical 498 * address. 499 */ 500 page = *mres; 501 vm_page_updatefake(page, paddr, vm_obj->memattr); 502 } else { 503 /* 504 * Replace the passed in "mres" page with our 505 * own fake page and free up the all of the 506 * original pages. 
507 */ 508 VM_OBJECT_WUNLOCK(vm_obj); 509 page = vm_page_getfake(paddr, vm_obj->memattr); 510 VM_OBJECT_WLOCK(vm_obj); 511 512 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 513 *mres = page; 514 } 515 vm_page_valid(page); 516 return (VM_PAGER_OK); 517 } 518 return (VM_PAGER_FAIL); 519 } 520 521 static int 522 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 523 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 524 { 525 struct vm_area_struct *vmap; 526 int err; 527 528 /* get VM area structure */ 529 vmap = linux_cdev_handle_find(vm_obj->handle); 530 MPASS(vmap != NULL); 531 MPASS(vmap->vm_private_data == vm_obj->handle); 532 533 VM_OBJECT_WUNLOCK(vm_obj); 534 535 linux_set_current(curthread); 536 537 down_write(&vmap->vm_mm->mmap_sem); 538 if (unlikely(vmap->vm_ops == NULL)) { 539 err = VM_FAULT_SIGBUS; 540 } else { 541 struct vm_fault vmf; 542 543 /* fill out VM fault structure */ 544 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 545 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 546 vmf.pgoff = 0; 547 vmf.page = NULL; 548 vmf.vma = vmap; 549 550 vmap->vm_pfn_count = 0; 551 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 552 vmap->vm_obj = vm_obj; 553 554 err = vmap->vm_ops->fault(vmap, &vmf); 555 556 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 557 kern_yield(PRI_USER); 558 err = vmap->vm_ops->fault(vmap, &vmf); 559 } 560 } 561 562 /* translate return code */ 563 switch (err) { 564 case VM_FAULT_OOM: 565 err = VM_PAGER_AGAIN; 566 break; 567 case VM_FAULT_SIGBUS: 568 err = VM_PAGER_BAD; 569 break; 570 case VM_FAULT_NOPAGE: 571 /* 572 * By contract the fault handler will return having 573 * busied all the pages itself. If pidx is already 574 * found in the object, it will simply xbusy the first 575 * page and return with vm_pfn_count set to 1. 
576 */ 577 *first = vmap->vm_pfn_first; 578 *last = *first + vmap->vm_pfn_count - 1; 579 err = VM_PAGER_OK; 580 break; 581 default: 582 err = VM_PAGER_ERROR; 583 break; 584 } 585 up_write(&vmap->vm_mm->mmap_sem); 586 VM_OBJECT_WLOCK(vm_obj); 587 return (err); 588 } 589 590 static struct rwlock linux_vma_lock; 591 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 592 TAILQ_HEAD_INITIALIZER(linux_vma_head); 593 594 static void 595 linux_cdev_handle_free(struct vm_area_struct *vmap) 596 { 597 /* Drop reference on vm_file */ 598 if (vmap->vm_file != NULL) 599 fput(vmap->vm_file); 600 601 /* Drop reference on mm_struct */ 602 mmput(vmap->vm_mm); 603 604 kfree(vmap); 605 } 606 607 static void 608 linux_cdev_handle_remove(struct vm_area_struct *vmap) 609 { 610 rw_wlock(&linux_vma_lock); 611 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 612 rw_wunlock(&linux_vma_lock); 613 } 614 615 static struct vm_area_struct * 616 linux_cdev_handle_find(void *handle) 617 { 618 struct vm_area_struct *vmap; 619 620 rw_rlock(&linux_vma_lock); 621 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 622 if (vmap->vm_private_data == handle) 623 break; 624 } 625 rw_runlock(&linux_vma_lock); 626 return (vmap); 627 } 628 629 static int 630 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 631 vm_ooffset_t foff, struct ucred *cred, u_short *color) 632 { 633 634 MPASS(linux_cdev_handle_find(handle) != NULL); 635 *color = 0; 636 return (0); 637 } 638 639 static void 640 linux_cdev_pager_dtor(void *handle) 641 { 642 const struct vm_operations_struct *vm_ops; 643 struct vm_area_struct *vmap; 644 645 vmap = linux_cdev_handle_find(handle); 646 MPASS(vmap != NULL); 647 648 /* 649 * Remove handle before calling close operation to prevent 650 * other threads from reusing the handle pointer. 
651 */ 652 linux_cdev_handle_remove(vmap); 653 654 down_write(&vmap->vm_mm->mmap_sem); 655 vm_ops = vmap->vm_ops; 656 if (likely(vm_ops != NULL)) 657 vm_ops->close(vmap); 658 up_write(&vmap->vm_mm->mmap_sem); 659 660 linux_cdev_handle_free(vmap); 661 } 662 663 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 664 { 665 /* OBJT_MGTDEVICE */ 666 .cdev_pg_populate = linux_cdev_pager_populate, 667 .cdev_pg_ctor = linux_cdev_pager_ctor, 668 .cdev_pg_dtor = linux_cdev_pager_dtor 669 }, 670 { 671 /* OBJT_DEVICE */ 672 .cdev_pg_fault = linux_cdev_pager_fault, 673 .cdev_pg_ctor = linux_cdev_pager_ctor, 674 .cdev_pg_dtor = linux_cdev_pager_dtor 675 }, 676 }; 677 678 int 679 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 680 unsigned long size) 681 { 682 vm_object_t obj; 683 vm_page_t m; 684 685 obj = vma->vm_obj; 686 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 687 return (-ENOTSUP); 688 VM_OBJECT_RLOCK(obj); 689 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 690 m != NULL && m->pindex < OFF_TO_IDX(address + size); 691 m = TAILQ_NEXT(m, listq)) 692 pmap_remove_all(m); 693 VM_OBJECT_RUNLOCK(obj); 694 return (0); 695 } 696 697 static struct file_operations dummy_ldev_ops = { 698 /* XXXKIB */ 699 }; 700 701 static struct linux_cdev dummy_ldev = { 702 .ops = &dummy_ldev_ops, 703 }; 704 705 #define LDEV_SI_DTR 0x0001 706 #define LDEV_SI_REF 0x0002 707 708 static void 709 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 710 struct linux_cdev **dev) 711 { 712 struct linux_cdev *ldev; 713 u_int siref; 714 715 ldev = filp->f_cdev; 716 *fop = filp->f_op; 717 if (ldev != NULL) { 718 for (siref = ldev->siref;;) { 719 if ((siref & LDEV_SI_DTR) != 0) { 720 ldev = &dummy_ldev; 721 siref = ldev->siref; 722 *fop = ldev->ops; 723 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 724 } else if (atomic_fcmpset_int(&ldev->siref, &siref, 725 siref + LDEV_SI_REF)) { 726 break; 727 } 728 } 729 } 730 *dev = ldev; 731 } 732 733 static void 734 linux_drop_fop(struct linux_cdev *ldev) 735 { 736 737 if (ldev == NULL) 738 return; 739 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 740 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 741 } 742 743 #define OPW(fp,td,code) ({ \ 744 struct file *__fpop; \ 745 __typeof(code) __retval; \ 746 \ 747 __fpop = (td)->td_fpop; \ 748 (td)->td_fpop = (fp); \ 749 __retval = (code); \ 750 (td)->td_fpop = __fpop; \ 751 __retval; \ 752 }) 753 754 static int 755 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 756 struct file *file) 757 { 758 struct linux_cdev *ldev; 759 struct linux_file *filp; 760 const struct file_operations *fop; 761 int error; 762 763 ldev = dev->si_drv1; 764 765 filp = linux_file_alloc(); 766 filp->f_dentry = &filp->f_dentry_store; 767 filp->f_op = ldev->ops; 768 filp->f_mode = file->f_flag; 769 filp->f_flags = file->f_flag; 770 filp->f_vnode = file->f_vnode; 771 filp->_file = file; 772 refcount_acquire(&ldev->refs); 773 filp->f_cdev = ldev; 774 775 linux_set_current(td); 776 linux_get_fop(filp, &fop, &ldev); 777 778 if (fop->open != NULL) { 779 error = -fop->open(file->f_vnode, filp); 780 if (error != 0) { 781 linux_drop_fop(ldev); 782 linux_cdev_deref(filp->f_cdev); 783 kfree(filp); 784 return (error); 785 } 786 } 787 788 /* hold on to the vnode - used for fstat() */ 789 vhold(filp->f_vnode); 790 791 /* release the file from devfs */ 792 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 793 linux_drop_fop(ldev); 794 return (ENXIO); 795 } 796 797 #define LINUX_IOCTL_MIN_PTR 0x10000UL 798 #define 
LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 799 800 static inline int 801 linux_remap_address(void **uaddr, size_t len) 802 { 803 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 804 805 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 806 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 807 struct task_struct *pts = current; 808 if (pts == NULL) { 809 *uaddr = NULL; 810 return (1); 811 } 812 813 /* compute data offset */ 814 uaddr_val -= LINUX_IOCTL_MIN_PTR; 815 816 /* check that length is within bounds */ 817 if ((len > IOCPARM_MAX) || 818 (uaddr_val + len) > pts->bsd_ioctl_len) { 819 *uaddr = NULL; 820 return (1); 821 } 822 823 /* re-add kernel buffer address */ 824 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 825 826 /* update address location */ 827 *uaddr = (void *)uaddr_val; 828 return (1); 829 } 830 return (0); 831 } 832 833 int 834 linux_copyin(const void *uaddr, void *kaddr, size_t len) 835 { 836 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 837 if (uaddr == NULL) 838 return (-EFAULT); 839 memcpy(kaddr, uaddr, len); 840 return (0); 841 } 842 return (-copyin(uaddr, kaddr, len)); 843 } 844 845 int 846 linux_copyout(const void *kaddr, void *uaddr, size_t len) 847 { 848 if (linux_remap_address(&uaddr, len)) { 849 if (uaddr == NULL) 850 return (-EFAULT); 851 memcpy(uaddr, kaddr, len); 852 return (0); 853 } 854 return (-copyout(kaddr, uaddr, len)); 855 } 856 857 size_t 858 linux_clear_user(void *_uaddr, size_t _len) 859 { 860 uint8_t *uaddr = _uaddr; 861 size_t len = _len; 862 863 /* make sure uaddr is aligned before going into the fast loop */ 864 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 865 if (subyte(uaddr, 0)) 866 return (_len); 867 uaddr++; 868 len--; 869 } 870 871 /* zero 8 bytes at a time */ 872 while (len > 7) { 873 #ifdef __LP64__ 874 if (suword64(uaddr, 0)) 875 return (_len); 876 #else 877 if (suword32(uaddr, 0)) 878 return (_len); 879 if (suword32(uaddr + 4, 0)) 880 return (_len); 881 #endif 882 uaddr += 8; 883 len -= 8; 884 } 885 886 /* zero fill end, if any */ 887 while (len > 0) { 888 if (subyte(uaddr, 0)) 889 return (_len); 890 uaddr++; 891 len--; 892 } 893 return (0); 894 } 895 896 int 897 linux_access_ok(const void *uaddr, size_t len) 898 { 899 uintptr_t saddr; 900 uintptr_t eaddr; 901 902 /* get start and end address */ 903 saddr = (uintptr_t)uaddr; 904 eaddr = (uintptr_t)uaddr + len; 905 906 /* verify addresses are valid for userspace */ 907 return ((saddr == eaddr) || 908 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 909 } 910 911 /* 912 * This function should return either EINTR or ERESTART depending on 913 * the signal type sent to this thread: 914 */ 915 static int 916 linux_get_error(struct task_struct *task, int error) 917 { 918 /* check for signal type interrupt code */ 919 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 920 error = -linux_schedule_get_interrupt_value(task); 921 if (error == 0) 922 error = EINTR; 923 } 924 return (error); 925 } 926 927 static int 928 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 929 const struct file_operations *fop, u_long cmd, caddr_t data, 930 struct thread *td) 931 { 932 struct task_struct *task = current; 933 unsigned size; 934 int error; 935 936 size = IOCPARM_LEN(cmd); 937 /* refer to logic in sys_ioctl() */ 938 if (size > 0) { 939 /* 940 * Setup hint for linux_copyin() and linux_copyout(). 941 * 942 * Background: Linux code expects a user-space address 943 * while FreeBSD supplies a kernel-space address. 
944 */ 945 task->bsd_ioctl_data = data; 946 task->bsd_ioctl_len = size; 947 data = (void *)LINUX_IOCTL_MIN_PTR; 948 } else { 949 /* fetch user-space pointer */ 950 data = *(void **)data; 951 } 952 #if defined(__amd64__) 953 if (td->td_proc->p_elf_machine == EM_386) { 954 /* try the compat IOCTL handler first */ 955 if (fop->compat_ioctl != NULL) { 956 error = -OPW(fp, td, fop->compat_ioctl(filp, 957 cmd, (u_long)data)); 958 } else { 959 error = ENOTTY; 960 } 961 962 /* fallback to the regular IOCTL handler, if any */ 963 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 964 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 965 cmd, (u_long)data)); 966 } 967 } else 968 #endif 969 { 970 if (fop->unlocked_ioctl != NULL) { 971 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 972 cmd, (u_long)data)); 973 } else { 974 error = ENOTTY; 975 } 976 } 977 if (size > 0) { 978 task->bsd_ioctl_data = NULL; 979 task->bsd_ioctl_len = 0; 980 } 981 982 if (error == EWOULDBLOCK) { 983 /* update kqfilter status, if any */ 984 linux_file_kqfilter_poll(filp, 985 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 986 } else { 987 error = linux_get_error(task, error); 988 } 989 return (error); 990 } 991 992 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 993 994 /* 995 * This function atomically updates the poll wakeup state and returns 996 * the previous state at the time of update. 997 */ 998 static uint8_t 999 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1000 { 1001 int c, old; 1002 1003 c = v->counter; 1004 1005 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1006 c = old; 1007 1008 return (c); 1009 } 1010 1011 1012 static int 1013 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1014 { 1015 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1016 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1017 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1018 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1019 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1020 }; 1021 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1022 1023 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1024 case LINUX_FWQ_STATE_QUEUED: 1025 linux_poll_wakeup(filp); 1026 return (1); 1027 default: 1028 return (0); 1029 } 1030 } 1031 1032 void 1033 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1034 { 1035 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1036 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1037 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1038 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1039 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1040 }; 1041 1042 /* check if we are called inside the select system call */ 1043 if (p == LINUX_POLL_TABLE_NORMAL) 1044 selrecord(curthread, &filp->f_selinfo); 1045 1046 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1047 case LINUX_FWQ_STATE_INIT: 1048 /* NOTE: file handles can only belong to one wait-queue */ 1049 filp->f_wait_queue.wqh = wqh; 1050 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1051 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1052 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1053 break; 1054 default: 1055 break; 1056 } 1057 } 1058 1059 static void 1060 linux_poll_wait_dequeue(struct linux_file *filp) 1061 { 1062 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1063 [LINUX_FWQ_STATE_INIT] = 
LINUX_FWQ_STATE_INIT, /* NOP */ 1064 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1065 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1066 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1067 }; 1068 1069 seldrain(&filp->f_selinfo); 1070 1071 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1072 case LINUX_FWQ_STATE_NOT_READY: 1073 case LINUX_FWQ_STATE_QUEUED: 1074 case LINUX_FWQ_STATE_READY: 1075 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1076 break; 1077 default: 1078 break; 1079 } 1080 } 1081 1082 void 1083 linux_poll_wakeup(struct linux_file *filp) 1084 { 1085 /* this function should be NULL-safe */ 1086 if (filp == NULL) 1087 return; 1088 1089 selwakeup(&filp->f_selinfo); 1090 1091 spin_lock(&filp->f_kqlock); 1092 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1093 LINUX_KQ_FLAG_NEED_WRITE; 1094 1095 /* make sure the "knote" gets woken up */ 1096 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1097 spin_unlock(&filp->f_kqlock); 1098 } 1099 1100 static void 1101 linux_file_kqfilter_detach(struct knote *kn) 1102 { 1103 struct linux_file *filp = kn->kn_hook; 1104 1105 spin_lock(&filp->f_kqlock); 1106 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1107 spin_unlock(&filp->f_kqlock); 1108 } 1109 1110 static int 1111 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1112 { 1113 struct linux_file *filp = kn->kn_hook; 1114 1115 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1116 1117 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1118 } 1119 1120 static int 1121 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1122 { 1123 struct linux_file *filp = kn->kn_hook; 1124 1125 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1126 1127 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1128 } 1129 1130 static struct filterops linux_dev_kqfiltops_read = { 1131 .f_isfd = 1, 1132 .f_detach = linux_file_kqfilter_detach, 1133 .f_event = linux_file_kqfilter_read_event, 1134 }; 1135 1136 static struct filterops linux_dev_kqfiltops_write = { 1137 .f_isfd = 1, 1138 .f_detach = linux_file_kqfilter_detach, 1139 .f_event = linux_file_kqfilter_write_event, 1140 }; 1141 1142 static void 1143 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1144 { 1145 struct thread *td; 1146 const struct file_operations *fop; 1147 struct linux_cdev *ldev; 1148 int temp; 1149 1150 if ((filp->f_kqflags & kqflags) == 0) 1151 return; 1152 1153 td = curthread; 1154 1155 linux_get_fop(filp, &fop, &ldev); 1156 /* get the latest polling state */ 1157 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1158 linux_drop_fop(ldev); 1159 1160 spin_lock(&filp->f_kqlock); 1161 /* clear kqflags */ 1162 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1163 LINUX_KQ_FLAG_NEED_WRITE); 1164 /* update kqflags */ 1165 if ((temp & (POLLIN | POLLOUT)) != 0) { 1166 if ((temp & POLLIN) != 0) 1167 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1168 if ((temp & POLLOUT) != 0) 1169 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1170 1171 /* make sure the "knote" gets woken up */ 1172 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1173 } 1174 spin_unlock(&filp->f_kqlock); 1175 } 1176 1177 static int 1178 linux_file_kqfilter(struct file *file, struct knote *kn) 1179 { 1180 struct linux_file *filp; 1181 struct thread *td; 1182 int error; 1183 1184 td = curthread; 1185 filp = (struct linux_file *)file->f_data; 1186 filp->f_flags = file->f_flag; 1187 if (filp->f_op->poll == NULL) 1188 return (EINVAL); 1189 1190 spin_lock(&filp->f_kqlock); 1191 switch (kn->kn_filter) { 1192 case 
EVFILT_READ: 1193 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1194 kn->kn_fop = &linux_dev_kqfiltops_read; 1195 kn->kn_hook = filp; 1196 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1197 error = 0; 1198 break; 1199 case EVFILT_WRITE: 1200 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1201 kn->kn_fop = &linux_dev_kqfiltops_write; 1202 kn->kn_hook = filp; 1203 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1204 error = 0; 1205 break; 1206 default: 1207 error = EINVAL; 1208 break; 1209 } 1210 spin_unlock(&filp->f_kqlock); 1211 1212 if (error == 0) { 1213 linux_set_current(td); 1214 1215 /* update kqfilter status, if any */ 1216 linux_file_kqfilter_poll(filp, 1217 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1218 } 1219 return (error); 1220 } 1221 1222 static int 1223 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1224 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1225 int nprot, struct thread *td) 1226 { 1227 struct task_struct *task; 1228 struct vm_area_struct *vmap; 1229 struct mm_struct *mm; 1230 struct linux_file *filp; 1231 vm_memattr_t attr; 1232 int error; 1233 1234 filp = (struct linux_file *)fp->f_data; 1235 filp->f_flags = fp->f_flag; 1236 1237 if (fop->mmap == NULL) 1238 return (EOPNOTSUPP); 1239 1240 linux_set_current(td); 1241 1242 /* 1243 * The same VM object might be shared by multiple processes 1244 * and the mm_struct is usually freed when a process exits. 1245 * 1246 * The atomic reference below makes sure the mm_struct is 1247 * available as long as the vmap is in the linux_vma_head. 1248 */ 1249 task = current; 1250 mm = task->mm; 1251 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1252 return (EINVAL); 1253 1254 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1255 vmap->vm_start = 0; 1256 vmap->vm_end = size; 1257 vmap->vm_pgoff = *offset / PAGE_SIZE; 1258 vmap->vm_pfn = 0; 1259 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1260 vmap->vm_ops = NULL; 1261 vmap->vm_file = get_file(filp); 1262 vmap->vm_mm = mm; 1263 1264 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1265 error = linux_get_error(task, EINTR); 1266 } else { 1267 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1268 error = linux_get_error(task, error); 1269 up_write(&vmap->vm_mm->mmap_sem); 1270 } 1271 1272 if (error != 0) { 1273 linux_cdev_handle_free(vmap); 1274 return (error); 1275 } 1276 1277 attr = pgprot2cachemode(vmap->vm_page_prot); 1278 1279 if (vmap->vm_ops != NULL) { 1280 struct vm_area_struct *ptr; 1281 void *vm_private_data; 1282 bool vm_no_fault; 1283 1284 if (vmap->vm_ops->open == NULL || 1285 vmap->vm_ops->close == NULL || 1286 vmap->vm_private_data == NULL) { 1287 /* free allocated VM area struct */ 1288 linux_cdev_handle_free(vmap); 1289 return (EINVAL); 1290 } 1291 1292 vm_private_data = vmap->vm_private_data; 1293 1294 rw_wlock(&linux_vma_lock); 1295 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1296 if (ptr->vm_private_data == vm_private_data) 1297 break; 1298 } 1299 /* check if there is an existing VM area struct */ 1300 if (ptr != NULL) { 1301 /* check if the VM area structure is invalid */ 1302 if (ptr->vm_ops == NULL || 1303 ptr->vm_ops->open == NULL || 1304 ptr->vm_ops->close == NULL) { 1305 error = ESTALE; 1306 vm_no_fault = 1; 1307 } else { 1308 error = EEXIST; 1309 vm_no_fault = (ptr->vm_ops->fault == NULL); 1310 } 1311 } else { 1312 /* insert VM area structure into list */ 1313 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1314 error = 0; 1315 vm_no_fault = (vmap->vm_ops->fault == NULL); 1316 } 1317 
rw_wunlock(&linux_vma_lock); 1318 1319 if (error != 0) { 1320 /* free allocated VM area struct */ 1321 linux_cdev_handle_free(vmap); 1322 /* check for stale VM area struct */ 1323 if (error != EEXIST) 1324 return (error); 1325 } 1326 1327 /* check if there is no fault handler */ 1328 if (vm_no_fault) { 1329 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1330 &linux_cdev_pager_ops[1], size, nprot, *offset, 1331 td->td_ucred); 1332 } else { 1333 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1334 &linux_cdev_pager_ops[0], size, nprot, *offset, 1335 td->td_ucred); 1336 } 1337 1338 /* check if allocating the VM object failed */ 1339 if (*object == NULL) { 1340 if (error == 0) { 1341 /* remove VM area struct from list */ 1342 linux_cdev_handle_remove(vmap); 1343 /* free allocated VM area struct */ 1344 linux_cdev_handle_free(vmap); 1345 } 1346 return (EINVAL); 1347 } 1348 } else { 1349 struct sglist *sg; 1350 1351 sg = sglist_alloc(1, M_WAITOK); 1352 sglist_append_phys(sg, 1353 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1354 1355 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1356 nprot, 0, td->td_ucred); 1357 1358 linux_cdev_handle_free(vmap); 1359 1360 if (*object == NULL) { 1361 sglist_free(sg); 1362 return (EINVAL); 1363 } 1364 } 1365 1366 if (attr != VM_MEMATTR_DEFAULT) { 1367 VM_OBJECT_WLOCK(*object); 1368 vm_object_set_memattr(*object, attr); 1369 VM_OBJECT_WUNLOCK(*object); 1370 } 1371 *offset = 0; 1372 return (0); 1373 } 1374 1375 struct cdevsw linuxcdevsw = { 1376 .d_version = D_VERSION, 1377 .d_fdopen = linux_dev_fdopen, 1378 .d_name = "lkpidev", 1379 }; 1380 1381 static int 1382 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1383 int flags, struct thread *td) 1384 { 1385 struct linux_file *filp; 1386 const struct file_operations *fop; 1387 struct linux_cdev *ldev; 1388 ssize_t bytes; 1389 int error; 1390 1391 error = 0; 1392 filp = (struct linux_file *)file->f_data; 1393 filp->f_flags = file->f_flag; 1394 /* XXX no support for I/O vectors currently */ 1395 if (uio->uio_iovcnt != 1) 1396 return (EOPNOTSUPP); 1397 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1398 return (EINVAL); 1399 linux_set_current(td); 1400 linux_get_fop(filp, &fop, &ldev); 1401 if (fop->read != NULL) { 1402 bytes = OPW(file, td, fop->read(filp, 1403 uio->uio_iov->iov_base, 1404 uio->uio_iov->iov_len, &uio->uio_offset)); 1405 if (bytes >= 0) { 1406 uio->uio_iov->iov_base = 1407 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1408 uio->uio_iov->iov_len -= bytes; 1409 uio->uio_resid -= bytes; 1410 } else { 1411 error = linux_get_error(current, -bytes); 1412 } 1413 } else 1414 error = ENXIO; 1415 1416 /* update kqfilter status, if any */ 1417 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1418 linux_drop_fop(ldev); 1419 1420 return (error); 1421 } 1422 1423 static int 1424 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1425 int flags, struct thread *td) 1426 { 1427 struct linux_file *filp; 1428 const struct file_operations *fop; 1429 struct linux_cdev *ldev; 1430 ssize_t bytes; 1431 int error; 1432 1433 filp = (struct linux_file *)file->f_data; 1434 filp->f_flags = file->f_flag; 1435 /* XXX no support for I/O vectors currently */ 1436 if (uio->uio_iovcnt != 1) 1437 return (EOPNOTSUPP); 1438 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1439 return (EINVAL); 1440 linux_set_current(td); 1441 linux_get_fop(filp, &fop, &ldev); 1442 if (fop->write != NULL) { 1443 bytes = OPW(file, td, fop->write(filp, 1444 
uio->uio_iov->iov_base, 1445 uio->uio_iov->iov_len, &uio->uio_offset)); 1446 if (bytes >= 0) { 1447 uio->uio_iov->iov_base = 1448 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1449 uio->uio_iov->iov_len -= bytes; 1450 uio->uio_resid -= bytes; 1451 error = 0; 1452 } else { 1453 error = linux_get_error(current, -bytes); 1454 } 1455 } else 1456 error = ENXIO; 1457 1458 /* update kqfilter status, if any */ 1459 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1460 1461 linux_drop_fop(ldev); 1462 1463 return (error); 1464 } 1465 1466 static int 1467 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1468 struct thread *td) 1469 { 1470 struct linux_file *filp; 1471 const struct file_operations *fop; 1472 struct linux_cdev *ldev; 1473 int revents; 1474 1475 filp = (struct linux_file *)file->f_data; 1476 filp->f_flags = file->f_flag; 1477 linux_set_current(td); 1478 linux_get_fop(filp, &fop, &ldev); 1479 if (fop->poll != NULL) { 1480 revents = OPW(file, td, fop->poll(filp, 1481 LINUX_POLL_TABLE_NORMAL)) & events; 1482 } else { 1483 revents = 0; 1484 } 1485 linux_drop_fop(ldev); 1486 return (revents); 1487 } 1488 1489 static int 1490 linux_file_close(struct file *file, struct thread *td) 1491 { 1492 struct linux_file *filp; 1493 int (*release)(struct inode *, struct linux_file *); 1494 const struct file_operations *fop; 1495 struct linux_cdev *ldev; 1496 int error; 1497 1498 filp = (struct linux_file *)file->f_data; 1499 1500 KASSERT(file_count(filp) == 0, 1501 ("File refcount(%d) is not zero", file_count(filp))); 1502 1503 if (td == NULL) 1504 td = curthread; 1505 1506 error = 0; 1507 filp->f_flags = file->f_flag; 1508 linux_set_current(td); 1509 linux_poll_wait_dequeue(filp); 1510 linux_get_fop(filp, &fop, &ldev); 1511 /* 1512 * Always use the real release function, if any, to avoid 1513 * leaking device resources: 1514 */ 1515 release = filp->f_op->release; 1516 if (release != NULL) 1517 error = -OPW(file, td, release(filp->f_vnode, filp)); 1518 funsetown(&filp->f_sigio); 1519 if (filp->f_vnode != NULL) 1520 vdrop(filp->f_vnode); 1521 linux_drop_fop(ldev); 1522 if (filp->f_cdev != NULL) 1523 linux_cdev_deref(filp->f_cdev); 1524 kfree(filp); 1525 1526 return (error); 1527 } 1528 1529 static int 1530 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1531 struct thread *td) 1532 { 1533 struct linux_file *filp; 1534 const struct file_operations *fop; 1535 struct linux_cdev *ldev; 1536 struct fiodgname_arg *fgn; 1537 const char *p; 1538 int error, i; 1539 1540 error = 0; 1541 filp = (struct linux_file *)fp->f_data; 1542 filp->f_flags = fp->f_flag; 1543 linux_get_fop(filp, &fop, &ldev); 1544 1545 linux_set_current(td); 1546 switch (cmd) { 1547 case FIONBIO: 1548 break; 1549 case FIOASYNC: 1550 if (fop->fasync == NULL) 1551 break; 1552 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1553 break; 1554 case FIOSETOWN: 1555 error = fsetown(*(int *)data, &filp->f_sigio); 1556 if (error == 0) { 1557 if (fop->fasync == NULL) 1558 break; 1559 error = -OPW(fp, td, fop->fasync(0, filp, 1560 fp->f_flag & FASYNC)); 1561 } 1562 break; 1563 case FIOGETOWN: 1564 *(int *)data = fgetown(&filp->f_sigio); 1565 break; 1566 case FIODGNAME: 1567 #ifdef COMPAT_FREEBSD32 1568 case FIODGNAME_32: 1569 #endif 1570 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1571 error = ENXIO; 1572 break; 1573 } 1574 fgn = data; 1575 p = devtoname(filp->f_cdev->cdev); 1576 i = strlen(p) + 1; 1577 if (i > fgn->len) { 1578 error = EINVAL; 1579 break; 1580 } 
1581 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1582 break; 1583 default: 1584 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1585 break; 1586 } 1587 linux_drop_fop(ldev); 1588 return (error); 1589 } 1590 1591 static int 1592 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1593 vm_prot_t *maxprotp, int *flagsp, struct file *fp, 1594 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1595 { 1596 /* 1597 * Character devices do not provide private mappings 1598 * of any kind: 1599 */ 1600 if ((*maxprotp & VM_PROT_WRITE) == 0 && 1601 (prot & VM_PROT_WRITE) != 0) 1602 return (EACCES); 1603 if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) 1604 return (EINVAL); 1605 1606 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1607 (int)prot, td)); 1608 } 1609 1610 static int 1611 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1612 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1613 struct thread *td) 1614 { 1615 struct linux_file *filp; 1616 const struct file_operations *fop; 1617 struct linux_cdev *ldev; 1618 struct mount *mp; 1619 struct vnode *vp; 1620 vm_object_t object; 1621 vm_prot_t maxprot; 1622 int error; 1623 1624 filp = (struct linux_file *)fp->f_data; 1625 1626 vp = filp->f_vnode; 1627 if (vp == NULL) 1628 return (EOPNOTSUPP); 1629 1630 /* 1631 * Ensure that file and memory protections are 1632 * compatible. 1633 */ 1634 mp = vp->v_mount; 1635 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1636 maxprot = VM_PROT_NONE; 1637 if ((prot & VM_PROT_EXECUTE) != 0) 1638 return (EACCES); 1639 } else 1640 maxprot = VM_PROT_EXECUTE; 1641 if ((fp->f_flag & FREAD) != 0) 1642 maxprot |= VM_PROT_READ; 1643 else if ((prot & VM_PROT_READ) != 0) 1644 return (EACCES); 1645 1646 /* 1647 * If we are sharing potential changes via MAP_SHARED and we 1648 * are trying to get write permission although we opened it 1649 * without asking for it, bail out. 1650 * 1651 * Note that most character devices always share mappings. 1652 * 1653 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1654 * requests rather than doing it here. 
1655 */ 1656 if ((flags & MAP_SHARED) != 0) { 1657 if ((fp->f_flag & FWRITE) != 0) 1658 maxprot |= VM_PROT_WRITE; 1659 else if ((prot & VM_PROT_WRITE) != 0) 1660 return (EACCES); 1661 } 1662 maxprot &= cap_maxprot; 1663 1664 linux_get_fop(filp, &fop, &ldev); 1665 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, 1666 &foff, fop, &object); 1667 if (error != 0) 1668 goto out; 1669 1670 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1671 foff, FALSE, td); 1672 if (error != 0) 1673 vm_object_deallocate(object); 1674 out: 1675 linux_drop_fop(ldev); 1676 return (error); 1677 } 1678 1679 static int 1680 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1681 struct thread *td) 1682 { 1683 struct linux_file *filp; 1684 struct vnode *vp; 1685 int error; 1686 1687 filp = (struct linux_file *)fp->f_data; 1688 if (filp->f_vnode == NULL) 1689 return (EOPNOTSUPP); 1690 1691 vp = filp->f_vnode; 1692 1693 vn_lock(vp, LK_SHARED | LK_RETRY); 1694 error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); 1695 VOP_UNLOCK(vp); 1696 1697 return (error); 1698 } 1699 1700 static int 1701 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1702 struct filedesc *fdp) 1703 { 1704 struct linux_file *filp; 1705 struct vnode *vp; 1706 int error; 1707 1708 filp = fp->f_data; 1709 vp = filp->f_vnode; 1710 if (vp == NULL) { 1711 error = 0; 1712 kif->kf_type = KF_TYPE_DEV; 1713 } else { 1714 vref(vp); 1715 FILEDESC_SUNLOCK(fdp); 1716 error = vn_fill_kinfo_vnode(vp, kif); 1717 vrele(vp); 1718 kif->kf_type = KF_TYPE_VNODE; 1719 FILEDESC_SLOCK(fdp); 1720 } 1721 return (error); 1722 } 1723 1724 unsigned int 1725 linux_iminor(struct inode *inode) 1726 { 1727 struct linux_cdev *ldev; 1728 1729 if (inode == NULL || inode->v_rdev == NULL || 1730 inode->v_rdev->si_devsw != &linuxcdevsw) 1731 return (-1U); 1732 ldev = inode->v_rdev->si_drv1; 1733 if (ldev == NULL) 1734 return (-1U); 1735 1736 return (minor(ldev->dev)); 1737 } 1738 1739 struct fileops linuxfileops = { 1740 .fo_read = linux_file_read, 1741 .fo_write = linux_file_write, 1742 .fo_truncate = invfo_truncate, 1743 .fo_kqfilter = linux_file_kqfilter, 1744 .fo_stat = linux_file_stat, 1745 .fo_fill_kinfo = linux_file_fill_kinfo, 1746 .fo_poll = linux_file_poll, 1747 .fo_close = linux_file_close, 1748 .fo_ioctl = linux_file_ioctl, 1749 .fo_mmap = linux_file_mmap, 1750 .fo_chmod = invfo_chmod, 1751 .fo_chown = invfo_chown, 1752 .fo_sendfile = invfo_sendfile, 1753 .fo_flags = DFLAG_PASSABLE, 1754 }; 1755 1756 /* 1757 * Hash of vmmap addresses. This is infrequently accessed and does not 1758 * need to be particularly large. This is done because we must store the 1759 * caller's idea of the map size to properly unmap. 
1760 */ 1761 struct vmmap { 1762 LIST_ENTRY(vmmap) vm_next; 1763 void *vm_addr; 1764 unsigned long vm_size; 1765 }; 1766 1767 struct vmmaphd { 1768 struct vmmap *lh_first; 1769 }; 1770 #define VMMAP_HASH_SIZE 64 1771 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1772 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1773 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1774 static struct mtx vmmaplock; 1775 1776 static void 1777 vmmap_add(void *addr, unsigned long size) 1778 { 1779 struct vmmap *vmmap; 1780 1781 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1782 mtx_lock(&vmmaplock); 1783 vmmap->vm_size = size; 1784 vmmap->vm_addr = addr; 1785 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1786 mtx_unlock(&vmmaplock); 1787 } 1788 1789 static struct vmmap * 1790 vmmap_remove(void *addr) 1791 { 1792 struct vmmap *vmmap; 1793 1794 mtx_lock(&vmmaplock); 1795 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1796 if (vmmap->vm_addr == addr) 1797 break; 1798 if (vmmap) 1799 LIST_REMOVE(vmmap, vm_next); 1800 mtx_unlock(&vmmaplock); 1801 1802 return (vmmap); 1803 } 1804 1805 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1806 void * 1807 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1808 { 1809 void *addr; 1810 1811 addr = pmap_mapdev_attr(phys_addr, size, attr); 1812 if (addr == NULL) 1813 return (NULL); 1814 vmmap_add(addr, size); 1815 1816 return (addr); 1817 } 1818 #endif 1819 1820 void 1821 iounmap(void *addr) 1822 { 1823 struct vmmap *vmmap; 1824 1825 vmmap = vmmap_remove(addr); 1826 if (vmmap == NULL) 1827 return; 1828 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1829 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1830 #endif 1831 kfree(vmmap); 1832 } 1833 1834 1835 void * 1836 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1837 { 1838 vm_offset_t off; 1839 size_t size; 1840 1841 size = count * PAGE_SIZE; 1842 off = kva_alloc(size); 1843 if (off == 0) 1844 return (NULL); 1845 vmmap_add((void *)off, size); 1846 pmap_qenter(off, pages, count); 1847 1848 return ((void *)off); 1849 } 1850 1851 void 1852 vunmap(void *addr) 1853 { 1854 struct vmmap *vmmap; 1855 1856 vmmap = vmmap_remove(addr); 1857 if (vmmap == NULL) 1858 return; 1859 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1860 kva_free((vm_offset_t)addr, vmmap->vm_size); 1861 kfree(vmmap); 1862 } 1863 1864 char * 1865 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1866 { 1867 unsigned int len; 1868 char *p; 1869 va_list aq; 1870 1871 va_copy(aq, ap); 1872 len = vsnprintf(NULL, 0, fmt, aq); 1873 va_end(aq); 1874 1875 p = kmalloc(len + 1, gfp); 1876 if (p != NULL) 1877 vsnprintf(p, len + 1, fmt, ap); 1878 1879 return (p); 1880 } 1881 1882 char * 1883 kasprintf(gfp_t gfp, const char *fmt, ...) 
1884 { 1885 va_list ap; 1886 char *p; 1887 1888 va_start(ap, fmt); 1889 p = kvasprintf(gfp, fmt, ap); 1890 va_end(ap); 1891 1892 return (p); 1893 } 1894 1895 static void 1896 linux_timer_callback_wrapper(void *context) 1897 { 1898 struct timer_list *timer; 1899 1900 linux_set_current(curthread); 1901 1902 timer = context; 1903 timer->function(timer->data); 1904 } 1905 1906 int 1907 mod_timer(struct timer_list *timer, int expires) 1908 { 1909 int ret; 1910 1911 timer->expires = expires; 1912 ret = callout_reset(&timer->callout, 1913 linux_timer_jiffies_until(expires), 1914 &linux_timer_callback_wrapper, timer); 1915 1916 MPASS(ret == 0 || ret == 1); 1917 1918 return (ret == 1); 1919 } 1920 1921 void 1922 add_timer(struct timer_list *timer) 1923 { 1924 1925 callout_reset(&timer->callout, 1926 linux_timer_jiffies_until(timer->expires), 1927 &linux_timer_callback_wrapper, timer); 1928 } 1929 1930 void 1931 add_timer_on(struct timer_list *timer, int cpu) 1932 { 1933 1934 callout_reset_on(&timer->callout, 1935 linux_timer_jiffies_until(timer->expires), 1936 &linux_timer_callback_wrapper, timer, cpu); 1937 } 1938 1939 int 1940 del_timer(struct timer_list *timer) 1941 { 1942 1943 if (callout_stop(&(timer)->callout) == -1) 1944 return (0); 1945 return (1); 1946 } 1947 1948 int 1949 del_timer_sync(struct timer_list *timer) 1950 { 1951 1952 if (callout_drain(&(timer)->callout) == -1) 1953 return (0); 1954 return (1); 1955 } 1956 1957 /* greatest common divisor, Euclid equation */ 1958 static uint64_t 1959 lkpi_gcd_64(uint64_t a, uint64_t b) 1960 { 1961 uint64_t an; 1962 uint64_t bn; 1963 1964 while (b != 0) { 1965 an = b; 1966 bn = a % b; 1967 a = an; 1968 b = bn; 1969 } 1970 return (a); 1971 } 1972 1973 uint64_t lkpi_nsec2hz_rem; 1974 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 1975 uint64_t lkpi_nsec2hz_max; 1976 1977 uint64_t lkpi_usec2hz_rem; 1978 uint64_t lkpi_usec2hz_div = 1000000ULL; 1979 uint64_t lkpi_usec2hz_max; 1980 1981 uint64_t lkpi_msec2hz_rem; 1982 uint64_t lkpi_msec2hz_div = 1000ULL; 1983 uint64_t lkpi_msec2hz_max; 1984 1985 static void 1986 linux_timer_init(void *arg) 1987 { 1988 uint64_t gcd; 1989 1990 /* 1991 * Compute an internal HZ value which can divide 2**32 to 1992 * avoid timer rounding problems when the tick value wraps 1993 * around 2**32: 1994 */ 1995 linux_timer_hz_mask = 1; 1996 while (linux_timer_hz_mask < (unsigned long)hz) 1997 linux_timer_hz_mask *= 2; 1998 linux_timer_hz_mask--; 1999 2000 /* compute some internal constants */ 2001 2002 lkpi_nsec2hz_rem = hz; 2003 lkpi_usec2hz_rem = hz; 2004 lkpi_msec2hz_rem = hz; 2005 2006 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2007 lkpi_nsec2hz_rem /= gcd; 2008 lkpi_nsec2hz_div /= gcd; 2009 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2010 2011 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2012 lkpi_usec2hz_rem /= gcd; 2013 lkpi_usec2hz_div /= gcd; 2014 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2015 2016 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2017 lkpi_msec2hz_rem /= gcd; 2018 lkpi_msec2hz_div /= gcd; 2019 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2020 } 2021 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2022 2023 void 2024 linux_complete_common(struct completion *c, int all) 2025 { 2026 int wakeup_swapper; 2027 2028 sleepq_lock(c); 2029 if (all) { 2030 c->done = UINT_MAX; 2031 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2032 } else { 2033 if (c->done != UINT_MAX) 2034 c->done++; 2035 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 
2036 } 2037 sleepq_release(c); 2038 if (wakeup_swapper) 2039 kick_proc0(); 2040 } 2041 2042 /* 2043 * Indefinite wait for done != 0 with or without signals. 2044 */ 2045 int 2046 linux_wait_for_common(struct completion *c, int flags) 2047 { 2048 struct task_struct *task; 2049 int error; 2050 2051 if (SCHEDULER_STOPPED()) 2052 return (0); 2053 2054 task = current; 2055 2056 if (flags != 0) 2057 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2058 else 2059 flags = SLEEPQ_SLEEP; 2060 error = 0; 2061 for (;;) { 2062 sleepq_lock(c); 2063 if (c->done) 2064 break; 2065 sleepq_add(c, NULL, "completion", flags, 0); 2066 if (flags & SLEEPQ_INTERRUPTIBLE) { 2067 DROP_GIANT(); 2068 error = -sleepq_wait_sig(c, 0); 2069 PICKUP_GIANT(); 2070 if (error != 0) { 2071 linux_schedule_save_interrupt_value(task, error); 2072 error = -ERESTARTSYS; 2073 goto intr; 2074 } 2075 } else { 2076 DROP_GIANT(); 2077 sleepq_wait(c, 0); 2078 PICKUP_GIANT(); 2079 } 2080 } 2081 if (c->done != UINT_MAX) 2082 c->done--; 2083 sleepq_release(c); 2084 2085 intr: 2086 return (error); 2087 } 2088 2089 /* 2090 * Time limited wait for done != 0 with or without signals. 2091 */ 2092 int 2093 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2094 { 2095 struct task_struct *task; 2096 int end = jiffies + timeout; 2097 int error; 2098 2099 if (SCHEDULER_STOPPED()) 2100 return (0); 2101 2102 task = current; 2103 2104 if (flags != 0) 2105 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2106 else 2107 flags = SLEEPQ_SLEEP; 2108 2109 for (;;) { 2110 sleepq_lock(c); 2111 if (c->done) 2112 break; 2113 sleepq_add(c, NULL, "completion", flags, 0); 2114 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2115 2116 DROP_GIANT(); 2117 if (flags & SLEEPQ_INTERRUPTIBLE) 2118 error = -sleepq_timedwait_sig(c, 0); 2119 else 2120 error = -sleepq_timedwait(c, 0); 2121 PICKUP_GIANT(); 2122 2123 if (error != 0) { 2124 /* check for timeout */ 2125 if (error == -EWOULDBLOCK) { 2126 error = 0; /* timeout */ 2127 } else { 2128 /* signal happened */ 2129 linux_schedule_save_interrupt_value(task, error); 2130 error = -ERESTARTSYS; 2131 } 2132 goto done; 2133 } 2134 } 2135 if (c->done != UINT_MAX) 2136 c->done--; 2137 sleepq_release(c); 2138 2139 /* return how many jiffies are left */ 2140 error = linux_timer_jiffies_until(end); 2141 done: 2142 return (error); 2143 } 2144 2145 int 2146 linux_try_wait_for_completion(struct completion *c) 2147 { 2148 int isdone; 2149 2150 sleepq_lock(c); 2151 isdone = (c->done != 0); 2152 if (c->done != 0 && c->done != UINT_MAX) 2153 c->done--; 2154 sleepq_release(c); 2155 return (isdone); 2156 } 2157 2158 int 2159 linux_completion_done(struct completion *c) 2160 { 2161 int isdone; 2162 2163 sleepq_lock(c); 2164 isdone = (c->done != 0); 2165 sleepq_release(c); 2166 return (isdone); 2167 } 2168 2169 static void 2170 linux_cdev_deref(struct linux_cdev *ldev) 2171 { 2172 2173 if (refcount_release(&ldev->refs)) 2174 kfree(ldev); 2175 } 2176 2177 static void 2178 linux_cdev_release(struct kobject *kobj) 2179 { 2180 struct linux_cdev *cdev; 2181 struct kobject *parent; 2182 2183 cdev = container_of(kobj, struct linux_cdev, kobj); 2184 parent = kobj->parent; 2185 linux_destroy_dev(cdev); 2186 linux_cdev_deref(cdev); 2187 kobject_put(parent); 2188 } 2189 2190 static void 2191 linux_cdev_static_release(struct kobject *kobj) 2192 { 2193 struct linux_cdev *cdev; 2194 struct kobject *parent; 2195 2196 cdev = container_of(kobj, struct linux_cdev, kobj); 2197 parent = kobj->parent; 2198 linux_destroy_dev(cdev); 2199 
kobject_put(parent); 2200 } 2201 2202 void 2203 linux_destroy_dev(struct linux_cdev *ldev) 2204 { 2205 2206 if (ldev->cdev == NULL) 2207 return; 2208 2209 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2210 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2211 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2212 pause("ldevdtr", hz / 4); 2213 2214 destroy_dev(ldev->cdev); 2215 ldev->cdev = NULL; 2216 } 2217 2218 const struct kobj_type linux_cdev_ktype = { 2219 .release = linux_cdev_release, 2220 }; 2221 2222 const struct kobj_type linux_cdev_static_ktype = { 2223 .release = linux_cdev_static_release, 2224 }; 2225 2226 static void 2227 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2228 { 2229 struct notifier_block *nb; 2230 2231 nb = arg; 2232 if (linkstate == LINK_STATE_UP) 2233 nb->notifier_call(nb, NETDEV_UP, ifp); 2234 else 2235 nb->notifier_call(nb, NETDEV_DOWN, ifp); 2236 } 2237 2238 static void 2239 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2240 { 2241 struct notifier_block *nb; 2242 2243 nb = arg; 2244 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 2245 } 2246 2247 static void 2248 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2249 { 2250 struct notifier_block *nb; 2251 2252 nb = arg; 2253 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 2254 } 2255 2256 static void 2257 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2258 { 2259 struct notifier_block *nb; 2260 2261 nb = arg; 2262 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 2263 } 2264 2265 static void 2266 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2267 { 2268 struct notifier_block *nb; 2269 2270 nb = arg; 2271 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 2272 } 2273 2274 int 2275 register_netdevice_notifier(struct notifier_block *nb) 2276 { 2277 2278 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2279 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2280 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2281 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2282 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2283 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2284 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2285 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2286 2287 return (0); 2288 } 2289 2290 int 2291 register_inetaddr_notifier(struct notifier_block *nb) 2292 { 2293 2294 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2295 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2296 return (0); 2297 } 2298 2299 int 2300 unregister_netdevice_notifier(struct notifier_block *nb) 2301 { 2302 2303 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2304 nb->tags[NETDEV_UP]); 2305 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2306 nb->tags[NETDEV_REGISTER]); 2307 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2308 nb->tags[NETDEV_UNREGISTER]); 2309 EVENTHANDLER_DEREGISTER(iflladdr_event, 2310 nb->tags[NETDEV_CHANGEADDR]); 2311 2312 return (0); 2313 } 2314 2315 int 2316 unregister_inetaddr_notifier(struct notifier_block *nb) 2317 { 2318 2319 EVENTHANDLER_DEREGISTER(ifaddr_event, 2320 nb->tags[NETDEV_CHANGEIFADDR]); 2321 2322 return (0); 2323 } 2324 2325 struct list_sort_thunk { 2326 int (*cmp)(void *, struct list_head *, struct list_head *); 2327 void *priv; 2328 }; 2329 2330 static inline int 2331 linux_le_cmp(void *priv, const void *d1, const void *d2) 2332 { 2333 struct list_head *le1, *le2; 2334 struct list_sort_thunk *thunk; 2335 2336 thunk = priv; 2337 le1 = *(__DECONST(struct 
struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}
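
/*
 * Illustrative sketch (not part of the original code): list_sort() snapshots
 * the list into an array, sorts it with qsort_r() through the thunk above
 * and then relinks the entries in order.  A hypothetical caller sorting
 * entries by an integer key might look like:
 *
 *	struct entry {
 *		int key;
 *		struct list_head node;
 *	};
 *
 *	static int
 *	entry_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct entry *ea = list_entry(a, struct entry, node);
 *		struct entry *eb = list_entry(b, struct entry, node);
 *
 *		return (ea->key - eb->key);	// ascending by key
 *	}
 *
 *	list_sort(NULL, &some_list, entry_cmp);
 *
 * The "priv" argument is passed back to the callback unchanged; it is unused
 * (NULL) in this sketch.
 */
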
void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}
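
/*
 * Illustrative sketch (not part of the original code): a hypothetical
 * LinuxKPI consumer registering a single character device minor through the
 * routines above.  The major number, the fops and their handlers are made
 * up for the example:
 *
 *	static const struct file_operations example_fops = {
 *		.owner = THIS_MODULE,
 *		.open = example_open,
 *		.release = example_release,
 *	};
 *
 *	error = __register_chrdev(EXAMPLE_MAJOR, 0, 1, "example",
 *	    &example_fops);
 *	if (error != 0)
 *		return (error);
 *	...
 *	__unregister_chrdev(EXAMPLE_MAJOR, 0, 1, "example");
 *
 * __unregister_chrdev() looks the devices up again by name and minor via
 * linux_find_cdev(), so the same name must be passed on both calls.
 */
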
"unsigned long" for pointer to integer 2544 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2545 * used. Assert these types have the same size, else some parts of the 2546 * LinuxKPI may not work like expected: 2547 */ 2548 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2549