/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

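/*
 * Device kobjects: sysfs show/store requests are forwarded to the
 * Linux device_attribute callbacks, and the final kobject release
 * frees the device following the precedence implemented in
 * linux_dev_release() below.
 */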
static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock,
	    linux_kq_lock_owned, linux_kq_lock_unowned);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
505 */ 506 VM_OBJECT_WUNLOCK(vm_obj); 507 page = vm_page_getfake(paddr, vm_obj->memattr); 508 VM_OBJECT_WLOCK(vm_obj); 509 510 vm_page_replace_checked(page, vm_obj, 511 (*mres)->pindex, *mres); 512 513 vm_page_lock(*mres); 514 vm_page_free(*mres); 515 vm_page_unlock(*mres); 516 *mres = page; 517 } 518 page->valid = VM_PAGE_BITS_ALL; 519 return (VM_PAGER_OK); 520 } 521 return (VM_PAGER_FAIL); 522 } 523 524 static int 525 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 526 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 527 { 528 struct vm_area_struct *vmap; 529 int err; 530 531 linux_set_current(curthread); 532 533 /* get VM area structure */ 534 vmap = linux_cdev_handle_find(vm_obj->handle); 535 MPASS(vmap != NULL); 536 MPASS(vmap->vm_private_data == vm_obj->handle); 537 538 VM_OBJECT_WUNLOCK(vm_obj); 539 540 down_write(&vmap->vm_mm->mmap_sem); 541 if (unlikely(vmap->vm_ops == NULL)) { 542 err = VM_FAULT_SIGBUS; 543 } else { 544 struct vm_fault vmf; 545 546 /* fill out VM fault structure */ 547 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 548 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 549 vmf.pgoff = 0; 550 vmf.page = NULL; 551 vmf.vma = vmap; 552 553 vmap->vm_pfn_count = 0; 554 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 555 vmap->vm_obj = vm_obj; 556 557 err = vmap->vm_ops->fault(vmap, &vmf); 558 559 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 560 kern_yield(PRI_USER); 561 err = vmap->vm_ops->fault(vmap, &vmf); 562 } 563 } 564 565 /* translate return code */ 566 switch (err) { 567 case VM_FAULT_OOM: 568 err = VM_PAGER_AGAIN; 569 break; 570 case VM_FAULT_SIGBUS: 571 err = VM_PAGER_BAD; 572 break; 573 case VM_FAULT_NOPAGE: 574 /* 575 * By contract the fault handler will return having 576 * busied all the pages itself. If pidx is already 577 * found in the object, it will simply xbusy the first 578 * page and return with vm_pfn_count set to 1. 
579 */ 580 *first = vmap->vm_pfn_first; 581 *last = *first + vmap->vm_pfn_count - 1; 582 err = VM_PAGER_OK; 583 break; 584 default: 585 err = VM_PAGER_ERROR; 586 break; 587 } 588 up_write(&vmap->vm_mm->mmap_sem); 589 VM_OBJECT_WLOCK(vm_obj); 590 return (err); 591 } 592 593 static struct rwlock linux_vma_lock; 594 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 595 TAILQ_HEAD_INITIALIZER(linux_vma_head); 596 597 static void 598 linux_cdev_handle_free(struct vm_area_struct *vmap) 599 { 600 /* Drop reference on vm_file */ 601 if (vmap->vm_file != NULL) 602 fput(vmap->vm_file); 603 604 /* Drop reference on mm_struct */ 605 mmput(vmap->vm_mm); 606 607 kfree(vmap); 608 } 609 610 static void 611 linux_cdev_handle_remove(struct vm_area_struct *vmap) 612 { 613 rw_wlock(&linux_vma_lock); 614 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 615 rw_wunlock(&linux_vma_lock); 616 } 617 618 static struct vm_area_struct * 619 linux_cdev_handle_find(void *handle) 620 { 621 struct vm_area_struct *vmap; 622 623 rw_rlock(&linux_vma_lock); 624 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 625 if (vmap->vm_private_data == handle) 626 break; 627 } 628 rw_runlock(&linux_vma_lock); 629 return (vmap); 630 } 631 632 static int 633 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 634 vm_ooffset_t foff, struct ucred *cred, u_short *color) 635 { 636 637 MPASS(linux_cdev_handle_find(handle) != NULL); 638 *color = 0; 639 return (0); 640 } 641 642 static void 643 linux_cdev_pager_dtor(void *handle) 644 { 645 const struct vm_operations_struct *vm_ops; 646 struct vm_area_struct *vmap; 647 648 vmap = linux_cdev_handle_find(handle); 649 MPASS(vmap != NULL); 650 651 /* 652 * Remove handle before calling close operation to prevent 653 * other threads from reusing the handle pointer. 
654 */ 655 linux_cdev_handle_remove(vmap); 656 657 down_write(&vmap->vm_mm->mmap_sem); 658 vm_ops = vmap->vm_ops; 659 if (likely(vm_ops != NULL)) 660 vm_ops->close(vmap); 661 up_write(&vmap->vm_mm->mmap_sem); 662 663 linux_cdev_handle_free(vmap); 664 } 665 666 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 667 { 668 /* OBJT_MGTDEVICE */ 669 .cdev_pg_populate = linux_cdev_pager_populate, 670 .cdev_pg_ctor = linux_cdev_pager_ctor, 671 .cdev_pg_dtor = linux_cdev_pager_dtor 672 }, 673 { 674 /* OBJT_DEVICE */ 675 .cdev_pg_fault = linux_cdev_pager_fault, 676 .cdev_pg_ctor = linux_cdev_pager_ctor, 677 .cdev_pg_dtor = linux_cdev_pager_dtor 678 }, 679 }; 680 681 int 682 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 683 unsigned long size) 684 { 685 vm_object_t obj; 686 vm_page_t m; 687 688 obj = vma->vm_obj; 689 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 690 return (-ENOTSUP); 691 VM_OBJECT_RLOCK(obj); 692 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 693 m != NULL && m->pindex < OFF_TO_IDX(address + size); 694 m = TAILQ_NEXT(m, listq)) 695 pmap_remove_all(m); 696 VM_OBJECT_RUNLOCK(obj); 697 return (0); 698 } 699 700 static struct file_operations dummy_ldev_ops = { 701 /* XXXKIB */ 702 }; 703 704 static struct linux_cdev dummy_ldev = { 705 .ops = &dummy_ldev_ops, 706 }; 707 708 #define LDEV_SI_DTR 0x0001 709 #define LDEV_SI_REF 0x0002 710 711 static void 712 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 713 struct linux_cdev **dev) 714 { 715 struct linux_cdev *ldev; 716 u_int siref; 717 718 ldev = filp->f_cdev; 719 *fop = filp->f_op; 720 if (ldev != NULL) { 721 for (siref = ldev->siref;;) { 722 if ((siref & LDEV_SI_DTR) != 0) { 723 ldev = &dummy_ldev; 724 siref = ldev->siref; 725 *fop = ldev->ops; 726 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 727 } else if (atomic_fcmpset_int(&ldev->siref, &siref, 728 siref + LDEV_SI_REF)) { 729 break; 730 } 731 } 732 } 733 *dev = ldev; 734 } 735 736 static void 737 linux_drop_fop(struct linux_cdev *ldev) 738 { 739 740 if (ldev == NULL) 741 return; 742 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 743 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 744 } 745 746 #define OPW(fp,td,code) ({ \ 747 struct file *__fpop; \ 748 __typeof(code) __retval; \ 749 \ 750 __fpop = (td)->td_fpop; \ 751 (td)->td_fpop = (fp); \ 752 __retval = (code); \ 753 (td)->td_fpop = __fpop; \ 754 __retval; \ 755 }) 756 757 static int 758 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 759 struct file *file) 760 { 761 struct linux_cdev *ldev; 762 struct linux_file *filp; 763 const struct file_operations *fop; 764 int error; 765 766 ldev = dev->si_drv1; 767 768 filp = linux_file_alloc(); 769 filp->f_dentry = &filp->f_dentry_store; 770 filp->f_op = ldev->ops; 771 filp->f_mode = file->f_flag; 772 filp->f_flags = file->f_flag; 773 filp->f_vnode = file->f_vnode; 774 filp->_file = file; 775 refcount_acquire(&ldev->refs); 776 filp->f_cdev = ldev; 777 778 linux_set_current(td); 779 linux_get_fop(filp, &fop, &ldev); 780 781 if (fop->open != NULL) { 782 error = -fop->open(file->f_vnode, filp); 783 if (error != 0) { 784 linux_drop_fop(ldev); 785 linux_cdev_deref(filp->f_cdev); 786 kfree(filp); 787 return (error); 788 } 789 } 790 791 /* hold on to the vnode - used for fstat() */ 792 vhold(filp->f_vnode); 793 794 /* release the file from devfs */ 795 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 796 linux_drop_fop(ldev); 797 return (ENXIO); 798 } 799 800 #define LINUX_IOCTL_MIN_PTR 0x10000UL 801 #define 
#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(int rw, const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
947 */ 948 task->bsd_ioctl_data = data; 949 task->bsd_ioctl_len = size; 950 data = (void *)LINUX_IOCTL_MIN_PTR; 951 } else { 952 /* fetch user-space pointer */ 953 data = *(void **)data; 954 } 955 #if defined(__amd64__) 956 if (td->td_proc->p_elf_machine == EM_386) { 957 /* try the compat IOCTL handler first */ 958 if (fop->compat_ioctl != NULL) { 959 error = -OPW(fp, td, fop->compat_ioctl(filp, 960 cmd, (u_long)data)); 961 } else { 962 error = ENOTTY; 963 } 964 965 /* fallback to the regular IOCTL handler, if any */ 966 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 967 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 968 cmd, (u_long)data)); 969 } 970 } else 971 #endif 972 { 973 if (fop->unlocked_ioctl != NULL) { 974 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 975 cmd, (u_long)data)); 976 } else { 977 error = ENOTTY; 978 } 979 } 980 if (size > 0) { 981 task->bsd_ioctl_data = NULL; 982 task->bsd_ioctl_len = 0; 983 } 984 985 if (error == EWOULDBLOCK) { 986 /* update kqfilter status, if any */ 987 linux_file_kqfilter_poll(filp, 988 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 989 } else { 990 error = linux_get_error(task, error); 991 } 992 return (error); 993 } 994 995 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 996 997 /* 998 * This function atomically updates the poll wakeup state and returns 999 * the previous state at the time of update. 1000 */ 1001 static uint8_t 1002 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1003 { 1004 int c, old; 1005 1006 c = v->counter; 1007 1008 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1009 c = old; 1010 1011 return (c); 1012 } 1013 1014 1015 static int 1016 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1017 { 1018 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1019 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1020 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1021 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1022 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1023 }; 1024 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1025 1026 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1027 case LINUX_FWQ_STATE_QUEUED: 1028 linux_poll_wakeup(filp); 1029 return (1); 1030 default: 1031 return (0); 1032 } 1033 } 1034 1035 void 1036 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1037 { 1038 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1039 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1040 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1041 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1042 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1043 }; 1044 1045 /* check if we are called inside the select system call */ 1046 if (p == LINUX_POLL_TABLE_NORMAL) 1047 selrecord(curthread, &filp->f_selinfo); 1048 1049 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1050 case LINUX_FWQ_STATE_INIT: 1051 /* NOTE: file handles can only belong to one wait-queue */ 1052 filp->f_wait_queue.wqh = wqh; 1053 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1054 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1055 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1056 break; 1057 default: 1058 break; 1059 } 1060 } 1061 1062 static void 1063 linux_poll_wait_dequeue(struct linux_file *filp) 1064 { 1065 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1066 [LINUX_FWQ_STATE_INIT] 
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
1251 */ 1252 task = current; 1253 mm = task->mm; 1254 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1255 return (EINVAL); 1256 1257 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1258 vmap->vm_start = 0; 1259 vmap->vm_end = size; 1260 vmap->vm_pgoff = *offset / PAGE_SIZE; 1261 vmap->vm_pfn = 0; 1262 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1263 vmap->vm_ops = NULL; 1264 vmap->vm_file = get_file(filp); 1265 vmap->vm_mm = mm; 1266 1267 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1268 error = linux_get_error(task, EINTR); 1269 } else { 1270 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1271 error = linux_get_error(task, error); 1272 up_write(&vmap->vm_mm->mmap_sem); 1273 } 1274 1275 if (error != 0) { 1276 linux_cdev_handle_free(vmap); 1277 return (error); 1278 } 1279 1280 attr = pgprot2cachemode(vmap->vm_page_prot); 1281 1282 if (vmap->vm_ops != NULL) { 1283 struct vm_area_struct *ptr; 1284 void *vm_private_data; 1285 bool vm_no_fault; 1286 1287 if (vmap->vm_ops->open == NULL || 1288 vmap->vm_ops->close == NULL || 1289 vmap->vm_private_data == NULL) { 1290 /* free allocated VM area struct */ 1291 linux_cdev_handle_free(vmap); 1292 return (EINVAL); 1293 } 1294 1295 vm_private_data = vmap->vm_private_data; 1296 1297 rw_wlock(&linux_vma_lock); 1298 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1299 if (ptr->vm_private_data == vm_private_data) 1300 break; 1301 } 1302 /* check if there is an existing VM area struct */ 1303 if (ptr != NULL) { 1304 /* check if the VM area structure is invalid */ 1305 if (ptr->vm_ops == NULL || 1306 ptr->vm_ops->open == NULL || 1307 ptr->vm_ops->close == NULL) { 1308 error = ESTALE; 1309 vm_no_fault = 1; 1310 } else { 1311 error = EEXIST; 1312 vm_no_fault = (ptr->vm_ops->fault == NULL); 1313 } 1314 } else { 1315 /* insert VM area structure into list */ 1316 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1317 error = 0; 1318 vm_no_fault = (vmap->vm_ops->fault == NULL); 1319 } 1320 rw_wunlock(&linux_vma_lock); 1321 1322 if (error != 0) { 1323 /* free allocated VM area struct */ 1324 linux_cdev_handle_free(vmap); 1325 /* check for stale VM area struct */ 1326 if (error != EEXIST) 1327 return (error); 1328 } 1329 1330 /* check if there is no fault handler */ 1331 if (vm_no_fault) { 1332 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1333 &linux_cdev_pager_ops[1], size, nprot, *offset, 1334 td->td_ucred); 1335 } else { 1336 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1337 &linux_cdev_pager_ops[0], size, nprot, *offset, 1338 td->td_ucred); 1339 } 1340 1341 /* check if allocating the VM object failed */ 1342 if (*object == NULL) { 1343 if (error == 0) { 1344 /* remove VM area struct from list */ 1345 linux_cdev_handle_remove(vmap); 1346 /* free allocated VM area struct */ 1347 linux_cdev_handle_free(vmap); 1348 } 1349 return (EINVAL); 1350 } 1351 } else { 1352 struct sglist *sg; 1353 1354 sg = sglist_alloc(1, M_WAITOK); 1355 sglist_append_phys(sg, 1356 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1357 1358 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1359 nprot, 0, td->td_ucred); 1360 1361 linux_cdev_handle_free(vmap); 1362 1363 if (*object == NULL) { 1364 sglist_free(sg); 1365 return (EINVAL); 1366 } 1367 } 1368 1369 if (attr != VM_MEMATTR_DEFAULT) { 1370 VM_OBJECT_WLOCK(*object); 1371 vm_object_set_memattr(*object, attr); 1372 VM_OBJECT_WUNLOCK(*object); 1373 } 1374 *offset = 0; 1375 return (0); 1376 } 1377 1378 struct cdevsw linuxcdevsw = { 1379 .d_version = D_VERSION, 1380 
struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->release != NULL)
		error = -OPW(file, td, fop->release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	if (filp->f_cdev != NULL)
		linux_cdev_deref(filp->f_cdev);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
1630 */ 1631 if ((flags & MAP_SHARED) != 0) { 1632 if ((fp->f_flag & FWRITE) != 0) 1633 maxprot |= VM_PROT_WRITE; 1634 else if ((prot & VM_PROT_WRITE) != 0) 1635 return (EACCES); 1636 } 1637 maxprot &= cap_maxprot; 1638 1639 linux_get_fop(filp, &fop, &ldev); 1640 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, 1641 &foff, fop, &object); 1642 if (error != 0) 1643 goto out; 1644 1645 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1646 foff, FALSE, td); 1647 if (error != 0) 1648 vm_object_deallocate(object); 1649 out: 1650 linux_drop_fop(ldev); 1651 return (error); 1652 } 1653 1654 static int 1655 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1656 struct thread *td) 1657 { 1658 struct linux_file *filp; 1659 struct vnode *vp; 1660 int error; 1661 1662 filp = (struct linux_file *)fp->f_data; 1663 if (filp->f_vnode == NULL) 1664 return (EOPNOTSUPP); 1665 1666 vp = filp->f_vnode; 1667 1668 vn_lock(vp, LK_SHARED | LK_RETRY); 1669 error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); 1670 VOP_UNLOCK(vp, 0); 1671 1672 return (error); 1673 } 1674 1675 static int 1676 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1677 struct filedesc *fdp) 1678 { 1679 struct linux_file *filp; 1680 struct vnode *vp; 1681 int error; 1682 1683 filp = fp->f_data; 1684 vp = filp->f_vnode; 1685 if (vp == NULL) { 1686 error = 0; 1687 kif->kf_type = KF_TYPE_DEV; 1688 } else { 1689 vref(vp); 1690 FILEDESC_SUNLOCK(fdp); 1691 error = vn_fill_kinfo_vnode(vp, kif); 1692 vrele(vp); 1693 kif->kf_type = KF_TYPE_VNODE; 1694 FILEDESC_SLOCK(fdp); 1695 } 1696 return (error); 1697 } 1698 1699 unsigned int 1700 linux_iminor(struct inode *inode) 1701 { 1702 struct linux_cdev *ldev; 1703 1704 if (inode == NULL || inode->v_rdev == NULL || 1705 inode->v_rdev->si_devsw != &linuxcdevsw) 1706 return (-1U); 1707 ldev = inode->v_rdev->si_drv1; 1708 if (ldev == NULL) 1709 return (-1U); 1710 1711 return (minor(ldev->dev)); 1712 } 1713 1714 struct fileops linuxfileops = { 1715 .fo_read = linux_file_read, 1716 .fo_write = linux_file_write, 1717 .fo_truncate = invfo_truncate, 1718 .fo_kqfilter = linux_file_kqfilter, 1719 .fo_stat = linux_file_stat, 1720 .fo_fill_kinfo = linux_file_fill_kinfo, 1721 .fo_poll = linux_file_poll, 1722 .fo_close = linux_file_close, 1723 .fo_ioctl = linux_file_ioctl, 1724 .fo_mmap = linux_file_mmap, 1725 .fo_chmod = invfo_chmod, 1726 .fo_chown = invfo_chown, 1727 .fo_sendfile = invfo_sendfile, 1728 .fo_flags = DFLAG_PASSABLE, 1729 }; 1730 1731 /* 1732 * Hash of vmmap addresses. This is infrequently accessed and does not 1733 * need to be particularly large. This is done because we must store the 1734 * caller's idea of the map size to properly unmap. 
1735 */ 1736 struct vmmap { 1737 LIST_ENTRY(vmmap) vm_next; 1738 void *vm_addr; 1739 unsigned long vm_size; 1740 }; 1741 1742 struct vmmaphd { 1743 struct vmmap *lh_first; 1744 }; 1745 #define VMMAP_HASH_SIZE 64 1746 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1747 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1748 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1749 static struct mtx vmmaplock; 1750 1751 static void 1752 vmmap_add(void *addr, unsigned long size) 1753 { 1754 struct vmmap *vmmap; 1755 1756 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1757 mtx_lock(&vmmaplock); 1758 vmmap->vm_size = size; 1759 vmmap->vm_addr = addr; 1760 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1761 mtx_unlock(&vmmaplock); 1762 } 1763 1764 static struct vmmap * 1765 vmmap_remove(void *addr) 1766 { 1767 struct vmmap *vmmap; 1768 1769 mtx_lock(&vmmaplock); 1770 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1771 if (vmmap->vm_addr == addr) 1772 break; 1773 if (vmmap) 1774 LIST_REMOVE(vmmap, vm_next); 1775 mtx_unlock(&vmmaplock); 1776 1777 return (vmmap); 1778 } 1779 1780 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1781 void * 1782 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1783 { 1784 void *addr; 1785 1786 addr = pmap_mapdev_attr(phys_addr, size, attr); 1787 if (addr == NULL) 1788 return (NULL); 1789 vmmap_add(addr, size); 1790 1791 return (addr); 1792 } 1793 #endif 1794 1795 void 1796 iounmap(void *addr) 1797 { 1798 struct vmmap *vmmap; 1799 1800 vmmap = vmmap_remove(addr); 1801 if (vmmap == NULL) 1802 return; 1803 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1804 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1805 #endif 1806 kfree(vmmap); 1807 } 1808 1809 1810 void * 1811 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1812 { 1813 vm_offset_t off; 1814 size_t size; 1815 1816 size = count * PAGE_SIZE; 1817 off = kva_alloc(size); 1818 if (off == 0) 1819 return (NULL); 1820 vmmap_add((void *)off, size); 1821 pmap_qenter(off, pages, count); 1822 1823 return ((void *)off); 1824 } 1825 1826 void 1827 vunmap(void *addr) 1828 { 1829 struct vmmap *vmmap; 1830 1831 vmmap = vmmap_remove(addr); 1832 if (vmmap == NULL) 1833 return; 1834 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1835 kva_free((vm_offset_t)addr, vmmap->vm_size); 1836 kfree(vmmap); 1837 } 1838 1839 char * 1840 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1841 { 1842 unsigned int len; 1843 char *p; 1844 va_list aq; 1845 1846 va_copy(aq, ap); 1847 len = vsnprintf(NULL, 0, fmt, aq); 1848 va_end(aq); 1849 1850 p = kmalloc(len + 1, gfp); 1851 if (p != NULL) 1852 vsnprintf(p, len + 1, fmt, ap); 1853 1854 return (p); 1855 } 1856 1857 char * 1858 kasprintf(gfp_t gfp, const char *fmt, ...) 
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	linux_set_current(curthread);

	timer = context;
	timer->function(timer->data);
}

void
mod_timer(struct timer_list *timer, int expires)
{

	timer->expires = expires;
	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

static void
linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}

/*
 * Time limited wait for done != 0 with or without signals.
2002 */ 2003 int 2004 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2005 { 2006 struct task_struct *task; 2007 int end = jiffies + timeout; 2008 int error; 2009 2010 if (SCHEDULER_STOPPED()) 2011 return (0); 2012 2013 task = current; 2014 2015 if (flags != 0) 2016 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2017 else 2018 flags = SLEEPQ_SLEEP; 2019 2020 for (;;) { 2021 sleepq_lock(c); 2022 if (c->done) 2023 break; 2024 sleepq_add(c, NULL, "completion", flags, 0); 2025 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2026 2027 DROP_GIANT(); 2028 if (flags & SLEEPQ_INTERRUPTIBLE) 2029 error = -sleepq_timedwait_sig(c, 0); 2030 else 2031 error = -sleepq_timedwait(c, 0); 2032 PICKUP_GIANT(); 2033 2034 if (error != 0) { 2035 /* check for timeout */ 2036 if (error == -EWOULDBLOCK) { 2037 error = 0; /* timeout */ 2038 } else { 2039 /* signal happened */ 2040 linux_schedule_save_interrupt_value(task, error); 2041 error = -ERESTARTSYS; 2042 } 2043 goto done; 2044 } 2045 } 2046 if (c->done != UINT_MAX) 2047 c->done--; 2048 sleepq_release(c); 2049 2050 /* return how many jiffies are left */ 2051 error = linux_timer_jiffies_until(end); 2052 done: 2053 return (error); 2054 } 2055 2056 int 2057 linux_try_wait_for_completion(struct completion *c) 2058 { 2059 int isdone; 2060 2061 sleepq_lock(c); 2062 isdone = (c->done != 0); 2063 if (c->done != 0 && c->done != UINT_MAX) 2064 c->done--; 2065 sleepq_release(c); 2066 return (isdone); 2067 } 2068 2069 int 2070 linux_completion_done(struct completion *c) 2071 { 2072 int isdone; 2073 2074 sleepq_lock(c); 2075 isdone = (c->done != 0); 2076 sleepq_release(c); 2077 return (isdone); 2078 } 2079 2080 static void 2081 linux_cdev_deref(struct linux_cdev *ldev) 2082 { 2083 2084 if (refcount_release(&ldev->refs)) 2085 kfree(ldev); 2086 } 2087 2088 static void 2089 linux_cdev_release(struct kobject *kobj) 2090 { 2091 struct linux_cdev *cdev; 2092 struct kobject *parent; 2093 2094 cdev = container_of(kobj, struct linux_cdev, kobj); 2095 parent = kobj->parent; 2096 linux_destroy_dev(cdev); 2097 linux_cdev_deref(cdev); 2098 kobject_put(parent); 2099 } 2100 2101 static void 2102 linux_cdev_static_release(struct kobject *kobj) 2103 { 2104 struct linux_cdev *cdev; 2105 struct kobject *parent; 2106 2107 cdev = container_of(kobj, struct linux_cdev, kobj); 2108 parent = kobj->parent; 2109 linux_destroy_dev(cdev); 2110 kobject_put(parent); 2111 } 2112 2113 void 2114 linux_destroy_dev(struct linux_cdev *ldev) 2115 { 2116 2117 if (ldev->cdev == NULL) 2118 return; 2119 2120 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2121 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2122 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2123 pause("ldevdtr", hz / 4); 2124 2125 destroy_dev(ldev->cdev); 2126 ldev->cdev = NULL; 2127 } 2128 2129 const struct kobj_type linux_cdev_ktype = { 2130 .release = linux_cdev_release, 2131 }; 2132 2133 const struct kobj_type linux_cdev_static_ktype = { 2134 .release = linux_cdev_static_release, 2135 }; 2136 2137 static void 2138 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2139 { 2140 struct notifier_block *nb; 2141 2142 nb = arg; 2143 if (linkstate == LINK_STATE_UP) 2144 nb->notifier_call(nb, NETDEV_UP, ifp); 2145 else 2146 nb->notifier_call(nb, NETDEV_DOWN, ifp); 2147 } 2148 2149 static void 2150 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2151 { 2152 struct notifier_block *nb; 2153 2154 nb = arg; 2155 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 2156 } 2157 2158 static 
static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));