/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

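/*
 * Illustrative sketch (not part of the KPI itself): a consumer typically
 * names and publishes a kobject in one step, roughly
 *
 *	error = kobject_add(&obj->kobj, parent, "port%d", idx);
 *
 * where "obj" and "idx" are hypothetical driver variables.  kobject_add()
 * below formats the name through kobject_set_name_vargs() and then inserts
 * the object via kobject_add_complete() above.
 */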
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by Linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock,
	    linux_kq_lock_owned, linux_kq_lock_unowned);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
506 */ 507 VM_OBJECT_WUNLOCK(vm_obj); 508 page = vm_page_getfake(paddr, vm_obj->memattr); 509 VM_OBJECT_WLOCK(vm_obj); 510 511 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 512 *mres = page; 513 } 514 vm_page_valid(page); 515 return (VM_PAGER_OK); 516 } 517 return (VM_PAGER_FAIL); 518 } 519 520 static int 521 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 522 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 523 { 524 struct vm_area_struct *vmap; 525 int err; 526 527 linux_set_current(curthread); 528 529 /* get VM area structure */ 530 vmap = linux_cdev_handle_find(vm_obj->handle); 531 MPASS(vmap != NULL); 532 MPASS(vmap->vm_private_data == vm_obj->handle); 533 534 VM_OBJECT_WUNLOCK(vm_obj); 535 536 down_write(&vmap->vm_mm->mmap_sem); 537 if (unlikely(vmap->vm_ops == NULL)) { 538 err = VM_FAULT_SIGBUS; 539 } else { 540 struct vm_fault vmf; 541 542 /* fill out VM fault structure */ 543 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 544 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 545 vmf.pgoff = 0; 546 vmf.page = NULL; 547 vmf.vma = vmap; 548 549 vmap->vm_pfn_count = 0; 550 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 551 vmap->vm_obj = vm_obj; 552 553 err = vmap->vm_ops->fault(vmap, &vmf); 554 555 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 556 kern_yield(PRI_USER); 557 err = vmap->vm_ops->fault(vmap, &vmf); 558 } 559 } 560 561 /* translate return code */ 562 switch (err) { 563 case VM_FAULT_OOM: 564 err = VM_PAGER_AGAIN; 565 break; 566 case VM_FAULT_SIGBUS: 567 err = VM_PAGER_BAD; 568 break; 569 case VM_FAULT_NOPAGE: 570 /* 571 * By contract the fault handler will return having 572 * busied all the pages itself. If pidx is already 573 * found in the object, it will simply xbusy the first 574 * page and return with vm_pfn_count set to 1. 
575 */ 576 *first = vmap->vm_pfn_first; 577 *last = *first + vmap->vm_pfn_count - 1; 578 err = VM_PAGER_OK; 579 break; 580 default: 581 err = VM_PAGER_ERROR; 582 break; 583 } 584 up_write(&vmap->vm_mm->mmap_sem); 585 VM_OBJECT_WLOCK(vm_obj); 586 return (err); 587 } 588 589 static struct rwlock linux_vma_lock; 590 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 591 TAILQ_HEAD_INITIALIZER(linux_vma_head); 592 593 static void 594 linux_cdev_handle_free(struct vm_area_struct *vmap) 595 { 596 /* Drop reference on vm_file */ 597 if (vmap->vm_file != NULL) 598 fput(vmap->vm_file); 599 600 /* Drop reference on mm_struct */ 601 mmput(vmap->vm_mm); 602 603 kfree(vmap); 604 } 605 606 static void 607 linux_cdev_handle_remove(struct vm_area_struct *vmap) 608 { 609 rw_wlock(&linux_vma_lock); 610 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 611 rw_wunlock(&linux_vma_lock); 612 } 613 614 static struct vm_area_struct * 615 linux_cdev_handle_find(void *handle) 616 { 617 struct vm_area_struct *vmap; 618 619 rw_rlock(&linux_vma_lock); 620 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 621 if (vmap->vm_private_data == handle) 622 break; 623 } 624 rw_runlock(&linux_vma_lock); 625 return (vmap); 626 } 627 628 static int 629 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 630 vm_ooffset_t foff, struct ucred *cred, u_short *color) 631 { 632 633 MPASS(linux_cdev_handle_find(handle) != NULL); 634 *color = 0; 635 return (0); 636 } 637 638 static void 639 linux_cdev_pager_dtor(void *handle) 640 { 641 const struct vm_operations_struct *vm_ops; 642 struct vm_area_struct *vmap; 643 644 vmap = linux_cdev_handle_find(handle); 645 MPASS(vmap != NULL); 646 647 /* 648 * Remove handle before calling close operation to prevent 649 * other threads from reusing the handle pointer. 
650 */ 651 linux_cdev_handle_remove(vmap); 652 653 down_write(&vmap->vm_mm->mmap_sem); 654 vm_ops = vmap->vm_ops; 655 if (likely(vm_ops != NULL)) 656 vm_ops->close(vmap); 657 up_write(&vmap->vm_mm->mmap_sem); 658 659 linux_cdev_handle_free(vmap); 660 } 661 662 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 663 { 664 /* OBJT_MGTDEVICE */ 665 .cdev_pg_populate = linux_cdev_pager_populate, 666 .cdev_pg_ctor = linux_cdev_pager_ctor, 667 .cdev_pg_dtor = linux_cdev_pager_dtor 668 }, 669 { 670 /* OBJT_DEVICE */ 671 .cdev_pg_fault = linux_cdev_pager_fault, 672 .cdev_pg_ctor = linux_cdev_pager_ctor, 673 .cdev_pg_dtor = linux_cdev_pager_dtor 674 }, 675 }; 676 677 int 678 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 679 unsigned long size) 680 { 681 vm_object_t obj; 682 vm_page_t m; 683 684 obj = vma->vm_obj; 685 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 686 return (-ENOTSUP); 687 VM_OBJECT_RLOCK(obj); 688 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 689 m != NULL && m->pindex < OFF_TO_IDX(address + size); 690 m = TAILQ_NEXT(m, listq)) 691 pmap_remove_all(m); 692 VM_OBJECT_RUNLOCK(obj); 693 return (0); 694 } 695 696 static struct file_operations dummy_ldev_ops = { 697 /* XXXKIB */ 698 }; 699 700 static struct linux_cdev dummy_ldev = { 701 .ops = &dummy_ldev_ops, 702 }; 703 704 #define LDEV_SI_DTR 0x0001 705 #define LDEV_SI_REF 0x0002 706 707 static void 708 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 709 struct linux_cdev **dev) 710 { 711 struct linux_cdev *ldev; 712 u_int siref; 713 714 ldev = filp->f_cdev; 715 *fop = filp->f_op; 716 if (ldev != NULL) { 717 for (siref = ldev->siref;;) { 718 if ((siref & LDEV_SI_DTR) != 0) { 719 ldev = &dummy_ldev; 720 siref = ldev->siref; 721 *fop = ldev->ops; 722 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 723 } else if (atomic_fcmpset_int(&ldev->siref, &siref, 724 siref + LDEV_SI_REF)) { 725 break; 726 } 727 } 728 } 729 *dev = ldev; 730 } 731 732 static void 733 linux_drop_fop(struct linux_cdev *ldev) 734 { 735 736 if (ldev == NULL) 737 return; 738 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 739 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 740 } 741 742 #define OPW(fp,td,code) ({ \ 743 struct file *__fpop; \ 744 __typeof(code) __retval; \ 745 \ 746 __fpop = (td)->td_fpop; \ 747 (td)->td_fpop = (fp); \ 748 __retval = (code); \ 749 (td)->td_fpop = __fpop; \ 750 __retval; \ 751 }) 752 753 static int 754 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 755 struct file *file) 756 { 757 struct linux_cdev *ldev; 758 struct linux_file *filp; 759 const struct file_operations *fop; 760 int error; 761 762 ldev = dev->si_drv1; 763 764 filp = linux_file_alloc(); 765 filp->f_dentry = &filp->f_dentry_store; 766 filp->f_op = ldev->ops; 767 filp->f_mode = file->f_flag; 768 filp->f_flags = file->f_flag; 769 filp->f_vnode = file->f_vnode; 770 filp->_file = file; 771 refcount_acquire(&ldev->refs); 772 filp->f_cdev = ldev; 773 774 linux_set_current(td); 775 linux_get_fop(filp, &fop, &ldev); 776 777 if (fop->open != NULL) { 778 error = -fop->open(file->f_vnode, filp); 779 if (error != 0) { 780 linux_drop_fop(ldev); 781 linux_cdev_deref(filp->f_cdev); 782 kfree(filp); 783 return (error); 784 } 785 } 786 787 /* hold on to the vnode - used for fstat() */ 788 vhold(filp->f_vnode); 789 790 /* release the file from devfs */ 791 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 792 linux_drop_fop(ldev); 793 return (ENXIO); 794 } 795 796 #define LINUX_IOCTL_MIN_PTR 0x10000UL 797 #define 
static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
943 */ 944 task->bsd_ioctl_data = data; 945 task->bsd_ioctl_len = size; 946 data = (void *)LINUX_IOCTL_MIN_PTR; 947 } else { 948 /* fetch user-space pointer */ 949 data = *(void **)data; 950 } 951 #if defined(__amd64__) 952 if (td->td_proc->p_elf_machine == EM_386) { 953 /* try the compat IOCTL handler first */ 954 if (fop->compat_ioctl != NULL) { 955 error = -OPW(fp, td, fop->compat_ioctl(filp, 956 cmd, (u_long)data)); 957 } else { 958 error = ENOTTY; 959 } 960 961 /* fallback to the regular IOCTL handler, if any */ 962 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 963 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 964 cmd, (u_long)data)); 965 } 966 } else 967 #endif 968 { 969 if (fop->unlocked_ioctl != NULL) { 970 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 971 cmd, (u_long)data)); 972 } else { 973 error = ENOTTY; 974 } 975 } 976 if (size > 0) { 977 task->bsd_ioctl_data = NULL; 978 task->bsd_ioctl_len = 0; 979 } 980 981 if (error == EWOULDBLOCK) { 982 /* update kqfilter status, if any */ 983 linux_file_kqfilter_poll(filp, 984 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 985 } else { 986 error = linux_get_error(task, error); 987 } 988 return (error); 989 } 990 991 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 992 993 /* 994 * This function atomically updates the poll wakeup state and returns 995 * the previous state at the time of update. 996 */ 997 static uint8_t 998 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 999 { 1000 int c, old; 1001 1002 c = v->counter; 1003 1004 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1005 c = old; 1006 1007 return (c); 1008 } 1009 1010 1011 static int 1012 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1013 { 1014 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1015 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1016 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1017 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1018 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1019 }; 1020 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1021 1022 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1023 case LINUX_FWQ_STATE_QUEUED: 1024 linux_poll_wakeup(filp); 1025 return (1); 1026 default: 1027 return (0); 1028 } 1029 } 1030 1031 void 1032 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1033 { 1034 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1035 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1036 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1037 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1038 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1039 }; 1040 1041 /* check if we are called inside the select system call */ 1042 if (p == LINUX_POLL_TABLE_NORMAL) 1043 selrecord(curthread, &filp->f_selinfo); 1044 1045 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1046 case LINUX_FWQ_STATE_INIT: 1047 /* NOTE: file handles can only belong to one wait-queue */ 1048 filp->f_wait_queue.wqh = wqh; 1049 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1050 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1051 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1052 break; 1053 default: 1054 break; 1055 } 1056 } 1057 1058 static void 1059 linux_poll_wait_dequeue(struct linux_file *filp) 1060 { 1061 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1062 [LINUX_FWQ_STATE_INIT] = 
static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->release != NULL)
		error = -OPW(file, td, fop->release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	if (filp->f_cdev != NULL)
		linux_cdev_deref(filp->f_cdev);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, td->td_ucred, NOCRED, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_flags = DFLAG_PASSABLE,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 		*vm_addr;
	unsigned long	vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

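/*
 * Illustrative usage only: kasprintf() below is the varargs wrapper around
 * kvasprintf(), e.g.
 *
 *	char *label = kasprintf(GFP_KERNEL, "queue%u", qid);
 *	...
 *	kfree(label);
 *
 * "label" and "qid" are hypothetical; the allocation may fail and return
 * NULL, which callers must check.
 */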
char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	linux_set_current(curthread);

	timer = context;
	timer->function(timer->data);
}

void
mod_timer(struct timer_list *timer, int expires)
{

	timer->expires = expires;
	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

static void
linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}

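/*
 * Explanatory note on the completion counter (a summary of the code above,
 * not additional semantics): a single completion increments "done" by one,
 * complete_all() sets it to the UINT_MAX sentinel so it is never consumed,
 * and each successful wait decrements "done" unless the sentinel is set.
 * A hypothetical caller pairs them roughly as:
 *
 *	complete(&c);				// producer side
 *	error = linux_wait_for_common(&c, 1);	// interruptible consumer
 */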
1998 */ 1999 int 2000 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2001 { 2002 struct task_struct *task; 2003 int end = jiffies + timeout; 2004 int error; 2005 2006 if (SCHEDULER_STOPPED()) 2007 return (0); 2008 2009 task = current; 2010 2011 if (flags != 0) 2012 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2013 else 2014 flags = SLEEPQ_SLEEP; 2015 2016 for (;;) { 2017 sleepq_lock(c); 2018 if (c->done) 2019 break; 2020 sleepq_add(c, NULL, "completion", flags, 0); 2021 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2022 2023 DROP_GIANT(); 2024 if (flags & SLEEPQ_INTERRUPTIBLE) 2025 error = -sleepq_timedwait_sig(c, 0); 2026 else 2027 error = -sleepq_timedwait(c, 0); 2028 PICKUP_GIANT(); 2029 2030 if (error != 0) { 2031 /* check for timeout */ 2032 if (error == -EWOULDBLOCK) { 2033 error = 0; /* timeout */ 2034 } else { 2035 /* signal happened */ 2036 linux_schedule_save_interrupt_value(task, error); 2037 error = -ERESTARTSYS; 2038 } 2039 goto done; 2040 } 2041 } 2042 if (c->done != UINT_MAX) 2043 c->done--; 2044 sleepq_release(c); 2045 2046 /* return how many jiffies are left */ 2047 error = linux_timer_jiffies_until(end); 2048 done: 2049 return (error); 2050 } 2051 2052 int 2053 linux_try_wait_for_completion(struct completion *c) 2054 { 2055 int isdone; 2056 2057 sleepq_lock(c); 2058 isdone = (c->done != 0); 2059 if (c->done != 0 && c->done != UINT_MAX) 2060 c->done--; 2061 sleepq_release(c); 2062 return (isdone); 2063 } 2064 2065 int 2066 linux_completion_done(struct completion *c) 2067 { 2068 int isdone; 2069 2070 sleepq_lock(c); 2071 isdone = (c->done != 0); 2072 sleepq_release(c); 2073 return (isdone); 2074 } 2075 2076 static void 2077 linux_cdev_deref(struct linux_cdev *ldev) 2078 { 2079 2080 if (refcount_release(&ldev->refs)) 2081 kfree(ldev); 2082 } 2083 2084 static void 2085 linux_cdev_release(struct kobject *kobj) 2086 { 2087 struct linux_cdev *cdev; 2088 struct kobject *parent; 2089 2090 cdev = container_of(kobj, struct linux_cdev, kobj); 2091 parent = kobj->parent; 2092 linux_destroy_dev(cdev); 2093 linux_cdev_deref(cdev); 2094 kobject_put(parent); 2095 } 2096 2097 static void 2098 linux_cdev_static_release(struct kobject *kobj) 2099 { 2100 struct linux_cdev *cdev; 2101 struct kobject *parent; 2102 2103 cdev = container_of(kobj, struct linux_cdev, kobj); 2104 parent = kobj->parent; 2105 linux_destroy_dev(cdev); 2106 kobject_put(parent); 2107 } 2108 2109 void 2110 linux_destroy_dev(struct linux_cdev *ldev) 2111 { 2112 2113 if (ldev->cdev == NULL) 2114 return; 2115 2116 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2117 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2118 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2119 pause("ldevdtr", hz / 4); 2120 2121 destroy_dev(ldev->cdev); 2122 ldev->cdev = NULL; 2123 } 2124 2125 const struct kobj_type linux_cdev_ktype = { 2126 .release = linux_cdev_release, 2127 }; 2128 2129 const struct kobj_type linux_cdev_static_ktype = { 2130 .release = linux_cdev_static_release, 2131 }; 2132 2133 static void 2134 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2135 { 2136 struct notifier_block *nb; 2137 2138 nb = arg; 2139 if (linkstate == LINK_STATE_UP) 2140 nb->notifier_call(nb, NETDEV_UP, ifp); 2141 else 2142 nb->notifier_call(nb, NETDEV_DOWN, ifp); 2143 } 2144 2145 static void 2146 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2147 { 2148 struct notifier_block *nb; 2149 2150 nb = arg; 2151 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 2152 } 2153 2154 static 
static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
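/*
 * Illustrative consequence of the assertion above (comment only): casts
 * such as
 *
 *	unsigned long cookie = (unsigned long)ptr;
 *	void *again = (void *)cookie;		// again == ptr
 *
 * round-trip losslessly on the supported platforms, which is what much of
 * the ported Linux code silently assumes.
 */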