1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/time.h> 55 #include <sys/user.h> 56 57 #include <vm/vm.h> 58 #include <vm/pmap.h> 59 #include <vm/vm_object.h> 60 #include <vm/vm_page.h> 61 #include <vm/vm_pager.h> 62 63 #include <machine/stdarg.h> 64 65 #if defined(__i386__) || defined(__amd64__) 66 #include <machine/md_var.h> 67 #endif 68 69 #include <linux/kobject.h> 70 #include <linux/device.h> 71 #include <linux/slab.h> 72 #include <linux/module.h> 73 #include <linux/moduleparam.h> 74 #include <linux/cdev.h> 75 #include <linux/file.h> 76 #include <linux/sysfs.h> 77 #include <linux/mm.h> 78 #include <linux/io.h> 79 #include <linux/vmalloc.h> 80 #include <linux/netdevice.h> 81 #include <linux/timer.h> 82 #include <linux/interrupt.h> 83 #include <linux/uaccess.h> 84 #include <linux/list.h> 85 #include <linux/kthread.h> 86 #include <linux/kernel.h> 87 #include <linux/compat.h> 88 #include <linux/poll.h> 89 #include <linux/smp.h> 90 #include <linux/wait_bit.h> 91 92 #if defined(__i386__) || defined(__amd64__) 93 #include <asm/smp.h> 94 #endif 95 96 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 97 "LinuxKPI parameters"); 98 99 int linuxkpi_debug; 100 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 101 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. 
Clear to disable."); 102 103 static struct timeval lkpi_net_lastlog; 104 static int lkpi_net_curpps; 105 static int lkpi_net_maxpps = 99; 106 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 107 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 108 109 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 110 111 #include <linux/rbtree.h> 112 /* Undo Linux compat changes. */ 113 #undef RB_ROOT 114 #undef file 115 #undef cdev 116 #define RB_ROOT(head) (head)->rbh_root 117 118 static void linux_cdev_deref(struct linux_cdev *ldev); 119 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 120 121 struct kobject linux_class_root; 122 struct device linux_root_device; 123 struct class linux_class_misc; 124 struct list_head pci_drivers; 125 struct list_head pci_devices; 126 spinlock_t pci_lock; 127 128 unsigned long linux_timer_hz_mask; 129 130 wait_queue_head_t linux_bit_waitq; 131 wait_queue_head_t linux_var_waitq; 132 133 int 134 panic_cmp(struct rb_node *one, struct rb_node *two) 135 { 136 panic("no cmp"); 137 } 138 139 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 140 141 int 142 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 143 { 144 va_list tmp_va; 145 int len; 146 char *old; 147 char *name; 148 char dummy; 149 150 old = kobj->name; 151 152 if (old && fmt == NULL) 153 return (0); 154 155 /* compute length of string */ 156 va_copy(tmp_va, args); 157 len = vsnprintf(&dummy, 0, fmt, tmp_va); 158 va_end(tmp_va); 159 160 /* account for zero termination */ 161 len++; 162 163 /* check for error */ 164 if (len < 1) 165 return (-EINVAL); 166 167 /* allocate memory for string */ 168 name = kzalloc(len, GFP_KERNEL); 169 if (name == NULL) 170 return (-ENOMEM); 171 vsnprintf(name, len, fmt, args); 172 kobj->name = name; 173 174 /* free old string */ 175 kfree(old); 176 177 /* filter new string */ 178 for (; *name != '\0'; name++) 179 if (*name == '/') 180 *name = '!'; 181 return (0); 182 } 183 184 int 185 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 186 { 187 va_list args; 188 int error; 189 190 va_start(args, fmt); 191 error = kobject_set_name_vargs(kobj, fmt, args); 192 va_end(args); 193 194 return (error); 195 } 196 197 static int 198 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 199 { 200 const struct kobj_type *t; 201 int error; 202 203 kobj->parent = parent; 204 error = sysfs_create_dir(kobj); 205 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 206 struct attribute **attr; 207 t = kobj->ktype; 208 209 for (attr = t->default_attrs; *attr != NULL; attr++) { 210 error = sysfs_create_file(kobj, *attr); 211 if (error) 212 break; 213 } 214 if (error) 215 sysfs_remove_dir(kobj); 216 } 217 return (error); 218 } 219 220 int 221 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
222 { 223 va_list args; 224 int error; 225 226 va_start(args, fmt); 227 error = kobject_set_name_vargs(kobj, fmt, args); 228 va_end(args); 229 if (error) 230 return (error); 231 232 return kobject_add_complete(kobj, parent); 233 } 234 235 void 236 linux_kobject_release(struct kref *kref) 237 { 238 struct kobject *kobj; 239 char *name; 240 241 kobj = container_of(kref, struct kobject, kref); 242 sysfs_remove_dir(kobj); 243 name = kobj->name; 244 if (kobj->ktype && kobj->ktype->release) 245 kobj->ktype->release(kobj); 246 kfree(name); 247 } 248 249 static void 250 linux_kobject_kfree(struct kobject *kobj) 251 { 252 kfree(kobj); 253 } 254 255 static void 256 linux_kobject_kfree_name(struct kobject *kobj) 257 { 258 if (kobj) { 259 kfree(kobj->name); 260 } 261 } 262 263 const struct kobj_type linux_kfree_type = { 264 .release = linux_kobject_kfree 265 }; 266 267 static void 268 linux_device_release(struct device *dev) 269 { 270 pr_debug("linux_device_release: %s\n", dev_name(dev)); 271 kfree(dev); 272 } 273 274 static ssize_t 275 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 276 { 277 struct class_attribute *dattr; 278 ssize_t error; 279 280 dattr = container_of(attr, struct class_attribute, attr); 281 error = -EIO; 282 if (dattr->show) 283 error = dattr->show(container_of(kobj, struct class, kobj), 284 dattr, buf); 285 return (error); 286 } 287 288 static ssize_t 289 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 290 size_t count) 291 { 292 struct class_attribute *dattr; 293 ssize_t error; 294 295 dattr = container_of(attr, struct class_attribute, attr); 296 error = -EIO; 297 if (dattr->store) 298 error = dattr->store(container_of(kobj, struct class, kobj), 299 dattr, buf, count); 300 return (error); 301 } 302 303 static void 304 linux_class_release(struct kobject *kobj) 305 { 306 struct class *class; 307 308 class = container_of(kobj, struct class, kobj); 309 if (class->class_release) 310 class->class_release(class); 311 } 312 313 static const struct sysfs_ops linux_class_sysfs = { 314 .show = linux_class_show, 315 .store = linux_class_store, 316 }; 317 318 const struct kobj_type linux_class_ktype = { 319 .release = linux_class_release, 320 .sysfs_ops = &linux_class_sysfs 321 }; 322 323 static void 324 linux_dev_release(struct kobject *kobj) 325 { 326 struct device *dev; 327 328 dev = container_of(kobj, struct device, kobj); 329 /* This is the precedence defined by linux. 
*/ 330 if (dev->release) 331 dev->release(dev); 332 else if (dev->class && dev->class->dev_release) 333 dev->class->dev_release(dev); 334 } 335 336 static ssize_t 337 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 338 { 339 struct device_attribute *dattr; 340 ssize_t error; 341 342 dattr = container_of(attr, struct device_attribute, attr); 343 error = -EIO; 344 if (dattr->show) 345 error = dattr->show(container_of(kobj, struct device, kobj), 346 dattr, buf); 347 return (error); 348 } 349 350 static ssize_t 351 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 352 size_t count) 353 { 354 struct device_attribute *dattr; 355 ssize_t error; 356 357 dattr = container_of(attr, struct device_attribute, attr); 358 error = -EIO; 359 if (dattr->store) 360 error = dattr->store(container_of(kobj, struct device, kobj), 361 dattr, buf, count); 362 return (error); 363 } 364 365 static const struct sysfs_ops linux_dev_sysfs = { 366 .show = linux_dev_show, 367 .store = linux_dev_store, 368 }; 369 370 const struct kobj_type linux_dev_ktype = { 371 .release = linux_dev_release, 372 .sysfs_ops = &linux_dev_sysfs 373 }; 374 375 struct device * 376 device_create(struct class *class, struct device *parent, dev_t devt, 377 void *drvdata, const char *fmt, ...) 378 { 379 struct device *dev; 380 va_list args; 381 382 dev = kzalloc(sizeof(*dev), M_WAITOK); 383 dev->parent = parent; 384 dev->class = class; 385 dev->devt = devt; 386 dev->driver_data = drvdata; 387 dev->release = linux_device_release; 388 va_start(args, fmt); 389 kobject_set_name_vargs(&dev->kobj, fmt, args); 390 va_end(args); 391 device_register(dev); 392 393 return (dev); 394 } 395 396 int 397 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 398 struct kobject *parent, const char *fmt, ...) 
399 { 400 va_list args; 401 int error; 402 403 kobject_init(kobj, ktype); 404 kobj->ktype = ktype; 405 kobj->parent = parent; 406 kobj->name = NULL; 407 408 va_start(args, fmt); 409 error = kobject_set_name_vargs(kobj, fmt, args); 410 va_end(args); 411 if (error) 412 return (error); 413 return kobject_add_complete(kobj, parent); 414 } 415 416 static void 417 linux_kq_lock(void *arg) 418 { 419 spinlock_t *s = arg; 420 421 spin_lock(s); 422 } 423 static void 424 linux_kq_unlock(void *arg) 425 { 426 spinlock_t *s = arg; 427 428 spin_unlock(s); 429 } 430 431 static void 432 linux_kq_assert_lock(void *arg, int what) 433 { 434 #ifdef INVARIANTS 435 spinlock_t *s = arg; 436 437 if (what == LA_LOCKED) 438 mtx_assert(&s->m, MA_OWNED); 439 else 440 mtx_assert(&s->m, MA_NOTOWNED); 441 #endif 442 } 443 444 static void 445 linux_file_kqfilter_poll(struct linux_file *, int); 446 447 struct linux_file * 448 linux_file_alloc(void) 449 { 450 struct linux_file *filp; 451 452 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 453 454 /* set initial refcount */ 455 filp->f_count = 1; 456 457 /* setup fields needed by kqueue support */ 458 spin_lock_init(&filp->f_kqlock); 459 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 460 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 461 462 return (filp); 463 } 464 465 void 466 linux_file_free(struct linux_file *filp) 467 { 468 if (filp->_file == NULL) { 469 if (filp->f_shmem != NULL) 470 vm_object_deallocate(filp->f_shmem); 471 kfree(filp); 472 } else { 473 /* 474 * The close method of the character device or file 475 * will free the linux_file structure: 476 */ 477 _fdrop(filp->_file, curthread); 478 } 479 } 480 481 static int 482 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 483 vm_page_t *mres) 484 { 485 struct vm_area_struct *vmap; 486 487 vmap = linux_cdev_handle_find(vm_obj->handle); 488 489 MPASS(vmap != NULL); 490 MPASS(vmap->vm_private_data == vm_obj->handle); 491 492 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 493 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 494 vm_page_t page; 495 496 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 497 /* 498 * If the passed in result page is a fake 499 * page, update it with the new physical 500 * address. 501 */ 502 page = *mres; 503 vm_page_updatefake(page, paddr, vm_obj->memattr); 504 } else { 505 /* 506 * Replace the passed in "mres" page with our 507 * own fake page and free up the all of the 508 * original pages. 509 */ 510 VM_OBJECT_WUNLOCK(vm_obj); 511 page = vm_page_getfake(paddr, vm_obj->memattr); 512 VM_OBJECT_WLOCK(vm_obj); 513 514 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 515 *mres = page; 516 } 517 vm_page_valid(page); 518 return (VM_PAGER_OK); 519 } 520 return (VM_PAGER_FAIL); 521 } 522 523 static int 524 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 525 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 526 { 527 struct vm_area_struct *vmap; 528 int err; 529 530 /* get VM area structure */ 531 vmap = linux_cdev_handle_find(vm_obj->handle); 532 MPASS(vmap != NULL); 533 MPASS(vmap->vm_private_data == vm_obj->handle); 534 535 VM_OBJECT_WUNLOCK(vm_obj); 536 537 linux_set_current(curthread); 538 539 down_write(&vmap->vm_mm->mmap_sem); 540 if (unlikely(vmap->vm_ops == NULL)) { 541 err = VM_FAULT_SIGBUS; 542 } else { 543 struct vm_fault vmf; 544 545 /* fill out VM fault structure */ 546 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 547 vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; 548 vmf.pgoff = 0; 549 vmf.page = NULL; 550 vmf.vma = vmap; 551 552 vmap->vm_pfn_count = 0; 553 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 554 vmap->vm_obj = vm_obj; 555 556 err = vmap->vm_ops->fault(vmap, &vmf); 557 558 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 559 kern_yield(PRI_USER); 560 err = vmap->vm_ops->fault(vmap, &vmf); 561 } 562 } 563 564 /* translate return code */ 565 switch (err) { 566 case VM_FAULT_OOM: 567 err = VM_PAGER_AGAIN; 568 break; 569 case VM_FAULT_SIGBUS: 570 err = VM_PAGER_BAD; 571 break; 572 case VM_FAULT_NOPAGE: 573 /* 574 * By contract the fault handler will return having 575 * busied all the pages itself. If pidx is already 576 * found in the object, it will simply xbusy the first 577 * page and return with vm_pfn_count set to 1. 578 */ 579 *first = vmap->vm_pfn_first; 580 *last = *first + vmap->vm_pfn_count - 1; 581 err = VM_PAGER_OK; 582 break; 583 default: 584 err = VM_PAGER_ERROR; 585 break; 586 } 587 up_write(&vmap->vm_mm->mmap_sem); 588 VM_OBJECT_WLOCK(vm_obj); 589 return (err); 590 } 591 592 static struct rwlock linux_vma_lock; 593 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 594 TAILQ_HEAD_INITIALIZER(linux_vma_head); 595 596 static void 597 linux_cdev_handle_free(struct vm_area_struct *vmap) 598 { 599 /* Drop reference on vm_file */ 600 if (vmap->vm_file != NULL) 601 fput(vmap->vm_file); 602 603 /* Drop reference on mm_struct */ 604 mmput(vmap->vm_mm); 605 606 kfree(vmap); 607 } 608 609 static void 610 linux_cdev_handle_remove(struct vm_area_struct *vmap) 611 { 612 rw_wlock(&linux_vma_lock); 613 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 614 rw_wunlock(&linux_vma_lock); 615 } 616 617 static struct vm_area_struct * 618 linux_cdev_handle_find(void *handle) 619 { 620 struct vm_area_struct *vmap; 621 622 rw_rlock(&linux_vma_lock); 623 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 624 if (vmap->vm_private_data == handle) 625 break; 626 } 627 rw_runlock(&linux_vma_lock); 628 return (vmap); 629 } 630 631 static int 632 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 633 vm_ooffset_t foff, struct ucred *cred, u_short *color) 634 { 635 636 MPASS(linux_cdev_handle_find(handle) != NULL); 637 *color = 0; 638 return (0); 639 } 640 641 static void 642 linux_cdev_pager_dtor(void *handle) 643 { 644 const struct vm_operations_struct *vm_ops; 645 struct vm_area_struct *vmap; 646 647 vmap = linux_cdev_handle_find(handle); 648 MPASS(vmap != NULL); 649 650 /* 651 * Remove handle before calling close operation to prevent 652 * other threads from reusing the handle pointer. 
653 */ 654 linux_cdev_handle_remove(vmap); 655 656 down_write(&vmap->vm_mm->mmap_sem); 657 vm_ops = vmap->vm_ops; 658 if (likely(vm_ops != NULL)) 659 vm_ops->close(vmap); 660 up_write(&vmap->vm_mm->mmap_sem); 661 662 linux_cdev_handle_free(vmap); 663 } 664 665 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 666 { 667 /* OBJT_MGTDEVICE */ 668 .cdev_pg_populate = linux_cdev_pager_populate, 669 .cdev_pg_ctor = linux_cdev_pager_ctor, 670 .cdev_pg_dtor = linux_cdev_pager_dtor 671 }, 672 { 673 /* OBJT_DEVICE */ 674 .cdev_pg_fault = linux_cdev_pager_fault, 675 .cdev_pg_ctor = linux_cdev_pager_ctor, 676 .cdev_pg_dtor = linux_cdev_pager_dtor 677 }, 678 }; 679 680 int 681 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 682 unsigned long size) 683 { 684 vm_object_t obj; 685 vm_page_t m; 686 687 obj = vma->vm_obj; 688 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 689 return (-ENOTSUP); 690 VM_OBJECT_RLOCK(obj); 691 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 692 m != NULL && m->pindex < OFF_TO_IDX(address + size); 693 m = TAILQ_NEXT(m, listq)) 694 pmap_remove_all(m); 695 VM_OBJECT_RUNLOCK(obj); 696 return (0); 697 } 698 699 static struct file_operations dummy_ldev_ops = { 700 /* XXXKIB */ 701 }; 702 703 static struct linux_cdev dummy_ldev = { 704 .ops = &dummy_ldev_ops, 705 }; 706 707 #define LDEV_SI_DTR 0x0001 708 #define LDEV_SI_REF 0x0002 709 710 static void 711 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 712 struct linux_cdev **dev) 713 { 714 struct linux_cdev *ldev; 715 u_int siref; 716 717 ldev = filp->f_cdev; 718 *fop = filp->f_op; 719 if (ldev != NULL) { 720 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 721 refcount_acquire(&ldev->refs); 722 } else { 723 for (siref = ldev->siref;;) { 724 if ((siref & LDEV_SI_DTR) != 0) { 725 ldev = &dummy_ldev; 726 *fop = ldev->ops; 727 siref = ldev->siref; 728 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 729 } else if (atomic_fcmpset_int(&ldev->siref, 730 &siref, siref + LDEV_SI_REF)) { 731 break; 732 } 733 } 734 } 735 } 736 *dev = ldev; 737 } 738 739 static void 740 linux_drop_fop(struct linux_cdev *ldev) 741 { 742 743 if (ldev == NULL) 744 return; 745 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 746 linux_cdev_deref(ldev); 747 } else { 748 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 749 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 750 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 751 } 752 } 753 754 #define OPW(fp,td,code) ({ \ 755 struct file *__fpop; \ 756 __typeof(code) __retval; \ 757 \ 758 __fpop = (td)->td_fpop; \ 759 (td)->td_fpop = (fp); \ 760 __retval = (code); \ 761 (td)->td_fpop = __fpop; \ 762 __retval; \ 763 }) 764 765 static int 766 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 767 struct file *file) 768 { 769 struct linux_cdev *ldev; 770 struct linux_file *filp; 771 const struct file_operations *fop; 772 int error; 773 774 ldev = dev->si_drv1; 775 776 filp = linux_file_alloc(); 777 filp->f_dentry = &filp->f_dentry_store; 778 filp->f_op = ldev->ops; 779 filp->f_mode = file->f_flag; 780 filp->f_flags = file->f_flag; 781 filp->f_vnode = file->f_vnode; 782 filp->_file = file; 783 refcount_acquire(&ldev->refs); 784 filp->f_cdev = ldev; 785 786 linux_set_current(td); 787 linux_get_fop(filp, &fop, &ldev); 788 789 if (fop->open != NULL) { 790 error = -fop->open(file->f_vnode, filp); 791 if (error != 0) { 792 linux_drop_fop(ldev); 793 linux_cdev_deref(filp->f_cdev); 794 kfree(filp); 795 return (error); 796 } 797 } 798 799 /* hold on to the vnode - 
used for fstat() */ 800 vhold(filp->f_vnode); 801 802 /* release the file from devfs */ 803 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 804 linux_drop_fop(ldev); 805 return (ENXIO); 806 } 807 808 #define LINUX_IOCTL_MIN_PTR 0x10000UL 809 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 810 811 static inline int 812 linux_remap_address(void **uaddr, size_t len) 813 { 814 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 815 816 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 817 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 818 struct task_struct *pts = current; 819 if (pts == NULL) { 820 *uaddr = NULL; 821 return (1); 822 } 823 824 /* compute data offset */ 825 uaddr_val -= LINUX_IOCTL_MIN_PTR; 826 827 /* check that length is within bounds */ 828 if ((len > IOCPARM_MAX) || 829 (uaddr_val + len) > pts->bsd_ioctl_len) { 830 *uaddr = NULL; 831 return (1); 832 } 833 834 /* re-add kernel buffer address */ 835 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 836 837 /* update address location */ 838 *uaddr = (void *)uaddr_val; 839 return (1); 840 } 841 return (0); 842 } 843 844 int 845 linux_copyin(const void *uaddr, void *kaddr, size_t len) 846 { 847 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 848 if (uaddr == NULL) 849 return (-EFAULT); 850 memcpy(kaddr, uaddr, len); 851 return (0); 852 } 853 return (-copyin(uaddr, kaddr, len)); 854 } 855 856 int 857 linux_copyout(const void *kaddr, void *uaddr, size_t len) 858 { 859 if (linux_remap_address(&uaddr, len)) { 860 if (uaddr == NULL) 861 return (-EFAULT); 862 memcpy(uaddr, kaddr, len); 863 return (0); 864 } 865 return (-copyout(kaddr, uaddr, len)); 866 } 867 868 size_t 869 linux_clear_user(void *_uaddr, size_t _len) 870 { 871 uint8_t *uaddr = _uaddr; 872 size_t len = _len; 873 874 /* make sure uaddr is aligned before going into the fast loop */ 875 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 876 if (subyte(uaddr, 0)) 877 return (_len); 878 uaddr++; 879 len--; 880 } 881 882 /* zero 8 bytes at a time */ 883 while (len > 7) { 884 #ifdef __LP64__ 885 if (suword64(uaddr, 0)) 886 return (_len); 887 #else 888 if (suword32(uaddr, 0)) 889 return (_len); 890 if (suword32(uaddr + 4, 0)) 891 return (_len); 892 #endif 893 uaddr += 8; 894 len -= 8; 895 } 896 897 /* zero fill end, if any */ 898 while (len > 0) { 899 if (subyte(uaddr, 0)) 900 return (_len); 901 uaddr++; 902 len--; 903 } 904 return (0); 905 } 906 907 int 908 linux_access_ok(const void *uaddr, size_t len) 909 { 910 uintptr_t saddr; 911 uintptr_t eaddr; 912 913 /* get start and end address */ 914 saddr = (uintptr_t)uaddr; 915 eaddr = (uintptr_t)uaddr + len; 916 917 /* verify addresses are valid for userspace */ 918 return ((saddr == eaddr) || 919 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 920 } 921 922 /* 923 * This function should return either EINTR or ERESTART depending on 924 * the signal type sent to this thread: 925 */ 926 static int 927 linux_get_error(struct task_struct *task, int error) 928 { 929 /* check for signal type interrupt code */ 930 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 931 error = -linux_schedule_get_interrupt_value(task); 932 if (error == 0) 933 error = EINTR; 934 } 935 return (error); 936 } 937 938 static int 939 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 940 const struct file_operations *fop, u_long cmd, caddr_t data, 941 struct thread *td) 942 { 943 struct task_struct *task = current; 944 unsigned size; 945 int error; 946 947 size = IOCPARM_LEN(cmd); 948 /* refer to logic in sys_ioctl() */ 
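	/*
	 * How the argument remapping below works: for ioctl commands that
	 * encode a parameter length, "data" already points at a kernel
	 * buffer holding the copied-in argument, but Linux-style handlers
	 * expect a user-space pointer.  The task's bsd_ioctl_data and
	 * bsd_ioctl_len fields are set as a hint and the handler is given
	 * the pseudo address LINUX_IOCTL_MIN_PTR; linux_copyin() and
	 * linux_copyout() (reached via copy_from_user()/copy_to_user())
	 * recognize addresses in [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR)
	 * and translate them back to the kernel buffer.
	 *
	 * Illustrative sketch of a driver handler relying on this
	 * (hypothetical "foo" driver, not part of this file):
	 *
	 *	static long
	 *	foo_unlocked_ioctl(struct linux_file *filp, unsigned int cmd,
	 *	    unsigned long arg)
	 *	{
	 *		struct foo_req req;
	 *
	 *		if (copy_from_user(&req, (void *)arg, sizeof(req)))
	 *			return (-EFAULT);
	 *		... handle the request ...
	 *		return (copy_to_user((void *)arg, &req, sizeof(req)) ?
	 *		    -EFAULT : 0);
	 *	}
	 */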
949 if (size > 0) { 950 /* 951 * Setup hint for linux_copyin() and linux_copyout(). 952 * 953 * Background: Linux code expects a user-space address 954 * while FreeBSD supplies a kernel-space address. 955 */ 956 task->bsd_ioctl_data = data; 957 task->bsd_ioctl_len = size; 958 data = (void *)LINUX_IOCTL_MIN_PTR; 959 } else { 960 /* fetch user-space pointer */ 961 data = *(void **)data; 962 } 963 #if defined(__amd64__) 964 if (td->td_proc->p_elf_machine == EM_386) { 965 /* try the compat IOCTL handler first */ 966 if (fop->compat_ioctl != NULL) { 967 error = -OPW(fp, td, fop->compat_ioctl(filp, 968 cmd, (u_long)data)); 969 } else { 970 error = ENOTTY; 971 } 972 973 /* fallback to the regular IOCTL handler, if any */ 974 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 975 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 976 cmd, (u_long)data)); 977 } 978 } else 979 #endif 980 { 981 if (fop->unlocked_ioctl != NULL) { 982 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 983 cmd, (u_long)data)); 984 } else { 985 error = ENOTTY; 986 } 987 } 988 if (size > 0) { 989 task->bsd_ioctl_data = NULL; 990 task->bsd_ioctl_len = 0; 991 } 992 993 if (error == EWOULDBLOCK) { 994 /* update kqfilter status, if any */ 995 linux_file_kqfilter_poll(filp, 996 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 997 } else { 998 error = linux_get_error(task, error); 999 } 1000 return (error); 1001 } 1002 1003 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1004 1005 /* 1006 * This function atomically updates the poll wakeup state and returns 1007 * the previous state at the time of update. 1008 */ 1009 static uint8_t 1010 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1011 { 1012 int c, old; 1013 1014 c = v->counter; 1015 1016 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1017 c = old; 1018 1019 return (c); 1020 } 1021 1022 static int 1023 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1024 { 1025 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1026 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1027 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1028 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1029 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1030 }; 1031 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1032 1033 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1034 case LINUX_FWQ_STATE_QUEUED: 1035 linux_poll_wakeup(filp); 1036 return (1); 1037 default: 1038 return (0); 1039 } 1040 } 1041 1042 void 1043 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1044 { 1045 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1046 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1047 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1048 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1049 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1050 }; 1051 1052 /* check if we are called inside the select system call */ 1053 if (p == LINUX_POLL_TABLE_NORMAL) 1054 selrecord(curthread, &filp->f_selinfo); 1055 1056 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1057 case LINUX_FWQ_STATE_INIT: 1058 /* NOTE: file handles can only belong to one wait-queue */ 1059 filp->f_wait_queue.wqh = wqh; 1060 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1061 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1062 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1063 break; 1064 
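	/*
	 * Any other previous state means the wait-queue entry is already
	 * (or is being) registered with a wait-queue head; the translation
	 * table above only refreshes the state word, so nothing further
	 * needs to be done here.
	 */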
default: 1065 break; 1066 } 1067 } 1068 1069 static void 1070 linux_poll_wait_dequeue(struct linux_file *filp) 1071 { 1072 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1073 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1074 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1075 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1076 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1077 }; 1078 1079 seldrain(&filp->f_selinfo); 1080 1081 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1082 case LINUX_FWQ_STATE_NOT_READY: 1083 case LINUX_FWQ_STATE_QUEUED: 1084 case LINUX_FWQ_STATE_READY: 1085 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1086 break; 1087 default: 1088 break; 1089 } 1090 } 1091 1092 void 1093 linux_poll_wakeup(struct linux_file *filp) 1094 { 1095 /* this function should be NULL-safe */ 1096 if (filp == NULL) 1097 return; 1098 1099 selwakeup(&filp->f_selinfo); 1100 1101 spin_lock(&filp->f_kqlock); 1102 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1103 LINUX_KQ_FLAG_NEED_WRITE; 1104 1105 /* make sure the "knote" gets woken up */ 1106 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1107 spin_unlock(&filp->f_kqlock); 1108 } 1109 1110 static void 1111 linux_file_kqfilter_detach(struct knote *kn) 1112 { 1113 struct linux_file *filp = kn->kn_hook; 1114 1115 spin_lock(&filp->f_kqlock); 1116 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1117 spin_unlock(&filp->f_kqlock); 1118 } 1119 1120 static int 1121 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1122 { 1123 struct linux_file *filp = kn->kn_hook; 1124 1125 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1126 1127 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1128 } 1129 1130 static int 1131 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1132 { 1133 struct linux_file *filp = kn->kn_hook; 1134 1135 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1136 1137 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 
1 : 0); 1138 } 1139 1140 static struct filterops linux_dev_kqfiltops_read = { 1141 .f_isfd = 1, 1142 .f_detach = linux_file_kqfilter_detach, 1143 .f_event = linux_file_kqfilter_read_event, 1144 }; 1145 1146 static struct filterops linux_dev_kqfiltops_write = { 1147 .f_isfd = 1, 1148 .f_detach = linux_file_kqfilter_detach, 1149 .f_event = linux_file_kqfilter_write_event, 1150 }; 1151 1152 static void 1153 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1154 { 1155 struct thread *td; 1156 const struct file_operations *fop; 1157 struct linux_cdev *ldev; 1158 int temp; 1159 1160 if ((filp->f_kqflags & kqflags) == 0) 1161 return; 1162 1163 td = curthread; 1164 1165 linux_get_fop(filp, &fop, &ldev); 1166 /* get the latest polling state */ 1167 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1168 linux_drop_fop(ldev); 1169 1170 spin_lock(&filp->f_kqlock); 1171 /* clear kqflags */ 1172 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1173 LINUX_KQ_FLAG_NEED_WRITE); 1174 /* update kqflags */ 1175 if ((temp & (POLLIN | POLLOUT)) != 0) { 1176 if ((temp & POLLIN) != 0) 1177 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1178 if ((temp & POLLOUT) != 0) 1179 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1180 1181 /* make sure the "knote" gets woken up */ 1182 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1183 } 1184 spin_unlock(&filp->f_kqlock); 1185 } 1186 1187 static int 1188 linux_file_kqfilter(struct file *file, struct knote *kn) 1189 { 1190 struct linux_file *filp; 1191 struct thread *td; 1192 int error; 1193 1194 td = curthread; 1195 filp = (struct linux_file *)file->f_data; 1196 filp->f_flags = file->f_flag; 1197 if (filp->f_op->poll == NULL) 1198 return (EINVAL); 1199 1200 spin_lock(&filp->f_kqlock); 1201 switch (kn->kn_filter) { 1202 case EVFILT_READ: 1203 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1204 kn->kn_fop = &linux_dev_kqfiltops_read; 1205 kn->kn_hook = filp; 1206 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1207 error = 0; 1208 break; 1209 case EVFILT_WRITE: 1210 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1211 kn->kn_fop = &linux_dev_kqfiltops_write; 1212 kn->kn_hook = filp; 1213 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1214 error = 0; 1215 break; 1216 default: 1217 error = EINVAL; 1218 break; 1219 } 1220 spin_unlock(&filp->f_kqlock); 1221 1222 if (error == 0) { 1223 linux_set_current(td); 1224 1225 /* update kqfilter status, if any */ 1226 linux_file_kqfilter_poll(filp, 1227 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1228 } 1229 return (error); 1230 } 1231 1232 static int 1233 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1234 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1235 int nprot, struct thread *td) 1236 { 1237 struct task_struct *task; 1238 struct vm_area_struct *vmap; 1239 struct mm_struct *mm; 1240 struct linux_file *filp; 1241 vm_memattr_t attr; 1242 int error; 1243 1244 filp = (struct linux_file *)fp->f_data; 1245 filp->f_flags = fp->f_flag; 1246 1247 if (fop->mmap == NULL) 1248 return (EOPNOTSUPP); 1249 1250 linux_set_current(td); 1251 1252 /* 1253 * The same VM object might be shared by multiple processes 1254 * and the mm_struct is usually freed when a process exits. 1255 * 1256 * The atomic reference below makes sure the mm_struct is 1257 * available as long as the vmap is in the linux_vma_head. 
1258 */ 1259 task = current; 1260 mm = task->mm; 1261 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1262 return (EINVAL); 1263 1264 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1265 vmap->vm_start = 0; 1266 vmap->vm_end = size; 1267 vmap->vm_pgoff = *offset / PAGE_SIZE; 1268 vmap->vm_pfn = 0; 1269 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1270 vmap->vm_ops = NULL; 1271 vmap->vm_file = get_file(filp); 1272 vmap->vm_mm = mm; 1273 1274 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1275 error = linux_get_error(task, EINTR); 1276 } else { 1277 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1278 error = linux_get_error(task, error); 1279 up_write(&vmap->vm_mm->mmap_sem); 1280 } 1281 1282 if (error != 0) { 1283 linux_cdev_handle_free(vmap); 1284 return (error); 1285 } 1286 1287 attr = pgprot2cachemode(vmap->vm_page_prot); 1288 1289 if (vmap->vm_ops != NULL) { 1290 struct vm_area_struct *ptr; 1291 void *vm_private_data; 1292 bool vm_no_fault; 1293 1294 if (vmap->vm_ops->open == NULL || 1295 vmap->vm_ops->close == NULL || 1296 vmap->vm_private_data == NULL) { 1297 /* free allocated VM area struct */ 1298 linux_cdev_handle_free(vmap); 1299 return (EINVAL); 1300 } 1301 1302 vm_private_data = vmap->vm_private_data; 1303 1304 rw_wlock(&linux_vma_lock); 1305 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1306 if (ptr->vm_private_data == vm_private_data) 1307 break; 1308 } 1309 /* check if there is an existing VM area struct */ 1310 if (ptr != NULL) { 1311 /* check if the VM area structure is invalid */ 1312 if (ptr->vm_ops == NULL || 1313 ptr->vm_ops->open == NULL || 1314 ptr->vm_ops->close == NULL) { 1315 error = ESTALE; 1316 vm_no_fault = 1; 1317 } else { 1318 error = EEXIST; 1319 vm_no_fault = (ptr->vm_ops->fault == NULL); 1320 } 1321 } else { 1322 /* insert VM area structure into list */ 1323 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1324 error = 0; 1325 vm_no_fault = (vmap->vm_ops->fault == NULL); 1326 } 1327 rw_wunlock(&linux_vma_lock); 1328 1329 if (error != 0) { 1330 /* free allocated VM area struct */ 1331 linux_cdev_handle_free(vmap); 1332 /* check for stale VM area struct */ 1333 if (error != EEXIST) 1334 return (error); 1335 } 1336 1337 /* check if there is no fault handler */ 1338 if (vm_no_fault) { 1339 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1340 &linux_cdev_pager_ops[1], size, nprot, *offset, 1341 td->td_ucred); 1342 } else { 1343 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1344 &linux_cdev_pager_ops[0], size, nprot, *offset, 1345 td->td_ucred); 1346 } 1347 1348 /* check if allocating the VM object failed */ 1349 if (*object == NULL) { 1350 if (error == 0) { 1351 /* remove VM area struct from list */ 1352 linux_cdev_handle_remove(vmap); 1353 /* free allocated VM area struct */ 1354 linux_cdev_handle_free(vmap); 1355 } 1356 return (EINVAL); 1357 } 1358 } else { 1359 struct sglist *sg; 1360 1361 sg = sglist_alloc(1, M_WAITOK); 1362 sglist_append_phys(sg, 1363 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1364 1365 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1366 nprot, 0, td->td_ucred); 1367 1368 linux_cdev_handle_free(vmap); 1369 1370 if (*object == NULL) { 1371 sglist_free(sg); 1372 return (EINVAL); 1373 } 1374 } 1375 1376 if (attr != VM_MEMATTR_DEFAULT) { 1377 VM_OBJECT_WLOCK(*object); 1378 vm_object_set_memattr(*object, attr); 1379 VM_OBJECT_WUNLOCK(*object); 1380 } 1381 *offset = 0; 1382 return (0); 1383 } 1384 1385 struct cdevsw linuxcdevsw = { 1386 .d_version = D_VERSION, 1387 
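	/*
	 * d_fdopen is the only device method needed: linux_dev_fdopen()
	 * allocates the struct linux_file, invokes the driver's open()
	 * callback (if any) and re-initializes the devfs file to use
	 * linuxfileops, so every subsequent file operation goes through
	 * the wrappers defined in this file.
	 */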
.d_fdopen = linux_dev_fdopen, 1388 .d_name = "lkpidev", 1389 }; 1390 1391 static int 1392 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1393 int flags, struct thread *td) 1394 { 1395 struct linux_file *filp; 1396 const struct file_operations *fop; 1397 struct linux_cdev *ldev; 1398 ssize_t bytes; 1399 int error; 1400 1401 error = 0; 1402 filp = (struct linux_file *)file->f_data; 1403 filp->f_flags = file->f_flag; 1404 /* XXX no support for I/O vectors currently */ 1405 if (uio->uio_iovcnt != 1) 1406 return (EOPNOTSUPP); 1407 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1408 return (EINVAL); 1409 linux_set_current(td); 1410 linux_get_fop(filp, &fop, &ldev); 1411 if (fop->read != NULL) { 1412 bytes = OPW(file, td, fop->read(filp, 1413 uio->uio_iov->iov_base, 1414 uio->uio_iov->iov_len, &uio->uio_offset)); 1415 if (bytes >= 0) { 1416 uio->uio_iov->iov_base = 1417 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1418 uio->uio_iov->iov_len -= bytes; 1419 uio->uio_resid -= bytes; 1420 } else { 1421 error = linux_get_error(current, -bytes); 1422 } 1423 } else 1424 error = ENXIO; 1425 1426 /* update kqfilter status, if any */ 1427 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1428 linux_drop_fop(ldev); 1429 1430 return (error); 1431 } 1432 1433 static int 1434 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1435 int flags, struct thread *td) 1436 { 1437 struct linux_file *filp; 1438 const struct file_operations *fop; 1439 struct linux_cdev *ldev; 1440 ssize_t bytes; 1441 int error; 1442 1443 filp = (struct linux_file *)file->f_data; 1444 filp->f_flags = file->f_flag; 1445 /* XXX no support for I/O vectors currently */ 1446 if (uio->uio_iovcnt != 1) 1447 return (EOPNOTSUPP); 1448 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1449 return (EINVAL); 1450 linux_set_current(td); 1451 linux_get_fop(filp, &fop, &ldev); 1452 if (fop->write != NULL) { 1453 bytes = OPW(file, td, fop->write(filp, 1454 uio->uio_iov->iov_base, 1455 uio->uio_iov->iov_len, &uio->uio_offset)); 1456 if (bytes >= 0) { 1457 uio->uio_iov->iov_base = 1458 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1459 uio->uio_iov->iov_len -= bytes; 1460 uio->uio_resid -= bytes; 1461 error = 0; 1462 } else { 1463 error = linux_get_error(current, -bytes); 1464 } 1465 } else 1466 error = ENXIO; 1467 1468 /* update kqfilter status, if any */ 1469 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1470 1471 linux_drop_fop(ldev); 1472 1473 return (error); 1474 } 1475 1476 static int 1477 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1478 struct thread *td) 1479 { 1480 struct linux_file *filp; 1481 const struct file_operations *fop; 1482 struct linux_cdev *ldev; 1483 int revents; 1484 1485 filp = (struct linux_file *)file->f_data; 1486 filp->f_flags = file->f_flag; 1487 linux_set_current(td); 1488 linux_get_fop(filp, &fop, &ldev); 1489 if (fop->poll != NULL) { 1490 revents = OPW(file, td, fop->poll(filp, 1491 LINUX_POLL_TABLE_NORMAL)) & events; 1492 } else { 1493 revents = 0; 1494 } 1495 linux_drop_fop(ldev); 1496 return (revents); 1497 } 1498 1499 static int 1500 linux_file_close(struct file *file, struct thread *td) 1501 { 1502 struct linux_file *filp; 1503 int (*release)(struct inode *, struct linux_file *); 1504 const struct file_operations *fop; 1505 struct linux_cdev *ldev; 1506 int error; 1507 1508 filp = (struct linux_file *)file->f_data; 1509 1510 KASSERT(file_count(filp) == 0, 1511 ("File refcount(%d) is not zero", file_count(filp))); 1512 1513 if (td == 
NULL) 1514 td = curthread; 1515 1516 error = 0; 1517 filp->f_flags = file->f_flag; 1518 linux_set_current(td); 1519 linux_poll_wait_dequeue(filp); 1520 linux_get_fop(filp, &fop, &ldev); 1521 /* 1522 * Always use the real release function, if any, to avoid 1523 * leaking device resources: 1524 */ 1525 release = filp->f_op->release; 1526 if (release != NULL) 1527 error = -OPW(file, td, release(filp->f_vnode, filp)); 1528 funsetown(&filp->f_sigio); 1529 if (filp->f_vnode != NULL) 1530 vdrop(filp->f_vnode); 1531 linux_drop_fop(ldev); 1532 ldev = filp->f_cdev; 1533 if (ldev != NULL) 1534 linux_cdev_deref(ldev); 1535 kfree(filp); 1536 1537 return (error); 1538 } 1539 1540 static int 1541 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1542 struct thread *td) 1543 { 1544 struct linux_file *filp; 1545 const struct file_operations *fop; 1546 struct linux_cdev *ldev; 1547 struct fiodgname_arg *fgn; 1548 const char *p; 1549 int error, i; 1550 1551 error = 0; 1552 filp = (struct linux_file *)fp->f_data; 1553 filp->f_flags = fp->f_flag; 1554 linux_get_fop(filp, &fop, &ldev); 1555 1556 linux_set_current(td); 1557 switch (cmd) { 1558 case FIONBIO: 1559 break; 1560 case FIOASYNC: 1561 if (fop->fasync == NULL) 1562 break; 1563 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1564 break; 1565 case FIOSETOWN: 1566 error = fsetown(*(int *)data, &filp->f_sigio); 1567 if (error == 0) { 1568 if (fop->fasync == NULL) 1569 break; 1570 error = -OPW(fp, td, fop->fasync(0, filp, 1571 fp->f_flag & FASYNC)); 1572 } 1573 break; 1574 case FIOGETOWN: 1575 *(int *)data = fgetown(&filp->f_sigio); 1576 break; 1577 case FIODGNAME: 1578 #ifdef COMPAT_FREEBSD32 1579 case FIODGNAME_32: 1580 #endif 1581 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1582 error = ENXIO; 1583 break; 1584 } 1585 fgn = data; 1586 p = devtoname(filp->f_cdev->cdev); 1587 i = strlen(p) + 1; 1588 if (i > fgn->len) { 1589 error = EINVAL; 1590 break; 1591 } 1592 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1593 break; 1594 default: 1595 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1596 break; 1597 } 1598 linux_drop_fop(ldev); 1599 return (error); 1600 } 1601 1602 static int 1603 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1604 vm_prot_t *maxprotp, int *flagsp, struct file *fp, 1605 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1606 { 1607 /* 1608 * Character devices do not provide private mappings 1609 * of any kind: 1610 */ 1611 if ((*maxprotp & VM_PROT_WRITE) == 0 && 1612 (prot & VM_PROT_WRITE) != 0) 1613 return (EACCES); 1614 if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) 1615 return (EINVAL); 1616 1617 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1618 (int)prot, td)); 1619 } 1620 1621 static int 1622 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1623 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1624 struct thread *td) 1625 { 1626 struct linux_file *filp; 1627 const struct file_operations *fop; 1628 struct linux_cdev *ldev; 1629 struct mount *mp; 1630 struct vnode *vp; 1631 vm_object_t object; 1632 vm_prot_t maxprot; 1633 int error; 1634 1635 filp = (struct linux_file *)fp->f_data; 1636 1637 vp = filp->f_vnode; 1638 if (vp == NULL) 1639 return (EOPNOTSUPP); 1640 1641 /* 1642 * Ensure that file and memory protections are 1643 * compatible. 
1644 */ 1645 mp = vp->v_mount; 1646 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1647 maxprot = VM_PROT_NONE; 1648 if ((prot & VM_PROT_EXECUTE) != 0) 1649 return (EACCES); 1650 } else 1651 maxprot = VM_PROT_EXECUTE; 1652 if ((fp->f_flag & FREAD) != 0) 1653 maxprot |= VM_PROT_READ; 1654 else if ((prot & VM_PROT_READ) != 0) 1655 return (EACCES); 1656 1657 /* 1658 * If we are sharing potential changes via MAP_SHARED and we 1659 * are trying to get write permission although we opened it 1660 * without asking for it, bail out. 1661 * 1662 * Note that most character devices always share mappings. 1663 * 1664 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1665 * requests rather than doing it here. 1666 */ 1667 if ((flags & MAP_SHARED) != 0) { 1668 if ((fp->f_flag & FWRITE) != 0) 1669 maxprot |= VM_PROT_WRITE; 1670 else if ((prot & VM_PROT_WRITE) != 0) 1671 return (EACCES); 1672 } 1673 maxprot &= cap_maxprot; 1674 1675 linux_get_fop(filp, &fop, &ldev); 1676 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, 1677 &foff, fop, &object); 1678 if (error != 0) 1679 goto out; 1680 1681 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1682 foff, FALSE, td); 1683 if (error != 0) 1684 vm_object_deallocate(object); 1685 out: 1686 linux_drop_fop(ldev); 1687 return (error); 1688 } 1689 1690 static int 1691 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1692 struct thread *td) 1693 { 1694 struct linux_file *filp; 1695 struct vnode *vp; 1696 int error; 1697 1698 filp = (struct linux_file *)fp->f_data; 1699 if (filp->f_vnode == NULL) 1700 return (EOPNOTSUPP); 1701 1702 vp = filp->f_vnode; 1703 1704 vn_lock(vp, LK_SHARED | LK_RETRY); 1705 error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td); 1706 VOP_UNLOCK(vp); 1707 1708 return (error); 1709 } 1710 1711 static int 1712 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1713 struct filedesc *fdp) 1714 { 1715 struct linux_file *filp; 1716 struct vnode *vp; 1717 int error; 1718 1719 filp = fp->f_data; 1720 vp = filp->f_vnode; 1721 if (vp == NULL) { 1722 error = 0; 1723 kif->kf_type = KF_TYPE_DEV; 1724 } else { 1725 vref(vp); 1726 FILEDESC_SUNLOCK(fdp); 1727 error = vn_fill_kinfo_vnode(vp, kif); 1728 vrele(vp); 1729 kif->kf_type = KF_TYPE_VNODE; 1730 FILEDESC_SLOCK(fdp); 1731 } 1732 return (error); 1733 } 1734 1735 unsigned int 1736 linux_iminor(struct inode *inode) 1737 { 1738 struct linux_cdev *ldev; 1739 1740 if (inode == NULL || inode->v_rdev == NULL || 1741 inode->v_rdev->si_devsw != &linuxcdevsw) 1742 return (-1U); 1743 ldev = inode->v_rdev->si_drv1; 1744 if (ldev == NULL) 1745 return (-1U); 1746 1747 return (minor(ldev->dev)); 1748 } 1749 1750 struct fileops linuxfileops = { 1751 .fo_read = linux_file_read, 1752 .fo_write = linux_file_write, 1753 .fo_truncate = invfo_truncate, 1754 .fo_kqfilter = linux_file_kqfilter, 1755 .fo_stat = linux_file_stat, 1756 .fo_fill_kinfo = linux_file_fill_kinfo, 1757 .fo_poll = linux_file_poll, 1758 .fo_close = linux_file_close, 1759 .fo_ioctl = linux_file_ioctl, 1760 .fo_mmap = linux_file_mmap, 1761 .fo_chmod = invfo_chmod, 1762 .fo_chown = invfo_chown, 1763 .fo_sendfile = invfo_sendfile, 1764 .fo_flags = DFLAG_PASSABLE, 1765 }; 1766 1767 /* 1768 * Hash of vmmap addresses. This is infrequently accessed and does not 1769 * need to be particularly large. This is done because we must store the 1770 * caller's idea of the map size to properly unmap. 
1771 */ 1772 struct vmmap { 1773 LIST_ENTRY(vmmap) vm_next; 1774 void *vm_addr; 1775 unsigned long vm_size; 1776 }; 1777 1778 struct vmmaphd { 1779 struct vmmap *lh_first; 1780 }; 1781 #define VMMAP_HASH_SIZE 64 1782 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1783 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1784 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1785 static struct mtx vmmaplock; 1786 1787 static void 1788 vmmap_add(void *addr, unsigned long size) 1789 { 1790 struct vmmap *vmmap; 1791 1792 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1793 mtx_lock(&vmmaplock); 1794 vmmap->vm_size = size; 1795 vmmap->vm_addr = addr; 1796 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1797 mtx_unlock(&vmmaplock); 1798 } 1799 1800 static struct vmmap * 1801 vmmap_remove(void *addr) 1802 { 1803 struct vmmap *vmmap; 1804 1805 mtx_lock(&vmmaplock); 1806 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1807 if (vmmap->vm_addr == addr) 1808 break; 1809 if (vmmap) 1810 LIST_REMOVE(vmmap, vm_next); 1811 mtx_unlock(&vmmaplock); 1812 1813 return (vmmap); 1814 } 1815 1816 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1817 void * 1818 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1819 { 1820 void *addr; 1821 1822 addr = pmap_mapdev_attr(phys_addr, size, attr); 1823 if (addr == NULL) 1824 return (NULL); 1825 vmmap_add(addr, size); 1826 1827 return (addr); 1828 } 1829 #endif 1830 1831 void 1832 iounmap(void *addr) 1833 { 1834 struct vmmap *vmmap; 1835 1836 vmmap = vmmap_remove(addr); 1837 if (vmmap == NULL) 1838 return; 1839 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1840 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1841 #endif 1842 kfree(vmmap); 1843 } 1844 1845 void * 1846 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1847 { 1848 vm_offset_t off; 1849 size_t size; 1850 1851 size = count * PAGE_SIZE; 1852 off = kva_alloc(size); 1853 if (off == 0) 1854 return (NULL); 1855 vmmap_add((void *)off, size); 1856 pmap_qenter(off, pages, count); 1857 1858 return ((void *)off); 1859 } 1860 1861 void 1862 vunmap(void *addr) 1863 { 1864 struct vmmap *vmmap; 1865 1866 vmmap = vmmap_remove(addr); 1867 if (vmmap == NULL) 1868 return; 1869 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1870 kva_free((vm_offset_t)addr, vmmap->vm_size); 1871 kfree(vmmap); 1872 } 1873 1874 static char * 1875 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1876 { 1877 unsigned int len; 1878 char *p; 1879 va_list aq; 1880 1881 va_copy(aq, ap); 1882 len = vsnprintf(NULL, 0, fmt, aq); 1883 va_end(aq); 1884 1885 if (dev != NULL) 1886 p = devm_kmalloc(dev, len + 1, gfp); 1887 else 1888 p = kmalloc(len + 1, gfp); 1889 if (p != NULL) 1890 vsnprintf(p, len + 1, fmt, ap); 1891 1892 return (p); 1893 } 1894 1895 char * 1896 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1897 { 1898 1899 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1900 } 1901 1902 char * 1903 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1904 { 1905 va_list ap; 1906 char *p; 1907 1908 va_start(ap, fmt); 1909 p = devm_kvasprintf(dev, gfp, fmt, ap); 1910 va_end(ap); 1911 1912 return (p); 1913 } 1914 1915 char * 1916 kasprintf(gfp_t gfp, const char *fmt, ...) 
1917 { 1918 va_list ap; 1919 char *p; 1920 1921 va_start(ap, fmt); 1922 p = kvasprintf(gfp, fmt, ap); 1923 va_end(ap); 1924 1925 return (p); 1926 } 1927 1928 static void 1929 linux_timer_callback_wrapper(void *context) 1930 { 1931 struct timer_list *timer; 1932 1933 timer = context; 1934 1935 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1936 /* try again later */ 1937 callout_reset(&timer->callout, 1, 1938 &linux_timer_callback_wrapper, timer); 1939 return; 1940 } 1941 1942 timer->function(timer->data); 1943 } 1944 1945 int 1946 mod_timer(struct timer_list *timer, int expires) 1947 { 1948 int ret; 1949 1950 timer->expires = expires; 1951 ret = callout_reset(&timer->callout, 1952 linux_timer_jiffies_until(expires), 1953 &linux_timer_callback_wrapper, timer); 1954 1955 MPASS(ret == 0 || ret == 1); 1956 1957 return (ret == 1); 1958 } 1959 1960 void 1961 add_timer(struct timer_list *timer) 1962 { 1963 1964 callout_reset(&timer->callout, 1965 linux_timer_jiffies_until(timer->expires), 1966 &linux_timer_callback_wrapper, timer); 1967 } 1968 1969 void 1970 add_timer_on(struct timer_list *timer, int cpu) 1971 { 1972 1973 callout_reset_on(&timer->callout, 1974 linux_timer_jiffies_until(timer->expires), 1975 &linux_timer_callback_wrapper, timer, cpu); 1976 } 1977 1978 int 1979 del_timer(struct timer_list *timer) 1980 { 1981 1982 if (callout_stop(&(timer)->callout) == -1) 1983 return (0); 1984 return (1); 1985 } 1986 1987 int 1988 del_timer_sync(struct timer_list *timer) 1989 { 1990 1991 if (callout_drain(&(timer)->callout) == -1) 1992 return (0); 1993 return (1); 1994 } 1995 1996 /* greatest common divisor, Euclid equation */ 1997 static uint64_t 1998 lkpi_gcd_64(uint64_t a, uint64_t b) 1999 { 2000 uint64_t an; 2001 uint64_t bn; 2002 2003 while (b != 0) { 2004 an = b; 2005 bn = a % b; 2006 a = an; 2007 b = bn; 2008 } 2009 return (a); 2010 } 2011 2012 uint64_t lkpi_nsec2hz_rem; 2013 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2014 uint64_t lkpi_nsec2hz_max; 2015 2016 uint64_t lkpi_usec2hz_rem; 2017 uint64_t lkpi_usec2hz_div = 1000000ULL; 2018 uint64_t lkpi_usec2hz_max; 2019 2020 uint64_t lkpi_msec2hz_rem; 2021 uint64_t lkpi_msec2hz_div = 1000ULL; 2022 uint64_t lkpi_msec2hz_max; 2023 2024 static void 2025 linux_timer_init(void *arg) 2026 { 2027 uint64_t gcd; 2028 2029 /* 2030 * Compute an internal HZ value which can divide 2**32 to 2031 * avoid timer rounding problems when the tick value wraps 2032 * around 2**32: 2033 */ 2034 linux_timer_hz_mask = 1; 2035 while (linux_timer_hz_mask < (unsigned long)hz) 2036 linux_timer_hz_mask *= 2; 2037 linux_timer_hz_mask--; 2038 2039 /* compute some internal constants */ 2040 2041 lkpi_nsec2hz_rem = hz; 2042 lkpi_usec2hz_rem = hz; 2043 lkpi_msec2hz_rem = hz; 2044 2045 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2046 lkpi_nsec2hz_rem /= gcd; 2047 lkpi_nsec2hz_div /= gcd; 2048 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2049 2050 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2051 lkpi_usec2hz_rem /= gcd; 2052 lkpi_usec2hz_div /= gcd; 2053 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2054 2055 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2056 lkpi_msec2hz_rem /= gcd; 2057 lkpi_msec2hz_div /= gcd; 2058 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2059 } 2060 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2061 2062 void 2063 linux_complete_common(struct completion *c, int all) 2064 { 2065 int wakeup_swapper; 2066 2067 sleepq_lock(c); 2068 if (all) { 2069 c->done = UINT_MAX; 2070 wakeup_swapper = 
sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2071 } else { 2072 if (c->done != UINT_MAX) 2073 c->done++; 2074 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2075 } 2076 sleepq_release(c); 2077 if (wakeup_swapper) 2078 kick_proc0(); 2079 } 2080 2081 /* 2082 * Indefinite wait for done != 0 with or without signals. 2083 */ 2084 int 2085 linux_wait_for_common(struct completion *c, int flags) 2086 { 2087 struct task_struct *task; 2088 int error; 2089 2090 if (SCHEDULER_STOPPED()) 2091 return (0); 2092 2093 task = current; 2094 2095 if (flags != 0) 2096 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2097 else 2098 flags = SLEEPQ_SLEEP; 2099 error = 0; 2100 for (;;) { 2101 sleepq_lock(c); 2102 if (c->done) 2103 break; 2104 sleepq_add(c, NULL, "completion", flags, 0); 2105 if (flags & SLEEPQ_INTERRUPTIBLE) { 2106 DROP_GIANT(); 2107 error = -sleepq_wait_sig(c, 0); 2108 PICKUP_GIANT(); 2109 if (error != 0) { 2110 linux_schedule_save_interrupt_value(task, error); 2111 error = -ERESTARTSYS; 2112 goto intr; 2113 } 2114 } else { 2115 DROP_GIANT(); 2116 sleepq_wait(c, 0); 2117 PICKUP_GIANT(); 2118 } 2119 } 2120 if (c->done != UINT_MAX) 2121 c->done--; 2122 sleepq_release(c); 2123 2124 intr: 2125 return (error); 2126 } 2127 2128 /* 2129 * Time limited wait for done != 0 with or without signals. 2130 */ 2131 int 2132 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2133 { 2134 struct task_struct *task; 2135 int end = jiffies + timeout; 2136 int error; 2137 2138 if (SCHEDULER_STOPPED()) 2139 return (0); 2140 2141 task = current; 2142 2143 if (flags != 0) 2144 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2145 else 2146 flags = SLEEPQ_SLEEP; 2147 2148 for (;;) { 2149 sleepq_lock(c); 2150 if (c->done) 2151 break; 2152 sleepq_add(c, NULL, "completion", flags, 0); 2153 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2154 2155 DROP_GIANT(); 2156 if (flags & SLEEPQ_INTERRUPTIBLE) 2157 error = -sleepq_timedwait_sig(c, 0); 2158 else 2159 error = -sleepq_timedwait(c, 0); 2160 PICKUP_GIANT(); 2161 2162 if (error != 0) { 2163 /* check for timeout */ 2164 if (error == -EWOULDBLOCK) { 2165 error = 0; /* timeout */ 2166 } else { 2167 /* signal happened */ 2168 linux_schedule_save_interrupt_value(task, error); 2169 error = -ERESTARTSYS; 2170 } 2171 goto done; 2172 } 2173 } 2174 if (c->done != UINT_MAX) 2175 c->done--; 2176 sleepq_release(c); 2177 2178 /* return how many jiffies are left */ 2179 error = linux_timer_jiffies_until(end); 2180 done: 2181 return (error); 2182 } 2183 2184 int 2185 linux_try_wait_for_completion(struct completion *c) 2186 { 2187 int isdone; 2188 2189 sleepq_lock(c); 2190 isdone = (c->done != 0); 2191 if (c->done != 0 && c->done != UINT_MAX) 2192 c->done--; 2193 sleepq_release(c); 2194 return (isdone); 2195 } 2196 2197 int 2198 linux_completion_done(struct completion *c) 2199 { 2200 int isdone; 2201 2202 sleepq_lock(c); 2203 isdone = (c->done != 0); 2204 sleepq_release(c); 2205 return (isdone); 2206 } 2207 2208 static void 2209 linux_cdev_deref(struct linux_cdev *ldev) 2210 { 2211 if (refcount_release(&ldev->refs) && 2212 ldev->kobj.ktype == &linux_cdev_ktype) 2213 kfree(ldev); 2214 } 2215 2216 static void 2217 linux_cdev_release(struct kobject *kobj) 2218 { 2219 struct linux_cdev *cdev; 2220 struct kobject *parent; 2221 2222 cdev = container_of(kobj, struct linux_cdev, kobj); 2223 parent = kobj->parent; 2224 linux_destroy_dev(cdev); 2225 linux_cdev_deref(cdev); 2226 kobject_put(parent); 2227 } 2228 2229 static void 2230 linux_cdev_static_release(struct kobject 
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}
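/*
 * Usage sketch (hypothetical driver code): a consumer supplies a
 * struct notifier_block whose notifier_call receives the NETDEV_*
 * events that the handlers above synthesize from FreeBSD's ifnet
 * eventhandlers:
 *
 *	static int
 *	my_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *data)
 *	{
 *		return (NOTIFY_DONE);
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *	...
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */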
int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}
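/*
 * list_sort() below sorts a Linux list indirectly: every element is
 * copied into a temporary array of pointers, the array is ordered with
 * qsort_r() using the linux_le_cmp() trampoline above to reach the
 * caller's comparison function, and the list is then rebuilt in sorted
 * order.  This trades an M_WAITOK allocation for the simplicity of
 * reusing the kernel's qsort_r() instead of the merge sort that Linux
 * itself uses for list_sort().
 */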
void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	if (linux_set_current_flags(curthread, M_NOWAIT))
		return;

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}
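/*
 * Usage sketch (hypothetical driver code, file_operations members
 * abbreviated): registering a range of character devices and tearing
 * it down again with the helpers above:
 *
 *	static const struct file_operations my_fops = {
 *		.owner = THIS_MODULE,
 *	};
 *	...
 *	error = __register_chrdev(MY_MAJOR, 0, 4, "mydev", &my_fops);
 *	...
 *	__unregister_chrdev(MY_MAJOR, 0, 4, "mydev");
 *
 * Note that __register_chrdev() stops at the first cdev_add() failure
 * and returns that error; minors created earlier in the loop are not
 * removed automatically.
 */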
void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
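/*
 * Illustration of the pattern the assertion above protects (not code
 * from this file): Linux-derived consumers routinely stash a pointer
 * in an unsigned long and cast it back later,
 *
 *	unsigned long cookie = (unsigned long)some_pointer;
 *	void *p = (void *)cookie;
 *
 * which only round-trips losslessly when the two types have the same
 * width, as asserted here.
 */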