1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/time.h> 55 #include <sys/user.h> 56 57 #include <vm/vm.h> 58 #include <vm/pmap.h> 59 #include <vm/vm_object.h> 60 #include <vm/vm_page.h> 61 #include <vm/vm_pager.h> 62 63 #include <machine/stdarg.h> 64 65 #if defined(__i386__) || defined(__amd64__) 66 #include <machine/md_var.h> 67 #endif 68 69 #include <linux/kobject.h> 70 #include <linux/device.h> 71 #include <linux/slab.h> 72 #include <linux/module.h> 73 #include <linux/moduleparam.h> 74 #include <linux/cdev.h> 75 #include <linux/file.h> 76 #include <linux/sysfs.h> 77 #include <linux/mm.h> 78 #include <linux/io.h> 79 #include <linux/vmalloc.h> 80 #include <linux/netdevice.h> 81 #include <linux/timer.h> 82 #include <linux/interrupt.h> 83 #include <linux/uaccess.h> 84 #include <linux/list.h> 85 #include <linux/kthread.h> 86 #include <linux/kernel.h> 87 #include <linux/compat.h> 88 #include <linux/poll.h> 89 #include <linux/smp.h> 90 #include <linux/wait_bit.h> 91 92 #if defined(__i386__) || defined(__amd64__) 93 #include <asm/smp.h> 94 #endif 95 96 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 97 "LinuxKPI parameters"); 98 99 int linuxkpi_debug; 100 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 101 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 102 103 static struct timeval lkpi_net_lastlog; 104 static int lkpi_net_curpps; 105 static int lkpi_net_maxpps = 99; 106 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 107 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 108 109 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 110 111 #include <linux/rbtree.h> 112 /* Undo Linux compat changes. */ 113 #undef RB_ROOT 114 #undef file 115 #undef cdev 116 #define RB_ROOT(head) (head)->rbh_root 117 118 static void linux_destroy_dev(struct linux_cdev *); 119 static void linux_cdev_deref(struct linux_cdev *ldev); 120 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 121 122 struct kobject linux_class_root; 123 struct device linux_root_device; 124 struct class linux_class_misc; 125 struct list_head pci_drivers; 126 struct list_head pci_devices; 127 spinlock_t pci_lock; 128 129 unsigned long linux_timer_hz_mask; 130 131 wait_queue_head_t linux_bit_waitq; 132 wait_queue_head_t linux_var_waitq; 133 134 int 135 panic_cmp(struct rb_node *one, struct rb_node *two) 136 { 137 panic("no cmp"); 138 } 139 140 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 141 142 int 143 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 144 { 145 va_list tmp_va; 146 int len; 147 char *old; 148 char *name; 149 char dummy; 150 151 old = kobj->name; 152 153 if (old && fmt == NULL) 154 return (0); 155 156 /* compute length of string */ 157 va_copy(tmp_va, args); 158 len = vsnprintf(&dummy, 0, fmt, tmp_va); 159 va_end(tmp_va); 160 161 /* account for zero termination */ 162 len++; 163 164 /* check for error */ 165 if (len < 1) 166 return (-EINVAL); 167 168 /* allocate memory for string */ 169 name = kzalloc(len, GFP_KERNEL); 170 if (name == NULL) 171 return (-ENOMEM); 172 vsnprintf(name, len, fmt, args); 173 kobj->name = name; 174 175 /* free old string */ 176 kfree(old); 177 178 /* filter new string */ 179 for (; *name != '\0'; name++) 180 if (*name == '/') 181 *name = '!'; 182 return (0); 183 } 184 185 int 186 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 187 { 188 va_list args; 189 int error; 190 191 va_start(args, fmt); 192 error = kobject_set_name_vargs(kobj, fmt, args); 193 va_end(args); 194 195 return (error); 196 } 197 198 static int 199 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 200 { 201 const struct kobj_type *t; 202 int error; 203 204 kobj->parent = parent; 205 error = sysfs_create_dir(kobj); 206 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 207 struct attribute **attr; 208 t = kobj->ktype; 209 210 for (attr = t->default_attrs; *attr != NULL; attr++) { 211 error = sysfs_create_file(kobj, *attr); 212 if (error) 213 break; 214 } 215 if (error) 216 sysfs_remove_dir(kobj); 217 } 218 return (error); 219 } 220 221 int 222 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 223 { 224 va_list args; 225 int error; 226 227 va_start(args, fmt); 228 error = kobject_set_name_vargs(kobj, fmt, args); 229 va_end(args); 230 if (error) 231 return (error); 232 233 return kobject_add_complete(kobj, parent); 234 } 235 236 void 237 linux_kobject_release(struct kref *kref) 238 { 239 struct kobject *kobj; 240 char *name; 241 242 kobj = container_of(kref, struct kobject, kref); 243 sysfs_remove_dir(kobj); 244 name = kobj->name; 245 if (kobj->ktype && kobj->ktype->release) 246 kobj->ktype->release(kobj); 247 kfree(name); 248 } 249 250 static void 251 linux_kobject_kfree(struct kobject *kobj) 252 { 253 kfree(kobj); 254 } 255 256 static void 257 linux_kobject_kfree_name(struct kobject *kobj) 258 { 259 if (kobj) { 260 kfree(kobj->name); 261 } 262 } 263 264 const struct kobj_type linux_kfree_type = { 265 .release = linux_kobject_kfree 266 }; 267 268 static void 269 linux_device_release(struct device *dev) 270 { 271 pr_debug("linux_device_release: %s\n", dev_name(dev)); 272 kfree(dev); 273 } 274 275 static ssize_t 276 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 277 { 278 struct class_attribute *dattr; 279 ssize_t error; 280 281 dattr = container_of(attr, struct class_attribute, attr); 282 error = -EIO; 283 if (dattr->show) 284 error = dattr->show(container_of(kobj, struct class, kobj), 285 dattr, buf); 286 return (error); 287 } 288 289 static ssize_t 290 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 291 size_t count) 292 { 293 struct class_attribute *dattr; 294 ssize_t error; 295 296 dattr = container_of(attr, struct class_attribute, attr); 297 error = -EIO; 298 if (dattr->store) 299 error = dattr->store(container_of(kobj, struct class, kobj), 300 dattr, buf, count); 301 return (error); 302 } 303 304 static void 305 linux_class_release(struct kobject *kobj) 306 { 307 struct class *class; 308 309 class = container_of(kobj, struct class, kobj); 310 if (class->class_release) 311 class->class_release(class); 312 } 313 314 static const struct sysfs_ops linux_class_sysfs = { 315 .show = linux_class_show, 316 .store = linux_class_store, 317 }; 318 319 const struct kobj_type linux_class_ktype = { 320 .release = linux_class_release, 321 .sysfs_ops = &linux_class_sysfs 322 }; 323 324 static void 325 linux_dev_release(struct kobject *kobj) 326 { 327 struct device *dev; 328 329 dev = container_of(kobj, struct device, kobj); 330 /* This is the precedence defined by linux. */ 331 if (dev->release) 332 dev->release(dev); 333 else if (dev->class && dev->class->dev_release) 334 dev->class->dev_release(dev); 335 } 336 337 static ssize_t 338 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 339 { 340 struct device_attribute *dattr; 341 ssize_t error; 342 343 dattr = container_of(attr, struct device_attribute, attr); 344 error = -EIO; 345 if (dattr->show) 346 error = dattr->show(container_of(kobj, struct device, kobj), 347 dattr, buf); 348 return (error); 349 } 350 351 static ssize_t 352 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 353 size_t count) 354 { 355 struct device_attribute *dattr; 356 ssize_t error; 357 358 dattr = container_of(attr, struct device_attribute, attr); 359 error = -EIO; 360 if (dattr->store) 361 error = dattr->store(container_of(kobj, struct device, kobj), 362 dattr, buf, count); 363 return (error); 364 } 365 366 static const struct sysfs_ops linux_dev_sysfs = { 367 .show = linux_dev_show, 368 .store = linux_dev_store, 369 }; 370 371 const struct kobj_type linux_dev_ktype = { 372 .release = linux_dev_release, 373 .sysfs_ops = &linux_dev_sysfs 374 }; 375 376 struct device * 377 device_create(struct class *class, struct device *parent, dev_t devt, 378 void *drvdata, const char *fmt, ...) 379 { 380 struct device *dev; 381 va_list args; 382 383 dev = kzalloc(sizeof(*dev), M_WAITOK); 384 dev->parent = parent; 385 dev->class = class; 386 dev->devt = devt; 387 dev->driver_data = drvdata; 388 dev->release = linux_device_release; 389 va_start(args, fmt); 390 kobject_set_name_vargs(&dev->kobj, fmt, args); 391 va_end(args); 392 device_register(dev); 393 394 return (dev); 395 } 396 397 int 398 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 399 struct kobject *parent, const char *fmt, ...) 400 { 401 va_list args; 402 int error; 403 404 kobject_init(kobj, ktype); 405 kobj->ktype = ktype; 406 kobj->parent = parent; 407 kobj->name = NULL; 408 409 va_start(args, fmt); 410 error = kobject_set_name_vargs(kobj, fmt, args); 411 va_end(args); 412 if (error) 413 return (error); 414 return kobject_add_complete(kobj, parent); 415 } 416 417 static void 418 linux_kq_lock(void *arg) 419 { 420 spinlock_t *s = arg; 421 422 spin_lock(s); 423 } 424 static void 425 linux_kq_unlock(void *arg) 426 { 427 spinlock_t *s = arg; 428 429 spin_unlock(s); 430 } 431 432 static void 433 linux_kq_assert_lock(void *arg, int what) 434 { 435 #ifdef INVARIANTS 436 spinlock_t *s = arg; 437 438 if (what == LA_LOCKED) 439 mtx_assert(&s->m, MA_OWNED); 440 else 441 mtx_assert(&s->m, MA_NOTOWNED); 442 #endif 443 } 444 445 static void 446 linux_file_kqfilter_poll(struct linux_file *, int); 447 448 struct linux_file * 449 linux_file_alloc(void) 450 { 451 struct linux_file *filp; 452 453 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 454 455 /* set initial refcount */ 456 filp->f_count = 1; 457 458 /* setup fields needed by kqueue support */ 459 spin_lock_init(&filp->f_kqlock); 460 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 461 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 462 463 return (filp); 464 } 465 466 void 467 linux_file_free(struct linux_file *filp) 468 { 469 if (filp->_file == NULL) { 470 if (filp->f_shmem != NULL) 471 vm_object_deallocate(filp->f_shmem); 472 kfree(filp); 473 } else { 474 /* 475 * The close method of the character device or file 476 * will free the linux_file structure: 477 */ 478 _fdrop(filp->_file, curthread); 479 } 480 } 481 482 static int 483 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 484 vm_page_t *mres) 485 { 486 struct vm_area_struct *vmap; 487 488 vmap = linux_cdev_handle_find(vm_obj->handle); 489 490 MPASS(vmap != NULL); 491 MPASS(vmap->vm_private_data == vm_obj->handle); 492 493 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 494 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 495 vm_page_t page; 496 497 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 498 /* 499 * If the passed in result page is a fake 500 * page, update it with the new physical 501 * address. 502 */ 503 page = *mres; 504 vm_page_updatefake(page, paddr, vm_obj->memattr); 505 } else { 506 /* 507 * Replace the passed in "mres" page with our 508 * own fake page and free up the all of the 509 * original pages. 510 */ 511 VM_OBJECT_WUNLOCK(vm_obj); 512 page = vm_page_getfake(paddr, vm_obj->memattr); 513 VM_OBJECT_WLOCK(vm_obj); 514 515 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 516 *mres = page; 517 } 518 vm_page_valid(page); 519 return (VM_PAGER_OK); 520 } 521 return (VM_PAGER_FAIL); 522 } 523 524 static int 525 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 526 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 527 { 528 struct vm_area_struct *vmap; 529 int err; 530 531 /* get VM area structure */ 532 vmap = linux_cdev_handle_find(vm_obj->handle); 533 MPASS(vmap != NULL); 534 MPASS(vmap->vm_private_data == vm_obj->handle); 535 536 VM_OBJECT_WUNLOCK(vm_obj); 537 538 linux_set_current(curthread); 539 540 down_write(&vmap->vm_mm->mmap_sem); 541 if (unlikely(vmap->vm_ops == NULL)) { 542 err = VM_FAULT_SIGBUS; 543 } else { 544 struct vm_fault vmf; 545 546 /* fill out VM fault structure */ 547 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 548 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 549 vmf.pgoff = 0; 550 vmf.page = NULL; 551 vmf.vma = vmap; 552 553 vmap->vm_pfn_count = 0; 554 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 555 vmap->vm_obj = vm_obj; 556 557 err = vmap->vm_ops->fault(vmap, &vmf); 558 559 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 560 kern_yield(PRI_USER); 561 err = vmap->vm_ops->fault(vmap, &vmf); 562 } 563 } 564 565 /* translate return code */ 566 switch (err) { 567 case VM_FAULT_OOM: 568 err = VM_PAGER_AGAIN; 569 break; 570 case VM_FAULT_SIGBUS: 571 err = VM_PAGER_BAD; 572 break; 573 case VM_FAULT_NOPAGE: 574 /* 575 * By contract the fault handler will return having 576 * busied all the pages itself. If pidx is already 577 * found in the object, it will simply xbusy the first 578 * page and return with vm_pfn_count set to 1. 579 */ 580 *first = vmap->vm_pfn_first; 581 *last = *first + vmap->vm_pfn_count - 1; 582 err = VM_PAGER_OK; 583 break; 584 default: 585 err = VM_PAGER_ERROR; 586 break; 587 } 588 up_write(&vmap->vm_mm->mmap_sem); 589 VM_OBJECT_WLOCK(vm_obj); 590 return (err); 591 } 592 593 static struct rwlock linux_vma_lock; 594 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 595 TAILQ_HEAD_INITIALIZER(linux_vma_head); 596 597 static void 598 linux_cdev_handle_free(struct vm_area_struct *vmap) 599 { 600 /* Drop reference on vm_file */ 601 if (vmap->vm_file != NULL) 602 fput(vmap->vm_file); 603 604 /* Drop reference on mm_struct */ 605 mmput(vmap->vm_mm); 606 607 kfree(vmap); 608 } 609 610 static void 611 linux_cdev_handle_remove(struct vm_area_struct *vmap) 612 { 613 rw_wlock(&linux_vma_lock); 614 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 615 rw_wunlock(&linux_vma_lock); 616 } 617 618 static struct vm_area_struct * 619 linux_cdev_handle_find(void *handle) 620 { 621 struct vm_area_struct *vmap; 622 623 rw_rlock(&linux_vma_lock); 624 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 625 if (vmap->vm_private_data == handle) 626 break; 627 } 628 rw_runlock(&linux_vma_lock); 629 return (vmap); 630 } 631 632 static int 633 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 634 vm_ooffset_t foff, struct ucred *cred, u_short *color) 635 { 636 637 MPASS(linux_cdev_handle_find(handle) != NULL); 638 *color = 0; 639 return (0); 640 } 641 642 static void 643 linux_cdev_pager_dtor(void *handle) 644 { 645 const struct vm_operations_struct *vm_ops; 646 struct vm_area_struct *vmap; 647 648 vmap = linux_cdev_handle_find(handle); 649 MPASS(vmap != NULL); 650 651 /* 652 * Remove handle before calling close operation to prevent 653 * other threads from reusing the handle pointer. 654 */ 655 linux_cdev_handle_remove(vmap); 656 657 down_write(&vmap->vm_mm->mmap_sem); 658 vm_ops = vmap->vm_ops; 659 if (likely(vm_ops != NULL)) 660 vm_ops->close(vmap); 661 up_write(&vmap->vm_mm->mmap_sem); 662 663 linux_cdev_handle_free(vmap); 664 } 665 666 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 667 { 668 /* OBJT_MGTDEVICE */ 669 .cdev_pg_populate = linux_cdev_pager_populate, 670 .cdev_pg_ctor = linux_cdev_pager_ctor, 671 .cdev_pg_dtor = linux_cdev_pager_dtor 672 }, 673 { 674 /* OBJT_DEVICE */ 675 .cdev_pg_fault = linux_cdev_pager_fault, 676 .cdev_pg_ctor = linux_cdev_pager_ctor, 677 .cdev_pg_dtor = linux_cdev_pager_dtor 678 }, 679 }; 680 681 int 682 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 683 unsigned long size) 684 { 685 vm_object_t obj; 686 vm_page_t m; 687 688 obj = vma->vm_obj; 689 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 690 return (-ENOTSUP); 691 VM_OBJECT_RLOCK(obj); 692 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 693 m != NULL && m->pindex < OFF_TO_IDX(address + size); 694 m = TAILQ_NEXT(m, listq)) 695 pmap_remove_all(m); 696 VM_OBJECT_RUNLOCK(obj); 697 return (0); 698 } 699 700 static struct file_operations dummy_ldev_ops = { 701 /* XXXKIB */ 702 }; 703 704 static struct linux_cdev dummy_ldev = { 705 .ops = &dummy_ldev_ops, 706 }; 707 708 #define LDEV_SI_DTR 0x0001 709 #define LDEV_SI_REF 0x0002 710 711 static void 712 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 713 struct linux_cdev **dev) 714 { 715 struct linux_cdev *ldev; 716 u_int siref; 717 718 ldev = filp->f_cdev; 719 *fop = filp->f_op; 720 if (ldev != NULL) { 721 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 722 refcount_acquire(&ldev->refs); 723 } else { 724 for (siref = ldev->siref;;) { 725 if ((siref & LDEV_SI_DTR) != 0) { 726 ldev = &dummy_ldev; 727 *fop = ldev->ops; 728 siref = ldev->siref; 729 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 730 } else if (atomic_fcmpset_int(&ldev->siref, 731 &siref, siref + LDEV_SI_REF)) { 732 break; 733 } 734 } 735 } 736 } 737 *dev = ldev; 738 } 739 740 static void 741 linux_drop_fop(struct linux_cdev *ldev) 742 { 743 744 if (ldev == NULL) 745 return; 746 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 747 linux_cdev_deref(ldev); 748 } else { 749 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 750 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 751 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 752 } 753 } 754 755 #define OPW(fp,td,code) ({ \ 756 struct file *__fpop; \ 757 __typeof(code) __retval; \ 758 \ 759 __fpop = (td)->td_fpop; \ 760 (td)->td_fpop = (fp); \ 761 __retval = (code); \ 762 (td)->td_fpop = __fpop; \ 763 __retval; \ 764 }) 765 766 static int 767 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 768 struct file *file) 769 { 770 struct linux_cdev *ldev; 771 struct linux_file *filp; 772 const struct file_operations *fop; 773 int error; 774 775 ldev = dev->si_drv1; 776 777 filp = linux_file_alloc(); 778 filp->f_dentry = &filp->f_dentry_store; 779 filp->f_op = ldev->ops; 780 filp->f_mode = file->f_flag; 781 filp->f_flags = file->f_flag; 782 filp->f_vnode = file->f_vnode; 783 filp->_file = file; 784 refcount_acquire(&ldev->refs); 785 filp->f_cdev = ldev; 786 787 linux_set_current(td); 788 linux_get_fop(filp, &fop, &ldev); 789 790 if (fop->open != NULL) { 791 error = -fop->open(file->f_vnode, filp); 792 if (error != 0) { 793 linux_drop_fop(ldev); 794 linux_cdev_deref(filp->f_cdev); 795 kfree(filp); 796 return (error); 797 } 798 } 799 800 /* hold on to the vnode - used for fstat() */ 801 vhold(filp->f_vnode); 802 803 /* release the file from devfs */ 804 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 805 linux_drop_fop(ldev); 806 return (ENXIO); 807 } 808 809 #define LINUX_IOCTL_MIN_PTR 0x10000UL 810 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 811 812 static inline int 813 linux_remap_address(void **uaddr, size_t len) 814 { 815 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 816 817 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 818 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 819 struct task_struct *pts = current; 820 if (pts == NULL) { 821 *uaddr = NULL; 822 return (1); 823 } 824 825 /* compute data offset */ 826 uaddr_val -= LINUX_IOCTL_MIN_PTR; 827 828 /* check that length is within bounds */ 829 if ((len > IOCPARM_MAX) || 830 (uaddr_val + len) > pts->bsd_ioctl_len) { 831 *uaddr = NULL; 832 return (1); 833 } 834 835 /* re-add kernel buffer address */ 836 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 837 838 /* update address location */ 839 *uaddr = (void *)uaddr_val; 840 return (1); 841 } 842 return (0); 843 } 844 845 int 846 linux_copyin(const void *uaddr, void *kaddr, size_t len) 847 { 848 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 849 if (uaddr == NULL) 850 return (-EFAULT); 851 memcpy(kaddr, uaddr, len); 852 return (0); 853 } 854 return (-copyin(uaddr, kaddr, len)); 855 } 856 857 int 858 linux_copyout(const void *kaddr, void *uaddr, size_t len) 859 { 860 if (linux_remap_address(&uaddr, len)) { 861 if (uaddr == NULL) 862 return (-EFAULT); 863 memcpy(uaddr, kaddr, len); 864 return (0); 865 } 866 return (-copyout(kaddr, uaddr, len)); 867 } 868 869 size_t 870 linux_clear_user(void *_uaddr, size_t _len) 871 { 872 uint8_t *uaddr = _uaddr; 873 size_t len = _len; 874 875 /* make sure uaddr is aligned before going into the fast loop */ 876 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 877 if (subyte(uaddr, 0)) 878 return (_len); 879 uaddr++; 880 len--; 881 } 882 883 /* zero 8 bytes at a time */ 884 while (len > 7) { 885 #ifdef __LP64__ 886 if (suword64(uaddr, 0)) 887 return (_len); 888 #else 889 if (suword32(uaddr, 0)) 890 return (_len); 891 if (suword32(uaddr + 4, 0)) 892 return (_len); 893 #endif 894 uaddr += 8; 895 len -= 8; 896 } 897 898 /* zero fill end, if any */ 899 while (len > 0) { 900 if (subyte(uaddr, 0)) 901 return (_len); 902 uaddr++; 903 len--; 904 } 905 return (0); 906 } 907 908 int 909 linux_access_ok(const void *uaddr, size_t len) 910 { 911 uintptr_t saddr; 912 uintptr_t eaddr; 913 914 /* get start and end address */ 915 saddr = (uintptr_t)uaddr; 916 eaddr = (uintptr_t)uaddr + len; 917 918 /* verify addresses are valid for userspace */ 919 return ((saddr == eaddr) || 920 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 921 } 922 923 /* 924 * This function should return either EINTR or ERESTART depending on 925 * the signal type sent to this thread: 926 */ 927 static int 928 linux_get_error(struct task_struct *task, int error) 929 { 930 /* check for signal type interrupt code */ 931 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 932 error = -linux_schedule_get_interrupt_value(task); 933 if (error == 0) 934 error = EINTR; 935 } 936 return (error); 937 } 938 939 static int 940 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 941 const struct file_operations *fop, u_long cmd, caddr_t data, 942 struct thread *td) 943 { 944 struct task_struct *task = current; 945 unsigned size; 946 int error; 947 948 size = IOCPARM_LEN(cmd); 949 /* refer to logic in sys_ioctl() */ 950 if (size > 0) { 951 /* 952 * Setup hint for linux_copyin() and linux_copyout(). 953 * 954 * Background: Linux code expects a user-space address 955 * while FreeBSD supplies a kernel-space address. 956 */ 957 task->bsd_ioctl_data = data; 958 task->bsd_ioctl_len = size; 959 data = (void *)LINUX_IOCTL_MIN_PTR; 960 } else { 961 /* fetch user-space pointer */ 962 data = *(void **)data; 963 } 964 #if defined(__amd64__) 965 if (td->td_proc->p_elf_machine == EM_386) { 966 /* try the compat IOCTL handler first */ 967 if (fop->compat_ioctl != NULL) { 968 error = -OPW(fp, td, fop->compat_ioctl(filp, 969 cmd, (u_long)data)); 970 } else { 971 error = ENOTTY; 972 } 973 974 /* fallback to the regular IOCTL handler, if any */ 975 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 976 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 977 cmd, (u_long)data)); 978 } 979 } else 980 #endif 981 { 982 if (fop->unlocked_ioctl != NULL) { 983 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 984 cmd, (u_long)data)); 985 } else { 986 error = ENOTTY; 987 } 988 } 989 if (size > 0) { 990 task->bsd_ioctl_data = NULL; 991 task->bsd_ioctl_len = 0; 992 } 993 994 if (error == EWOULDBLOCK) { 995 /* update kqfilter status, if any */ 996 linux_file_kqfilter_poll(filp, 997 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 998 } else { 999 error = linux_get_error(task, error); 1000 } 1001 return (error); 1002 } 1003 1004 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1005 1006 /* 1007 * This function atomically updates the poll wakeup state and returns 1008 * the previous state at the time of update. 1009 */ 1010 static uint8_t 1011 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1012 { 1013 int c, old; 1014 1015 c = v->counter; 1016 1017 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1018 c = old; 1019 1020 return (c); 1021 } 1022 1023 static int 1024 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1025 { 1026 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1027 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1028 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1029 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1030 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1031 }; 1032 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1033 1034 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1035 case LINUX_FWQ_STATE_QUEUED: 1036 linux_poll_wakeup(filp); 1037 return (1); 1038 default: 1039 return (0); 1040 } 1041 } 1042 1043 void 1044 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1045 { 1046 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1047 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1048 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1049 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1050 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1051 }; 1052 1053 /* check if we are called inside the select system call */ 1054 if (p == LINUX_POLL_TABLE_NORMAL) 1055 selrecord(curthread, &filp->f_selinfo); 1056 1057 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1058 case LINUX_FWQ_STATE_INIT: 1059 /* NOTE: file handles can only belong to one wait-queue */ 1060 filp->f_wait_queue.wqh = wqh; 1061 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1062 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1063 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1064 break; 1065 default: 1066 break; 1067 } 1068 } 1069 1070 static void 1071 linux_poll_wait_dequeue(struct linux_file *filp) 1072 { 1073 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1074 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1075 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1076 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1077 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1078 }; 1079 1080 seldrain(&filp->f_selinfo); 1081 1082 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1083 case LINUX_FWQ_STATE_NOT_READY: 1084 case LINUX_FWQ_STATE_QUEUED: 1085 case LINUX_FWQ_STATE_READY: 1086 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1087 break; 1088 default: 1089 break; 1090 } 1091 } 1092 1093 void 1094 linux_poll_wakeup(struct linux_file *filp) 1095 { 1096 /* this function should be NULL-safe */ 1097 if (filp == NULL) 1098 return; 1099 1100 selwakeup(&filp->f_selinfo); 1101 1102 spin_lock(&filp->f_kqlock); 1103 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1104 LINUX_KQ_FLAG_NEED_WRITE; 1105 1106 /* make sure the "knote" gets woken up */ 1107 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1108 spin_unlock(&filp->f_kqlock); 1109 } 1110 1111 static void 1112 linux_file_kqfilter_detach(struct knote *kn) 1113 { 1114 struct linux_file *filp = kn->kn_hook; 1115 1116 spin_lock(&filp->f_kqlock); 1117 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1118 spin_unlock(&filp->f_kqlock); 1119 } 1120 1121 static int 1122 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1123 { 1124 struct linux_file *filp = kn->kn_hook; 1125 1126 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1127 1128 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1129 } 1130 1131 static int 1132 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1133 { 1134 struct linux_file *filp = kn->kn_hook; 1135 1136 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1137 1138 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1139 } 1140 1141 static struct filterops linux_dev_kqfiltops_read = { 1142 .f_isfd = 1, 1143 .f_detach = linux_file_kqfilter_detach, 1144 .f_event = linux_file_kqfilter_read_event, 1145 }; 1146 1147 static struct filterops linux_dev_kqfiltops_write = { 1148 .f_isfd = 1, 1149 .f_detach = linux_file_kqfilter_detach, 1150 .f_event = linux_file_kqfilter_write_event, 1151 }; 1152 1153 static void 1154 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1155 { 1156 struct thread *td; 1157 const struct file_operations *fop; 1158 struct linux_cdev *ldev; 1159 int temp; 1160 1161 if ((filp->f_kqflags & kqflags) == 0) 1162 return; 1163 1164 td = curthread; 1165 1166 linux_get_fop(filp, &fop, &ldev); 1167 /* get the latest polling state */ 1168 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1169 linux_drop_fop(ldev); 1170 1171 spin_lock(&filp->f_kqlock); 1172 /* clear kqflags */ 1173 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1174 LINUX_KQ_FLAG_NEED_WRITE); 1175 /* update kqflags */ 1176 if ((temp & (POLLIN | POLLOUT)) != 0) { 1177 if ((temp & POLLIN) != 0) 1178 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1179 if ((temp & POLLOUT) != 0) 1180 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1181 1182 /* make sure the "knote" gets woken up */ 1183 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1184 } 1185 spin_unlock(&filp->f_kqlock); 1186 } 1187 1188 static int 1189 linux_file_kqfilter(struct file *file, struct knote *kn) 1190 { 1191 struct linux_file *filp; 1192 struct thread *td; 1193 int error; 1194 1195 td = curthread; 1196 filp = (struct linux_file *)file->f_data; 1197 filp->f_flags = file->f_flag; 1198 if (filp->f_op->poll == NULL) 1199 return (EINVAL); 1200 1201 spin_lock(&filp->f_kqlock); 1202 switch (kn->kn_filter) { 1203 case EVFILT_READ: 1204 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1205 kn->kn_fop = &linux_dev_kqfiltops_read; 1206 kn->kn_hook = filp; 1207 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1208 error = 0; 1209 break; 1210 case EVFILT_WRITE: 1211 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1212 kn->kn_fop = &linux_dev_kqfiltops_write; 1213 kn->kn_hook = filp; 1214 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1215 error = 0; 1216 break; 1217 default: 1218 error = EINVAL; 1219 break; 1220 } 1221 spin_unlock(&filp->f_kqlock); 1222 1223 if (error == 0) { 1224 linux_set_current(td); 1225 1226 /* update kqfilter status, if any */ 1227 linux_file_kqfilter_poll(filp, 1228 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1229 } 1230 return (error); 1231 } 1232 1233 static int 1234 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1235 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1236 int nprot, bool is_shared, struct thread *td) 1237 { 1238 struct task_struct *task; 1239 struct vm_area_struct *vmap; 1240 struct mm_struct *mm; 1241 struct linux_file *filp; 1242 vm_memattr_t attr; 1243 int error; 1244 1245 filp = (struct linux_file *)fp->f_data; 1246 filp->f_flags = fp->f_flag; 1247 1248 if (fop->mmap == NULL) 1249 return (EOPNOTSUPP); 1250 1251 linux_set_current(td); 1252 1253 /* 1254 * The same VM object might be shared by multiple processes 1255 * and the mm_struct is usually freed when a process exits. 1256 * 1257 * The atomic reference below makes sure the mm_struct is 1258 * available as long as the vmap is in the linux_vma_head. 1259 */ 1260 task = current; 1261 mm = task->mm; 1262 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1263 return (EINVAL); 1264 1265 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1266 vmap->vm_start = 0; 1267 vmap->vm_end = size; 1268 vmap->vm_pgoff = *offset / PAGE_SIZE; 1269 vmap->vm_pfn = 0; 1270 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1271 if (is_shared) 1272 vmap->vm_flags |= VM_SHARED; 1273 vmap->vm_ops = NULL; 1274 vmap->vm_file = get_file(filp); 1275 vmap->vm_mm = mm; 1276 1277 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1278 error = linux_get_error(task, EINTR); 1279 } else { 1280 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1281 error = linux_get_error(task, error); 1282 up_write(&vmap->vm_mm->mmap_sem); 1283 } 1284 1285 if (error != 0) { 1286 linux_cdev_handle_free(vmap); 1287 return (error); 1288 } 1289 1290 attr = pgprot2cachemode(vmap->vm_page_prot); 1291 1292 if (vmap->vm_ops != NULL) { 1293 struct vm_area_struct *ptr; 1294 void *vm_private_data; 1295 bool vm_no_fault; 1296 1297 if (vmap->vm_ops->open == NULL || 1298 vmap->vm_ops->close == NULL || 1299 vmap->vm_private_data == NULL) { 1300 /* free allocated VM area struct */ 1301 linux_cdev_handle_free(vmap); 1302 return (EINVAL); 1303 } 1304 1305 vm_private_data = vmap->vm_private_data; 1306 1307 rw_wlock(&linux_vma_lock); 1308 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1309 if (ptr->vm_private_data == vm_private_data) 1310 break; 1311 } 1312 /* check if there is an existing VM area struct */ 1313 if (ptr != NULL) { 1314 /* check if the VM area structure is invalid */ 1315 if (ptr->vm_ops == NULL || 1316 ptr->vm_ops->open == NULL || 1317 ptr->vm_ops->close == NULL) { 1318 error = ESTALE; 1319 vm_no_fault = 1; 1320 } else { 1321 error = EEXIST; 1322 vm_no_fault = (ptr->vm_ops->fault == NULL); 1323 } 1324 } else { 1325 /* insert VM area structure into list */ 1326 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1327 error = 0; 1328 vm_no_fault = (vmap->vm_ops->fault == NULL); 1329 } 1330 rw_wunlock(&linux_vma_lock); 1331 1332 if (error != 0) { 1333 /* free allocated VM area struct */ 1334 linux_cdev_handle_free(vmap); 1335 /* check for stale VM area struct */ 1336 if (error != EEXIST) 1337 return (error); 1338 } 1339 1340 /* check if there is no fault handler */ 1341 if (vm_no_fault) { 1342 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1343 &linux_cdev_pager_ops[1], size, nprot, *offset, 1344 td->td_ucred); 1345 } else { 1346 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1347 &linux_cdev_pager_ops[0], size, nprot, *offset, 1348 td->td_ucred); 1349 } 1350 1351 /* check if allocating the VM object failed */ 1352 if (*object == NULL) { 1353 if (error == 0) { 1354 /* remove VM area struct from list */ 1355 linux_cdev_handle_remove(vmap); 1356 /* free allocated VM area struct */ 1357 linux_cdev_handle_free(vmap); 1358 } 1359 return (EINVAL); 1360 } 1361 } else { 1362 struct sglist *sg; 1363 1364 sg = sglist_alloc(1, M_WAITOK); 1365 sglist_append_phys(sg, 1366 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1367 1368 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1369 nprot, 0, td->td_ucred); 1370 1371 linux_cdev_handle_free(vmap); 1372 1373 if (*object == NULL) { 1374 sglist_free(sg); 1375 return (EINVAL); 1376 } 1377 } 1378 1379 if (attr != VM_MEMATTR_DEFAULT) { 1380 VM_OBJECT_WLOCK(*object); 1381 vm_object_set_memattr(*object, attr); 1382 VM_OBJECT_WUNLOCK(*object); 1383 } 1384 *offset = 0; 1385 return (0); 1386 } 1387 1388 struct cdevsw linuxcdevsw = { 1389 .d_version = D_VERSION, 1390 .d_fdopen = linux_dev_fdopen, 1391 .d_name = "lkpidev", 1392 }; 1393 1394 static int 1395 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1396 int flags, struct thread *td) 1397 { 1398 struct linux_file *filp; 1399 const struct file_operations *fop; 1400 struct linux_cdev *ldev; 1401 ssize_t bytes; 1402 int error; 1403 1404 error = 0; 1405 filp = (struct linux_file *)file->f_data; 1406 filp->f_flags = file->f_flag; 1407 /* XXX no support for I/O vectors currently */ 1408 if (uio->uio_iovcnt != 1) 1409 return (EOPNOTSUPP); 1410 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1411 return (EINVAL); 1412 linux_set_current(td); 1413 linux_get_fop(filp, &fop, &ldev); 1414 if (fop->read != NULL) { 1415 bytes = OPW(file, td, fop->read(filp, 1416 uio->uio_iov->iov_base, 1417 uio->uio_iov->iov_len, &uio->uio_offset)); 1418 if (bytes >= 0) { 1419 uio->uio_iov->iov_base = 1420 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1421 uio->uio_iov->iov_len -= bytes; 1422 uio->uio_resid -= bytes; 1423 } else { 1424 error = linux_get_error(current, -bytes); 1425 } 1426 } else 1427 error = ENXIO; 1428 1429 /* update kqfilter status, if any */ 1430 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1431 linux_drop_fop(ldev); 1432 1433 return (error); 1434 } 1435 1436 static int 1437 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1438 int flags, struct thread *td) 1439 { 1440 struct linux_file *filp; 1441 const struct file_operations *fop; 1442 struct linux_cdev *ldev; 1443 ssize_t bytes; 1444 int error; 1445 1446 filp = (struct linux_file *)file->f_data; 1447 filp->f_flags = file->f_flag; 1448 /* XXX no support for I/O vectors currently */ 1449 if (uio->uio_iovcnt != 1) 1450 return (EOPNOTSUPP); 1451 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1452 return (EINVAL); 1453 linux_set_current(td); 1454 linux_get_fop(filp, &fop, &ldev); 1455 if (fop->write != NULL) { 1456 bytes = OPW(file, td, fop->write(filp, 1457 uio->uio_iov->iov_base, 1458 uio->uio_iov->iov_len, &uio->uio_offset)); 1459 if (bytes >= 0) { 1460 uio->uio_iov->iov_base = 1461 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1462 uio->uio_iov->iov_len -= bytes; 1463 uio->uio_resid -= bytes; 1464 error = 0; 1465 } else { 1466 error = linux_get_error(current, -bytes); 1467 } 1468 } else 1469 error = ENXIO; 1470 1471 /* update kqfilter status, if any */ 1472 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1473 1474 linux_drop_fop(ldev); 1475 1476 return (error); 1477 } 1478 1479 static int 1480 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1481 struct thread *td) 1482 { 1483 struct linux_file *filp; 1484 const struct file_operations *fop; 1485 struct linux_cdev *ldev; 1486 int revents; 1487 1488 filp = (struct linux_file *)file->f_data; 1489 filp->f_flags = file->f_flag; 1490 linux_set_current(td); 1491 linux_get_fop(filp, &fop, &ldev); 1492 if (fop->poll != NULL) { 1493 revents = OPW(file, td, fop->poll(filp, 1494 LINUX_POLL_TABLE_NORMAL)) & events; 1495 } else { 1496 revents = 0; 1497 } 1498 linux_drop_fop(ldev); 1499 return (revents); 1500 } 1501 1502 static int 1503 linux_file_close(struct file *file, struct thread *td) 1504 { 1505 struct linux_file *filp; 1506 int (*release)(struct inode *, struct linux_file *); 1507 const struct file_operations *fop; 1508 struct linux_cdev *ldev; 1509 int error; 1510 1511 filp = (struct linux_file *)file->f_data; 1512 1513 KASSERT(file_count(filp) == 0, 1514 ("File refcount(%d) is not zero", file_count(filp))); 1515 1516 if (td == NULL) 1517 td = curthread; 1518 1519 error = 0; 1520 filp->f_flags = file->f_flag; 1521 linux_set_current(td); 1522 linux_poll_wait_dequeue(filp); 1523 linux_get_fop(filp, &fop, &ldev); 1524 /* 1525 * Always use the real release function, if any, to avoid 1526 * leaking device resources: 1527 */ 1528 release = filp->f_op->release; 1529 if (release != NULL) 1530 error = -OPW(file, td, release(filp->f_vnode, filp)); 1531 funsetown(&filp->f_sigio); 1532 if (filp->f_vnode != NULL) 1533 vdrop(filp->f_vnode); 1534 linux_drop_fop(ldev); 1535 ldev = filp->f_cdev; 1536 if (ldev != NULL) 1537 linux_cdev_deref(ldev); 1538 kfree(filp); 1539 1540 return (error); 1541 } 1542 1543 static int 1544 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1545 struct thread *td) 1546 { 1547 struct linux_file *filp; 1548 const struct file_operations *fop; 1549 struct linux_cdev *ldev; 1550 struct fiodgname_arg *fgn; 1551 const char *p; 1552 int error, i; 1553 1554 error = 0; 1555 filp = (struct linux_file *)fp->f_data; 1556 filp->f_flags = fp->f_flag; 1557 linux_get_fop(filp, &fop, &ldev); 1558 1559 linux_set_current(td); 1560 switch (cmd) { 1561 case FIONBIO: 1562 break; 1563 case FIOASYNC: 1564 if (fop->fasync == NULL) 1565 break; 1566 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1567 break; 1568 case FIOSETOWN: 1569 error = fsetown(*(int *)data, &filp->f_sigio); 1570 if (error == 0) { 1571 if (fop->fasync == NULL) 1572 break; 1573 error = -OPW(fp, td, fop->fasync(0, filp, 1574 fp->f_flag & FASYNC)); 1575 } 1576 break; 1577 case FIOGETOWN: 1578 *(int *)data = fgetown(&filp->f_sigio); 1579 break; 1580 case FIODGNAME: 1581 #ifdef COMPAT_FREEBSD32 1582 case FIODGNAME_32: 1583 #endif 1584 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1585 error = ENXIO; 1586 break; 1587 } 1588 fgn = data; 1589 p = devtoname(filp->f_cdev->cdev); 1590 i = strlen(p) + 1; 1591 if (i > fgn->len) { 1592 error = EINVAL; 1593 break; 1594 } 1595 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1596 break; 1597 default: 1598 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1599 break; 1600 } 1601 linux_drop_fop(ldev); 1602 return (error); 1603 } 1604 1605 static int 1606 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1607 vm_prot_t maxprot, int flags, struct file *fp, 1608 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1609 { 1610 /* 1611 * Character devices do not provide private mappings 1612 * of any kind: 1613 */ 1614 if ((maxprot & VM_PROT_WRITE) == 0 && 1615 (prot & VM_PROT_WRITE) != 0) 1616 return (EACCES); 1617 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1618 return (EINVAL); 1619 1620 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1621 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1622 } 1623 1624 static int 1625 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1626 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1627 struct thread *td) 1628 { 1629 struct linux_file *filp; 1630 const struct file_operations *fop; 1631 struct linux_cdev *ldev; 1632 struct mount *mp; 1633 struct vnode *vp; 1634 vm_object_t object; 1635 vm_prot_t maxprot; 1636 int error; 1637 1638 filp = (struct linux_file *)fp->f_data; 1639 1640 vp = filp->f_vnode; 1641 if (vp == NULL) 1642 return (EOPNOTSUPP); 1643 1644 /* 1645 * Ensure that file and memory protections are 1646 * compatible. 1647 */ 1648 mp = vp->v_mount; 1649 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1650 maxprot = VM_PROT_NONE; 1651 if ((prot & VM_PROT_EXECUTE) != 0) 1652 return (EACCES); 1653 } else 1654 maxprot = VM_PROT_EXECUTE; 1655 if ((fp->f_flag & FREAD) != 0) 1656 maxprot |= VM_PROT_READ; 1657 else if ((prot & VM_PROT_READ) != 0) 1658 return (EACCES); 1659 1660 /* 1661 * If we are sharing potential changes via MAP_SHARED and we 1662 * are trying to get write permission although we opened it 1663 * without asking for it, bail out. 1664 * 1665 * Note that most character devices always share mappings. 1666 * 1667 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1668 * requests rather than doing it here. 1669 */ 1670 if ((flags & MAP_SHARED) != 0) { 1671 if ((fp->f_flag & FWRITE) != 0) 1672 maxprot |= VM_PROT_WRITE; 1673 else if ((prot & VM_PROT_WRITE) != 0) 1674 return (EACCES); 1675 } 1676 maxprot &= cap_maxprot; 1677 1678 linux_get_fop(filp, &fop, &ldev); 1679 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1680 &foff, fop, &object); 1681 if (error != 0) 1682 goto out; 1683 1684 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1685 foff, FALSE, td); 1686 if (error != 0) 1687 vm_object_deallocate(object); 1688 out: 1689 linux_drop_fop(ldev); 1690 return (error); 1691 } 1692 1693 static int 1694 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1695 struct thread *td) 1696 { 1697 struct linux_file *filp; 1698 struct vnode *vp; 1699 int error; 1700 1701 filp = (struct linux_file *)fp->f_data; 1702 if (filp->f_vnode == NULL) 1703 return (EOPNOTSUPP); 1704 1705 vp = filp->f_vnode; 1706 1707 vn_lock(vp, LK_SHARED | LK_RETRY); 1708 error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td); 1709 VOP_UNLOCK(vp); 1710 1711 return (error); 1712 } 1713 1714 static int 1715 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1716 struct filedesc *fdp) 1717 { 1718 struct linux_file *filp; 1719 struct vnode *vp; 1720 int error; 1721 1722 filp = fp->f_data; 1723 vp = filp->f_vnode; 1724 if (vp == NULL) { 1725 error = 0; 1726 kif->kf_type = KF_TYPE_DEV; 1727 } else { 1728 vref(vp); 1729 FILEDESC_SUNLOCK(fdp); 1730 error = vn_fill_kinfo_vnode(vp, kif); 1731 vrele(vp); 1732 kif->kf_type = KF_TYPE_VNODE; 1733 FILEDESC_SLOCK(fdp); 1734 } 1735 return (error); 1736 } 1737 1738 unsigned int 1739 linux_iminor(struct inode *inode) 1740 { 1741 struct linux_cdev *ldev; 1742 1743 if (inode == NULL || inode->v_rdev == NULL || 1744 inode->v_rdev->si_devsw != &linuxcdevsw) 1745 return (-1U); 1746 ldev = inode->v_rdev->si_drv1; 1747 if (ldev == NULL) 1748 return (-1U); 1749 1750 return (minor(ldev->dev)); 1751 } 1752 1753 struct fileops linuxfileops = { 1754 .fo_read = linux_file_read, 1755 .fo_write = linux_file_write, 1756 .fo_truncate = invfo_truncate, 1757 .fo_kqfilter = linux_file_kqfilter, 1758 .fo_stat = linux_file_stat, 1759 .fo_fill_kinfo = linux_file_fill_kinfo, 1760 .fo_poll = linux_file_poll, 1761 .fo_close = linux_file_close, 1762 .fo_ioctl = linux_file_ioctl, 1763 .fo_mmap = linux_file_mmap, 1764 .fo_chmod = invfo_chmod, 1765 .fo_chown = invfo_chown, 1766 .fo_sendfile = invfo_sendfile, 1767 .fo_flags = DFLAG_PASSABLE, 1768 }; 1769 1770 /* 1771 * Hash of vmmap addresses. This is infrequently accessed and does not 1772 * need to be particularly large. This is done because we must store the 1773 * caller's idea of the map size to properly unmap. 1774 */ 1775 struct vmmap { 1776 LIST_ENTRY(vmmap) vm_next; 1777 void *vm_addr; 1778 unsigned long vm_size; 1779 }; 1780 1781 struct vmmaphd { 1782 struct vmmap *lh_first; 1783 }; 1784 #define VMMAP_HASH_SIZE 64 1785 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1786 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1787 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1788 static struct mtx vmmaplock; 1789 1790 static void 1791 vmmap_add(void *addr, unsigned long size) 1792 { 1793 struct vmmap *vmmap; 1794 1795 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1796 mtx_lock(&vmmaplock); 1797 vmmap->vm_size = size; 1798 vmmap->vm_addr = addr; 1799 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1800 mtx_unlock(&vmmaplock); 1801 } 1802 1803 static struct vmmap * 1804 vmmap_remove(void *addr) 1805 { 1806 struct vmmap *vmmap; 1807 1808 mtx_lock(&vmmaplock); 1809 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1810 if (vmmap->vm_addr == addr) 1811 break; 1812 if (vmmap) 1813 LIST_REMOVE(vmmap, vm_next); 1814 mtx_unlock(&vmmaplock); 1815 1816 return (vmmap); 1817 } 1818 1819 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1820 void * 1821 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1822 { 1823 void *addr; 1824 1825 addr = pmap_mapdev_attr(phys_addr, size, attr); 1826 if (addr == NULL) 1827 return (NULL); 1828 vmmap_add(addr, size); 1829 1830 return (addr); 1831 } 1832 #endif 1833 1834 void 1835 iounmap(void *addr) 1836 { 1837 struct vmmap *vmmap; 1838 1839 vmmap = vmmap_remove(addr); 1840 if (vmmap == NULL) 1841 return; 1842 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1843 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1844 #endif 1845 kfree(vmmap); 1846 } 1847 1848 void * 1849 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1850 { 1851 vm_offset_t off; 1852 size_t size; 1853 1854 size = count * PAGE_SIZE; 1855 off = kva_alloc(size); 1856 if (off == 0) 1857 return (NULL); 1858 vmmap_add((void *)off, size); 1859 pmap_qenter(off, pages, count); 1860 1861 return ((void *)off); 1862 } 1863 1864 void 1865 vunmap(void *addr) 1866 { 1867 struct vmmap *vmmap; 1868 1869 vmmap = vmmap_remove(addr); 1870 if (vmmap == NULL) 1871 return; 1872 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1873 kva_free((vm_offset_t)addr, vmmap->vm_size); 1874 kfree(vmmap); 1875 } 1876 1877 static char * 1878 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1879 { 1880 unsigned int len; 1881 char *p; 1882 va_list aq; 1883 1884 va_copy(aq, ap); 1885 len = vsnprintf(NULL, 0, fmt, aq); 1886 va_end(aq); 1887 1888 if (dev != NULL) 1889 p = devm_kmalloc(dev, len + 1, gfp); 1890 else 1891 p = kmalloc(len + 1, gfp); 1892 if (p != NULL) 1893 vsnprintf(p, len + 1, fmt, ap); 1894 1895 return (p); 1896 } 1897 1898 char * 1899 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1900 { 1901 1902 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1903 } 1904 1905 char * 1906 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1907 { 1908 va_list ap; 1909 char *p; 1910 1911 va_start(ap, fmt); 1912 p = devm_kvasprintf(dev, gfp, fmt, ap); 1913 va_end(ap); 1914 1915 return (p); 1916 } 1917 1918 char * 1919 kasprintf(gfp_t gfp, const char *fmt, ...) 1920 { 1921 va_list ap; 1922 char *p; 1923 1924 va_start(ap, fmt); 1925 p = kvasprintf(gfp, fmt, ap); 1926 va_end(ap); 1927 1928 return (p); 1929 } 1930 1931 static void 1932 linux_timer_callback_wrapper(void *context) 1933 { 1934 struct timer_list *timer; 1935 1936 timer = context; 1937 1938 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1939 /* try again later */ 1940 callout_reset(&timer->callout, 1, 1941 &linux_timer_callback_wrapper, timer); 1942 return; 1943 } 1944 1945 timer->function(timer->data); 1946 } 1947 1948 int 1949 mod_timer(struct timer_list *timer, int expires) 1950 { 1951 int ret; 1952 1953 timer->expires = expires; 1954 ret = callout_reset(&timer->callout, 1955 linux_timer_jiffies_until(expires), 1956 &linux_timer_callback_wrapper, timer); 1957 1958 MPASS(ret == 0 || ret == 1); 1959 1960 return (ret == 1); 1961 } 1962 1963 void 1964 add_timer(struct timer_list *timer) 1965 { 1966 1967 callout_reset(&timer->callout, 1968 linux_timer_jiffies_until(timer->expires), 1969 &linux_timer_callback_wrapper, timer); 1970 } 1971 1972 void 1973 add_timer_on(struct timer_list *timer, int cpu) 1974 { 1975 1976 callout_reset_on(&timer->callout, 1977 linux_timer_jiffies_until(timer->expires), 1978 &linux_timer_callback_wrapper, timer, cpu); 1979 } 1980 1981 int 1982 del_timer(struct timer_list *timer) 1983 { 1984 1985 if (callout_stop(&(timer)->callout) == -1) 1986 return (0); 1987 return (1); 1988 } 1989 1990 int 1991 del_timer_sync(struct timer_list *timer) 1992 { 1993 1994 if (callout_drain(&(timer)->callout) == -1) 1995 return (0); 1996 return (1); 1997 } 1998 1999 /* greatest common divisor, Euclid equation */ 2000 static uint64_t 2001 lkpi_gcd_64(uint64_t a, uint64_t b) 2002 { 2003 uint64_t an; 2004 uint64_t bn; 2005 2006 while (b != 0) { 2007 an = b; 2008 bn = a % b; 2009 a = an; 2010 b = bn; 2011 } 2012 return (a); 2013 } 2014 2015 uint64_t lkpi_nsec2hz_rem; 2016 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2017 uint64_t lkpi_nsec2hz_max; 2018 2019 uint64_t lkpi_usec2hz_rem; 2020 uint64_t lkpi_usec2hz_div = 1000000ULL; 2021 uint64_t lkpi_usec2hz_max; 2022 2023 uint64_t lkpi_msec2hz_rem; 2024 uint64_t lkpi_msec2hz_div = 1000ULL; 2025 uint64_t lkpi_msec2hz_max; 2026 2027 static void 2028 linux_timer_init(void *arg) 2029 { 2030 uint64_t gcd; 2031 2032 /* 2033 * Compute an internal HZ value which can divide 2**32 to 2034 * avoid timer rounding problems when the tick value wraps 2035 * around 2**32: 2036 */ 2037 linux_timer_hz_mask = 1; 2038 while (linux_timer_hz_mask < (unsigned long)hz) 2039 linux_timer_hz_mask *= 2; 2040 linux_timer_hz_mask--; 2041 2042 /* compute some internal constants */ 2043 2044 lkpi_nsec2hz_rem = hz; 2045 lkpi_usec2hz_rem = hz; 2046 lkpi_msec2hz_rem = hz; 2047 2048 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2049 lkpi_nsec2hz_rem /= gcd; 2050 lkpi_nsec2hz_div /= gcd; 2051 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2052 2053 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2054 lkpi_usec2hz_rem /= gcd; 2055 lkpi_usec2hz_div /= gcd; 2056 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2057 2058 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2059 lkpi_msec2hz_rem /= gcd; 2060 lkpi_msec2hz_div /= gcd; 2061 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2062 } 2063 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2064 2065 void 2066 linux_complete_common(struct completion *c, int all) 2067 { 2068 int wakeup_swapper; 2069 2070 sleepq_lock(c); 2071 if (all) { 2072 c->done = UINT_MAX; 2073 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2074 } else { 2075 if (c->done != UINT_MAX) 2076 c->done++; 2077 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2078 } 2079 sleepq_release(c); 2080 if (wakeup_swapper) 2081 kick_proc0(); 2082 } 2083 2084 /* 2085 * Indefinite wait for done != 0 with or without signals. 2086 */ 2087 int 2088 linux_wait_for_common(struct completion *c, int flags) 2089 { 2090 struct task_struct *task; 2091 int error; 2092 2093 if (SCHEDULER_STOPPED()) 2094 return (0); 2095 2096 task = current; 2097 2098 if (flags != 0) 2099 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2100 else 2101 flags = SLEEPQ_SLEEP; 2102 error = 0; 2103 for (;;) { 2104 sleepq_lock(c); 2105 if (c->done) 2106 break; 2107 sleepq_add(c, NULL, "completion", flags, 0); 2108 if (flags & SLEEPQ_INTERRUPTIBLE) { 2109 DROP_GIANT(); 2110 error = -sleepq_wait_sig(c, 0); 2111 PICKUP_GIANT(); 2112 if (error != 0) { 2113 linux_schedule_save_interrupt_value(task, error); 2114 error = -ERESTARTSYS; 2115 goto intr; 2116 } 2117 } else { 2118 DROP_GIANT(); 2119 sleepq_wait(c, 0); 2120 PICKUP_GIANT(); 2121 } 2122 } 2123 if (c->done != UINT_MAX) 2124 c->done--; 2125 sleepq_release(c); 2126 2127 intr: 2128 return (error); 2129 } 2130 2131 /* 2132 * Time limited wait for done != 0 with or without signals. 2133 */ 2134 int 2135 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2136 { 2137 struct task_struct *task; 2138 int end = jiffies + timeout; 2139 int error; 2140 2141 if (SCHEDULER_STOPPED()) 2142 return (0); 2143 2144 task = current; 2145 2146 if (flags != 0) 2147 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2148 else 2149 flags = SLEEPQ_SLEEP; 2150 2151 for (;;) { 2152 sleepq_lock(c); 2153 if (c->done) 2154 break; 2155 sleepq_add(c, NULL, "completion", flags, 0); 2156 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2157 2158 DROP_GIANT(); 2159 if (flags & SLEEPQ_INTERRUPTIBLE) 2160 error = -sleepq_timedwait_sig(c, 0); 2161 else 2162 error = -sleepq_timedwait(c, 0); 2163 PICKUP_GIANT(); 2164 2165 if (error != 0) { 2166 /* check for timeout */ 2167 if (error == -EWOULDBLOCK) { 2168 error = 0; /* timeout */ 2169 } else { 2170 /* signal happened */ 2171 linux_schedule_save_interrupt_value(task, error); 2172 error = -ERESTARTSYS; 2173 } 2174 goto done; 2175 } 2176 } 2177 if (c->done != UINT_MAX) 2178 c->done--; 2179 sleepq_release(c); 2180 2181 /* return how many jiffies are left */ 2182 error = linux_timer_jiffies_until(end); 2183 done: 2184 return (error); 2185 } 2186 2187 int 2188 linux_try_wait_for_completion(struct completion *c) 2189 { 2190 int isdone; 2191 2192 sleepq_lock(c); 2193 isdone = (c->done != 0); 2194 if (c->done != 0 && c->done != UINT_MAX) 2195 c->done--; 2196 sleepq_release(c); 2197 return (isdone); 2198 } 2199 2200 int 2201 linux_completion_done(struct completion *c) 2202 { 2203 int isdone; 2204 2205 sleepq_lock(c); 2206 isdone = (c->done != 0); 2207 sleepq_release(c); 2208 return (isdone); 2209 } 2210 2211 static void 2212 linux_cdev_deref(struct linux_cdev *ldev) 2213 { 2214 if (refcount_release(&ldev->refs) && 2215 ldev->kobj.ktype == &linux_cdev_ktype) 2216 kfree(ldev); 2217 } 2218 2219 static void 2220 linux_cdev_release(struct kobject *kobj) 2221 { 2222 struct linux_cdev *cdev; 2223 struct kobject *parent; 2224 2225 cdev = container_of(kobj, struct linux_cdev, kobj); 2226 parent = kobj->parent; 2227 linux_destroy_dev(cdev); 2228 linux_cdev_deref(cdev); 2229 kobject_put(parent); 2230 } 2231 2232 static void 2233 linux_cdev_static_release(struct kobject *kobj) 2234 { 2235 struct cdev *cdev; 2236 struct linux_cdev *ldev; 2237 2238 ldev = container_of(kobj, struct linux_cdev, kobj); 2239 cdev = ldev->cdev; 2240 if (cdev != NULL) { 2241 destroy_dev(cdev); 2242 ldev->cdev = NULL; 2243 } 2244 kobject_put(kobj->parent); 2245 } 2246 2247 int 2248 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2249 { 2250 int ret; 2251 2252 if (dev->devt != 0) { 2253 /* Set parent kernel object. */ 2254 ldev->kobj.parent = &dev->kobj; 2255 2256 /* 2257 * Unlike Linux we require the kobject of the 2258 * character device structure to have a valid name 2259 * before calling this function: 2260 */ 2261 if (ldev->kobj.name == NULL) 2262 return (-EINVAL); 2263 2264 ret = cdev_add(ldev, dev->devt, 1); 2265 if (ret) 2266 return (ret); 2267 } 2268 ret = device_add(dev); 2269 if (ret != 0 && dev->devt != 0) 2270 cdev_del(ldev); 2271 return (ret); 2272 } 2273 2274 void 2275 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) 2276 { 2277 device_del(dev); 2278 2279 if (dev->devt != 0) 2280 cdev_del(ldev); 2281 } 2282 2283 static void 2284 linux_destroy_dev(struct linux_cdev *ldev) 2285 { 2286 2287 if (ldev->cdev == NULL) 2288 return; 2289 2290 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2291 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 2292 2293 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2294 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2295 pause("ldevdtr", hz / 4); 2296 2297 destroy_dev(ldev->cdev); 2298 ldev->cdev = NULL; 2299 } 2300 2301 const struct kobj_type linux_cdev_ktype = { 2302 .release = linux_cdev_release, 2303 }; 2304 2305 const struct kobj_type linux_cdev_static_ktype = { 2306 .release = linux_cdev_static_release, 2307 }; 2308 2309 static void 2310 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2311 { 2312 struct notifier_block *nb; 2313 struct netdev_notifier_info ni; 2314 2315 nb = arg; 2316 ni.dev = (struct net_device *)ifp; 2317 if (linkstate == LINK_STATE_UP) 2318 nb->notifier_call(nb, NETDEV_UP, &ni); 2319 else 2320 nb->notifier_call(nb, NETDEV_DOWN, &ni); 2321 } 2322 2323 static void 2324 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2325 { 2326 struct notifier_block *nb; 2327 struct netdev_notifier_info ni; 2328 2329 nb = arg; 2330 ni.dev = (struct net_device *)ifp; 2331 nb->notifier_call(nb, NETDEV_REGISTER, &ni); 2332 } 2333 2334 static void 2335 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2336 { 2337 struct notifier_block *nb; 2338 struct netdev_notifier_info ni; 2339 2340 nb = arg; 2341 ni.dev = (struct net_device *)ifp; 2342 nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); 2343 } 2344 2345 static void 2346 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2347 { 2348 struct notifier_block *nb; 2349 struct netdev_notifier_info ni; 2350 2351 nb = arg; 2352 ni.dev = (struct net_device *)ifp; 2353 nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); 2354 } 2355 2356 static void 2357 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2358 { 2359 struct notifier_block *nb; 2360 struct netdev_notifier_info ni; 2361 2362 nb = arg; 2363 ni.dev = (struct net_device *)ifp; 2364 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); 2365 } 2366 2367 int 2368 register_netdevice_notifier(struct notifier_block *nb) 2369 { 2370 2371 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2372 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2373 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2374 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2375 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2376 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2377 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2378 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2379 2380 return (0); 2381 } 2382 2383 int 2384 register_inetaddr_notifier(struct notifier_block *nb) 2385 { 2386 2387 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2388 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2389 return (0); 2390 } 2391 2392 int 2393 unregister_netdevice_notifier(struct notifier_block *nb) 2394 { 2395 2396 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2397 nb->tags[NETDEV_UP]); 2398 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2399 nb->tags[NETDEV_REGISTER]); 2400 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2401 nb->tags[NETDEV_UNREGISTER]); 2402 EVENTHANDLER_DEREGISTER(iflladdr_event, 2403 nb->tags[NETDEV_CHANGEADDR]); 2404 2405 return (0); 2406 } 2407 2408 int 2409 unregister_inetaddr_notifier(struct notifier_block *nb) 2410 { 2411 2412 EVENTHANDLER_DEREGISTER(ifaddr_event, 2413 nb->tags[NETDEV_CHANGEIFADDR]); 2414 2415 return (0); 2416 } 2417 2418 struct list_sort_thunk { 2419 int (*cmp)(void *, struct list_head *, struct list_head *); 2420 void *priv; 2421 }; 2422 2423 static inline int 2424 linux_le_cmp(void *priv, const void *d1, const void *d2) 2425 { 2426 struct list_head *le1, *le2; 2427 struct list_sort_thunk *thunk; 2428 2429 thunk = priv; 2430 le1 = *(__DECONST(struct list_head **, d1)); 2431 le2 = *(__DECONST(struct list_head **, d2)); 2432 return ((thunk->cmp)(thunk->priv, le1, le2)); 2433 } 2434 2435 void 2436 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2437 struct list_head *a, struct list_head *b)) 2438 { 2439 struct list_sort_thunk thunk; 2440 struct list_head **ar, *le; 2441 size_t count, i; 2442 2443 count = 0; 2444 list_for_each(le, head) 2445 count++; 2446 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2447 i = 0; 2448 list_for_each(le, head) 2449 ar[i++] = le; 2450 thunk.cmp = cmp; 2451 thunk.priv = priv; 2452 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2453 INIT_LIST_HEAD(head); 2454 for (i = 0; i < count; i++) 2455 list_add_tail(ar[i], head); 2456 free(ar, M_KMALLOC); 2457 } 2458 2459 void 2460 linux_irq_handler(void *ent) 2461 { 2462 struct irq_ent *irqe; 2463 2464 if (linux_set_current_flags(curthread, M_NOWAIT)) 2465 return; 2466 2467 irqe = ent; 2468 irqe->handler(irqe->irq, irqe->arg); 2469 } 2470 2471 #if defined(__i386__) || defined(__amd64__) 2472 int 2473 linux_wbinvd_on_all_cpus(void) 2474 { 2475 2476 pmap_invalidate_cache(); 2477 return (0); 2478 } 2479 #endif 2480 2481 int 2482 linux_on_each_cpu(void callback(void *), void *data) 2483 { 2484 2485 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2486 smp_no_rendezvous_barrier, data); 2487 return (0); 2488 } 2489 2490 int 2491 linux_in_atomic(void) 2492 { 2493 2494 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2495 } 2496 2497 struct linux_cdev * 2498 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2499 { 2500 dev_t dev = MKDEV(major, minor); 2501 struct cdev *cdev; 2502 2503 dev_lock(); 2504 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2505 struct linux_cdev *ldev = cdev->si_drv1; 2506 if (ldev->dev == dev && 2507 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2508 break; 2509 } 2510 } 2511 dev_unlock(); 2512 2513 return (cdev != NULL ? cdev->si_drv1 : NULL); 2514 } 2515 2516 int 2517 __register_chrdev(unsigned int major, unsigned int baseminor, 2518 unsigned int count, const char *name, 2519 const struct file_operations *fops) 2520 { 2521 struct linux_cdev *cdev; 2522 int ret = 0; 2523 int i; 2524 2525 for (i = baseminor; i < baseminor + count; i++) { 2526 cdev = cdev_alloc(); 2527 cdev->ops = fops; 2528 kobject_set_name(&cdev->kobj, name); 2529 2530 ret = cdev_add(cdev, makedev(major, i), 1); 2531 if (ret != 0) 2532 break; 2533 } 2534 return (ret); 2535 } 2536 2537 int 2538 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2539 unsigned int count, const char *name, 2540 const struct file_operations *fops, uid_t uid, 2541 gid_t gid, int mode) 2542 { 2543 struct linux_cdev *cdev; 2544 int ret = 0; 2545 int i; 2546 2547 for (i = baseminor; i < baseminor + count; i++) { 2548 cdev = cdev_alloc(); 2549 cdev->ops = fops; 2550 kobject_set_name(&cdev->kobj, name); 2551 2552 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2553 if (ret != 0) 2554 break; 2555 } 2556 return (ret); 2557 } 2558 2559 void 2560 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2561 unsigned int count, const char *name) 2562 { 2563 struct linux_cdev *cdevp; 2564 int i; 2565 2566 for (i = baseminor; i < baseminor + count; i++) { 2567 cdevp = linux_find_cdev(name, major, i); 2568 if (cdevp != NULL) 2569 cdev_del(cdevp); 2570 } 2571 } 2572 2573 void 2574 linux_dump_stack(void) 2575 { 2576 #ifdef STACK 2577 struct stack st; 2578 2579 stack_zero(&st); 2580 stack_save(&st); 2581 stack_print(&st); 2582 #endif 2583 } 2584 2585 int 2586 linuxkpi_net_ratelimit(void) 2587 { 2588 2589 return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, 2590 lkpi_net_maxpps)); 2591 } 2592 2593 #if defined(__i386__) || defined(__amd64__) 2594 bool linux_cpu_has_clflush; 2595 #endif 2596 2597 static void 2598 linux_compat_init(void *arg) 2599 { 2600 struct sysctl_oid *rootoid; 2601 int i; 2602 2603 #if defined(__i386__) || defined(__amd64__) 2604 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2605 #endif 2606 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2607 2608 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2609 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2610 kobject_init(&linux_class_root, &linux_class_ktype); 2611 kobject_set_name(&linux_class_root, "class"); 2612 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2613 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2614 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2615 kobject_set_name(&linux_root_device.kobj, "device"); 2616 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2617 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2618 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2619 linux_root_device.bsddev = root_bus; 2620 linux_class_misc.name = "misc"; 2621 class_register(&linux_class_misc); 2622 INIT_LIST_HEAD(&pci_drivers); 2623 INIT_LIST_HEAD(&pci_devices); 2624 spin_lock_init(&pci_lock); 2625 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2626 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2627 LIST_INIT(&vmmaphead[i]); 2628 init_waitqueue_head(&linux_bit_waitq); 2629 init_waitqueue_head(&linux_var_waitq); 2630 } 2631 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2632 2633 static void 2634 linux_compat_uninit(void *arg) 2635 { 2636 linux_kobject_kfree_name(&linux_class_root); 2637 linux_kobject_kfree_name(&linux_root_device.kobj); 2638 linux_kobject_kfree_name(&linux_class_misc.kobj); 2639 2640 mtx_destroy(&vmmaplock); 2641 spin_lock_destroy(&pci_lock); 2642 rw_destroy(&linux_vma_lock); 2643 } 2644 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2645 2646 /* 2647 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2648 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2649 * used. Assert these types have the same size, else some parts of the 2650 * LinuxKPI may not work like expected: 2651 */ 2652 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2653