/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(&s->m, MA_OWNED);
	else
		mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}

static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(vmap, &vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(vmap, &vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  {
	/* OBJT_MGTDEVICE */
	.cdev_pg_populate = linux_cdev_pager_populate,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
  {
	/* OBJT_DEVICE */
	.cdev_pg_fault = linux_cdev_pager_fault,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
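	/*
	 * Illustrative sketch (not part of the original file): a Linux-style
	 * ioctl handler typically looks like
	 *
	 *	static long
	 *	my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	 *	{
	 *		struct my_args a;
	 *
	 *		if (copy_from_user(&a, (void *)arg, sizeof(a)))
	 *			return (-EFAULT);
	 *		...
	 *	}
	 *
	 * and expects "arg" to be a user-space pointer, while FreeBSD's
	 * ioctl(2) path has already copied the argument into the kernel
	 * buffer "data".  The code below hands the driver a fake pointer
	 * inside the LINUX_IOCTL_MIN_PTR window and records the real kernel
	 * buffer in the task structure, so that linux_copyin() and
	 * linux_copyout() (via linux_remap_address()) can translate the
	 * driver's "user" access back into a plain memcpy().  "my_ioctl"
	 * and "my_args" are hypothetical names used only for illustration.
	 */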
953 if (size > 0) { 954 /* 955 * Setup hint for linux_copyin() and linux_copyout(). 956 * 957 * Background: Linux code expects a user-space address 958 * while FreeBSD supplies a kernel-space address. 959 */ 960 task->bsd_ioctl_data = data; 961 task->bsd_ioctl_len = size; 962 data = (void *)LINUX_IOCTL_MIN_PTR; 963 } else { 964 /* fetch user-space pointer */ 965 data = *(void **)data; 966 } 967 #ifdef COMPAT_FREEBSD32 968 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 969 /* try the compat IOCTL handler first */ 970 if (fop->compat_ioctl != NULL) { 971 error = -OPW(fp, td, fop->compat_ioctl(filp, 972 cmd, (u_long)data)); 973 } else { 974 error = ENOTTY; 975 } 976 977 /* fallback to the regular IOCTL handler, if any */ 978 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 979 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 980 cmd, (u_long)data)); 981 } 982 } else 983 #endif 984 { 985 if (fop->unlocked_ioctl != NULL) { 986 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 987 cmd, (u_long)data)); 988 } else { 989 error = ENOTTY; 990 } 991 } 992 if (size > 0) { 993 task->bsd_ioctl_data = NULL; 994 task->bsd_ioctl_len = 0; 995 } 996 997 if (error == EWOULDBLOCK) { 998 /* update kqfilter status, if any */ 999 linux_file_kqfilter_poll(filp, 1000 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1001 } else { 1002 error = linux_get_error(task, error); 1003 } 1004 return (error); 1005 } 1006 1007 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1008 1009 /* 1010 * This function atomically updates the poll wakeup state and returns 1011 * the previous state at the time of update. 1012 */ 1013 static uint8_t 1014 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1015 { 1016 int c, old; 1017 1018 c = v->counter; 1019 1020 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1021 c = old; 1022 1023 return (c); 1024 } 1025 1026 static int 1027 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1028 { 1029 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1030 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1031 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1032 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1033 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1034 }; 1035 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1036 1037 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1038 case LINUX_FWQ_STATE_QUEUED: 1039 linux_poll_wakeup(filp); 1040 return (1); 1041 default: 1042 return (0); 1043 } 1044 } 1045 1046 void 1047 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1048 { 1049 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1050 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1051 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1052 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1053 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1054 }; 1055 1056 /* check if we are called inside the select system call */ 1057 if (p == LINUX_POLL_TABLE_NORMAL) 1058 selrecord(curthread, &filp->f_selinfo); 1059 1060 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1061 case LINUX_FWQ_STATE_INIT: 1062 /* NOTE: file handles can only belong to one wait-queue */ 1063 filp->f_wait_queue.wqh = wqh; 1064 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1065 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1066 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1067 break; 
1068 default: 1069 break; 1070 } 1071 } 1072 1073 static void 1074 linux_poll_wait_dequeue(struct linux_file *filp) 1075 { 1076 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1077 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1078 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1079 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1080 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1081 }; 1082 1083 seldrain(&filp->f_selinfo); 1084 1085 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1086 case LINUX_FWQ_STATE_NOT_READY: 1087 case LINUX_FWQ_STATE_QUEUED: 1088 case LINUX_FWQ_STATE_READY: 1089 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1090 break; 1091 default: 1092 break; 1093 } 1094 } 1095 1096 void 1097 linux_poll_wakeup(struct linux_file *filp) 1098 { 1099 /* this function should be NULL-safe */ 1100 if (filp == NULL) 1101 return; 1102 1103 selwakeup(&filp->f_selinfo); 1104 1105 spin_lock(&filp->f_kqlock); 1106 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1107 LINUX_KQ_FLAG_NEED_WRITE; 1108 1109 /* make sure the "knote" gets woken up */ 1110 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1111 spin_unlock(&filp->f_kqlock); 1112 } 1113 1114 static void 1115 linux_file_kqfilter_detach(struct knote *kn) 1116 { 1117 struct linux_file *filp = kn->kn_hook; 1118 1119 spin_lock(&filp->f_kqlock); 1120 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1121 spin_unlock(&filp->f_kqlock); 1122 } 1123 1124 static int 1125 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1126 { 1127 struct linux_file *filp = kn->kn_hook; 1128 1129 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1130 1131 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1132 } 1133 1134 static int 1135 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1136 { 1137 struct linux_file *filp = kn->kn_hook; 1138 1139 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1140 1141 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 
1 : 0); 1142 } 1143 1144 static struct filterops linux_dev_kqfiltops_read = { 1145 .f_isfd = 1, 1146 .f_detach = linux_file_kqfilter_detach, 1147 .f_event = linux_file_kqfilter_read_event, 1148 }; 1149 1150 static struct filterops linux_dev_kqfiltops_write = { 1151 .f_isfd = 1, 1152 .f_detach = linux_file_kqfilter_detach, 1153 .f_event = linux_file_kqfilter_write_event, 1154 }; 1155 1156 static void 1157 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1158 { 1159 struct thread *td; 1160 const struct file_operations *fop; 1161 struct linux_cdev *ldev; 1162 int temp; 1163 1164 if ((filp->f_kqflags & kqflags) == 0) 1165 return; 1166 1167 td = curthread; 1168 1169 linux_get_fop(filp, &fop, &ldev); 1170 /* get the latest polling state */ 1171 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1172 linux_drop_fop(ldev); 1173 1174 spin_lock(&filp->f_kqlock); 1175 /* clear kqflags */ 1176 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1177 LINUX_KQ_FLAG_NEED_WRITE); 1178 /* update kqflags */ 1179 if ((temp & (POLLIN | POLLOUT)) != 0) { 1180 if ((temp & POLLIN) != 0) 1181 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1182 if ((temp & POLLOUT) != 0) 1183 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1184 1185 /* make sure the "knote" gets woken up */ 1186 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1187 } 1188 spin_unlock(&filp->f_kqlock); 1189 } 1190 1191 static int 1192 linux_file_kqfilter(struct file *file, struct knote *kn) 1193 { 1194 struct linux_file *filp; 1195 struct thread *td; 1196 int error; 1197 1198 td = curthread; 1199 filp = (struct linux_file *)file->f_data; 1200 filp->f_flags = file->f_flag; 1201 if (filp->f_op->poll == NULL) 1202 return (EINVAL); 1203 1204 spin_lock(&filp->f_kqlock); 1205 switch (kn->kn_filter) { 1206 case EVFILT_READ: 1207 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1208 kn->kn_fop = &linux_dev_kqfiltops_read; 1209 kn->kn_hook = filp; 1210 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1211 error = 0; 1212 break; 1213 case EVFILT_WRITE: 1214 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1215 kn->kn_fop = &linux_dev_kqfiltops_write; 1216 kn->kn_hook = filp; 1217 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1218 error = 0; 1219 break; 1220 default: 1221 error = EINVAL; 1222 break; 1223 } 1224 spin_unlock(&filp->f_kqlock); 1225 1226 if (error == 0) { 1227 linux_set_current(td); 1228 1229 /* update kqfilter status, if any */ 1230 linux_file_kqfilter_poll(filp, 1231 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1232 } 1233 return (error); 1234 } 1235 1236 static int 1237 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1238 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1239 int nprot, bool is_shared, struct thread *td) 1240 { 1241 struct task_struct *task; 1242 struct vm_area_struct *vmap; 1243 struct mm_struct *mm; 1244 struct linux_file *filp; 1245 vm_memattr_t attr; 1246 int error; 1247 1248 filp = (struct linux_file *)fp->f_data; 1249 filp->f_flags = fp->f_flag; 1250 1251 if (fop->mmap == NULL) 1252 return (EOPNOTSUPP); 1253 1254 linux_set_current(td); 1255 1256 /* 1257 * The same VM object might be shared by multiple processes 1258 * and the mm_struct is usually freed when a process exits. 1259 * 1260 * The atomic reference below makes sure the mm_struct is 1261 * available as long as the vmap is in the linux_vma_head. 
1262 */ 1263 task = current; 1264 mm = task->mm; 1265 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1266 return (EINVAL); 1267 1268 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1269 vmap->vm_start = 0; 1270 vmap->vm_end = size; 1271 vmap->vm_pgoff = *offset / PAGE_SIZE; 1272 vmap->vm_pfn = 0; 1273 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1274 if (is_shared) 1275 vmap->vm_flags |= VM_SHARED; 1276 vmap->vm_ops = NULL; 1277 vmap->vm_file = get_file(filp); 1278 vmap->vm_mm = mm; 1279 1280 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1281 error = linux_get_error(task, EINTR); 1282 } else { 1283 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1284 error = linux_get_error(task, error); 1285 up_write(&vmap->vm_mm->mmap_sem); 1286 } 1287 1288 if (error != 0) { 1289 linux_cdev_handle_free(vmap); 1290 return (error); 1291 } 1292 1293 attr = pgprot2cachemode(vmap->vm_page_prot); 1294 1295 if (vmap->vm_ops != NULL) { 1296 struct vm_area_struct *ptr; 1297 void *vm_private_data; 1298 bool vm_no_fault; 1299 1300 if (vmap->vm_ops->open == NULL || 1301 vmap->vm_ops->close == NULL || 1302 vmap->vm_private_data == NULL) { 1303 /* free allocated VM area struct */ 1304 linux_cdev_handle_free(vmap); 1305 return (EINVAL); 1306 } 1307 1308 vm_private_data = vmap->vm_private_data; 1309 1310 rw_wlock(&linux_vma_lock); 1311 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1312 if (ptr->vm_private_data == vm_private_data) 1313 break; 1314 } 1315 /* check if there is an existing VM area struct */ 1316 if (ptr != NULL) { 1317 /* check if the VM area structure is invalid */ 1318 if (ptr->vm_ops == NULL || 1319 ptr->vm_ops->open == NULL || 1320 ptr->vm_ops->close == NULL) { 1321 error = ESTALE; 1322 vm_no_fault = 1; 1323 } else { 1324 error = EEXIST; 1325 vm_no_fault = (ptr->vm_ops->fault == NULL); 1326 } 1327 } else { 1328 /* insert VM area structure into list */ 1329 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1330 error = 0; 1331 vm_no_fault = (vmap->vm_ops->fault == NULL); 1332 } 1333 rw_wunlock(&linux_vma_lock); 1334 1335 if (error != 0) { 1336 /* free allocated VM area struct */ 1337 linux_cdev_handle_free(vmap); 1338 /* check for stale VM area struct */ 1339 if (error != EEXIST) 1340 return (error); 1341 } 1342 1343 /* check if there is no fault handler */ 1344 if (vm_no_fault) { 1345 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1346 &linux_cdev_pager_ops[1], size, nprot, *offset, 1347 td->td_ucred); 1348 } else { 1349 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1350 &linux_cdev_pager_ops[0], size, nprot, *offset, 1351 td->td_ucred); 1352 } 1353 1354 /* check if allocating the VM object failed */ 1355 if (*object == NULL) { 1356 if (error == 0) { 1357 /* remove VM area struct from list */ 1358 linux_cdev_handle_remove(vmap); 1359 /* free allocated VM area struct */ 1360 linux_cdev_handle_free(vmap); 1361 } 1362 return (EINVAL); 1363 } 1364 } else { 1365 struct sglist *sg; 1366 1367 sg = sglist_alloc(1, M_WAITOK); 1368 sglist_append_phys(sg, 1369 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1370 1371 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1372 nprot, 0, td->td_ucred); 1373 1374 linux_cdev_handle_free(vmap); 1375 1376 if (*object == NULL) { 1377 sglist_free(sg); 1378 return (EINVAL); 1379 } 1380 } 1381 1382 if (attr != VM_MEMATTR_DEFAULT) { 1383 VM_OBJECT_WLOCK(*object); 1384 vm_object_set_memattr(*object, attr); 1385 VM_OBJECT_WUNLOCK(*object); 1386 } 1387 *offset = 0; 1388 return (0); 1389 } 1390 1391 struct cdevsw 
linuxcdevsw = { 1392 .d_version = D_VERSION, 1393 .d_fdopen = linux_dev_fdopen, 1394 .d_name = "lkpidev", 1395 }; 1396 1397 static int 1398 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1399 int flags, struct thread *td) 1400 { 1401 struct linux_file *filp; 1402 const struct file_operations *fop; 1403 struct linux_cdev *ldev; 1404 ssize_t bytes; 1405 int error; 1406 1407 error = 0; 1408 filp = (struct linux_file *)file->f_data; 1409 filp->f_flags = file->f_flag; 1410 /* XXX no support for I/O vectors currently */ 1411 if (uio->uio_iovcnt != 1) 1412 return (EOPNOTSUPP); 1413 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1414 return (EINVAL); 1415 linux_set_current(td); 1416 linux_get_fop(filp, &fop, &ldev); 1417 if (fop->read != NULL) { 1418 bytes = OPW(file, td, fop->read(filp, 1419 uio->uio_iov->iov_base, 1420 uio->uio_iov->iov_len, &uio->uio_offset)); 1421 if (bytes >= 0) { 1422 uio->uio_iov->iov_base = 1423 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1424 uio->uio_iov->iov_len -= bytes; 1425 uio->uio_resid -= bytes; 1426 } else { 1427 error = linux_get_error(current, -bytes); 1428 } 1429 } else 1430 error = ENXIO; 1431 1432 /* update kqfilter status, if any */ 1433 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1434 linux_drop_fop(ldev); 1435 1436 return (error); 1437 } 1438 1439 static int 1440 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1441 int flags, struct thread *td) 1442 { 1443 struct linux_file *filp; 1444 const struct file_operations *fop; 1445 struct linux_cdev *ldev; 1446 ssize_t bytes; 1447 int error; 1448 1449 filp = (struct linux_file *)file->f_data; 1450 filp->f_flags = file->f_flag; 1451 /* XXX no support for I/O vectors currently */ 1452 if (uio->uio_iovcnt != 1) 1453 return (EOPNOTSUPP); 1454 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1455 return (EINVAL); 1456 linux_set_current(td); 1457 linux_get_fop(filp, &fop, &ldev); 1458 if (fop->write != NULL) { 1459 bytes = OPW(file, td, fop->write(filp, 1460 uio->uio_iov->iov_base, 1461 uio->uio_iov->iov_len, &uio->uio_offset)); 1462 if (bytes >= 0) { 1463 uio->uio_iov->iov_base = 1464 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1465 uio->uio_iov->iov_len -= bytes; 1466 uio->uio_resid -= bytes; 1467 error = 0; 1468 } else { 1469 error = linux_get_error(current, -bytes); 1470 } 1471 } else 1472 error = ENXIO; 1473 1474 /* update kqfilter status, if any */ 1475 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1476 1477 linux_drop_fop(ldev); 1478 1479 return (error); 1480 } 1481 1482 static int 1483 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1484 struct thread *td) 1485 { 1486 struct linux_file *filp; 1487 const struct file_operations *fop; 1488 struct linux_cdev *ldev; 1489 int revents; 1490 1491 filp = (struct linux_file *)file->f_data; 1492 filp->f_flags = file->f_flag; 1493 linux_set_current(td); 1494 linux_get_fop(filp, &fop, &ldev); 1495 if (fop->poll != NULL) { 1496 revents = OPW(file, td, fop->poll(filp, 1497 LINUX_POLL_TABLE_NORMAL)) & events; 1498 } else { 1499 revents = 0; 1500 } 1501 linux_drop_fop(ldev); 1502 return (revents); 1503 } 1504 1505 static int 1506 linux_file_close(struct file *file, struct thread *td) 1507 { 1508 struct linux_file *filp; 1509 int (*release)(struct inode *, struct linux_file *); 1510 const struct file_operations *fop; 1511 struct linux_cdev *ldev; 1512 int error; 1513 1514 filp = (struct linux_file *)file->f_data; 1515 1516 KASSERT(file_count(filp) == 0, 1517 ("File refcount(%d) is 
not zero", file_count(filp))); 1518 1519 if (td == NULL) 1520 td = curthread; 1521 1522 error = 0; 1523 filp->f_flags = file->f_flag; 1524 linux_set_current(td); 1525 linux_poll_wait_dequeue(filp); 1526 linux_get_fop(filp, &fop, &ldev); 1527 /* 1528 * Always use the real release function, if any, to avoid 1529 * leaking device resources: 1530 */ 1531 release = filp->f_op->release; 1532 if (release != NULL) 1533 error = -OPW(file, td, release(filp->f_vnode, filp)); 1534 funsetown(&filp->f_sigio); 1535 if (filp->f_vnode != NULL) 1536 vdrop(filp->f_vnode); 1537 linux_drop_fop(ldev); 1538 ldev = filp->f_cdev; 1539 if (ldev != NULL) 1540 linux_cdev_deref(ldev); 1541 kfree(filp); 1542 1543 return (error); 1544 } 1545 1546 static int 1547 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1548 struct thread *td) 1549 { 1550 struct linux_file *filp; 1551 const struct file_operations *fop; 1552 struct linux_cdev *ldev; 1553 struct fiodgname_arg *fgn; 1554 const char *p; 1555 int error, i; 1556 1557 error = 0; 1558 filp = (struct linux_file *)fp->f_data; 1559 filp->f_flags = fp->f_flag; 1560 linux_get_fop(filp, &fop, &ldev); 1561 1562 linux_set_current(td); 1563 switch (cmd) { 1564 case FIONBIO: 1565 break; 1566 case FIOASYNC: 1567 if (fop->fasync == NULL) 1568 break; 1569 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1570 break; 1571 case FIOSETOWN: 1572 error = fsetown(*(int *)data, &filp->f_sigio); 1573 if (error == 0) { 1574 if (fop->fasync == NULL) 1575 break; 1576 error = -OPW(fp, td, fop->fasync(0, filp, 1577 fp->f_flag & FASYNC)); 1578 } 1579 break; 1580 case FIOGETOWN: 1581 *(int *)data = fgetown(&filp->f_sigio); 1582 break; 1583 case FIODGNAME: 1584 #ifdef COMPAT_FREEBSD32 1585 case FIODGNAME_32: 1586 #endif 1587 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1588 error = ENXIO; 1589 break; 1590 } 1591 fgn = data; 1592 p = devtoname(filp->f_cdev->cdev); 1593 i = strlen(p) + 1; 1594 if (i > fgn->len) { 1595 error = EINVAL; 1596 break; 1597 } 1598 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1599 break; 1600 default: 1601 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1602 break; 1603 } 1604 linux_drop_fop(ldev); 1605 return (error); 1606 } 1607 1608 static int 1609 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1610 vm_prot_t maxprot, int flags, struct file *fp, 1611 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1612 { 1613 /* 1614 * Character devices do not provide private mappings 1615 * of any kind: 1616 */ 1617 if ((maxprot & VM_PROT_WRITE) == 0 && 1618 (prot & VM_PROT_WRITE) != 0) 1619 return (EACCES); 1620 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1621 return (EINVAL); 1622 1623 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1624 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1625 } 1626 1627 static int 1628 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1629 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1630 struct thread *td) 1631 { 1632 struct linux_file *filp; 1633 const struct file_operations *fop; 1634 struct linux_cdev *ldev; 1635 struct mount *mp; 1636 struct vnode *vp; 1637 vm_object_t object; 1638 vm_prot_t maxprot; 1639 int error; 1640 1641 filp = (struct linux_file *)fp->f_data; 1642 1643 vp = filp->f_vnode; 1644 if (vp == NULL) 1645 return (EOPNOTSUPP); 1646 1647 /* 1648 * Ensure that file and memory protections are 1649 * compatible. 
1650 */ 1651 mp = vp->v_mount; 1652 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1653 maxprot = VM_PROT_NONE; 1654 if ((prot & VM_PROT_EXECUTE) != 0) 1655 return (EACCES); 1656 } else 1657 maxprot = VM_PROT_EXECUTE; 1658 if ((fp->f_flag & FREAD) != 0) 1659 maxprot |= VM_PROT_READ; 1660 else if ((prot & VM_PROT_READ) != 0) 1661 return (EACCES); 1662 1663 /* 1664 * If we are sharing potential changes via MAP_SHARED and we 1665 * are trying to get write permission although we opened it 1666 * without asking for it, bail out. 1667 * 1668 * Note that most character devices always share mappings. 1669 * 1670 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1671 * requests rather than doing it here. 1672 */ 1673 if ((flags & MAP_SHARED) != 0) { 1674 if ((fp->f_flag & FWRITE) != 0) 1675 maxprot |= VM_PROT_WRITE; 1676 else if ((prot & VM_PROT_WRITE) != 0) 1677 return (EACCES); 1678 } 1679 maxprot &= cap_maxprot; 1680 1681 linux_get_fop(filp, &fop, &ldev); 1682 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1683 &foff, fop, &object); 1684 if (error != 0) 1685 goto out; 1686 1687 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1688 foff, FALSE, td); 1689 if (error != 0) 1690 vm_object_deallocate(object); 1691 out: 1692 linux_drop_fop(ldev); 1693 return (error); 1694 } 1695 1696 static int 1697 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1698 struct thread *td) 1699 { 1700 struct linux_file *filp; 1701 struct vnode *vp; 1702 int error; 1703 1704 filp = (struct linux_file *)fp->f_data; 1705 if (filp->f_vnode == NULL) 1706 return (EOPNOTSUPP); 1707 1708 vp = filp->f_vnode; 1709 1710 vn_lock(vp, LK_SHARED | LK_RETRY); 1711 error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td); 1712 VOP_UNLOCK(vp); 1713 1714 return (error); 1715 } 1716 1717 static int 1718 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1719 struct filedesc *fdp) 1720 { 1721 struct linux_file *filp; 1722 struct vnode *vp; 1723 int error; 1724 1725 filp = fp->f_data; 1726 vp = filp->f_vnode; 1727 if (vp == NULL) { 1728 error = 0; 1729 kif->kf_type = KF_TYPE_DEV; 1730 } else { 1731 vref(vp); 1732 FILEDESC_SUNLOCK(fdp); 1733 error = vn_fill_kinfo_vnode(vp, kif); 1734 vrele(vp); 1735 kif->kf_type = KF_TYPE_VNODE; 1736 FILEDESC_SLOCK(fdp); 1737 } 1738 return (error); 1739 } 1740 1741 unsigned int 1742 linux_iminor(struct inode *inode) 1743 { 1744 struct linux_cdev *ldev; 1745 1746 if (inode == NULL || inode->v_rdev == NULL || 1747 inode->v_rdev->si_devsw != &linuxcdevsw) 1748 return (-1U); 1749 ldev = inode->v_rdev->si_drv1; 1750 if (ldev == NULL) 1751 return (-1U); 1752 1753 return (minor(ldev->dev)); 1754 } 1755 1756 struct fileops linuxfileops = { 1757 .fo_read = linux_file_read, 1758 .fo_write = linux_file_write, 1759 .fo_truncate = invfo_truncate, 1760 .fo_kqfilter = linux_file_kqfilter, 1761 .fo_stat = linux_file_stat, 1762 .fo_fill_kinfo = linux_file_fill_kinfo, 1763 .fo_poll = linux_file_poll, 1764 .fo_close = linux_file_close, 1765 .fo_ioctl = linux_file_ioctl, 1766 .fo_mmap = linux_file_mmap, 1767 .fo_chmod = invfo_chmod, 1768 .fo_chown = invfo_chown, 1769 .fo_sendfile = invfo_sendfile, 1770 .fo_flags = DFLAG_PASSABLE, 1771 }; 1772 1773 /* 1774 * Hash of vmmap addresses. This is infrequently accessed and does not 1775 * need to be particularly large. This is done because we must store the 1776 * caller's idea of the map size to properly unmap. 
1777 */ 1778 struct vmmap { 1779 LIST_ENTRY(vmmap) vm_next; 1780 void *vm_addr; 1781 unsigned long vm_size; 1782 }; 1783 1784 struct vmmaphd { 1785 struct vmmap *lh_first; 1786 }; 1787 #define VMMAP_HASH_SIZE 64 1788 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1789 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1790 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1791 static struct mtx vmmaplock; 1792 1793 static void 1794 vmmap_add(void *addr, unsigned long size) 1795 { 1796 struct vmmap *vmmap; 1797 1798 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1799 mtx_lock(&vmmaplock); 1800 vmmap->vm_size = size; 1801 vmmap->vm_addr = addr; 1802 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1803 mtx_unlock(&vmmaplock); 1804 } 1805 1806 static struct vmmap * 1807 vmmap_remove(void *addr) 1808 { 1809 struct vmmap *vmmap; 1810 1811 mtx_lock(&vmmaplock); 1812 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1813 if (vmmap->vm_addr == addr) 1814 break; 1815 if (vmmap) 1816 LIST_REMOVE(vmmap, vm_next); 1817 mtx_unlock(&vmmaplock); 1818 1819 return (vmmap); 1820 } 1821 1822 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1823 void * 1824 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1825 { 1826 void *addr; 1827 1828 addr = pmap_mapdev_attr(phys_addr, size, attr); 1829 if (addr == NULL) 1830 return (NULL); 1831 vmmap_add(addr, size); 1832 1833 return (addr); 1834 } 1835 #endif 1836 1837 void 1838 iounmap(void *addr) 1839 { 1840 struct vmmap *vmmap; 1841 1842 vmmap = vmmap_remove(addr); 1843 if (vmmap == NULL) 1844 return; 1845 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1846 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1847 #endif 1848 kfree(vmmap); 1849 } 1850 1851 void * 1852 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1853 { 1854 vm_offset_t off; 1855 size_t size; 1856 1857 size = count * PAGE_SIZE; 1858 off = kva_alloc(size); 1859 if (off == 0) 1860 return (NULL); 1861 vmmap_add((void *)off, size); 1862 pmap_qenter(off, pages, count); 1863 1864 return ((void *)off); 1865 } 1866 1867 void 1868 vunmap(void *addr) 1869 { 1870 struct vmmap *vmmap; 1871 1872 vmmap = vmmap_remove(addr); 1873 if (vmmap == NULL) 1874 return; 1875 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1876 kva_free((vm_offset_t)addr, vmmap->vm_size); 1877 kfree(vmmap); 1878 } 1879 1880 static char * 1881 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1882 { 1883 unsigned int len; 1884 char *p; 1885 va_list aq; 1886 1887 va_copy(aq, ap); 1888 len = vsnprintf(NULL, 0, fmt, aq); 1889 va_end(aq); 1890 1891 if (dev != NULL) 1892 p = devm_kmalloc(dev, len + 1, gfp); 1893 else 1894 p = kmalloc(len + 1, gfp); 1895 if (p != NULL) 1896 vsnprintf(p, len + 1, fmt, ap); 1897 1898 return (p); 1899 } 1900 1901 char * 1902 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1903 { 1904 1905 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1906 } 1907 1908 char * 1909 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1910 { 1911 va_list ap; 1912 char *p; 1913 1914 va_start(ap, fmt); 1915 p = devm_kvasprintf(dev, gfp, fmt, ap); 1916 va_end(ap); 1917 1918 return (p); 1919 } 1920 1921 char * 1922 kasprintf(gfp_t gfp, const char *fmt, ...) 
1923 { 1924 va_list ap; 1925 char *p; 1926 1927 va_start(ap, fmt); 1928 p = kvasprintf(gfp, fmt, ap); 1929 va_end(ap); 1930 1931 return (p); 1932 } 1933 1934 static void 1935 linux_timer_callback_wrapper(void *context) 1936 { 1937 struct timer_list *timer; 1938 1939 timer = context; 1940 1941 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1942 /* try again later */ 1943 callout_reset(&timer->callout, 1, 1944 &linux_timer_callback_wrapper, timer); 1945 return; 1946 } 1947 1948 timer->function(timer->data); 1949 } 1950 1951 int 1952 mod_timer(struct timer_list *timer, int expires) 1953 { 1954 int ret; 1955 1956 timer->expires = expires; 1957 ret = callout_reset(&timer->callout, 1958 linux_timer_jiffies_until(expires), 1959 &linux_timer_callback_wrapper, timer); 1960 1961 MPASS(ret == 0 || ret == 1); 1962 1963 return (ret == 1); 1964 } 1965 1966 void 1967 add_timer(struct timer_list *timer) 1968 { 1969 1970 callout_reset(&timer->callout, 1971 linux_timer_jiffies_until(timer->expires), 1972 &linux_timer_callback_wrapper, timer); 1973 } 1974 1975 void 1976 add_timer_on(struct timer_list *timer, int cpu) 1977 { 1978 1979 callout_reset_on(&timer->callout, 1980 linux_timer_jiffies_until(timer->expires), 1981 &linux_timer_callback_wrapper, timer, cpu); 1982 } 1983 1984 int 1985 del_timer(struct timer_list *timer) 1986 { 1987 1988 if (callout_stop(&(timer)->callout) == -1) 1989 return (0); 1990 return (1); 1991 } 1992 1993 int 1994 del_timer_sync(struct timer_list *timer) 1995 { 1996 1997 if (callout_drain(&(timer)->callout) == -1) 1998 return (0); 1999 return (1); 2000 } 2001 2002 /* greatest common divisor, Euclid equation */ 2003 static uint64_t 2004 lkpi_gcd_64(uint64_t a, uint64_t b) 2005 { 2006 uint64_t an; 2007 uint64_t bn; 2008 2009 while (b != 0) { 2010 an = b; 2011 bn = a % b; 2012 a = an; 2013 b = bn; 2014 } 2015 return (a); 2016 } 2017 2018 uint64_t lkpi_nsec2hz_rem; 2019 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2020 uint64_t lkpi_nsec2hz_max; 2021 2022 uint64_t lkpi_usec2hz_rem; 2023 uint64_t lkpi_usec2hz_div = 1000000ULL; 2024 uint64_t lkpi_usec2hz_max; 2025 2026 uint64_t lkpi_msec2hz_rem; 2027 uint64_t lkpi_msec2hz_div = 1000ULL; 2028 uint64_t lkpi_msec2hz_max; 2029 2030 static void 2031 linux_timer_init(void *arg) 2032 { 2033 uint64_t gcd; 2034 2035 /* 2036 * Compute an internal HZ value which can divide 2**32 to 2037 * avoid timer rounding problems when the tick value wraps 2038 * around 2**32: 2039 */ 2040 linux_timer_hz_mask = 1; 2041 while (linux_timer_hz_mask < (unsigned long)hz) 2042 linux_timer_hz_mask *= 2; 2043 linux_timer_hz_mask--; 2044 2045 /* compute some internal constants */ 2046 2047 lkpi_nsec2hz_rem = hz; 2048 lkpi_usec2hz_rem = hz; 2049 lkpi_msec2hz_rem = hz; 2050 2051 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2052 lkpi_nsec2hz_rem /= gcd; 2053 lkpi_nsec2hz_div /= gcd; 2054 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2055 2056 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2057 lkpi_usec2hz_rem /= gcd; 2058 lkpi_usec2hz_div /= gcd; 2059 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2060 2061 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2062 lkpi_msec2hz_rem /= gcd; 2063 lkpi_msec2hz_div /= gcd; 2064 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2065 } 2066 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2067 2068 void 2069 linux_complete_common(struct completion *c, int all) 2070 { 2071 int wakeup_swapper; 2072 2073 sleepq_lock(c); 2074 if (all) { 2075 c->done = UINT_MAX; 2076 wakeup_swapper = 
sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2077 } else { 2078 if (c->done != UINT_MAX) 2079 c->done++; 2080 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2081 } 2082 sleepq_release(c); 2083 if (wakeup_swapper) 2084 kick_proc0(); 2085 } 2086 2087 /* 2088 * Indefinite wait for done != 0 with or without signals. 2089 */ 2090 int 2091 linux_wait_for_common(struct completion *c, int flags) 2092 { 2093 struct task_struct *task; 2094 int error; 2095 2096 if (SCHEDULER_STOPPED()) 2097 return (0); 2098 2099 task = current; 2100 2101 if (flags != 0) 2102 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2103 else 2104 flags = SLEEPQ_SLEEP; 2105 error = 0; 2106 for (;;) { 2107 sleepq_lock(c); 2108 if (c->done) 2109 break; 2110 sleepq_add(c, NULL, "completion", flags, 0); 2111 if (flags & SLEEPQ_INTERRUPTIBLE) { 2112 DROP_GIANT(); 2113 error = -sleepq_wait_sig(c, 0); 2114 PICKUP_GIANT(); 2115 if (error != 0) { 2116 linux_schedule_save_interrupt_value(task, error); 2117 error = -ERESTARTSYS; 2118 goto intr; 2119 } 2120 } else { 2121 DROP_GIANT(); 2122 sleepq_wait(c, 0); 2123 PICKUP_GIANT(); 2124 } 2125 } 2126 if (c->done != UINT_MAX) 2127 c->done--; 2128 sleepq_release(c); 2129 2130 intr: 2131 return (error); 2132 } 2133 2134 /* 2135 * Time limited wait for done != 0 with or without signals. 2136 */ 2137 int 2138 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2139 { 2140 struct task_struct *task; 2141 int end = jiffies + timeout; 2142 int error; 2143 2144 if (SCHEDULER_STOPPED()) 2145 return (0); 2146 2147 task = current; 2148 2149 if (flags != 0) 2150 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2151 else 2152 flags = SLEEPQ_SLEEP; 2153 2154 for (;;) { 2155 sleepq_lock(c); 2156 if (c->done) 2157 break; 2158 sleepq_add(c, NULL, "completion", flags, 0); 2159 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2160 2161 DROP_GIANT(); 2162 if (flags & SLEEPQ_INTERRUPTIBLE) 2163 error = -sleepq_timedwait_sig(c, 0); 2164 else 2165 error = -sleepq_timedwait(c, 0); 2166 PICKUP_GIANT(); 2167 2168 if (error != 0) { 2169 /* check for timeout */ 2170 if (error == -EWOULDBLOCK) { 2171 error = 0; /* timeout */ 2172 } else { 2173 /* signal happened */ 2174 linux_schedule_save_interrupt_value(task, error); 2175 error = -ERESTARTSYS; 2176 } 2177 goto done; 2178 } 2179 } 2180 if (c->done != UINT_MAX) 2181 c->done--; 2182 sleepq_release(c); 2183 2184 /* return how many jiffies are left */ 2185 error = linux_timer_jiffies_until(end); 2186 done: 2187 return (error); 2188 } 2189 2190 int 2191 linux_try_wait_for_completion(struct completion *c) 2192 { 2193 int isdone; 2194 2195 sleepq_lock(c); 2196 isdone = (c->done != 0); 2197 if (c->done != 0 && c->done != UINT_MAX) 2198 c->done--; 2199 sleepq_release(c); 2200 return (isdone); 2201 } 2202 2203 int 2204 linux_completion_done(struct completion *c) 2205 { 2206 int isdone; 2207 2208 sleepq_lock(c); 2209 isdone = (c->done != 0); 2210 sleepq_release(c); 2211 return (isdone); 2212 } 2213 2214 static void 2215 linux_cdev_deref(struct linux_cdev *ldev) 2216 { 2217 if (refcount_release(&ldev->refs) && 2218 ldev->kobj.ktype == &linux_cdev_ktype) 2219 kfree(ldev); 2220 } 2221 2222 static void 2223 linux_cdev_release(struct kobject *kobj) 2224 { 2225 struct linux_cdev *cdev; 2226 struct kobject *parent; 2227 2228 cdev = container_of(kobj, struct linux_cdev, kobj); 2229 parent = kobj->parent; 2230 linux_destroy_dev(cdev); 2231 linux_cdev_deref(cdev); 2232 kobject_put(parent); 2233 } 2234 2235 static void 2236 linux_cdev_static_release(struct kobject 
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux, we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};
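
/*
 * A minimal sketch of adding and removing a Linux-style character device
 * with the helpers above.  The "ldev" and "dev" objects are assumed to
 * have been allocated and initialized by the caller through the usual
 * LinuxKPI cdev/device paths, and "mydev" is an illustrative name only:
 *
 *	kobject_set_name(&ldev->kobj, "mydev");	// required before adding
 *	error = linux_cdev_device_add(ldev, dev);
 *	if (error != 0)
 *		return (error);			// negative errno on failure
 *	...
 *	linux_cdev_device_del(ldev, dev);	// teardown in reverse order
 */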
static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}
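
/*
 * A minimal sketch of consuming the notifier bridge above.  The callback
 * name and its body are assumptions for illustration; the events it
 * receives (NETDEV_UP, NETDEV_DOWN, NETDEV_REGISTER, ...) are the ones
 * generated by the ifnet event handlers registered above:
 *
 *	static int
 *	my_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *ptr)
 *	{
 *		// ptr points at a struct netdev_notifier_info
 *		return (NOTIFY_DONE);
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */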
struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

void
lkpi_irq_release(struct device *dev, struct irq_ent *irqe)
{

	if (irqe->tag != NULL)
		bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag);
	if (irqe->res != NULL)
		bus_release_resource(dev->bsddev, SYS_RES_IRQ,
		    rman_get_rid(irqe->res), irqe->res);
	list_del(&irqe->links);
}

void
lkpi_devm_irq_release(struct device *dev, void *p)
{
	struct irq_ent *irqe;

	if (dev == NULL || p == NULL)
		return;

	irqe = p;
	lkpi_irq_release(dev, irqe);
}

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	if (linux_set_current_flags(curthread, M_NOWAIT))
		return;

	irqe = ent;
	if (irqe->handler(irqe->irq, irqe->arg) == IRQ_WAKE_THREAD &&
	    irqe->thread_handler != NULL) {
		THREAD_SLEEPING_OK();
		irqe->thread_handler(irqe->irq, irqe->arg);
		THREAD_NO_SLEEPING();
	}
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_zero(&st);
	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}
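
/*
 * A minimal sketch of the Linux-style character device registration
 * helpers above.  "my_fops", the device name and the major/minor range
 * are illustrative assumptions; each minor in the requested range gets
 * its own struct linux_cdev on the FreeBSD side:
 *
 *	static const struct file_operations my_fops = {
 *		// .open, .read, .write, ... as needed
 *	};
 *
 *	error = __register_chrdev(120, 0, 4, "mydev", &my_fops);
 *	if (error != 0)
 *		return (error);
 *	...
 *	__unregister_chrdev(120, 0, 4, "mydev");
 */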
#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);

	CPU_COPY(&all_cpus, &cpu_online_mask);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
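
/*
 * Illustration of the pattern the assertion above protects, as commonly
 * found in code ported from Linux (variable names are made up):
 *
 *	void *ptr = obj;
 *	unsigned long cookie = (unsigned long)ptr;	// Linux idiom
 *	ptr = (void *)cookie;				// must round-trip,
 *							// hence the CTASSERT
 */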