1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/time.h> 55 #include <sys/user.h> 56 57 #include <vm/vm.h> 58 #include <vm/pmap.h> 59 #include <vm/vm_object.h> 60 #include <vm/vm_page.h> 61 #include <vm/vm_pager.h> 62 63 #include <machine/stdarg.h> 64 65 #if defined(__i386__) || defined(__amd64__) 66 #include <machine/md_var.h> 67 #endif 68 69 #include <linux/kobject.h> 70 #include <linux/cpu.h> 71 #include <linux/device.h> 72 #include <linux/slab.h> 73 #include <linux/module.h> 74 #include <linux/moduleparam.h> 75 #include <linux/cdev.h> 76 #include <linux/file.h> 77 #include <linux/sysfs.h> 78 #include <linux/mm.h> 79 #include <linux/io.h> 80 #include <linux/vmalloc.h> 81 #include <linux/netdevice.h> 82 #include <linux/timer.h> 83 #include <linux/interrupt.h> 84 #include <linux/uaccess.h> 85 #include <linux/list.h> 86 #include <linux/kthread.h> 87 #include <linux/kernel.h> 88 #include <linux/compat.h> 89 #include <linux/poll.h> 90 #include <linux/smp.h> 91 #include <linux/wait_bit.h> 92 93 #if defined(__i386__) || defined(__amd64__) 94 #include <asm/smp.h> 95 #endif 96 97 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 98 "LinuxKPI parameters"); 99 100 int linuxkpi_debug; 101 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 102 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. 
Clear to disable."); 103 104 static struct timeval lkpi_net_lastlog; 105 static int lkpi_net_curpps; 106 static int lkpi_net_maxpps = 99; 107 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 108 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 109 110 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 111 112 #include <linux/rbtree.h> 113 /* Undo Linux compat changes. */ 114 #undef RB_ROOT 115 #undef file 116 #undef cdev 117 #define RB_ROOT(head) (head)->rbh_root 118 119 static void linux_destroy_dev(struct linux_cdev *); 120 static void linux_cdev_deref(struct linux_cdev *ldev); 121 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 122 123 cpumask_t cpu_online_mask; 124 struct kobject linux_class_root; 125 struct device linux_root_device; 126 struct class linux_class_misc; 127 struct list_head pci_drivers; 128 struct list_head pci_devices; 129 spinlock_t pci_lock; 130 131 unsigned long linux_timer_hz_mask; 132 133 wait_queue_head_t linux_bit_waitq; 134 wait_queue_head_t linux_var_waitq; 135 136 int 137 panic_cmp(struct rb_node *one, struct rb_node *two) 138 { 139 panic("no cmp"); 140 } 141 142 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 143 144 int 145 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 146 { 147 va_list tmp_va; 148 int len; 149 char *old; 150 char *name; 151 char dummy; 152 153 old = kobj->name; 154 155 if (old && fmt == NULL) 156 return (0); 157 158 /* compute length of string */ 159 va_copy(tmp_va, args); 160 len = vsnprintf(&dummy, 0, fmt, tmp_va); 161 va_end(tmp_va); 162 163 /* account for zero termination */ 164 len++; 165 166 /* check for error */ 167 if (len < 1) 168 return (-EINVAL); 169 170 /* allocate memory for string */ 171 name = kzalloc(len, GFP_KERNEL); 172 if (name == NULL) 173 return (-ENOMEM); 174 vsnprintf(name, len, fmt, args); 175 kobj->name = name; 176 177 /* free old string */ 178 kfree(old); 179 180 /* filter new string */ 181 for (; *name != '\0'; name++) 182 if (*name == '/') 183 *name = '!'; 184 return (0); 185 } 186 187 int 188 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 189 { 190 va_list args; 191 int error; 192 193 va_start(args, fmt); 194 error = kobject_set_name_vargs(kobj, fmt, args); 195 va_end(args); 196 197 return (error); 198 } 199 200 static int 201 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 202 { 203 const struct kobj_type *t; 204 int error; 205 206 kobj->parent = parent; 207 error = sysfs_create_dir(kobj); 208 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 209 struct attribute **attr; 210 t = kobj->ktype; 211 212 for (attr = t->default_attrs; *attr != NULL; attr++) { 213 error = sysfs_create_file(kobj, *attr); 214 if (error) 215 break; 216 } 217 if (error) 218 sysfs_remove_dir(kobj); 219 } 220 return (error); 221 } 222 223 int 224 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
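/*
 * kobject_add() formats the kobject name (via kobject_set_name_vargs()
 * above, which also replaces '/' with '!') and then creates the backing
 * sysfs directory and the ktype's default attribute files in
 * kobject_add_complete().  Errors are reported as negative errno
 * values, Linux-style.
 */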
225 { 226 va_list args; 227 int error; 228 229 va_start(args, fmt); 230 error = kobject_set_name_vargs(kobj, fmt, args); 231 va_end(args); 232 if (error) 233 return (error); 234 235 return kobject_add_complete(kobj, parent); 236 } 237 238 void 239 linux_kobject_release(struct kref *kref) 240 { 241 struct kobject *kobj; 242 char *name; 243 244 kobj = container_of(kref, struct kobject, kref); 245 sysfs_remove_dir(kobj); 246 name = kobj->name; 247 if (kobj->ktype && kobj->ktype->release) 248 kobj->ktype->release(kobj); 249 kfree(name); 250 } 251 252 static void 253 linux_kobject_kfree(struct kobject *kobj) 254 { 255 kfree(kobj); 256 } 257 258 static void 259 linux_kobject_kfree_name(struct kobject *kobj) 260 { 261 if (kobj) { 262 kfree(kobj->name); 263 } 264 } 265 266 const struct kobj_type linux_kfree_type = { 267 .release = linux_kobject_kfree 268 }; 269 270 static void 271 linux_device_release(struct device *dev) 272 { 273 pr_debug("linux_device_release: %s\n", dev_name(dev)); 274 kfree(dev); 275 } 276 277 static ssize_t 278 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 279 { 280 struct class_attribute *dattr; 281 ssize_t error; 282 283 dattr = container_of(attr, struct class_attribute, attr); 284 error = -EIO; 285 if (dattr->show) 286 error = dattr->show(container_of(kobj, struct class, kobj), 287 dattr, buf); 288 return (error); 289 } 290 291 static ssize_t 292 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 293 size_t count) 294 { 295 struct class_attribute *dattr; 296 ssize_t error; 297 298 dattr = container_of(attr, struct class_attribute, attr); 299 error = -EIO; 300 if (dattr->store) 301 error = dattr->store(container_of(kobj, struct class, kobj), 302 dattr, buf, count); 303 return (error); 304 } 305 306 static void 307 linux_class_release(struct kobject *kobj) 308 { 309 struct class *class; 310 311 class = container_of(kobj, struct class, kobj); 312 if (class->class_release) 313 class->class_release(class); 314 } 315 316 static const struct sysfs_ops linux_class_sysfs = { 317 .show = linux_class_show, 318 .store = linux_class_store, 319 }; 320 321 const struct kobj_type linux_class_ktype = { 322 .release = linux_class_release, 323 .sysfs_ops = &linux_class_sysfs 324 }; 325 326 static void 327 linux_dev_release(struct kobject *kobj) 328 { 329 struct device *dev; 330 331 dev = container_of(kobj, struct device, kobj); 332 /* This is the precedence defined by linux. 
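A device-provided release callback takes priority; the class dev_release callback is only used when the device itself does not set one.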
*/ 333 if (dev->release) 334 dev->release(dev); 335 else if (dev->class && dev->class->dev_release) 336 dev->class->dev_release(dev); 337 } 338 339 static ssize_t 340 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 341 { 342 struct device_attribute *dattr; 343 ssize_t error; 344 345 dattr = container_of(attr, struct device_attribute, attr); 346 error = -EIO; 347 if (dattr->show) 348 error = dattr->show(container_of(kobj, struct device, kobj), 349 dattr, buf); 350 return (error); 351 } 352 353 static ssize_t 354 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 355 size_t count) 356 { 357 struct device_attribute *dattr; 358 ssize_t error; 359 360 dattr = container_of(attr, struct device_attribute, attr); 361 error = -EIO; 362 if (dattr->store) 363 error = dattr->store(container_of(kobj, struct device, kobj), 364 dattr, buf, count); 365 return (error); 366 } 367 368 static const struct sysfs_ops linux_dev_sysfs = { 369 .show = linux_dev_show, 370 .store = linux_dev_store, 371 }; 372 373 const struct kobj_type linux_dev_ktype = { 374 .release = linux_dev_release, 375 .sysfs_ops = &linux_dev_sysfs 376 }; 377 378 struct device * 379 device_create(struct class *class, struct device *parent, dev_t devt, 380 void *drvdata, const char *fmt, ...) 381 { 382 struct device *dev; 383 va_list args; 384 385 dev = kzalloc(sizeof(*dev), M_WAITOK); 386 dev->parent = parent; 387 dev->class = class; 388 dev->devt = devt; 389 dev->driver_data = drvdata; 390 dev->release = linux_device_release; 391 va_start(args, fmt); 392 kobject_set_name_vargs(&dev->kobj, fmt, args); 393 va_end(args); 394 device_register(dev); 395 396 return (dev); 397 } 398 399 int 400 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 401 struct kobject *parent, const char *fmt, ...) 
402 { 403 va_list args; 404 int error; 405 406 kobject_init(kobj, ktype); 407 kobj->ktype = ktype; 408 kobj->parent = parent; 409 kobj->name = NULL; 410 411 va_start(args, fmt); 412 error = kobject_set_name_vargs(kobj, fmt, args); 413 va_end(args); 414 if (error) 415 return (error); 416 return kobject_add_complete(kobj, parent); 417 } 418 419 static void 420 linux_kq_lock(void *arg) 421 { 422 spinlock_t *s = arg; 423 424 spin_lock(s); 425 } 426 static void 427 linux_kq_unlock(void *arg) 428 { 429 spinlock_t *s = arg; 430 431 spin_unlock(s); 432 } 433 434 static void 435 linux_kq_assert_lock(void *arg, int what) 436 { 437 #ifdef INVARIANTS 438 spinlock_t *s = arg; 439 440 if (what == LA_LOCKED) 441 mtx_assert(&s->m, MA_OWNED); 442 else 443 mtx_assert(&s->m, MA_NOTOWNED); 444 #endif 445 } 446 447 static void 448 linux_file_kqfilter_poll(struct linux_file *, int); 449 450 struct linux_file * 451 linux_file_alloc(void) 452 { 453 struct linux_file *filp; 454 455 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 456 457 /* set initial refcount */ 458 filp->f_count = 1; 459 460 /* setup fields needed by kqueue support */ 461 spin_lock_init(&filp->f_kqlock); 462 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 463 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 464 465 return (filp); 466 } 467 468 void 469 linux_file_free(struct linux_file *filp) 470 { 471 if (filp->_file == NULL) { 472 if (filp->f_shmem != NULL) 473 vm_object_deallocate(filp->f_shmem); 474 kfree(filp); 475 } else { 476 /* 477 * The close method of the character device or file 478 * will free the linux_file structure: 479 */ 480 _fdrop(filp->_file, curthread); 481 } 482 } 483 484 static int 485 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 486 vm_page_t *mres) 487 { 488 struct vm_area_struct *vmap; 489 490 vmap = linux_cdev_handle_find(vm_obj->handle); 491 492 MPASS(vmap != NULL); 493 MPASS(vmap->vm_private_data == vm_obj->handle); 494 495 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 496 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 497 vm_page_t page; 498 499 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 500 /* 501 * If the passed in result page is a fake 502 * page, update it with the new physical 503 * address. 504 */ 505 page = *mres; 506 vm_page_updatefake(page, paddr, vm_obj->memattr); 507 } else { 508 /* 509 * Replace the passed in "mres" page with our 510 * own fake page and free up the all of the 511 * original pages. 512 */ 513 VM_OBJECT_WUNLOCK(vm_obj); 514 page = vm_page_getfake(paddr, vm_obj->memattr); 515 VM_OBJECT_WLOCK(vm_obj); 516 517 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 518 *mres = page; 519 } 520 vm_page_valid(page); 521 return (VM_PAGER_OK); 522 } 523 return (VM_PAGER_FAIL); 524 } 525 526 static int 527 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 528 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 529 { 530 struct vm_area_struct *vmap; 531 int err; 532 533 /* get VM area structure */ 534 vmap = linux_cdev_handle_find(vm_obj->handle); 535 MPASS(vmap != NULL); 536 MPASS(vmap->vm_private_data == vm_obj->handle); 537 538 VM_OBJECT_WUNLOCK(vm_obj); 539 540 linux_set_current(curthread); 541 542 down_write(&vmap->vm_mm->mmap_sem); 543 if (unlikely(vmap->vm_ops == NULL)) { 544 err = VM_FAULT_SIGBUS; 545 } else { 546 struct vm_fault vmf; 547 548 /* fill out VM fault structure */ 549 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 550 vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; 551 vmf.pgoff = 0; 552 vmf.page = NULL; 553 vmf.vma = vmap; 554 555 vmap->vm_pfn_count = 0; 556 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 557 vmap->vm_obj = vm_obj; 558 559 err = vmap->vm_ops->fault(vmap, &vmf); 560 561 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 562 kern_yield(PRI_USER); 563 err = vmap->vm_ops->fault(vmap, &vmf); 564 } 565 } 566 567 /* translate return code */ 568 switch (err) { 569 case VM_FAULT_OOM: 570 err = VM_PAGER_AGAIN; 571 break; 572 case VM_FAULT_SIGBUS: 573 err = VM_PAGER_BAD; 574 break; 575 case VM_FAULT_NOPAGE: 576 /* 577 * By contract the fault handler will return having 578 * busied all the pages itself. If pidx is already 579 * found in the object, it will simply xbusy the first 580 * page and return with vm_pfn_count set to 1. 581 */ 582 *first = vmap->vm_pfn_first; 583 *last = *first + vmap->vm_pfn_count - 1; 584 err = VM_PAGER_OK; 585 break; 586 default: 587 err = VM_PAGER_ERROR; 588 break; 589 } 590 up_write(&vmap->vm_mm->mmap_sem); 591 VM_OBJECT_WLOCK(vm_obj); 592 return (err); 593 } 594 595 static struct rwlock linux_vma_lock; 596 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 597 TAILQ_HEAD_INITIALIZER(linux_vma_head); 598 599 static void 600 linux_cdev_handle_free(struct vm_area_struct *vmap) 601 { 602 /* Drop reference on vm_file */ 603 if (vmap->vm_file != NULL) 604 fput(vmap->vm_file); 605 606 /* Drop reference on mm_struct */ 607 mmput(vmap->vm_mm); 608 609 kfree(vmap); 610 } 611 612 static void 613 linux_cdev_handle_remove(struct vm_area_struct *vmap) 614 { 615 rw_wlock(&linux_vma_lock); 616 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 617 rw_wunlock(&linux_vma_lock); 618 } 619 620 static struct vm_area_struct * 621 linux_cdev_handle_find(void *handle) 622 { 623 struct vm_area_struct *vmap; 624 625 rw_rlock(&linux_vma_lock); 626 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 627 if (vmap->vm_private_data == handle) 628 break; 629 } 630 rw_runlock(&linux_vma_lock); 631 return (vmap); 632 } 633 634 static int 635 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 636 vm_ooffset_t foff, struct ucred *cred, u_short *color) 637 { 638 639 MPASS(linux_cdev_handle_find(handle) != NULL); 640 *color = 0; 641 return (0); 642 } 643 644 static void 645 linux_cdev_pager_dtor(void *handle) 646 { 647 const struct vm_operations_struct *vm_ops; 648 struct vm_area_struct *vmap; 649 650 vmap = linux_cdev_handle_find(handle); 651 MPASS(vmap != NULL); 652 653 /* 654 * Remove handle before calling close operation to prevent 655 * other threads from reusing the handle pointer. 
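 * Once removed, linux_cdev_handle_find() can no longer return this
 * vmap, so the close() callback below runs without racing against new
 * lookups of the stale handle.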
656 */ 657 linux_cdev_handle_remove(vmap); 658 659 down_write(&vmap->vm_mm->mmap_sem); 660 vm_ops = vmap->vm_ops; 661 if (likely(vm_ops != NULL)) 662 vm_ops->close(vmap); 663 up_write(&vmap->vm_mm->mmap_sem); 664 665 linux_cdev_handle_free(vmap); 666 } 667 668 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 669 { 670 /* OBJT_MGTDEVICE */ 671 .cdev_pg_populate = linux_cdev_pager_populate, 672 .cdev_pg_ctor = linux_cdev_pager_ctor, 673 .cdev_pg_dtor = linux_cdev_pager_dtor 674 }, 675 { 676 /* OBJT_DEVICE */ 677 .cdev_pg_fault = linux_cdev_pager_fault, 678 .cdev_pg_ctor = linux_cdev_pager_ctor, 679 .cdev_pg_dtor = linux_cdev_pager_dtor 680 }, 681 }; 682 683 int 684 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 685 unsigned long size) 686 { 687 vm_object_t obj; 688 vm_page_t m; 689 690 obj = vma->vm_obj; 691 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 692 return (-ENOTSUP); 693 VM_OBJECT_RLOCK(obj); 694 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 695 m != NULL && m->pindex < OFF_TO_IDX(address + size); 696 m = TAILQ_NEXT(m, listq)) 697 pmap_remove_all(m); 698 VM_OBJECT_RUNLOCK(obj); 699 return (0); 700 } 701 702 static struct file_operations dummy_ldev_ops = { 703 /* XXXKIB */ 704 }; 705 706 static struct linux_cdev dummy_ldev = { 707 .ops = &dummy_ldev_ops, 708 }; 709 710 #define LDEV_SI_DTR 0x0001 711 #define LDEV_SI_REF 0x0002 712 713 static void 714 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 715 struct linux_cdev **dev) 716 { 717 struct linux_cdev *ldev; 718 u_int siref; 719 720 ldev = filp->f_cdev; 721 *fop = filp->f_op; 722 if (ldev != NULL) { 723 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 724 refcount_acquire(&ldev->refs); 725 } else { 726 for (siref = ldev->siref;;) { 727 if ((siref & LDEV_SI_DTR) != 0) { 728 ldev = &dummy_ldev; 729 *fop = ldev->ops; 730 siref = ldev->siref; 731 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 732 } else if (atomic_fcmpset_int(&ldev->siref, 733 &siref, siref + LDEV_SI_REF)) { 734 break; 735 } 736 } 737 } 738 } 739 *dev = ldev; 740 } 741 742 static void 743 linux_drop_fop(struct linux_cdev *ldev) 744 { 745 746 if (ldev == NULL) 747 return; 748 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 749 linux_cdev_deref(ldev); 750 } else { 751 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 752 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 753 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 754 } 755 } 756 757 #define OPW(fp,td,code) ({ \ 758 struct file *__fpop; \ 759 __typeof(code) __retval; \ 760 \ 761 __fpop = (td)->td_fpop; \ 762 (td)->td_fpop = (fp); \ 763 __retval = (code); \ 764 (td)->td_fpop = __fpop; \ 765 __retval; \ 766 }) 767 768 static int 769 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 770 struct file *file) 771 { 772 struct linux_cdev *ldev; 773 struct linux_file *filp; 774 const struct file_operations *fop; 775 int error; 776 777 ldev = dev->si_drv1; 778 779 filp = linux_file_alloc(); 780 filp->f_dentry = &filp->f_dentry_store; 781 filp->f_op = ldev->ops; 782 filp->f_mode = file->f_flag; 783 filp->f_flags = file->f_flag; 784 filp->f_vnode = file->f_vnode; 785 filp->_file = file; 786 refcount_acquire(&ldev->refs); 787 filp->f_cdev = ldev; 788 789 linux_set_current(td); 790 linux_get_fop(filp, &fop, &ldev); 791 792 if (fop->open != NULL) { 793 error = -fop->open(file->f_vnode, filp); 794 if (error != 0) { 795 linux_drop_fop(ldev); 796 linux_cdev_deref(filp->f_cdev); 797 kfree(filp); 798 return (error); 799 } 800 } 801 802 /* hold on to the vnode - 
used for fstat() */ 803 vhold(filp->f_vnode); 804 805 /* release the file from devfs */ 806 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 807 linux_drop_fop(ldev); 808 return (ENXIO); 809 } 810 811 #define LINUX_IOCTL_MIN_PTR 0x10000UL 812 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 813 814 static inline int 815 linux_remap_address(void **uaddr, size_t len) 816 { 817 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 818 819 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 820 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 821 struct task_struct *pts = current; 822 if (pts == NULL) { 823 *uaddr = NULL; 824 return (1); 825 } 826 827 /* compute data offset */ 828 uaddr_val -= LINUX_IOCTL_MIN_PTR; 829 830 /* check that length is within bounds */ 831 if ((len > IOCPARM_MAX) || 832 (uaddr_val + len) > pts->bsd_ioctl_len) { 833 *uaddr = NULL; 834 return (1); 835 } 836 837 /* re-add kernel buffer address */ 838 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 839 840 /* update address location */ 841 *uaddr = (void *)uaddr_val; 842 return (1); 843 } 844 return (0); 845 } 846 847 int 848 linux_copyin(const void *uaddr, void *kaddr, size_t len) 849 { 850 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 851 if (uaddr == NULL) 852 return (-EFAULT); 853 memcpy(kaddr, uaddr, len); 854 return (0); 855 } 856 return (-copyin(uaddr, kaddr, len)); 857 } 858 859 int 860 linux_copyout(const void *kaddr, void *uaddr, size_t len) 861 { 862 if (linux_remap_address(&uaddr, len)) { 863 if (uaddr == NULL) 864 return (-EFAULT); 865 memcpy(uaddr, kaddr, len); 866 return (0); 867 } 868 return (-copyout(kaddr, uaddr, len)); 869 } 870 871 size_t 872 linux_clear_user(void *_uaddr, size_t _len) 873 { 874 uint8_t *uaddr = _uaddr; 875 size_t len = _len; 876 877 /* make sure uaddr is aligned before going into the fast loop */ 878 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 879 if (subyte(uaddr, 0)) 880 return (_len); 881 uaddr++; 882 len--; 883 } 884 885 /* zero 8 bytes at a time */ 886 while (len > 7) { 887 #ifdef __LP64__ 888 if (suword64(uaddr, 0)) 889 return (_len); 890 #else 891 if (suword32(uaddr, 0)) 892 return (_len); 893 if (suword32(uaddr + 4, 0)) 894 return (_len); 895 #endif 896 uaddr += 8; 897 len -= 8; 898 } 899 900 /* zero fill end, if any */ 901 while (len > 0) { 902 if (subyte(uaddr, 0)) 903 return (_len); 904 uaddr++; 905 len--; 906 } 907 return (0); 908 } 909 910 int 911 linux_access_ok(const void *uaddr, size_t len) 912 { 913 uintptr_t saddr; 914 uintptr_t eaddr; 915 916 /* get start and end address */ 917 saddr = (uintptr_t)uaddr; 918 eaddr = (uintptr_t)uaddr + len; 919 920 /* verify addresses are valid for userspace */ 921 return ((saddr == eaddr) || 922 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 923 } 924 925 /* 926 * This function should return either EINTR or ERESTART depending on 927 * the signal type sent to this thread: 928 */ 929 static int 930 linux_get_error(struct task_struct *task, int error) 931 { 932 /* check for signal type interrupt code */ 933 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 934 error = -linux_schedule_get_interrupt_value(task); 935 if (error == 0) 936 error = EINTR; 937 } 938 return (error); 939 } 940 941 static int 942 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 943 const struct file_operations *fop, u_long cmd, caddr_t data, 944 struct thread *td) 945 { 946 struct task_struct *task = current; 947 unsigned size; 948 int error; 949 950 size = IOCPARM_LEN(cmd); 951 /* refer to logic in sys_ioctl() */ 
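/*
 * The size > 0 case below implements the pointer remapping used by
 * linux_remap_address() above: FreeBSD's ioctl path hands the handler a
 * kernel buffer in "data", while Linux handlers expect a user pointer
 * that they feed to copy_from_user()/copy_to_user().  The real buffer
 * and its length are therefore stashed in the task structure and the
 * handler is given the LINUX_IOCTL_MIN_PTR cookie instead, which
 * linux_copyin()/linux_copyout() translate back into a memcpy() on the
 * saved kernel buffer.
 *
 * Hypothetical driver-side view (sketch only, not part of this file):
 *
 *	static long
 *	foo_ioctl(struct linux_file *filp, unsigned int cmd, unsigned long arg)
 *	{
 *		struct foo_args a;
 *
 *		if (copy_from_user(&a, (void *)arg, sizeof(a)))
 *			return (-EFAULT);
 *		...
 *	}
 *
 * Here "arg" carries the cookie; copy_from_user() ends up in
 * linux_copyin(), which detects the cookie range and copies from the
 * buffer saved in task->bsd_ioctl_data instead of calling copyin().
 */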
952 if (size > 0) { 953 /* 954 * Setup hint for linux_copyin() and linux_copyout(). 955 * 956 * Background: Linux code expects a user-space address 957 * while FreeBSD supplies a kernel-space address. 958 */ 959 task->bsd_ioctl_data = data; 960 task->bsd_ioctl_len = size; 961 data = (void *)LINUX_IOCTL_MIN_PTR; 962 } else { 963 /* fetch user-space pointer */ 964 data = *(void **)data; 965 } 966 #if defined(__amd64__) 967 if (td->td_proc->p_elf_machine == EM_386) { 968 /* try the compat IOCTL handler first */ 969 if (fop->compat_ioctl != NULL) { 970 error = -OPW(fp, td, fop->compat_ioctl(filp, 971 cmd, (u_long)data)); 972 } else { 973 error = ENOTTY; 974 } 975 976 /* fallback to the regular IOCTL handler, if any */ 977 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 978 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 979 cmd, (u_long)data)); 980 } 981 } else 982 #endif 983 { 984 if (fop->unlocked_ioctl != NULL) { 985 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 986 cmd, (u_long)data)); 987 } else { 988 error = ENOTTY; 989 } 990 } 991 if (size > 0) { 992 task->bsd_ioctl_data = NULL; 993 task->bsd_ioctl_len = 0; 994 } 995 996 if (error == EWOULDBLOCK) { 997 /* update kqfilter status, if any */ 998 linux_file_kqfilter_poll(filp, 999 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1000 } else { 1001 error = linux_get_error(task, error); 1002 } 1003 return (error); 1004 } 1005 1006 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1007 1008 /* 1009 * This function atomically updates the poll wakeup state and returns 1010 * the previous state at the time of update. 1011 */ 1012 static uint8_t 1013 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1014 { 1015 int c, old; 1016 1017 c = v->counter; 1018 1019 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1020 c = old; 1021 1022 return (c); 1023 } 1024 1025 static int 1026 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1027 { 1028 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1029 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1030 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1031 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1032 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1033 }; 1034 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1035 1036 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1037 case LINUX_FWQ_STATE_QUEUED: 1038 linux_poll_wakeup(filp); 1039 return (1); 1040 default: 1041 return (0); 1042 } 1043 } 1044 1045 void 1046 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1047 { 1048 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1049 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1050 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1051 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1052 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1053 }; 1054 1055 /* check if we are called inside the select system call */ 1056 if (p == LINUX_POLL_TABLE_NORMAL) 1057 selrecord(curthread, &filp->f_selinfo); 1058 1059 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1060 case LINUX_FWQ_STATE_INIT: 1061 /* NOTE: file handles can only belong to one wait-queue */ 1062 filp->f_wait_queue.wqh = wqh; 1063 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1064 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1065 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1066 break; 
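/*
 * The tables here, in linux_poll_wakeup_callback() above and in
 * linux_poll_wait_dequeue() below form a small state machine:
 * linux_poll_wait() moves INIT to NOT_READY and, once the file has been
 * added to the wait queue, sets QUEUED (a READY state is re-armed back
 * to QUEUED); the wakeup callback moves QUEUED to READY and calls
 * linux_poll_wakeup(); the dequeue path resets any state to INIT and
 * detaches from the wait queue.  All transitions go through
 * linux_poll_wakeup_state(), which applies them with an atomic
 * compare-and-swap loop so a wakeup racing with poll() is not lost.
 */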
1067 default: 1068 break; 1069 } 1070 } 1071 1072 static void 1073 linux_poll_wait_dequeue(struct linux_file *filp) 1074 { 1075 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1076 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1077 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1078 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1079 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1080 }; 1081 1082 seldrain(&filp->f_selinfo); 1083 1084 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1085 case LINUX_FWQ_STATE_NOT_READY: 1086 case LINUX_FWQ_STATE_QUEUED: 1087 case LINUX_FWQ_STATE_READY: 1088 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1089 break; 1090 default: 1091 break; 1092 } 1093 } 1094 1095 void 1096 linux_poll_wakeup(struct linux_file *filp) 1097 { 1098 /* this function should be NULL-safe */ 1099 if (filp == NULL) 1100 return; 1101 1102 selwakeup(&filp->f_selinfo); 1103 1104 spin_lock(&filp->f_kqlock); 1105 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1106 LINUX_KQ_FLAG_NEED_WRITE; 1107 1108 /* make sure the "knote" gets woken up */ 1109 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1110 spin_unlock(&filp->f_kqlock); 1111 } 1112 1113 static void 1114 linux_file_kqfilter_detach(struct knote *kn) 1115 { 1116 struct linux_file *filp = kn->kn_hook; 1117 1118 spin_lock(&filp->f_kqlock); 1119 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1120 spin_unlock(&filp->f_kqlock); 1121 } 1122 1123 static int 1124 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1125 { 1126 struct linux_file *filp = kn->kn_hook; 1127 1128 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1129 1130 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1131 } 1132 1133 static int 1134 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1135 { 1136 struct linux_file *filp = kn->kn_hook; 1137 1138 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1139 1140 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 
1 : 0); 1141 } 1142 1143 static struct filterops linux_dev_kqfiltops_read = { 1144 .f_isfd = 1, 1145 .f_detach = linux_file_kqfilter_detach, 1146 .f_event = linux_file_kqfilter_read_event, 1147 }; 1148 1149 static struct filterops linux_dev_kqfiltops_write = { 1150 .f_isfd = 1, 1151 .f_detach = linux_file_kqfilter_detach, 1152 .f_event = linux_file_kqfilter_write_event, 1153 }; 1154 1155 static void 1156 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1157 { 1158 struct thread *td; 1159 const struct file_operations *fop; 1160 struct linux_cdev *ldev; 1161 int temp; 1162 1163 if ((filp->f_kqflags & kqflags) == 0) 1164 return; 1165 1166 td = curthread; 1167 1168 linux_get_fop(filp, &fop, &ldev); 1169 /* get the latest polling state */ 1170 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1171 linux_drop_fop(ldev); 1172 1173 spin_lock(&filp->f_kqlock); 1174 /* clear kqflags */ 1175 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1176 LINUX_KQ_FLAG_NEED_WRITE); 1177 /* update kqflags */ 1178 if ((temp & (POLLIN | POLLOUT)) != 0) { 1179 if ((temp & POLLIN) != 0) 1180 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1181 if ((temp & POLLOUT) != 0) 1182 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1183 1184 /* make sure the "knote" gets woken up */ 1185 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1186 } 1187 spin_unlock(&filp->f_kqlock); 1188 } 1189 1190 static int 1191 linux_file_kqfilter(struct file *file, struct knote *kn) 1192 { 1193 struct linux_file *filp; 1194 struct thread *td; 1195 int error; 1196 1197 td = curthread; 1198 filp = (struct linux_file *)file->f_data; 1199 filp->f_flags = file->f_flag; 1200 if (filp->f_op->poll == NULL) 1201 return (EINVAL); 1202 1203 spin_lock(&filp->f_kqlock); 1204 switch (kn->kn_filter) { 1205 case EVFILT_READ: 1206 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1207 kn->kn_fop = &linux_dev_kqfiltops_read; 1208 kn->kn_hook = filp; 1209 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1210 error = 0; 1211 break; 1212 case EVFILT_WRITE: 1213 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1214 kn->kn_fop = &linux_dev_kqfiltops_write; 1215 kn->kn_hook = filp; 1216 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1217 error = 0; 1218 break; 1219 default: 1220 error = EINVAL; 1221 break; 1222 } 1223 spin_unlock(&filp->f_kqlock); 1224 1225 if (error == 0) { 1226 linux_set_current(td); 1227 1228 /* update kqfilter status, if any */ 1229 linux_file_kqfilter_poll(filp, 1230 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1231 } 1232 return (error); 1233 } 1234 1235 static int 1236 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1237 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1238 int nprot, bool is_shared, struct thread *td) 1239 { 1240 struct task_struct *task; 1241 struct vm_area_struct *vmap; 1242 struct mm_struct *mm; 1243 struct linux_file *filp; 1244 vm_memattr_t attr; 1245 int error; 1246 1247 filp = (struct linux_file *)fp->f_data; 1248 filp->f_flags = fp->f_flag; 1249 1250 if (fop->mmap == NULL) 1251 return (EOPNOTSUPP); 1252 1253 linux_set_current(td); 1254 1255 /* 1256 * The same VM object might be shared by multiple processes 1257 * and the mm_struct is usually freed when a process exits. 1258 * 1259 * The atomic reference below makes sure the mm_struct is 1260 * available as long as the vmap is in the linux_vma_head. 
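 * The reference taken below with atomic_inc_not_zero() is dropped again
 * by linux_cdev_handle_free() via mmput() once the vmap is no longer
 * needed, either on an error path or when the cdev pager object backing
 * the mapping is destroyed.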
1261 */ 1262 task = current; 1263 mm = task->mm; 1264 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1265 return (EINVAL); 1266 1267 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1268 vmap->vm_start = 0; 1269 vmap->vm_end = size; 1270 vmap->vm_pgoff = *offset / PAGE_SIZE; 1271 vmap->vm_pfn = 0; 1272 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1273 if (is_shared) 1274 vmap->vm_flags |= VM_SHARED; 1275 vmap->vm_ops = NULL; 1276 vmap->vm_file = get_file(filp); 1277 vmap->vm_mm = mm; 1278 1279 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1280 error = linux_get_error(task, EINTR); 1281 } else { 1282 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1283 error = linux_get_error(task, error); 1284 up_write(&vmap->vm_mm->mmap_sem); 1285 } 1286 1287 if (error != 0) { 1288 linux_cdev_handle_free(vmap); 1289 return (error); 1290 } 1291 1292 attr = pgprot2cachemode(vmap->vm_page_prot); 1293 1294 if (vmap->vm_ops != NULL) { 1295 struct vm_area_struct *ptr; 1296 void *vm_private_data; 1297 bool vm_no_fault; 1298 1299 if (vmap->vm_ops->open == NULL || 1300 vmap->vm_ops->close == NULL || 1301 vmap->vm_private_data == NULL) { 1302 /* free allocated VM area struct */ 1303 linux_cdev_handle_free(vmap); 1304 return (EINVAL); 1305 } 1306 1307 vm_private_data = vmap->vm_private_data; 1308 1309 rw_wlock(&linux_vma_lock); 1310 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1311 if (ptr->vm_private_data == vm_private_data) 1312 break; 1313 } 1314 /* check if there is an existing VM area struct */ 1315 if (ptr != NULL) { 1316 /* check if the VM area structure is invalid */ 1317 if (ptr->vm_ops == NULL || 1318 ptr->vm_ops->open == NULL || 1319 ptr->vm_ops->close == NULL) { 1320 error = ESTALE; 1321 vm_no_fault = 1; 1322 } else { 1323 error = EEXIST; 1324 vm_no_fault = (ptr->vm_ops->fault == NULL); 1325 } 1326 } else { 1327 /* insert VM area structure into list */ 1328 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1329 error = 0; 1330 vm_no_fault = (vmap->vm_ops->fault == NULL); 1331 } 1332 rw_wunlock(&linux_vma_lock); 1333 1334 if (error != 0) { 1335 /* free allocated VM area struct */ 1336 linux_cdev_handle_free(vmap); 1337 /* check for stale VM area struct */ 1338 if (error != EEXIST) 1339 return (error); 1340 } 1341 1342 /* check if there is no fault handler */ 1343 if (vm_no_fault) { 1344 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1345 &linux_cdev_pager_ops[1], size, nprot, *offset, 1346 td->td_ucred); 1347 } else { 1348 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1349 &linux_cdev_pager_ops[0], size, nprot, *offset, 1350 td->td_ucred); 1351 } 1352 1353 /* check if allocating the VM object failed */ 1354 if (*object == NULL) { 1355 if (error == 0) { 1356 /* remove VM area struct from list */ 1357 linux_cdev_handle_remove(vmap); 1358 /* free allocated VM area struct */ 1359 linux_cdev_handle_free(vmap); 1360 } 1361 return (EINVAL); 1362 } 1363 } else { 1364 struct sglist *sg; 1365 1366 sg = sglist_alloc(1, M_WAITOK); 1367 sglist_append_phys(sg, 1368 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1369 1370 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1371 nprot, 0, td->td_ucred); 1372 1373 linux_cdev_handle_free(vmap); 1374 1375 if (*object == NULL) { 1376 sglist_free(sg); 1377 return (EINVAL); 1378 } 1379 } 1380 1381 if (attr != VM_MEMATTR_DEFAULT) { 1382 VM_OBJECT_WLOCK(*object); 1383 vm_object_set_memattr(*object, attr); 1384 VM_OBJECT_WUNLOCK(*object); 1385 } 1386 *offset = 0; 1387 return (0); 1388 } 1389 1390 struct cdevsw 
linuxcdevsw = { 1391 .d_version = D_VERSION, 1392 .d_fdopen = linux_dev_fdopen, 1393 .d_name = "lkpidev", 1394 }; 1395 1396 static int 1397 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1398 int flags, struct thread *td) 1399 { 1400 struct linux_file *filp; 1401 const struct file_operations *fop; 1402 struct linux_cdev *ldev; 1403 ssize_t bytes; 1404 int error; 1405 1406 error = 0; 1407 filp = (struct linux_file *)file->f_data; 1408 filp->f_flags = file->f_flag; 1409 /* XXX no support for I/O vectors currently */ 1410 if (uio->uio_iovcnt != 1) 1411 return (EOPNOTSUPP); 1412 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1413 return (EINVAL); 1414 linux_set_current(td); 1415 linux_get_fop(filp, &fop, &ldev); 1416 if (fop->read != NULL) { 1417 bytes = OPW(file, td, fop->read(filp, 1418 uio->uio_iov->iov_base, 1419 uio->uio_iov->iov_len, &uio->uio_offset)); 1420 if (bytes >= 0) { 1421 uio->uio_iov->iov_base = 1422 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1423 uio->uio_iov->iov_len -= bytes; 1424 uio->uio_resid -= bytes; 1425 } else { 1426 error = linux_get_error(current, -bytes); 1427 } 1428 } else 1429 error = ENXIO; 1430 1431 /* update kqfilter status, if any */ 1432 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1433 linux_drop_fop(ldev); 1434 1435 return (error); 1436 } 1437 1438 static int 1439 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1440 int flags, struct thread *td) 1441 { 1442 struct linux_file *filp; 1443 const struct file_operations *fop; 1444 struct linux_cdev *ldev; 1445 ssize_t bytes; 1446 int error; 1447 1448 filp = (struct linux_file *)file->f_data; 1449 filp->f_flags = file->f_flag; 1450 /* XXX no support for I/O vectors currently */ 1451 if (uio->uio_iovcnt != 1) 1452 return (EOPNOTSUPP); 1453 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1454 return (EINVAL); 1455 linux_set_current(td); 1456 linux_get_fop(filp, &fop, &ldev); 1457 if (fop->write != NULL) { 1458 bytes = OPW(file, td, fop->write(filp, 1459 uio->uio_iov->iov_base, 1460 uio->uio_iov->iov_len, &uio->uio_offset)); 1461 if (bytes >= 0) { 1462 uio->uio_iov->iov_base = 1463 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1464 uio->uio_iov->iov_len -= bytes; 1465 uio->uio_resid -= bytes; 1466 error = 0; 1467 } else { 1468 error = linux_get_error(current, -bytes); 1469 } 1470 } else 1471 error = ENXIO; 1472 1473 /* update kqfilter status, if any */ 1474 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1475 1476 linux_drop_fop(ldev); 1477 1478 return (error); 1479 } 1480 1481 static int 1482 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1483 struct thread *td) 1484 { 1485 struct linux_file *filp; 1486 const struct file_operations *fop; 1487 struct linux_cdev *ldev; 1488 int revents; 1489 1490 filp = (struct linux_file *)file->f_data; 1491 filp->f_flags = file->f_flag; 1492 linux_set_current(td); 1493 linux_get_fop(filp, &fop, &ldev); 1494 if (fop->poll != NULL) { 1495 revents = OPW(file, td, fop->poll(filp, 1496 LINUX_POLL_TABLE_NORMAL)) & events; 1497 } else { 1498 revents = 0; 1499 } 1500 linux_drop_fop(ldev); 1501 return (revents); 1502 } 1503 1504 static int 1505 linux_file_close(struct file *file, struct thread *td) 1506 { 1507 struct linux_file *filp; 1508 int (*release)(struct inode *, struct linux_file *); 1509 const struct file_operations *fop; 1510 struct linux_cdev *ldev; 1511 int error; 1512 1513 filp = (struct linux_file *)file->f_data; 1514 1515 KASSERT(file_count(filp) == 0, 1516 ("File refcount(%d) is 
not zero", file_count(filp))); 1517 1518 if (td == NULL) 1519 td = curthread; 1520 1521 error = 0; 1522 filp->f_flags = file->f_flag; 1523 linux_set_current(td); 1524 linux_poll_wait_dequeue(filp); 1525 linux_get_fop(filp, &fop, &ldev); 1526 /* 1527 * Always use the real release function, if any, to avoid 1528 * leaking device resources: 1529 */ 1530 release = filp->f_op->release; 1531 if (release != NULL) 1532 error = -OPW(file, td, release(filp->f_vnode, filp)); 1533 funsetown(&filp->f_sigio); 1534 if (filp->f_vnode != NULL) 1535 vdrop(filp->f_vnode); 1536 linux_drop_fop(ldev); 1537 ldev = filp->f_cdev; 1538 if (ldev != NULL) 1539 linux_cdev_deref(ldev); 1540 kfree(filp); 1541 1542 return (error); 1543 } 1544 1545 static int 1546 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1547 struct thread *td) 1548 { 1549 struct linux_file *filp; 1550 const struct file_operations *fop; 1551 struct linux_cdev *ldev; 1552 struct fiodgname_arg *fgn; 1553 const char *p; 1554 int error, i; 1555 1556 error = 0; 1557 filp = (struct linux_file *)fp->f_data; 1558 filp->f_flags = fp->f_flag; 1559 linux_get_fop(filp, &fop, &ldev); 1560 1561 linux_set_current(td); 1562 switch (cmd) { 1563 case FIONBIO: 1564 break; 1565 case FIOASYNC: 1566 if (fop->fasync == NULL) 1567 break; 1568 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1569 break; 1570 case FIOSETOWN: 1571 error = fsetown(*(int *)data, &filp->f_sigio); 1572 if (error == 0) { 1573 if (fop->fasync == NULL) 1574 break; 1575 error = -OPW(fp, td, fop->fasync(0, filp, 1576 fp->f_flag & FASYNC)); 1577 } 1578 break; 1579 case FIOGETOWN: 1580 *(int *)data = fgetown(&filp->f_sigio); 1581 break; 1582 case FIODGNAME: 1583 #ifdef COMPAT_FREEBSD32 1584 case FIODGNAME_32: 1585 #endif 1586 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1587 error = ENXIO; 1588 break; 1589 } 1590 fgn = data; 1591 p = devtoname(filp->f_cdev->cdev); 1592 i = strlen(p) + 1; 1593 if (i > fgn->len) { 1594 error = EINVAL; 1595 break; 1596 } 1597 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1598 break; 1599 default: 1600 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1601 break; 1602 } 1603 linux_drop_fop(ldev); 1604 return (error); 1605 } 1606 1607 static int 1608 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1609 vm_prot_t maxprot, int flags, struct file *fp, 1610 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1611 { 1612 /* 1613 * Character devices do not provide private mappings 1614 * of any kind: 1615 */ 1616 if ((maxprot & VM_PROT_WRITE) == 0 && 1617 (prot & VM_PROT_WRITE) != 0) 1618 return (EACCES); 1619 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1620 return (EINVAL); 1621 1622 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1623 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1624 } 1625 1626 static int 1627 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1628 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1629 struct thread *td) 1630 { 1631 struct linux_file *filp; 1632 const struct file_operations *fop; 1633 struct linux_cdev *ldev; 1634 struct mount *mp; 1635 struct vnode *vp; 1636 vm_object_t object; 1637 vm_prot_t maxprot; 1638 int error; 1639 1640 filp = (struct linux_file *)fp->f_data; 1641 1642 vp = filp->f_vnode; 1643 if (vp == NULL) 1644 return (EOPNOTSUPP); 1645 1646 /* 1647 * Ensure that file and memory protections are 1648 * compatible. 
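 * maxprot starts from what the mount and the open mode allow (a noexec
 * mount refuses VM_PROT_EXECUTE, FREAD/FWRITE gate read and write for
 * shared mappings) and is finally intersected with cap_maxprot.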
1649 */ 1650 mp = vp->v_mount; 1651 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1652 maxprot = VM_PROT_NONE; 1653 if ((prot & VM_PROT_EXECUTE) != 0) 1654 return (EACCES); 1655 } else 1656 maxprot = VM_PROT_EXECUTE; 1657 if ((fp->f_flag & FREAD) != 0) 1658 maxprot |= VM_PROT_READ; 1659 else if ((prot & VM_PROT_READ) != 0) 1660 return (EACCES); 1661 1662 /* 1663 * If we are sharing potential changes via MAP_SHARED and we 1664 * are trying to get write permission although we opened it 1665 * without asking for it, bail out. 1666 * 1667 * Note that most character devices always share mappings. 1668 * 1669 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1670 * requests rather than doing it here. 1671 */ 1672 if ((flags & MAP_SHARED) != 0) { 1673 if ((fp->f_flag & FWRITE) != 0) 1674 maxprot |= VM_PROT_WRITE; 1675 else if ((prot & VM_PROT_WRITE) != 0) 1676 return (EACCES); 1677 } 1678 maxprot &= cap_maxprot; 1679 1680 linux_get_fop(filp, &fop, &ldev); 1681 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1682 &foff, fop, &object); 1683 if (error != 0) 1684 goto out; 1685 1686 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1687 foff, FALSE, td); 1688 if (error != 0) 1689 vm_object_deallocate(object); 1690 out: 1691 linux_drop_fop(ldev); 1692 return (error); 1693 } 1694 1695 static int 1696 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1697 struct thread *td) 1698 { 1699 struct linux_file *filp; 1700 struct vnode *vp; 1701 int error; 1702 1703 filp = (struct linux_file *)fp->f_data; 1704 if (filp->f_vnode == NULL) 1705 return (EOPNOTSUPP); 1706 1707 vp = filp->f_vnode; 1708 1709 vn_lock(vp, LK_SHARED | LK_RETRY); 1710 error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td); 1711 VOP_UNLOCK(vp); 1712 1713 return (error); 1714 } 1715 1716 static int 1717 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1718 struct filedesc *fdp) 1719 { 1720 struct linux_file *filp; 1721 struct vnode *vp; 1722 int error; 1723 1724 filp = fp->f_data; 1725 vp = filp->f_vnode; 1726 if (vp == NULL) { 1727 error = 0; 1728 kif->kf_type = KF_TYPE_DEV; 1729 } else { 1730 vref(vp); 1731 FILEDESC_SUNLOCK(fdp); 1732 error = vn_fill_kinfo_vnode(vp, kif); 1733 vrele(vp); 1734 kif->kf_type = KF_TYPE_VNODE; 1735 FILEDESC_SLOCK(fdp); 1736 } 1737 return (error); 1738 } 1739 1740 unsigned int 1741 linux_iminor(struct inode *inode) 1742 { 1743 struct linux_cdev *ldev; 1744 1745 if (inode == NULL || inode->v_rdev == NULL || 1746 inode->v_rdev->si_devsw != &linuxcdevsw) 1747 return (-1U); 1748 ldev = inode->v_rdev->si_drv1; 1749 if (ldev == NULL) 1750 return (-1U); 1751 1752 return (minor(ldev->dev)); 1753 } 1754 1755 struct fileops linuxfileops = { 1756 .fo_read = linux_file_read, 1757 .fo_write = linux_file_write, 1758 .fo_truncate = invfo_truncate, 1759 .fo_kqfilter = linux_file_kqfilter, 1760 .fo_stat = linux_file_stat, 1761 .fo_fill_kinfo = linux_file_fill_kinfo, 1762 .fo_poll = linux_file_poll, 1763 .fo_close = linux_file_close, 1764 .fo_ioctl = linux_file_ioctl, 1765 .fo_mmap = linux_file_mmap, 1766 .fo_chmod = invfo_chmod, 1767 .fo_chown = invfo_chown, 1768 .fo_sendfile = invfo_sendfile, 1769 .fo_flags = DFLAG_PASSABLE, 1770 }; 1771 1772 /* 1773 * Hash of vmmap addresses. This is infrequently accessed and does not 1774 * need to be particularly large. This is done because we must store the 1775 * caller's idea of the map size to properly unmap. 
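 * Both iounmap() and vunmap() rely on this: pmap_unmapdev() and
 * kva_free() need the length that was originally mapped, which the
 * callers of those functions do not pass in.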
1776 */ 1777 struct vmmap { 1778 LIST_ENTRY(vmmap) vm_next; 1779 void *vm_addr; 1780 unsigned long vm_size; 1781 }; 1782 1783 struct vmmaphd { 1784 struct vmmap *lh_first; 1785 }; 1786 #define VMMAP_HASH_SIZE 64 1787 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1788 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1789 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1790 static struct mtx vmmaplock; 1791 1792 static void 1793 vmmap_add(void *addr, unsigned long size) 1794 { 1795 struct vmmap *vmmap; 1796 1797 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1798 mtx_lock(&vmmaplock); 1799 vmmap->vm_size = size; 1800 vmmap->vm_addr = addr; 1801 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1802 mtx_unlock(&vmmaplock); 1803 } 1804 1805 static struct vmmap * 1806 vmmap_remove(void *addr) 1807 { 1808 struct vmmap *vmmap; 1809 1810 mtx_lock(&vmmaplock); 1811 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1812 if (vmmap->vm_addr == addr) 1813 break; 1814 if (vmmap) 1815 LIST_REMOVE(vmmap, vm_next); 1816 mtx_unlock(&vmmaplock); 1817 1818 return (vmmap); 1819 } 1820 1821 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1822 void * 1823 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1824 { 1825 void *addr; 1826 1827 addr = pmap_mapdev_attr(phys_addr, size, attr); 1828 if (addr == NULL) 1829 return (NULL); 1830 vmmap_add(addr, size); 1831 1832 return (addr); 1833 } 1834 #endif 1835 1836 void 1837 iounmap(void *addr) 1838 { 1839 struct vmmap *vmmap; 1840 1841 vmmap = vmmap_remove(addr); 1842 if (vmmap == NULL) 1843 return; 1844 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1845 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1846 #endif 1847 kfree(vmmap); 1848 } 1849 1850 void * 1851 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1852 { 1853 vm_offset_t off; 1854 size_t size; 1855 1856 size = count * PAGE_SIZE; 1857 off = kva_alloc(size); 1858 if (off == 0) 1859 return (NULL); 1860 vmmap_add((void *)off, size); 1861 pmap_qenter(off, pages, count); 1862 1863 return ((void *)off); 1864 } 1865 1866 void 1867 vunmap(void *addr) 1868 { 1869 struct vmmap *vmmap; 1870 1871 vmmap = vmmap_remove(addr); 1872 if (vmmap == NULL) 1873 return; 1874 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1875 kva_free((vm_offset_t)addr, vmmap->vm_size); 1876 kfree(vmmap); 1877 } 1878 1879 static char * 1880 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1881 { 1882 unsigned int len; 1883 char *p; 1884 va_list aq; 1885 1886 va_copy(aq, ap); 1887 len = vsnprintf(NULL, 0, fmt, aq); 1888 va_end(aq); 1889 1890 if (dev != NULL) 1891 p = devm_kmalloc(dev, len + 1, gfp); 1892 else 1893 p = kmalloc(len + 1, gfp); 1894 if (p != NULL) 1895 vsnprintf(p, len + 1, fmt, ap); 1896 1897 return (p); 1898 } 1899 1900 char * 1901 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1902 { 1903 1904 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1905 } 1906 1907 char * 1908 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1909 { 1910 va_list ap; 1911 char *p; 1912 1913 va_start(ap, fmt); 1914 p = devm_kvasprintf(dev, gfp, fmt, ap); 1915 va_end(ap); 1916 1917 return (p); 1918 } 1919 1920 char * 1921 kasprintf(gfp_t gfp, const char *fmt, ...) 
1922 { 1923 va_list ap; 1924 char *p; 1925 1926 va_start(ap, fmt); 1927 p = kvasprintf(gfp, fmt, ap); 1928 va_end(ap); 1929 1930 return (p); 1931 } 1932 1933 static void 1934 linux_timer_callback_wrapper(void *context) 1935 { 1936 struct timer_list *timer; 1937 1938 timer = context; 1939 1940 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1941 /* try again later */ 1942 callout_reset(&timer->callout, 1, 1943 &linux_timer_callback_wrapper, timer); 1944 return; 1945 } 1946 1947 timer->function(timer->data); 1948 } 1949 1950 int 1951 mod_timer(struct timer_list *timer, int expires) 1952 { 1953 int ret; 1954 1955 timer->expires = expires; 1956 ret = callout_reset(&timer->callout, 1957 linux_timer_jiffies_until(expires), 1958 &linux_timer_callback_wrapper, timer); 1959 1960 MPASS(ret == 0 || ret == 1); 1961 1962 return (ret == 1); 1963 } 1964 1965 void 1966 add_timer(struct timer_list *timer) 1967 { 1968 1969 callout_reset(&timer->callout, 1970 linux_timer_jiffies_until(timer->expires), 1971 &linux_timer_callback_wrapper, timer); 1972 } 1973 1974 void 1975 add_timer_on(struct timer_list *timer, int cpu) 1976 { 1977 1978 callout_reset_on(&timer->callout, 1979 linux_timer_jiffies_until(timer->expires), 1980 &linux_timer_callback_wrapper, timer, cpu); 1981 } 1982 1983 int 1984 del_timer(struct timer_list *timer) 1985 { 1986 1987 if (callout_stop(&(timer)->callout) == -1) 1988 return (0); 1989 return (1); 1990 } 1991 1992 int 1993 del_timer_sync(struct timer_list *timer) 1994 { 1995 1996 if (callout_drain(&(timer)->callout) == -1) 1997 return (0); 1998 return (1); 1999 } 2000 2001 /* greatest common divisor, Euclid equation */ 2002 static uint64_t 2003 lkpi_gcd_64(uint64_t a, uint64_t b) 2004 { 2005 uint64_t an; 2006 uint64_t bn; 2007 2008 while (b != 0) { 2009 an = b; 2010 bn = a % b; 2011 a = an; 2012 b = bn; 2013 } 2014 return (a); 2015 } 2016 2017 uint64_t lkpi_nsec2hz_rem; 2018 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2019 uint64_t lkpi_nsec2hz_max; 2020 2021 uint64_t lkpi_usec2hz_rem; 2022 uint64_t lkpi_usec2hz_div = 1000000ULL; 2023 uint64_t lkpi_usec2hz_max; 2024 2025 uint64_t lkpi_msec2hz_rem; 2026 uint64_t lkpi_msec2hz_div = 1000ULL; 2027 uint64_t lkpi_msec2hz_max; 2028 2029 static void 2030 linux_timer_init(void *arg) 2031 { 2032 uint64_t gcd; 2033 2034 /* 2035 * Compute an internal HZ value which can divide 2**32 to 2036 * avoid timer rounding problems when the tick value wraps 2037 * around 2**32: 2038 */ 2039 linux_timer_hz_mask = 1; 2040 while (linux_timer_hz_mask < (unsigned long)hz) 2041 linux_timer_hz_mask *= 2; 2042 linux_timer_hz_mask--; 2043 2044 /* compute some internal constants */ 2045 2046 lkpi_nsec2hz_rem = hz; 2047 lkpi_usec2hz_rem = hz; 2048 lkpi_msec2hz_rem = hz; 2049 2050 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2051 lkpi_nsec2hz_rem /= gcd; 2052 lkpi_nsec2hz_div /= gcd; 2053 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2054 2055 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2056 lkpi_usec2hz_rem /= gcd; 2057 lkpi_usec2hz_div /= gcd; 2058 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2059 2060 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2061 lkpi_msec2hz_rem /= gcd; 2062 lkpi_msec2hz_div /= gcd; 2063 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2064 } 2065 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2066 2067 void 2068 linux_complete_common(struct completion *c, int all) 2069 { 2070 int wakeup_swapper; 2071 2072 sleepq_lock(c); 2073 if (all) { 2074 c->done = UINT_MAX; 2075 wakeup_swapper = 
sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2076 } else { 2077 if (c->done != UINT_MAX) 2078 c->done++; 2079 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2080 } 2081 sleepq_release(c); 2082 if (wakeup_swapper) 2083 kick_proc0(); 2084 } 2085 2086 /* 2087 * Indefinite wait for done != 0 with or without signals. 2088 */ 2089 int 2090 linux_wait_for_common(struct completion *c, int flags) 2091 { 2092 struct task_struct *task; 2093 int error; 2094 2095 if (SCHEDULER_STOPPED()) 2096 return (0); 2097 2098 task = current; 2099 2100 if (flags != 0) 2101 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2102 else 2103 flags = SLEEPQ_SLEEP; 2104 error = 0; 2105 for (;;) { 2106 sleepq_lock(c); 2107 if (c->done) 2108 break; 2109 sleepq_add(c, NULL, "completion", flags, 0); 2110 if (flags & SLEEPQ_INTERRUPTIBLE) { 2111 DROP_GIANT(); 2112 error = -sleepq_wait_sig(c, 0); 2113 PICKUP_GIANT(); 2114 if (error != 0) { 2115 linux_schedule_save_interrupt_value(task, error); 2116 error = -ERESTARTSYS; 2117 goto intr; 2118 } 2119 } else { 2120 DROP_GIANT(); 2121 sleepq_wait(c, 0); 2122 PICKUP_GIANT(); 2123 } 2124 } 2125 if (c->done != UINT_MAX) 2126 c->done--; 2127 sleepq_release(c); 2128 2129 intr: 2130 return (error); 2131 } 2132 2133 /* 2134 * Time limited wait for done != 0 with or without signals. 2135 */ 2136 int 2137 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2138 { 2139 struct task_struct *task; 2140 int end = jiffies + timeout; 2141 int error; 2142 2143 if (SCHEDULER_STOPPED()) 2144 return (0); 2145 2146 task = current; 2147 2148 if (flags != 0) 2149 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2150 else 2151 flags = SLEEPQ_SLEEP; 2152 2153 for (;;) { 2154 sleepq_lock(c); 2155 if (c->done) 2156 break; 2157 sleepq_add(c, NULL, "completion", flags, 0); 2158 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2159 2160 DROP_GIANT(); 2161 if (flags & SLEEPQ_INTERRUPTIBLE) 2162 error = -sleepq_timedwait_sig(c, 0); 2163 else 2164 error = -sleepq_timedwait(c, 0); 2165 PICKUP_GIANT(); 2166 2167 if (error != 0) { 2168 /* check for timeout */ 2169 if (error == -EWOULDBLOCK) { 2170 error = 0; /* timeout */ 2171 } else { 2172 /* signal happened */ 2173 linux_schedule_save_interrupt_value(task, error); 2174 error = -ERESTARTSYS; 2175 } 2176 goto done; 2177 } 2178 } 2179 if (c->done != UINT_MAX) 2180 c->done--; 2181 sleepq_release(c); 2182 2183 /* return how many jiffies are left */ 2184 error = linux_timer_jiffies_until(end); 2185 done: 2186 return (error); 2187 } 2188 2189 int 2190 linux_try_wait_for_completion(struct completion *c) 2191 { 2192 int isdone; 2193 2194 sleepq_lock(c); 2195 isdone = (c->done != 0); 2196 if (c->done != 0 && c->done != UINT_MAX) 2197 c->done--; 2198 sleepq_release(c); 2199 return (isdone); 2200 } 2201 2202 int 2203 linux_completion_done(struct completion *c) 2204 { 2205 int isdone; 2206 2207 sleepq_lock(c); 2208 isdone = (c->done != 0); 2209 sleepq_release(c); 2210 return (isdone); 2211 } 2212 2213 static void 2214 linux_cdev_deref(struct linux_cdev *ldev) 2215 { 2216 if (refcount_release(&ldev->refs) && 2217 ldev->kobj.ktype == &linux_cdev_ktype) 2218 kfree(ldev); 2219 } 2220 2221 static void 2222 linux_cdev_release(struct kobject *kobj) 2223 { 2224 struct linux_cdev *cdev; 2225 struct kobject *parent; 2226 2227 cdev = container_of(kobj, struct linux_cdev, kobj); 2228 parent = kobj->parent; 2229 linux_destroy_dev(cdev); 2230 linux_cdev_deref(cdev); 2231 kobject_put(parent); 2232 } 2233 2234 static void 2235 linux_cdev_static_release(struct kobject 
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
        if (refcount_release(&ldev->refs) &&
            ldev->kobj.ktype == &linux_cdev_ktype)
                kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
        struct linux_cdev *cdev;
        struct kobject *parent;

        cdev = container_of(kobj, struct linux_cdev, kobj);
        parent = kobj->parent;
        linux_destroy_dev(cdev);
        linux_cdev_deref(cdev);
        kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
        struct cdev *cdev;
        struct linux_cdev *ldev;

        ldev = container_of(kobj, struct linux_cdev, kobj);
        cdev = ldev->cdev;
        if (cdev != NULL) {
                destroy_dev(cdev);
                ldev->cdev = NULL;
        }
        kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
        int ret;

        if (dev->devt != 0) {
                /* Set parent kernel object. */
                ldev->kobj.parent = &dev->kobj;

                /*
                 * Unlike Linux we require the kobject of the
                 * character device structure to have a valid name
                 * before calling this function:
                 */
                if (ldev->kobj.name == NULL)
                        return (-EINVAL);

                ret = cdev_add(ldev, dev->devt, 1);
                if (ret)
                        return (ret);
        }
        ret = device_add(dev);
        if (ret != 0 && dev->devt != 0)
                cdev_del(ldev);
        return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
        device_del(dev);

        if (dev->devt != 0)
                cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

        if (ldev->cdev == NULL)
                return;

        MPASS((ldev->siref & LDEV_SI_DTR) == 0);
        MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

        atomic_set_int(&ldev->siref, LDEV_SI_DTR);
        while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
                pause("ldevdtr", hz / 4);

        destroy_dev(ldev->cdev);
        ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
        .release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
        .release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        if (linkstate == LINK_STATE_UP)
                nb->notifier_call(nb, NETDEV_UP, &ni);
        else
                nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}
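/*
 * Usage sketch (illustrative only): a consumer supplies a notifier_block
 * whose notifier_call receives the NETDEV_* events synthesized by the
 * trampolines above; it is registered via register_netdevice_notifier()
 * below.  The "mydrv" names are hypothetical.
 *
 *	static int
 *	mydrv_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *ptr)
 *	{
 *		struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			// react to link state changes on ndev
 *			break;
 *		default:
 *			break;
 *		}
 *		return (NOTIFY_DONE);
 *	}
 *
 *	static struct notifier_block mydrv_nb = {
 *		.notifier_call = mydrv_netdev_event,
 *	};
 */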
int
register_netdevice_notifier(struct notifier_block *nb)
{

        nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
            ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
        nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
            ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
        nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
            ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
        nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
            iflladdr_event, linux_handle_iflladdr_event, nb, 0);

        return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

        nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
            ifaddr_event, linux_handle_ifaddr_event, nb, 0);
        return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

        EVENTHANDLER_DEREGISTER(ifnet_link_event,
            nb->tags[NETDEV_UP]);
        EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
            nb->tags[NETDEV_REGISTER]);
        EVENTHANDLER_DEREGISTER(ifnet_departure_event,
            nb->tags[NETDEV_UNREGISTER]);
        EVENTHANDLER_DEREGISTER(iflladdr_event,
            nb->tags[NETDEV_CHANGEADDR]);

        return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

        EVENTHANDLER_DEREGISTER(ifaddr_event,
            nb->tags[NETDEV_CHANGEIFADDR]);

        return (0);
}

struct list_sort_thunk {
        int (*cmp)(void *, struct list_head *, struct list_head *);
        void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
        struct list_head *le1, *le2;
        struct list_sort_thunk *thunk;

        thunk = priv;
        le1 = *(__DECONST(struct list_head **, d1));
        le2 = *(__DECONST(struct list_head **, d2));
        return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
        struct list_sort_thunk thunk;
        struct list_head **ar, *le;
        size_t count, i;

        count = 0;
        list_for_each(le, head)
                count++;
        ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
        i = 0;
        list_for_each(le, head)
                ar[i++] = le;
        thunk.cmp = cmp;
        thunk.priv = priv;
        qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
        INIT_LIST_HEAD(head);
        for (i = 0; i < count; i++)
                list_add_tail(ar[i], head);
        free(ar, M_KMALLOC);
}
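/*
 * Usage sketch (illustrative only): list_sort() reorders the linked list
 * in place using the supplied comparator.  The element type, field names
 * and "myhead" below are hypothetical.
 *
 *	struct myent {
 *		int key;
 *		struct list_head entry;
 *	};
 *
 *	static int
 *	myent_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct myent *ea = list_entry(a, struct myent, entry);
 *		struct myent *eb = list_entry(b, struct myent, entry);
 *
 *		return (ea->key - eb->key);
 *	}
 *
 *	list_sort(NULL, &myhead, myent_cmp);	// ascending by "key"
 */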
void
lkpi_irq_release(struct device *dev, struct irq_ent *irqe)
{

        if (irqe->tag != NULL)
                bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag);
        if (irqe->res != NULL)
                bus_release_resource(dev->bsddev, SYS_RES_IRQ,
                    rman_get_rid(irqe->res), irqe->res);
        list_del(&irqe->links);
}

void
lkpi_devm_irq_release(struct device *dev, void *p)
{
        struct irq_ent *irqe;

        if (dev == NULL || p == NULL)
                return;

        irqe = p;
        lkpi_irq_release(dev, irqe);
}

void
linux_irq_handler(void *ent)
{
        struct irq_ent *irqe;

        if (linux_set_current_flags(curthread, M_NOWAIT))
                return;

        irqe = ent;
        if (irqe->handler(irqe->irq, irqe->arg) == IRQ_WAKE_THREAD &&
            irqe->thread_handler != NULL) {
                THREAD_SLEEPING_OK();
                irqe->thread_handler(irqe->irq, irqe->arg);
                THREAD_NO_SLEEPING();
        }
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

        pmap_invalidate_cache();
        return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

        smp_rendezvous(smp_no_rendezvous_barrier, callback,
            smp_no_rendezvous_barrier, data);
        return (0);
}

int
linux_in_atomic(void)
{

        return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
        dev_t dev = MKDEV(major, minor);
        struct cdev *cdev;

        dev_lock();
        LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
                struct linux_cdev *ldev = cdev->si_drv1;
                if (ldev->dev == dev &&
                    strcmp(kobject_name(&ldev->kobj), name) == 0) {
                        break;
                }
        }
        dev_unlock();

        return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
        struct linux_cdev *cdev;
        int ret = 0;
        int i;

        for (i = baseminor; i < baseminor + count; i++) {
                cdev = cdev_alloc();
                cdev->ops = fops;
                kobject_set_name(&cdev->kobj, name);

                ret = cdev_add(cdev, makedev(major, i), 1);
                if (ret != 0)
                        break;
        }
        return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
        struct linux_cdev *cdev;
        int ret = 0;
        int i;

        for (i = baseminor; i < baseminor + count; i++) {
                cdev = cdev_alloc();
                cdev->ops = fops;
                kobject_set_name(&cdev->kobj, name);

                ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
                if (ret != 0)
                        break;
        }
        return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
        struct linux_cdev *cdevp;
        int i;

        for (i = baseminor; i < baseminor + count; i++) {
                cdevp = linux_find_cdev(name, major, i);
                if (cdevp != NULL)
                        cdev_del(cdevp);
        }
}
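/*
 * Usage sketch (illustrative only): the chrdev helpers above are normally
 * driven through Linux-style registration calls.  The operations structure,
 * handlers, major number and device name below are hypothetical.
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner = THIS_MODULE,
 *		.open = mydrv_open,
 *		.release = mydrv_release,
 *	};
 *
 *	// create minor 0 of major MYDRV_MAJOR named "mydrv"
 *	error = __register_chrdev(MYDRV_MAJOR, 0, 1, "mydrv", &mydrv_fops);
 *	...
 *	__unregister_chrdev(MYDRV_MAJOR, 0, 1, "mydrv");
 */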
void
linux_dump_stack(void)
{
#ifdef STACK
        struct stack st;

        stack_zero(&st);
        stack_save(&st);
        stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

        return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
            lkpi_net_maxpps));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
        struct sysctl_oid *rootoid;
        int i;

#if defined(__i386__) || defined(__amd64__)
        linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
        rw_init(&linux_vma_lock, "lkpi-vma-lock");

        rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
            OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
        kobject_init(&linux_class_root, &linux_class_ktype);
        kobject_set_name(&linux_class_root, "class");
        linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
            OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
        kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
        kobject_set_name(&linux_root_device.kobj, "device");
        linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
            SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
        linux_root_device.bsddev = root_bus;
        linux_class_misc.name = "misc";
        class_register(&linux_class_misc);
        INIT_LIST_HEAD(&pci_drivers);
        INIT_LIST_HEAD(&pci_devices);
        spin_lock_init(&pci_lock);
        mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
        for (i = 0; i < VMMAP_HASH_SIZE; i++)
                LIST_INIT(&vmmaphead[i]);
        init_waitqueue_head(&linux_bit_waitq);
        init_waitqueue_head(&linux_var_waitq);

        CPU_COPY(&all_cpus, &cpu_online_mask);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
        linux_kobject_kfree_name(&linux_class_root);
        linux_kobject_kfree_name(&linux_root_device.kobj);
        linux_kobject_kfree_name(&linux_class_misc.kobj);

        mtx_destroy(&vmmaplock);
        spin_lock_destroy(&pci_lock);
        rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
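/*
 * Illustrative sketch only: the assertion above is what keeps round trips
 * of this common Linux idiom lossless on all supported platforms.
 *
 *	void *ptr;
 *	unsigned long cookie = (unsigned long)ptr;	// Linux-style cast
 *	void *back = (void *)cookie;			// loses no bits
 */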