1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/sysent.h> 55 #include <sys/time.h> 56 #include <sys/user.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_object.h> 61 #include <vm/vm_page.h> 62 #include <vm/vm_pager.h> 63 64 #include <machine/stdarg.h> 65 66 #if defined(__i386__) || defined(__amd64__) 67 #include <machine/md_var.h> 68 #endif 69 70 #include <linux/kobject.h> 71 #include <linux/cpu.h> 72 #include <linux/device.h> 73 #include <linux/slab.h> 74 #include <linux/module.h> 75 #include <linux/moduleparam.h> 76 #include <linux/cdev.h> 77 #include <linux/file.h> 78 #include <linux/sysfs.h> 79 #include <linux/mm.h> 80 #include <linux/io.h> 81 #include <linux/vmalloc.h> 82 #include <linux/netdevice.h> 83 #include <linux/timer.h> 84 #include <linux/interrupt.h> 85 #include <linux/uaccess.h> 86 #include <linux/list.h> 87 #include <linux/kthread.h> 88 #include <linux/kernel.h> 89 #include <linux/compat.h> 90 #include <linux/poll.h> 91 #include <linux/smp.h> 92 #include <linux/wait_bit.h> 93 #include <linux/rcupdate.h> 94 95 #if defined(__i386__) || defined(__amd64__) 96 #include <asm/smp.h> 97 #endif 98 99 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 100 "LinuxKPI parameters"); 101 102 int linuxkpi_debug; 103 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 104 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 105 106 static struct timeval lkpi_net_lastlog; 107 static int lkpi_net_curpps; 108 static int lkpi_net_maxpps = 99; 109 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 110 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 111 112 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 113 114 #include <linux/rbtree.h> 115 /* Undo Linux compat changes. */ 116 #undef RB_ROOT 117 #undef file 118 #undef cdev 119 #define RB_ROOT(head) (head)->rbh_root 120 121 static void linux_destroy_dev(struct linux_cdev *); 122 static void linux_cdev_deref(struct linux_cdev *ldev); 123 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 124 125 cpumask_t cpu_online_mask; 126 struct kobject linux_class_root; 127 struct device linux_root_device; 128 struct class linux_class_misc; 129 struct list_head pci_drivers; 130 struct list_head pci_devices; 131 spinlock_t pci_lock; 132 133 unsigned long linux_timer_hz_mask; 134 135 wait_queue_head_t linux_bit_waitq; 136 wait_queue_head_t linux_var_waitq; 137 138 int 139 panic_cmp(struct rb_node *one, struct rb_node *two) 140 { 141 panic("no cmp"); 142 } 143 144 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 145 146 int 147 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 148 { 149 va_list tmp_va; 150 int len; 151 char *old; 152 char *name; 153 char dummy; 154 155 old = kobj->name; 156 157 if (old && fmt == NULL) 158 return (0); 159 160 /* compute length of string */ 161 va_copy(tmp_va, args); 162 len = vsnprintf(&dummy, 0, fmt, tmp_va); 163 va_end(tmp_va); 164 165 /* account for zero termination */ 166 len++; 167 168 /* check for error */ 169 if (len < 1) 170 return (-EINVAL); 171 172 /* allocate memory for string */ 173 name = kzalloc(len, GFP_KERNEL); 174 if (name == NULL) 175 return (-ENOMEM); 176 vsnprintf(name, len, fmt, args); 177 kobj->name = name; 178 179 /* free old string */ 180 kfree(old); 181 182 /* filter new string */ 183 for (; *name != '\0'; name++) 184 if (*name == '/') 185 *name = '!'; 186 return (0); 187 } 188 189 int 190 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 191 { 192 va_list args; 193 int error; 194 195 va_start(args, fmt); 196 error = kobject_set_name_vargs(kobj, fmt, args); 197 va_end(args); 198 199 return (error); 200 } 201 202 static int 203 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 204 { 205 const struct kobj_type *t; 206 int error; 207 208 kobj->parent = parent; 209 error = sysfs_create_dir(kobj); 210 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 211 struct attribute **attr; 212 t = kobj->ktype; 213 214 for (attr = t->default_attrs; *attr != NULL; attr++) { 215 error = sysfs_create_file(kobj, *attr); 216 if (error) 217 break; 218 } 219 if (error) 220 sysfs_remove_dir(kobj); 221 } 222 return (error); 223 } 224 225 int 226 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 227 { 228 va_list args; 229 int error; 230 231 va_start(args, fmt); 232 error = kobject_set_name_vargs(kobj, fmt, args); 233 va_end(args); 234 if (error) 235 return (error); 236 237 return kobject_add_complete(kobj, parent); 238 } 239 240 void 241 linux_kobject_release(struct kref *kref) 242 { 243 struct kobject *kobj; 244 char *name; 245 246 kobj = container_of(kref, struct kobject, kref); 247 sysfs_remove_dir(kobj); 248 name = kobj->name; 249 if (kobj->ktype && kobj->ktype->release) 250 kobj->ktype->release(kobj); 251 kfree(name); 252 } 253 254 static void 255 linux_kobject_kfree(struct kobject *kobj) 256 { 257 kfree(kobj); 258 } 259 260 static void 261 linux_kobject_kfree_name(struct kobject *kobj) 262 { 263 if (kobj) { 264 kfree(kobj->name); 265 } 266 } 267 268 const struct kobj_type linux_kfree_type = { 269 .release = linux_kobject_kfree 270 }; 271 272 static void 273 linux_device_release(struct device *dev) 274 { 275 pr_debug("linux_device_release: %s\n", dev_name(dev)); 276 kfree(dev); 277 } 278 279 static ssize_t 280 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 281 { 282 struct class_attribute *dattr; 283 ssize_t error; 284 285 dattr = container_of(attr, struct class_attribute, attr); 286 error = -EIO; 287 if (dattr->show) 288 error = dattr->show(container_of(kobj, struct class, kobj), 289 dattr, buf); 290 return (error); 291 } 292 293 static ssize_t 294 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 295 size_t count) 296 { 297 struct class_attribute *dattr; 298 ssize_t error; 299 300 dattr = container_of(attr, struct class_attribute, attr); 301 error = -EIO; 302 if (dattr->store) 303 error = dattr->store(container_of(kobj, struct class, kobj), 304 dattr, buf, count); 305 return (error); 306 } 307 308 static void 309 linux_class_release(struct kobject *kobj) 310 { 311 struct class *class; 312 313 class = container_of(kobj, struct class, kobj); 314 if (class->class_release) 315 class->class_release(class); 316 } 317 318 static const struct sysfs_ops linux_class_sysfs = { 319 .show = linux_class_show, 320 .store = linux_class_store, 321 }; 322 323 const struct kobj_type linux_class_ktype = { 324 .release = linux_class_release, 325 .sysfs_ops = &linux_class_sysfs 326 }; 327 328 static void 329 linux_dev_release(struct kobject *kobj) 330 { 331 struct device *dev; 332 333 dev = container_of(kobj, struct device, kobj); 334 /* This is the precedence defined by linux. */ 335 if (dev->release) 336 dev->release(dev); 337 else if (dev->class && dev->class->dev_release) 338 dev->class->dev_release(dev); 339 } 340 341 static ssize_t 342 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 343 { 344 struct device_attribute *dattr; 345 ssize_t error; 346 347 dattr = container_of(attr, struct device_attribute, attr); 348 error = -EIO; 349 if (dattr->show) 350 error = dattr->show(container_of(kobj, struct device, kobj), 351 dattr, buf); 352 return (error); 353 } 354 355 static ssize_t 356 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 357 size_t count) 358 { 359 struct device_attribute *dattr; 360 ssize_t error; 361 362 dattr = container_of(attr, struct device_attribute, attr); 363 error = -EIO; 364 if (dattr->store) 365 error = dattr->store(container_of(kobj, struct device, kobj), 366 dattr, buf, count); 367 return (error); 368 } 369 370 static const struct sysfs_ops linux_dev_sysfs = { 371 .show = linux_dev_show, 372 .store = linux_dev_store, 373 }; 374 375 const struct kobj_type linux_dev_ktype = { 376 .release = linux_dev_release, 377 .sysfs_ops = &linux_dev_sysfs 378 }; 379 380 struct device * 381 device_create(struct class *class, struct device *parent, dev_t devt, 382 void *drvdata, const char *fmt, ...) 383 { 384 struct device *dev; 385 va_list args; 386 387 dev = kzalloc(sizeof(*dev), M_WAITOK); 388 dev->parent = parent; 389 dev->class = class; 390 dev->devt = devt; 391 dev->driver_data = drvdata; 392 dev->release = linux_device_release; 393 va_start(args, fmt); 394 kobject_set_name_vargs(&dev->kobj, fmt, args); 395 va_end(args); 396 device_register(dev); 397 398 return (dev); 399 } 400 401 int 402 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 403 struct kobject *parent, const char *fmt, ...) 404 { 405 va_list args; 406 int error; 407 408 kobject_init(kobj, ktype); 409 kobj->ktype = ktype; 410 kobj->parent = parent; 411 kobj->name = NULL; 412 413 va_start(args, fmt); 414 error = kobject_set_name_vargs(kobj, fmt, args); 415 va_end(args); 416 if (error) 417 return (error); 418 return kobject_add_complete(kobj, parent); 419 } 420 421 static void 422 linux_kq_lock(void *arg) 423 { 424 spinlock_t *s = arg; 425 426 spin_lock(s); 427 } 428 static void 429 linux_kq_unlock(void *arg) 430 { 431 spinlock_t *s = arg; 432 433 spin_unlock(s); 434 } 435 436 static void 437 linux_kq_assert_lock(void *arg, int what) 438 { 439 #ifdef INVARIANTS 440 spinlock_t *s = arg; 441 442 if (what == LA_LOCKED) 443 mtx_assert(&s->m, MA_OWNED); 444 else 445 mtx_assert(&s->m, MA_NOTOWNED); 446 #endif 447 } 448 449 static void 450 linux_file_kqfilter_poll(struct linux_file *, int); 451 452 struct linux_file * 453 linux_file_alloc(void) 454 { 455 struct linux_file *filp; 456 457 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 458 459 /* set initial refcount */ 460 filp->f_count = 1; 461 462 /* setup fields needed by kqueue support */ 463 spin_lock_init(&filp->f_kqlock); 464 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 465 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 466 467 return (filp); 468 } 469 470 void 471 linux_file_free(struct linux_file *filp) 472 { 473 if (filp->_file == NULL) { 474 if (filp->f_shmem != NULL) 475 vm_object_deallocate(filp->f_shmem); 476 kfree_rcu(filp, rcu); 477 } else { 478 /* 479 * The close method of the character device or file 480 * will free the linux_file structure: 481 */ 482 _fdrop(filp->_file, curthread); 483 } 484 } 485 486 static int 487 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 488 vm_page_t *mres) 489 { 490 struct vm_area_struct *vmap; 491 492 vmap = linux_cdev_handle_find(vm_obj->handle); 493 494 MPASS(vmap != NULL); 495 MPASS(vmap->vm_private_data == vm_obj->handle); 496 497 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 498 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 499 vm_page_t page; 500 501 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 502 /* 503 * If the passed in result page is a fake 504 * page, update it with the new physical 505 * address. 506 */ 507 page = *mres; 508 vm_page_updatefake(page, paddr, vm_obj->memattr); 509 } else { 510 /* 511 * Replace the passed in "mres" page with our 512 * own fake page and free up the all of the 513 * original pages. 514 */ 515 VM_OBJECT_WUNLOCK(vm_obj); 516 page = vm_page_getfake(paddr, vm_obj->memattr); 517 VM_OBJECT_WLOCK(vm_obj); 518 519 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 520 *mres = page; 521 } 522 vm_page_valid(page); 523 return (VM_PAGER_OK); 524 } 525 return (VM_PAGER_FAIL); 526 } 527 528 static int 529 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 530 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 531 { 532 struct vm_area_struct *vmap; 533 int err; 534 535 /* get VM area structure */ 536 vmap = linux_cdev_handle_find(vm_obj->handle); 537 MPASS(vmap != NULL); 538 MPASS(vmap->vm_private_data == vm_obj->handle); 539 540 VM_OBJECT_WUNLOCK(vm_obj); 541 542 linux_set_current(curthread); 543 544 down_write(&vmap->vm_mm->mmap_sem); 545 if (unlikely(vmap->vm_ops == NULL)) { 546 err = VM_FAULT_SIGBUS; 547 } else { 548 struct vm_fault vmf; 549 550 /* fill out VM fault structure */ 551 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 552 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 553 vmf.pgoff = 0; 554 vmf.page = NULL; 555 vmf.vma = vmap; 556 557 vmap->vm_pfn_count = 0; 558 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 559 vmap->vm_obj = vm_obj; 560 561 err = vmap->vm_ops->fault(vmap, &vmf); 562 563 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 564 kern_yield(PRI_USER); 565 err = vmap->vm_ops->fault(vmap, &vmf); 566 } 567 } 568 569 /* translate return code */ 570 switch (err) { 571 case VM_FAULT_OOM: 572 err = VM_PAGER_AGAIN; 573 break; 574 case VM_FAULT_SIGBUS: 575 err = VM_PAGER_BAD; 576 break; 577 case VM_FAULT_NOPAGE: 578 /* 579 * By contract the fault handler will return having 580 * busied all the pages itself. If pidx is already 581 * found in the object, it will simply xbusy the first 582 * page and return with vm_pfn_count set to 1. 583 */ 584 *first = vmap->vm_pfn_first; 585 *last = *first + vmap->vm_pfn_count - 1; 586 err = VM_PAGER_OK; 587 break; 588 default: 589 err = VM_PAGER_ERROR; 590 break; 591 } 592 up_write(&vmap->vm_mm->mmap_sem); 593 VM_OBJECT_WLOCK(vm_obj); 594 return (err); 595 } 596 597 static struct rwlock linux_vma_lock; 598 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 599 TAILQ_HEAD_INITIALIZER(linux_vma_head); 600 601 static void 602 linux_cdev_handle_free(struct vm_area_struct *vmap) 603 { 604 /* Drop reference on vm_file */ 605 if (vmap->vm_file != NULL) 606 fput(vmap->vm_file); 607 608 /* Drop reference on mm_struct */ 609 mmput(vmap->vm_mm); 610 611 kfree(vmap); 612 } 613 614 static void 615 linux_cdev_handle_remove(struct vm_area_struct *vmap) 616 { 617 rw_wlock(&linux_vma_lock); 618 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 619 rw_wunlock(&linux_vma_lock); 620 } 621 622 static struct vm_area_struct * 623 linux_cdev_handle_find(void *handle) 624 { 625 struct vm_area_struct *vmap; 626 627 rw_rlock(&linux_vma_lock); 628 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 629 if (vmap->vm_private_data == handle) 630 break; 631 } 632 rw_runlock(&linux_vma_lock); 633 return (vmap); 634 } 635 636 static int 637 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 638 vm_ooffset_t foff, struct ucred *cred, u_short *color) 639 { 640 641 MPASS(linux_cdev_handle_find(handle) != NULL); 642 *color = 0; 643 return (0); 644 } 645 646 static void 647 linux_cdev_pager_dtor(void *handle) 648 { 649 const struct vm_operations_struct *vm_ops; 650 struct vm_area_struct *vmap; 651 652 vmap = linux_cdev_handle_find(handle); 653 MPASS(vmap != NULL); 654 655 /* 656 * Remove handle before calling close operation to prevent 657 * other threads from reusing the handle pointer. 658 */ 659 linux_cdev_handle_remove(vmap); 660 661 down_write(&vmap->vm_mm->mmap_sem); 662 vm_ops = vmap->vm_ops; 663 if (likely(vm_ops != NULL)) 664 vm_ops->close(vmap); 665 up_write(&vmap->vm_mm->mmap_sem); 666 667 linux_cdev_handle_free(vmap); 668 } 669 670 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 671 { 672 /* OBJT_MGTDEVICE */ 673 .cdev_pg_populate = linux_cdev_pager_populate, 674 .cdev_pg_ctor = linux_cdev_pager_ctor, 675 .cdev_pg_dtor = linux_cdev_pager_dtor 676 }, 677 { 678 /* OBJT_DEVICE */ 679 .cdev_pg_fault = linux_cdev_pager_fault, 680 .cdev_pg_ctor = linux_cdev_pager_ctor, 681 .cdev_pg_dtor = linux_cdev_pager_dtor 682 }, 683 }; 684 685 int 686 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 687 unsigned long size) 688 { 689 vm_object_t obj; 690 vm_page_t m; 691 692 obj = vma->vm_obj; 693 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 694 return (-ENOTSUP); 695 VM_OBJECT_RLOCK(obj); 696 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 697 m != NULL && m->pindex < OFF_TO_IDX(address + size); 698 m = TAILQ_NEXT(m, listq)) 699 pmap_remove_all(m); 700 VM_OBJECT_RUNLOCK(obj); 701 return (0); 702 } 703 704 static struct file_operations dummy_ldev_ops = { 705 /* XXXKIB */ 706 }; 707 708 static struct linux_cdev dummy_ldev = { 709 .ops = &dummy_ldev_ops, 710 }; 711 712 #define LDEV_SI_DTR 0x0001 713 #define LDEV_SI_REF 0x0002 714 715 static void 716 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 717 struct linux_cdev **dev) 718 { 719 struct linux_cdev *ldev; 720 u_int siref; 721 722 ldev = filp->f_cdev; 723 *fop = filp->f_op; 724 if (ldev != NULL) { 725 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 726 refcount_acquire(&ldev->refs); 727 } else { 728 for (siref = ldev->siref;;) { 729 if ((siref & LDEV_SI_DTR) != 0) { 730 ldev = &dummy_ldev; 731 *fop = ldev->ops; 732 siref = ldev->siref; 733 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 734 } else if (atomic_fcmpset_int(&ldev->siref, 735 &siref, siref + LDEV_SI_REF)) { 736 break; 737 } 738 } 739 } 740 } 741 *dev = ldev; 742 } 743 744 static void 745 linux_drop_fop(struct linux_cdev *ldev) 746 { 747 748 if (ldev == NULL) 749 return; 750 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 751 linux_cdev_deref(ldev); 752 } else { 753 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 754 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 755 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 756 } 757 } 758 759 #define OPW(fp,td,code) ({ \ 760 struct file *__fpop; \ 761 __typeof(code) __retval; \ 762 \ 763 __fpop = (td)->td_fpop; \ 764 (td)->td_fpop = (fp); \ 765 __retval = (code); \ 766 (td)->td_fpop = __fpop; \ 767 __retval; \ 768 }) 769 770 static int 771 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 772 struct file *file) 773 { 774 struct linux_cdev *ldev; 775 struct linux_file *filp; 776 const struct file_operations *fop; 777 int error; 778 779 ldev = dev->si_drv1; 780 781 filp = linux_file_alloc(); 782 filp->f_dentry = &filp->f_dentry_store; 783 filp->f_op = ldev->ops; 784 filp->f_mode = file->f_flag; 785 filp->f_flags = file->f_flag; 786 filp->f_vnode = file->f_vnode; 787 filp->_file = file; 788 refcount_acquire(&ldev->refs); 789 filp->f_cdev = ldev; 790 791 linux_set_current(td); 792 linux_get_fop(filp, &fop, &ldev); 793 794 if (fop->open != NULL) { 795 error = -fop->open(file->f_vnode, filp); 796 if (error != 0) { 797 linux_drop_fop(ldev); 798 linux_cdev_deref(filp->f_cdev); 799 kfree(filp); 800 return (error); 801 } 802 } 803 804 /* hold on to the vnode - used for fstat() */ 805 vhold(filp->f_vnode); 806 807 /* release the file from devfs */ 808 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 809 linux_drop_fop(ldev); 810 return (ENXIO); 811 } 812 813 #define LINUX_IOCTL_MIN_PTR 0x10000UL 814 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 815 816 static inline int 817 linux_remap_address(void **uaddr, size_t len) 818 { 819 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 820 821 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 822 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 823 struct task_struct *pts = current; 824 if (pts == NULL) { 825 *uaddr = NULL; 826 return (1); 827 } 828 829 /* compute data offset */ 830 uaddr_val -= LINUX_IOCTL_MIN_PTR; 831 832 /* check that length is within bounds */ 833 if ((len > IOCPARM_MAX) || 834 (uaddr_val + len) > pts->bsd_ioctl_len) { 835 *uaddr = NULL; 836 return (1); 837 } 838 839 /* re-add kernel buffer address */ 840 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 841 842 /* update address location */ 843 *uaddr = (void *)uaddr_val; 844 return (1); 845 } 846 return (0); 847 } 848 849 int 850 linux_copyin(const void *uaddr, void *kaddr, size_t len) 851 { 852 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 853 if (uaddr == NULL) 854 return (-EFAULT); 855 memcpy(kaddr, uaddr, len); 856 return (0); 857 } 858 return (-copyin(uaddr, kaddr, len)); 859 } 860 861 int 862 linux_copyout(const void *kaddr, void *uaddr, size_t len) 863 { 864 if (linux_remap_address(&uaddr, len)) { 865 if (uaddr == NULL) 866 return (-EFAULT); 867 memcpy(uaddr, kaddr, len); 868 return (0); 869 } 870 return (-copyout(kaddr, uaddr, len)); 871 } 872 873 size_t 874 linux_clear_user(void *_uaddr, size_t _len) 875 { 876 uint8_t *uaddr = _uaddr; 877 size_t len = _len; 878 879 /* make sure uaddr is aligned before going into the fast loop */ 880 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 881 if (subyte(uaddr, 0)) 882 return (_len); 883 uaddr++; 884 len--; 885 } 886 887 /* zero 8 bytes at a time */ 888 while (len > 7) { 889 #ifdef __LP64__ 890 if (suword64(uaddr, 0)) 891 return (_len); 892 #else 893 if (suword32(uaddr, 0)) 894 return (_len); 895 if (suword32(uaddr + 4, 0)) 896 return (_len); 897 #endif 898 uaddr += 8; 899 len -= 8; 900 } 901 902 /* zero fill end, if any */ 903 while (len > 0) { 904 if (subyte(uaddr, 0)) 905 return (_len); 906 uaddr++; 907 len--; 908 } 909 return (0); 910 } 911 912 int 913 linux_access_ok(const void *uaddr, size_t len) 914 { 915 uintptr_t saddr; 916 uintptr_t eaddr; 917 918 /* get start and end address */ 919 saddr = (uintptr_t)uaddr; 920 eaddr = (uintptr_t)uaddr + len; 921 922 /* verify addresses are valid for userspace */ 923 return ((saddr == eaddr) || 924 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 925 } 926 927 /* 928 * This function should return either EINTR or ERESTART depending on 929 * the signal type sent to this thread: 930 */ 931 static int 932 linux_get_error(struct task_struct *task, int error) 933 { 934 /* check for signal type interrupt code */ 935 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 936 error = -linux_schedule_get_interrupt_value(task); 937 if (error == 0) 938 error = EINTR; 939 } 940 return (error); 941 } 942 943 static int 944 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 945 const struct file_operations *fop, u_long cmd, caddr_t data, 946 struct thread *td) 947 { 948 struct task_struct *task = current; 949 unsigned size; 950 int error; 951 952 size = IOCPARM_LEN(cmd); 953 /* refer to logic in sys_ioctl() */ 954 if (size > 0) { 955 /* 956 * Setup hint for linux_copyin() and linux_copyout(). 957 * 958 * Background: Linux code expects a user-space address 959 * while FreeBSD supplies a kernel-space address. 960 */ 961 task->bsd_ioctl_data = data; 962 task->bsd_ioctl_len = size; 963 data = (void *)LINUX_IOCTL_MIN_PTR; 964 } else { 965 /* fetch user-space pointer */ 966 data = *(void **)data; 967 } 968 #ifdef COMPAT_FREEBSD32 969 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 970 /* try the compat IOCTL handler first */ 971 if (fop->compat_ioctl != NULL) { 972 error = -OPW(fp, td, fop->compat_ioctl(filp, 973 cmd, (u_long)data)); 974 } else { 975 error = ENOTTY; 976 } 977 978 /* fallback to the regular IOCTL handler, if any */ 979 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 980 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 981 cmd, (u_long)data)); 982 } 983 } else 984 #endif 985 { 986 if (fop->unlocked_ioctl != NULL) { 987 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 988 cmd, (u_long)data)); 989 } else { 990 error = ENOTTY; 991 } 992 } 993 if (size > 0) { 994 task->bsd_ioctl_data = NULL; 995 task->bsd_ioctl_len = 0; 996 } 997 998 if (error == EWOULDBLOCK) { 999 /* update kqfilter status, if any */ 1000 linux_file_kqfilter_poll(filp, 1001 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1002 } else { 1003 error = linux_get_error(task, error); 1004 } 1005 return (error); 1006 } 1007 1008 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1009 1010 /* 1011 * This function atomically updates the poll wakeup state and returns 1012 * the previous state at the time of update. 1013 */ 1014 static uint8_t 1015 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1016 { 1017 int c, old; 1018 1019 c = v->counter; 1020 1021 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1022 c = old; 1023 1024 return (c); 1025 } 1026 1027 static int 1028 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1029 { 1030 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1031 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1032 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1033 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1034 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1035 }; 1036 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1037 1038 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1039 case LINUX_FWQ_STATE_QUEUED: 1040 linux_poll_wakeup(filp); 1041 return (1); 1042 default: 1043 return (0); 1044 } 1045 } 1046 1047 void 1048 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1049 { 1050 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1051 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1052 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1053 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1054 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1055 }; 1056 1057 /* check if we are called inside the select system call */ 1058 if (p == LINUX_POLL_TABLE_NORMAL) 1059 selrecord(curthread, &filp->f_selinfo); 1060 1061 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1062 case LINUX_FWQ_STATE_INIT: 1063 /* NOTE: file handles can only belong to one wait-queue */ 1064 filp->f_wait_queue.wqh = wqh; 1065 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1066 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1067 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1068 break; 1069 default: 1070 break; 1071 } 1072 } 1073 1074 static void 1075 linux_poll_wait_dequeue(struct linux_file *filp) 1076 { 1077 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1078 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1079 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1080 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1081 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1082 }; 1083 1084 seldrain(&filp->f_selinfo); 1085 1086 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1087 case LINUX_FWQ_STATE_NOT_READY: 1088 case LINUX_FWQ_STATE_QUEUED: 1089 case LINUX_FWQ_STATE_READY: 1090 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1091 break; 1092 default: 1093 break; 1094 } 1095 } 1096 1097 void 1098 linux_poll_wakeup(struct linux_file *filp) 1099 { 1100 /* this function should be NULL-safe */ 1101 if (filp == NULL) 1102 return; 1103 1104 selwakeup(&filp->f_selinfo); 1105 1106 spin_lock(&filp->f_kqlock); 1107 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1108 LINUX_KQ_FLAG_NEED_WRITE; 1109 1110 /* make sure the "knote" gets woken up */ 1111 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1112 spin_unlock(&filp->f_kqlock); 1113 } 1114 1115 static void 1116 linux_file_kqfilter_detach(struct knote *kn) 1117 { 1118 struct linux_file *filp = kn->kn_hook; 1119 1120 spin_lock(&filp->f_kqlock); 1121 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1122 spin_unlock(&filp->f_kqlock); 1123 } 1124 1125 static int 1126 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1127 { 1128 struct linux_file *filp = kn->kn_hook; 1129 1130 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1131 1132 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1133 } 1134 1135 static int 1136 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1137 { 1138 struct linux_file *filp = kn->kn_hook; 1139 1140 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1141 1142 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1143 } 1144 1145 static struct filterops linux_dev_kqfiltops_read = { 1146 .f_isfd = 1, 1147 .f_detach = linux_file_kqfilter_detach, 1148 .f_event = linux_file_kqfilter_read_event, 1149 }; 1150 1151 static struct filterops linux_dev_kqfiltops_write = { 1152 .f_isfd = 1, 1153 .f_detach = linux_file_kqfilter_detach, 1154 .f_event = linux_file_kqfilter_write_event, 1155 }; 1156 1157 static void 1158 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1159 { 1160 struct thread *td; 1161 const struct file_operations *fop; 1162 struct linux_cdev *ldev; 1163 int temp; 1164 1165 if ((filp->f_kqflags & kqflags) == 0) 1166 return; 1167 1168 td = curthread; 1169 1170 linux_get_fop(filp, &fop, &ldev); 1171 /* get the latest polling state */ 1172 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1173 linux_drop_fop(ldev); 1174 1175 spin_lock(&filp->f_kqlock); 1176 /* clear kqflags */ 1177 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1178 LINUX_KQ_FLAG_NEED_WRITE); 1179 /* update kqflags */ 1180 if ((temp & (POLLIN | POLLOUT)) != 0) { 1181 if ((temp & POLLIN) != 0) 1182 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1183 if ((temp & POLLOUT) != 0) 1184 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1185 1186 /* make sure the "knote" gets woken up */ 1187 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1188 } 1189 spin_unlock(&filp->f_kqlock); 1190 } 1191 1192 static int 1193 linux_file_kqfilter(struct file *file, struct knote *kn) 1194 { 1195 struct linux_file *filp; 1196 struct thread *td; 1197 int error; 1198 1199 td = curthread; 1200 filp = (struct linux_file *)file->f_data; 1201 filp->f_flags = file->f_flag; 1202 if (filp->f_op->poll == NULL) 1203 return (EINVAL); 1204 1205 spin_lock(&filp->f_kqlock); 1206 switch (kn->kn_filter) { 1207 case EVFILT_READ: 1208 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1209 kn->kn_fop = &linux_dev_kqfiltops_read; 1210 kn->kn_hook = filp; 1211 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1212 error = 0; 1213 break; 1214 case EVFILT_WRITE: 1215 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1216 kn->kn_fop = &linux_dev_kqfiltops_write; 1217 kn->kn_hook = filp; 1218 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1219 error = 0; 1220 break; 1221 default: 1222 error = EINVAL; 1223 break; 1224 } 1225 spin_unlock(&filp->f_kqlock); 1226 1227 if (error == 0) { 1228 linux_set_current(td); 1229 1230 /* update kqfilter status, if any */ 1231 linux_file_kqfilter_poll(filp, 1232 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1233 } 1234 return (error); 1235 } 1236 1237 static int 1238 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1239 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1240 int nprot, bool is_shared, struct thread *td) 1241 { 1242 struct task_struct *task; 1243 struct vm_area_struct *vmap; 1244 struct mm_struct *mm; 1245 struct linux_file *filp; 1246 vm_memattr_t attr; 1247 int error; 1248 1249 filp = (struct linux_file *)fp->f_data; 1250 filp->f_flags = fp->f_flag; 1251 1252 if (fop->mmap == NULL) 1253 return (EOPNOTSUPP); 1254 1255 linux_set_current(td); 1256 1257 /* 1258 * The same VM object might be shared by multiple processes 1259 * and the mm_struct is usually freed when a process exits. 1260 * 1261 * The atomic reference below makes sure the mm_struct is 1262 * available as long as the vmap is in the linux_vma_head. 1263 */ 1264 task = current; 1265 mm = task->mm; 1266 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1267 return (EINVAL); 1268 1269 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1270 vmap->vm_start = 0; 1271 vmap->vm_end = size; 1272 vmap->vm_pgoff = *offset / PAGE_SIZE; 1273 vmap->vm_pfn = 0; 1274 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1275 if (is_shared) 1276 vmap->vm_flags |= VM_SHARED; 1277 vmap->vm_ops = NULL; 1278 vmap->vm_file = get_file(filp); 1279 vmap->vm_mm = mm; 1280 1281 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1282 error = linux_get_error(task, EINTR); 1283 } else { 1284 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1285 error = linux_get_error(task, error); 1286 up_write(&vmap->vm_mm->mmap_sem); 1287 } 1288 1289 if (error != 0) { 1290 linux_cdev_handle_free(vmap); 1291 return (error); 1292 } 1293 1294 attr = pgprot2cachemode(vmap->vm_page_prot); 1295 1296 if (vmap->vm_ops != NULL) { 1297 struct vm_area_struct *ptr; 1298 void *vm_private_data; 1299 bool vm_no_fault; 1300 1301 if (vmap->vm_ops->open == NULL || 1302 vmap->vm_ops->close == NULL || 1303 vmap->vm_private_data == NULL) { 1304 /* free allocated VM area struct */ 1305 linux_cdev_handle_free(vmap); 1306 return (EINVAL); 1307 } 1308 1309 vm_private_data = vmap->vm_private_data; 1310 1311 rw_wlock(&linux_vma_lock); 1312 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1313 if (ptr->vm_private_data == vm_private_data) 1314 break; 1315 } 1316 /* check if there is an existing VM area struct */ 1317 if (ptr != NULL) { 1318 /* check if the VM area structure is invalid */ 1319 if (ptr->vm_ops == NULL || 1320 ptr->vm_ops->open == NULL || 1321 ptr->vm_ops->close == NULL) { 1322 error = ESTALE; 1323 vm_no_fault = 1; 1324 } else { 1325 error = EEXIST; 1326 vm_no_fault = (ptr->vm_ops->fault == NULL); 1327 } 1328 } else { 1329 /* insert VM area structure into list */ 1330 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1331 error = 0; 1332 vm_no_fault = (vmap->vm_ops->fault == NULL); 1333 } 1334 rw_wunlock(&linux_vma_lock); 1335 1336 if (error != 0) { 1337 /* free allocated VM area struct */ 1338 linux_cdev_handle_free(vmap); 1339 /* check for stale VM area struct */ 1340 if (error != EEXIST) 1341 return (error); 1342 } 1343 1344 /* check if there is no fault handler */ 1345 if (vm_no_fault) { 1346 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1347 &linux_cdev_pager_ops[1], size, nprot, *offset, 1348 td->td_ucred); 1349 } else { 1350 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1351 &linux_cdev_pager_ops[0], size, nprot, *offset, 1352 td->td_ucred); 1353 } 1354 1355 /* check if allocating the VM object failed */ 1356 if (*object == NULL) { 1357 if (error == 0) { 1358 /* remove VM area struct from list */ 1359 linux_cdev_handle_remove(vmap); 1360 /* free allocated VM area struct */ 1361 linux_cdev_handle_free(vmap); 1362 } 1363 return (EINVAL); 1364 } 1365 } else { 1366 struct sglist *sg; 1367 1368 sg = sglist_alloc(1, M_WAITOK); 1369 sglist_append_phys(sg, 1370 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1371 1372 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1373 nprot, 0, td->td_ucred); 1374 1375 linux_cdev_handle_free(vmap); 1376 1377 if (*object == NULL) { 1378 sglist_free(sg); 1379 return (EINVAL); 1380 } 1381 } 1382 1383 if (attr != VM_MEMATTR_DEFAULT) { 1384 VM_OBJECT_WLOCK(*object); 1385 vm_object_set_memattr(*object, attr); 1386 VM_OBJECT_WUNLOCK(*object); 1387 } 1388 *offset = 0; 1389 return (0); 1390 } 1391 1392 struct cdevsw linuxcdevsw = { 1393 .d_version = D_VERSION, 1394 .d_fdopen = linux_dev_fdopen, 1395 .d_name = "lkpidev", 1396 }; 1397 1398 static int 1399 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1400 int flags, struct thread *td) 1401 { 1402 struct linux_file *filp; 1403 const struct file_operations *fop; 1404 struct linux_cdev *ldev; 1405 ssize_t bytes; 1406 int error; 1407 1408 error = 0; 1409 filp = (struct linux_file *)file->f_data; 1410 filp->f_flags = file->f_flag; 1411 /* XXX no support for I/O vectors currently */ 1412 if (uio->uio_iovcnt != 1) 1413 return (EOPNOTSUPP); 1414 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1415 return (EINVAL); 1416 linux_set_current(td); 1417 linux_get_fop(filp, &fop, &ldev); 1418 if (fop->read != NULL) { 1419 bytes = OPW(file, td, fop->read(filp, 1420 uio->uio_iov->iov_base, 1421 uio->uio_iov->iov_len, &uio->uio_offset)); 1422 if (bytes >= 0) { 1423 uio->uio_iov->iov_base = 1424 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1425 uio->uio_iov->iov_len -= bytes; 1426 uio->uio_resid -= bytes; 1427 } else { 1428 error = linux_get_error(current, -bytes); 1429 } 1430 } else 1431 error = ENXIO; 1432 1433 /* update kqfilter status, if any */ 1434 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1435 linux_drop_fop(ldev); 1436 1437 return (error); 1438 } 1439 1440 static int 1441 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1442 int flags, struct thread *td) 1443 { 1444 struct linux_file *filp; 1445 const struct file_operations *fop; 1446 struct linux_cdev *ldev; 1447 ssize_t bytes; 1448 int error; 1449 1450 filp = (struct linux_file *)file->f_data; 1451 filp->f_flags = file->f_flag; 1452 /* XXX no support for I/O vectors currently */ 1453 if (uio->uio_iovcnt != 1) 1454 return (EOPNOTSUPP); 1455 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1456 return (EINVAL); 1457 linux_set_current(td); 1458 linux_get_fop(filp, &fop, &ldev); 1459 if (fop->write != NULL) { 1460 bytes = OPW(file, td, fop->write(filp, 1461 uio->uio_iov->iov_base, 1462 uio->uio_iov->iov_len, &uio->uio_offset)); 1463 if (bytes >= 0) { 1464 uio->uio_iov->iov_base = 1465 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1466 uio->uio_iov->iov_len -= bytes; 1467 uio->uio_resid -= bytes; 1468 error = 0; 1469 } else { 1470 error = linux_get_error(current, -bytes); 1471 } 1472 } else 1473 error = ENXIO; 1474 1475 /* update kqfilter status, if any */ 1476 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1477 1478 linux_drop_fop(ldev); 1479 1480 return (error); 1481 } 1482 1483 static int 1484 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1485 struct thread *td) 1486 { 1487 struct linux_file *filp; 1488 const struct file_operations *fop; 1489 struct linux_cdev *ldev; 1490 int revents; 1491 1492 filp = (struct linux_file *)file->f_data; 1493 filp->f_flags = file->f_flag; 1494 linux_set_current(td); 1495 linux_get_fop(filp, &fop, &ldev); 1496 if (fop->poll != NULL) { 1497 revents = OPW(file, td, fop->poll(filp, 1498 LINUX_POLL_TABLE_NORMAL)) & events; 1499 } else { 1500 revents = 0; 1501 } 1502 linux_drop_fop(ldev); 1503 return (revents); 1504 } 1505 1506 static int 1507 linux_file_close(struct file *file, struct thread *td) 1508 { 1509 struct linux_file *filp; 1510 int (*release)(struct inode *, struct linux_file *); 1511 const struct file_operations *fop; 1512 struct linux_cdev *ldev; 1513 int error; 1514 1515 filp = (struct linux_file *)file->f_data; 1516 1517 KASSERT(file_count(filp) == 0, 1518 ("File refcount(%d) is not zero", file_count(filp))); 1519 1520 if (td == NULL) 1521 td = curthread; 1522 1523 error = 0; 1524 filp->f_flags = file->f_flag; 1525 linux_set_current(td); 1526 linux_poll_wait_dequeue(filp); 1527 linux_get_fop(filp, &fop, &ldev); 1528 /* 1529 * Always use the real release function, if any, to avoid 1530 * leaking device resources: 1531 */ 1532 release = filp->f_op->release; 1533 if (release != NULL) 1534 error = -OPW(file, td, release(filp->f_vnode, filp)); 1535 funsetown(&filp->f_sigio); 1536 if (filp->f_vnode != NULL) 1537 vdrop(filp->f_vnode); 1538 linux_drop_fop(ldev); 1539 ldev = filp->f_cdev; 1540 if (ldev != NULL) 1541 linux_cdev_deref(ldev); 1542 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1543 kfree(filp); 1544 1545 return (error); 1546 } 1547 1548 static int 1549 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1550 struct thread *td) 1551 { 1552 struct linux_file *filp; 1553 const struct file_operations *fop; 1554 struct linux_cdev *ldev; 1555 struct fiodgname_arg *fgn; 1556 const char *p; 1557 int error, i; 1558 1559 error = 0; 1560 filp = (struct linux_file *)fp->f_data; 1561 filp->f_flags = fp->f_flag; 1562 linux_get_fop(filp, &fop, &ldev); 1563 1564 linux_set_current(td); 1565 switch (cmd) { 1566 case FIONBIO: 1567 break; 1568 case FIOASYNC: 1569 if (fop->fasync == NULL) 1570 break; 1571 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1572 break; 1573 case FIOSETOWN: 1574 error = fsetown(*(int *)data, &filp->f_sigio); 1575 if (error == 0) { 1576 if (fop->fasync == NULL) 1577 break; 1578 error = -OPW(fp, td, fop->fasync(0, filp, 1579 fp->f_flag & FASYNC)); 1580 } 1581 break; 1582 case FIOGETOWN: 1583 *(int *)data = fgetown(&filp->f_sigio); 1584 break; 1585 case FIODGNAME: 1586 #ifdef COMPAT_FREEBSD32 1587 case FIODGNAME_32: 1588 #endif 1589 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1590 error = ENXIO; 1591 break; 1592 } 1593 fgn = data; 1594 p = devtoname(filp->f_cdev->cdev); 1595 i = strlen(p) + 1; 1596 if (i > fgn->len) { 1597 error = EINVAL; 1598 break; 1599 } 1600 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1601 break; 1602 default: 1603 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1604 break; 1605 } 1606 linux_drop_fop(ldev); 1607 return (error); 1608 } 1609 1610 static int 1611 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1612 vm_prot_t maxprot, int flags, struct file *fp, 1613 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1614 { 1615 /* 1616 * Character devices do not provide private mappings 1617 * of any kind: 1618 */ 1619 if ((maxprot & VM_PROT_WRITE) == 0 && 1620 (prot & VM_PROT_WRITE) != 0) 1621 return (EACCES); 1622 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1623 return (EINVAL); 1624 1625 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1626 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1627 } 1628 1629 static int 1630 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1631 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1632 struct thread *td) 1633 { 1634 struct linux_file *filp; 1635 const struct file_operations *fop; 1636 struct linux_cdev *ldev; 1637 struct mount *mp; 1638 struct vnode *vp; 1639 vm_object_t object; 1640 vm_prot_t maxprot; 1641 int error; 1642 1643 filp = (struct linux_file *)fp->f_data; 1644 1645 vp = filp->f_vnode; 1646 if (vp == NULL) 1647 return (EOPNOTSUPP); 1648 1649 /* 1650 * Ensure that file and memory protections are 1651 * compatible. 1652 */ 1653 mp = vp->v_mount; 1654 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1655 maxprot = VM_PROT_NONE; 1656 if ((prot & VM_PROT_EXECUTE) != 0) 1657 return (EACCES); 1658 } else 1659 maxprot = VM_PROT_EXECUTE; 1660 if ((fp->f_flag & FREAD) != 0) 1661 maxprot |= VM_PROT_READ; 1662 else if ((prot & VM_PROT_READ) != 0) 1663 return (EACCES); 1664 1665 /* 1666 * If we are sharing potential changes via MAP_SHARED and we 1667 * are trying to get write permission although we opened it 1668 * without asking for it, bail out. 1669 * 1670 * Note that most character devices always share mappings. 1671 * 1672 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1673 * requests rather than doing it here. 1674 */ 1675 if ((flags & MAP_SHARED) != 0) { 1676 if ((fp->f_flag & FWRITE) != 0) 1677 maxprot |= VM_PROT_WRITE; 1678 else if ((prot & VM_PROT_WRITE) != 0) 1679 return (EACCES); 1680 } 1681 maxprot &= cap_maxprot; 1682 1683 linux_get_fop(filp, &fop, &ldev); 1684 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1685 &foff, fop, &object); 1686 if (error != 0) 1687 goto out; 1688 1689 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1690 foff, FALSE, td); 1691 if (error != 0) 1692 vm_object_deallocate(object); 1693 out: 1694 linux_drop_fop(ldev); 1695 return (error); 1696 } 1697 1698 static int 1699 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1700 struct thread *td) 1701 { 1702 struct linux_file *filp; 1703 struct vnode *vp; 1704 int error; 1705 1706 filp = (struct linux_file *)fp->f_data; 1707 if (filp->f_vnode == NULL) 1708 return (EOPNOTSUPP); 1709 1710 vp = filp->f_vnode; 1711 1712 vn_lock(vp, LK_SHARED | LK_RETRY); 1713 error = VOP_STAT(vp, sb, td->td_ucred, NOCRED, td); 1714 VOP_UNLOCK(vp); 1715 1716 return (error); 1717 } 1718 1719 static int 1720 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1721 struct filedesc *fdp) 1722 { 1723 struct linux_file *filp; 1724 struct vnode *vp; 1725 int error; 1726 1727 filp = fp->f_data; 1728 vp = filp->f_vnode; 1729 if (vp == NULL) { 1730 error = 0; 1731 kif->kf_type = KF_TYPE_DEV; 1732 } else { 1733 vref(vp); 1734 FILEDESC_SUNLOCK(fdp); 1735 error = vn_fill_kinfo_vnode(vp, kif); 1736 vrele(vp); 1737 kif->kf_type = KF_TYPE_VNODE; 1738 FILEDESC_SLOCK(fdp); 1739 } 1740 return (error); 1741 } 1742 1743 unsigned int 1744 linux_iminor(struct inode *inode) 1745 { 1746 struct linux_cdev *ldev; 1747 1748 if (inode == NULL || inode->v_rdev == NULL || 1749 inode->v_rdev->si_devsw != &linuxcdevsw) 1750 return (-1U); 1751 ldev = inode->v_rdev->si_drv1; 1752 if (ldev == NULL) 1753 return (-1U); 1754 1755 return (minor(ldev->dev)); 1756 } 1757 1758 struct fileops linuxfileops = { 1759 .fo_read = linux_file_read, 1760 .fo_write = linux_file_write, 1761 .fo_truncate = invfo_truncate, 1762 .fo_kqfilter = linux_file_kqfilter, 1763 .fo_stat = linux_file_stat, 1764 .fo_fill_kinfo = linux_file_fill_kinfo, 1765 .fo_poll = linux_file_poll, 1766 .fo_close = linux_file_close, 1767 .fo_ioctl = linux_file_ioctl, 1768 .fo_mmap = linux_file_mmap, 1769 .fo_chmod = invfo_chmod, 1770 .fo_chown = invfo_chown, 1771 .fo_sendfile = invfo_sendfile, 1772 .fo_flags = DFLAG_PASSABLE, 1773 }; 1774 1775 /* 1776 * Hash of vmmap addresses. This is infrequently accessed and does not 1777 * need to be particularly large. This is done because we must store the 1778 * caller's idea of the map size to properly unmap. 1779 */ 1780 struct vmmap { 1781 LIST_ENTRY(vmmap) vm_next; 1782 void *vm_addr; 1783 unsigned long vm_size; 1784 }; 1785 1786 struct vmmaphd { 1787 struct vmmap *lh_first; 1788 }; 1789 #define VMMAP_HASH_SIZE 64 1790 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1791 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1792 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1793 static struct mtx vmmaplock; 1794 1795 static void 1796 vmmap_add(void *addr, unsigned long size) 1797 { 1798 struct vmmap *vmmap; 1799 1800 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1801 mtx_lock(&vmmaplock); 1802 vmmap->vm_size = size; 1803 vmmap->vm_addr = addr; 1804 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1805 mtx_unlock(&vmmaplock); 1806 } 1807 1808 static struct vmmap * 1809 vmmap_remove(void *addr) 1810 { 1811 struct vmmap *vmmap; 1812 1813 mtx_lock(&vmmaplock); 1814 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1815 if (vmmap->vm_addr == addr) 1816 break; 1817 if (vmmap) 1818 LIST_REMOVE(vmmap, vm_next); 1819 mtx_unlock(&vmmaplock); 1820 1821 return (vmmap); 1822 } 1823 1824 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1825 void * 1826 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1827 { 1828 void *addr; 1829 1830 addr = pmap_mapdev_attr(phys_addr, size, attr); 1831 if (addr == NULL) 1832 return (NULL); 1833 vmmap_add(addr, size); 1834 1835 return (addr); 1836 } 1837 #endif 1838 1839 void 1840 iounmap(void *addr) 1841 { 1842 struct vmmap *vmmap; 1843 1844 vmmap = vmmap_remove(addr); 1845 if (vmmap == NULL) 1846 return; 1847 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 1848 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1849 #endif 1850 kfree(vmmap); 1851 } 1852 1853 void * 1854 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1855 { 1856 vm_offset_t off; 1857 size_t size; 1858 1859 size = count * PAGE_SIZE; 1860 off = kva_alloc(size); 1861 if (off == 0) 1862 return (NULL); 1863 vmmap_add((void *)off, size); 1864 pmap_qenter(off, pages, count); 1865 1866 return ((void *)off); 1867 } 1868 1869 void 1870 vunmap(void *addr) 1871 { 1872 struct vmmap *vmmap; 1873 1874 vmmap = vmmap_remove(addr); 1875 if (vmmap == NULL) 1876 return; 1877 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1878 kva_free((vm_offset_t)addr, vmmap->vm_size); 1879 kfree(vmmap); 1880 } 1881 1882 static char * 1883 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1884 { 1885 unsigned int len; 1886 char *p; 1887 va_list aq; 1888 1889 va_copy(aq, ap); 1890 len = vsnprintf(NULL, 0, fmt, aq); 1891 va_end(aq); 1892 1893 if (dev != NULL) 1894 p = devm_kmalloc(dev, len + 1, gfp); 1895 else 1896 p = kmalloc(len + 1, gfp); 1897 if (p != NULL) 1898 vsnprintf(p, len + 1, fmt, ap); 1899 1900 return (p); 1901 } 1902 1903 char * 1904 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1905 { 1906 1907 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1908 } 1909 1910 char * 1911 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1912 { 1913 va_list ap; 1914 char *p; 1915 1916 va_start(ap, fmt); 1917 p = devm_kvasprintf(dev, gfp, fmt, ap); 1918 va_end(ap); 1919 1920 return (p); 1921 } 1922 1923 char * 1924 kasprintf(gfp_t gfp, const char *fmt, ...) 1925 { 1926 va_list ap; 1927 char *p; 1928 1929 va_start(ap, fmt); 1930 p = kvasprintf(gfp, fmt, ap); 1931 va_end(ap); 1932 1933 return (p); 1934 } 1935 1936 static void 1937 linux_timer_callback_wrapper(void *context) 1938 { 1939 struct timer_list *timer; 1940 1941 timer = context; 1942 1943 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1944 /* try again later */ 1945 callout_reset(&timer->callout, 1, 1946 &linux_timer_callback_wrapper, timer); 1947 return; 1948 } 1949 1950 timer->function(timer->data); 1951 } 1952 1953 int 1954 mod_timer(struct timer_list *timer, int expires) 1955 { 1956 int ret; 1957 1958 timer->expires = expires; 1959 ret = callout_reset(&timer->callout, 1960 linux_timer_jiffies_until(expires), 1961 &linux_timer_callback_wrapper, timer); 1962 1963 MPASS(ret == 0 || ret == 1); 1964 1965 return (ret == 1); 1966 } 1967 1968 void 1969 add_timer(struct timer_list *timer) 1970 { 1971 1972 callout_reset(&timer->callout, 1973 linux_timer_jiffies_until(timer->expires), 1974 &linux_timer_callback_wrapper, timer); 1975 } 1976 1977 void 1978 add_timer_on(struct timer_list *timer, int cpu) 1979 { 1980 1981 callout_reset_on(&timer->callout, 1982 linux_timer_jiffies_until(timer->expires), 1983 &linux_timer_callback_wrapper, timer, cpu); 1984 } 1985 1986 int 1987 del_timer(struct timer_list *timer) 1988 { 1989 1990 if (callout_stop(&(timer)->callout) == -1) 1991 return (0); 1992 return (1); 1993 } 1994 1995 int 1996 del_timer_sync(struct timer_list *timer) 1997 { 1998 1999 if (callout_drain(&(timer)->callout) == -1) 2000 return (0); 2001 return (1); 2002 } 2003 2004 /* greatest common divisor, Euclid equation */ 2005 static uint64_t 2006 lkpi_gcd_64(uint64_t a, uint64_t b) 2007 { 2008 uint64_t an; 2009 uint64_t bn; 2010 2011 while (b != 0) { 2012 an = b; 2013 bn = a % b; 2014 a = an; 2015 b = bn; 2016 } 2017 return (a); 2018 } 2019 2020 uint64_t lkpi_nsec2hz_rem; 2021 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2022 uint64_t lkpi_nsec2hz_max; 2023 2024 uint64_t lkpi_usec2hz_rem; 2025 uint64_t lkpi_usec2hz_div = 1000000ULL; 2026 uint64_t lkpi_usec2hz_max; 2027 2028 uint64_t lkpi_msec2hz_rem; 2029 uint64_t lkpi_msec2hz_div = 1000ULL; 2030 uint64_t lkpi_msec2hz_max; 2031 2032 static void 2033 linux_timer_init(void *arg) 2034 { 2035 uint64_t gcd; 2036 2037 /* 2038 * Compute an internal HZ value which can divide 2**32 to 2039 * avoid timer rounding problems when the tick value wraps 2040 * around 2**32: 2041 */ 2042 linux_timer_hz_mask = 1; 2043 while (linux_timer_hz_mask < (unsigned long)hz) 2044 linux_timer_hz_mask *= 2; 2045 linux_timer_hz_mask--; 2046 2047 /* compute some internal constants */ 2048 2049 lkpi_nsec2hz_rem = hz; 2050 lkpi_usec2hz_rem = hz; 2051 lkpi_msec2hz_rem = hz; 2052 2053 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2054 lkpi_nsec2hz_rem /= gcd; 2055 lkpi_nsec2hz_div /= gcd; 2056 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2057 2058 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2059 lkpi_usec2hz_rem /= gcd; 2060 lkpi_usec2hz_div /= gcd; 2061 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2062 2063 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2064 lkpi_msec2hz_rem /= gcd; 2065 lkpi_msec2hz_div /= gcd; 2066 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2067 } 2068 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2069 2070 void 2071 linux_complete_common(struct completion *c, int all) 2072 { 2073 int wakeup_swapper; 2074 2075 sleepq_lock(c); 2076 if (all) { 2077 c->done = UINT_MAX; 2078 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2079 } else { 2080 if (c->done != UINT_MAX) 2081 c->done++; 2082 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2083 } 2084 sleepq_release(c); 2085 if (wakeup_swapper) 2086 kick_proc0(); 2087 } 2088 2089 /* 2090 * Indefinite wait for done != 0 with or without signals. 2091 */ 2092 int 2093 linux_wait_for_common(struct completion *c, int flags) 2094 { 2095 struct task_struct *task; 2096 int error; 2097 2098 if (SCHEDULER_STOPPED()) 2099 return (0); 2100 2101 task = current; 2102 2103 if (flags != 0) 2104 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2105 else 2106 flags = SLEEPQ_SLEEP; 2107 error = 0; 2108 for (;;) { 2109 sleepq_lock(c); 2110 if (c->done) 2111 break; 2112 sleepq_add(c, NULL, "completion", flags, 0); 2113 if (flags & SLEEPQ_INTERRUPTIBLE) { 2114 DROP_GIANT(); 2115 error = -sleepq_wait_sig(c, 0); 2116 PICKUP_GIANT(); 2117 if (error != 0) { 2118 linux_schedule_save_interrupt_value(task, error); 2119 error = -ERESTARTSYS; 2120 goto intr; 2121 } 2122 } else { 2123 DROP_GIANT(); 2124 sleepq_wait(c, 0); 2125 PICKUP_GIANT(); 2126 } 2127 } 2128 if (c->done != UINT_MAX) 2129 c->done--; 2130 sleepq_release(c); 2131 2132 intr: 2133 return (error); 2134 } 2135 2136 /* 2137 * Time limited wait for done != 0 with or without signals. 2138 */ 2139 int 2140 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2141 { 2142 struct task_struct *task; 2143 int end = jiffies + timeout; 2144 int error; 2145 2146 if (SCHEDULER_STOPPED()) 2147 return (0); 2148 2149 task = current; 2150 2151 if (flags != 0) 2152 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2153 else 2154 flags = SLEEPQ_SLEEP; 2155 2156 for (;;) { 2157 sleepq_lock(c); 2158 if (c->done) 2159 break; 2160 sleepq_add(c, NULL, "completion", flags, 0); 2161 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2162 2163 DROP_GIANT(); 2164 if (flags & SLEEPQ_INTERRUPTIBLE) 2165 error = -sleepq_timedwait_sig(c, 0); 2166 else 2167 error = -sleepq_timedwait(c, 0); 2168 PICKUP_GIANT(); 2169 2170 if (error != 0) { 2171 /* check for timeout */ 2172 if (error == -EWOULDBLOCK) { 2173 error = 0; /* timeout */ 2174 } else { 2175 /* signal happened */ 2176 linux_schedule_save_interrupt_value(task, error); 2177 error = -ERESTARTSYS; 2178 } 2179 goto done; 2180 } 2181 } 2182 if (c->done != UINT_MAX) 2183 c->done--; 2184 sleepq_release(c); 2185 2186 /* return how many jiffies are left */ 2187 error = linux_timer_jiffies_until(end); 2188 done: 2189 return (error); 2190 } 2191 2192 int 2193 linux_try_wait_for_completion(struct completion *c) 2194 { 2195 int isdone; 2196 2197 sleepq_lock(c); 2198 isdone = (c->done != 0); 2199 if (c->done != 0 && c->done != UINT_MAX) 2200 c->done--; 2201 sleepq_release(c); 2202 return (isdone); 2203 } 2204 2205 int 2206 linux_completion_done(struct completion *c) 2207 { 2208 int isdone; 2209 2210 sleepq_lock(c); 2211 isdone = (c->done != 0); 2212 sleepq_release(c); 2213 return (isdone); 2214 } 2215 2216 static void 2217 linux_cdev_deref(struct linux_cdev *ldev) 2218 { 2219 if (refcount_release(&ldev->refs) && 2220 ldev->kobj.ktype == &linux_cdev_ktype) 2221 kfree(ldev); 2222 } 2223 2224 static void 2225 linux_cdev_release(struct kobject *kobj) 2226 { 2227 struct linux_cdev *cdev; 2228 struct kobject *parent; 2229 2230 cdev = container_of(kobj, struct linux_cdev, kobj); 2231 parent = kobj->parent; 2232 linux_destroy_dev(cdev); 2233 linux_cdev_deref(cdev); 2234 kobject_put(parent); 2235 } 2236 2237 static void 2238 linux_cdev_static_release(struct kobject *kobj) 2239 { 2240 struct cdev *cdev; 2241 struct linux_cdev *ldev; 2242 2243 ldev = container_of(kobj, struct linux_cdev, kobj); 2244 cdev = ldev->cdev; 2245 if (cdev != NULL) { 2246 destroy_dev(cdev); 2247 ldev->cdev = NULL; 2248 } 2249 kobject_put(kobj->parent); 2250 } 2251 2252 int 2253 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2254 { 2255 int ret; 2256 2257 if (dev->devt != 0) { 2258 /* Set parent kernel object. */ 2259 ldev->kobj.parent = &dev->kobj; 2260 2261 /* 2262 * Unlike Linux we require the kobject of the 2263 * character device structure to have a valid name 2264 * before calling this function: 2265 */ 2266 if (ldev->kobj.name == NULL) 2267 return (-EINVAL); 2268 2269 ret = cdev_add(ldev, dev->devt, 1); 2270 if (ret) 2271 return (ret); 2272 } 2273 ret = device_add(dev); 2274 if (ret != 0 && dev->devt != 0) 2275 cdev_del(ldev); 2276 return (ret); 2277 } 2278 2279 void 2280 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) 2281 { 2282 device_del(dev); 2283 2284 if (dev->devt != 0) 2285 cdev_del(ldev); 2286 } 2287 2288 static void 2289 linux_destroy_dev(struct linux_cdev *ldev) 2290 { 2291 2292 if (ldev->cdev == NULL) 2293 return; 2294 2295 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2296 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 2297 2298 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2299 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2300 pause("ldevdtr", hz / 4); 2301 2302 destroy_dev(ldev->cdev); 2303 ldev->cdev = NULL; 2304 } 2305 2306 const struct kobj_type linux_cdev_ktype = { 2307 .release = linux_cdev_release, 2308 }; 2309 2310 const struct kobj_type linux_cdev_static_ktype = { 2311 .release = linux_cdev_static_release, 2312 }; 2313 2314 static void 2315 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2316 { 2317 struct notifier_block *nb; 2318 struct netdev_notifier_info ni; 2319 2320 nb = arg; 2321 ni.ifp = ifp; 2322 ni.dev = (struct net_device *)ifp; 2323 if (linkstate == LINK_STATE_UP) 2324 nb->notifier_call(nb, NETDEV_UP, &ni); 2325 else 2326 nb->notifier_call(nb, NETDEV_DOWN, &ni); 2327 } 2328 2329 static void 2330 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2331 { 2332 struct notifier_block *nb; 2333 struct netdev_notifier_info ni; 2334 2335 nb = arg; 2336 ni.ifp = ifp; 2337 ni.dev = (struct net_device *)ifp; 2338 nb->notifier_call(nb, NETDEV_REGISTER, &ni); 2339 } 2340 2341 static void 2342 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2343 { 2344 struct notifier_block *nb; 2345 struct netdev_notifier_info ni; 2346 2347 nb = arg; 2348 ni.ifp = ifp; 2349 ni.dev = (struct net_device *)ifp; 2350 nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); 2351 } 2352 2353 static void 2354 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2355 { 2356 struct notifier_block *nb; 2357 struct netdev_notifier_info ni; 2358 2359 nb = arg; 2360 ni.ifp = ifp; 2361 ni.dev = (struct net_device *)ifp; 2362 nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); 2363 } 2364 2365 static void 2366 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2367 { 2368 struct notifier_block *nb; 2369 struct netdev_notifier_info ni; 2370 2371 nb = arg; 2372 ni.ifp = ifp; 2373 ni.dev = (struct net_device *)ifp; 2374 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); 2375 } 2376 2377 int 2378 register_netdevice_notifier(struct notifier_block *nb) 2379 { 2380 2381 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2382 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2383 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2384 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2385 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2386 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2387 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2388 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2389 2390 return (0); 2391 } 2392 2393 int 2394 register_inetaddr_notifier(struct notifier_block *nb) 2395 { 2396 2397 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2398 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2399 return (0); 2400 } 2401 2402 int 2403 unregister_netdevice_notifier(struct notifier_block *nb) 2404 { 2405 2406 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2407 nb->tags[NETDEV_UP]); 2408 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2409 nb->tags[NETDEV_REGISTER]); 2410 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2411 nb->tags[NETDEV_UNREGISTER]); 2412 EVENTHANDLER_DEREGISTER(iflladdr_event, 2413 nb->tags[NETDEV_CHANGEADDR]); 2414 2415 return (0); 2416 } 2417 2418 int 2419 unregister_inetaddr_notifier(struct notifier_block *nb) 2420 { 2421 2422 EVENTHANDLER_DEREGISTER(ifaddr_event, 2423 nb->tags[NETDEV_CHANGEIFADDR]); 2424 2425 return (0); 2426 } 2427 2428 struct list_sort_thunk { 2429 int (*cmp)(void *, struct list_head *, struct list_head *); 2430 void *priv; 2431 }; 2432 2433 static inline int 2434 linux_le_cmp(void *priv, const void *d1, const void *d2) 2435 { 2436 struct list_head *le1, *le2; 2437 struct list_sort_thunk *thunk; 2438 2439 thunk = priv; 2440 le1 = *(__DECONST(struct list_head **, d1)); 2441 le2 = *(__DECONST(struct list_head **, d2)); 2442 return ((thunk->cmp)(thunk->priv, le1, le2)); 2443 } 2444 2445 void 2446 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2447 struct list_head *a, struct list_head *b)) 2448 { 2449 struct list_sort_thunk thunk; 2450 struct list_head **ar, *le; 2451 size_t count, i; 2452 2453 count = 0; 2454 list_for_each(le, head) 2455 count++; 2456 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2457 i = 0; 2458 list_for_each(le, head) 2459 ar[i++] = le; 2460 thunk.cmp = cmp; 2461 thunk.priv = priv; 2462 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2463 INIT_LIST_HEAD(head); 2464 for (i = 0; i < count; i++) 2465 list_add_tail(ar[i], head); 2466 free(ar, M_KMALLOC); 2467 } 2468 2469 #if defined(__i386__) || defined(__amd64__) 2470 int 2471 linux_wbinvd_on_all_cpus(void) 2472 { 2473 2474 pmap_invalidate_cache(); 2475 return (0); 2476 } 2477 #endif 2478 2479 int 2480 linux_on_each_cpu(void callback(void *), void *data) 2481 { 2482 2483 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2484 smp_no_rendezvous_barrier, data); 2485 return (0); 2486 } 2487 2488 int 2489 linux_in_atomic(void) 2490 { 2491 2492 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2493 } 2494 2495 struct linux_cdev * 2496 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2497 { 2498 dev_t dev = MKDEV(major, minor); 2499 struct cdev *cdev; 2500 2501 dev_lock(); 2502 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2503 struct linux_cdev *ldev = cdev->si_drv1; 2504 if (ldev->dev == dev && 2505 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2506 break; 2507 } 2508 } 2509 dev_unlock(); 2510 2511 return (cdev != NULL ? cdev->si_drv1 : NULL); 2512 } 2513 2514 int 2515 __register_chrdev(unsigned int major, unsigned int baseminor, 2516 unsigned int count, const char *name, 2517 const struct file_operations *fops) 2518 { 2519 struct linux_cdev *cdev; 2520 int ret = 0; 2521 int i; 2522 2523 for (i = baseminor; i < baseminor + count; i++) { 2524 cdev = cdev_alloc(); 2525 cdev->ops = fops; 2526 kobject_set_name(&cdev->kobj, name); 2527 2528 ret = cdev_add(cdev, makedev(major, i), 1); 2529 if (ret != 0) 2530 break; 2531 } 2532 return (ret); 2533 } 2534 2535 int 2536 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2537 unsigned int count, const char *name, 2538 const struct file_operations *fops, uid_t uid, 2539 gid_t gid, int mode) 2540 { 2541 struct linux_cdev *cdev; 2542 int ret = 0; 2543 int i; 2544 2545 for (i = baseminor; i < baseminor + count; i++) { 2546 cdev = cdev_alloc(); 2547 cdev->ops = fops; 2548 kobject_set_name(&cdev->kobj, name); 2549 2550 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2551 if (ret != 0) 2552 break; 2553 } 2554 return (ret); 2555 } 2556 2557 void 2558 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2559 unsigned int count, const char *name) 2560 { 2561 struct linux_cdev *cdevp; 2562 int i; 2563 2564 for (i = baseminor; i < baseminor + count; i++) { 2565 cdevp = linux_find_cdev(name, major, i); 2566 if (cdevp != NULL) 2567 cdev_del(cdevp); 2568 } 2569 } 2570 2571 void 2572 linux_dump_stack(void) 2573 { 2574 #ifdef STACK 2575 struct stack st; 2576 2577 stack_zero(&st); 2578 stack_save(&st); 2579 stack_print(&st); 2580 #endif 2581 } 2582 2583 int 2584 linuxkpi_net_ratelimit(void) 2585 { 2586 2587 return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, 2588 lkpi_net_maxpps)); 2589 } 2590 2591 #if defined(__i386__) || defined(__amd64__) 2592 bool linux_cpu_has_clflush; 2593 #endif 2594 2595 static void 2596 linux_compat_init(void *arg) 2597 { 2598 struct sysctl_oid *rootoid; 2599 int i; 2600 2601 #if defined(__i386__) || defined(__amd64__) 2602 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2603 #endif 2604 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2605 2606 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2607 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2608 kobject_init(&linux_class_root, &linux_class_ktype); 2609 kobject_set_name(&linux_class_root, "class"); 2610 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2611 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2612 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2613 kobject_set_name(&linux_root_device.kobj, "device"); 2614 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2615 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2616 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2617 linux_root_device.bsddev = root_bus; 2618 linux_class_misc.name = "misc"; 2619 class_register(&linux_class_misc); 2620 INIT_LIST_HEAD(&pci_drivers); 2621 INIT_LIST_HEAD(&pci_devices); 2622 spin_lock_init(&pci_lock); 2623 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2624 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2625 LIST_INIT(&vmmaphead[i]); 2626 init_waitqueue_head(&linux_bit_waitq); 2627 init_waitqueue_head(&linux_var_waitq); 2628 2629 CPU_COPY(&all_cpus, &cpu_online_mask); 2630 } 2631 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2632 2633 static void 2634 linux_compat_uninit(void *arg) 2635 { 2636 linux_kobject_kfree_name(&linux_class_root); 2637 linux_kobject_kfree_name(&linux_root_device.kobj); 2638 linux_kobject_kfree_name(&linux_class_misc.kobj); 2639 2640 mtx_destroy(&vmmaplock); 2641 spin_lock_destroy(&pci_lock); 2642 rw_destroy(&linux_vma_lock); 2643 } 2644 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2645 2646 /* 2647 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2648 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2649 * used. Assert these types have the same size, else some parts of the 2650 * LinuxKPI may not work like expected: 2651 */ 2652 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2653