1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 #include "opt_stack.h" 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/sysctl.h> 38 #include <sys/proc.h> 39 #include <sys/sglist.h> 40 #include <sys/sleepqueue.h> 41 #include <sys/refcount.h> 42 #include <sys/lock.h> 43 #include <sys/mutex.h> 44 #include <sys/bus.h> 45 #include <sys/eventhandler.h> 46 #include <sys/fcntl.h> 47 #include <sys/file.h> 48 #include <sys/filio.h> 49 #include <sys/rwlock.h> 50 #include <sys/mman.h> 51 #include <sys/stack.h> 52 #include <sys/sysent.h> 53 #include <sys/time.h> 54 #include <sys/user.h> 55 56 #include <vm/vm.h> 57 #include <vm/pmap.h> 58 #include <vm/vm_object.h> 59 #include <vm/vm_page.h> 60 #include <vm/vm_pager.h> 61 62 #include <machine/stdarg.h> 63 64 #if defined(__i386__) || defined(__amd64__) 65 #include <machine/md_var.h> 66 #endif 67 68 #include <linux/kobject.h> 69 #include <linux/cpu.h> 70 #include <linux/device.h> 71 #include <linux/slab.h> 72 #include <linux/module.h> 73 #include <linux/moduleparam.h> 74 #include <linux/cdev.h> 75 #include <linux/file.h> 76 #include <linux/sysfs.h> 77 #include <linux/mm.h> 78 #include <linux/io.h> 79 #include <linux/vmalloc.h> 80 #include <linux/netdevice.h> 81 #include <linux/timer.h> 82 #include <linux/interrupt.h> 83 #include <linux/uaccess.h> 84 #include <linux/utsname.h> 85 #include <linux/list.h> 86 #include <linux/kthread.h> 87 #include <linux/kernel.h> 88 #include <linux/compat.h> 89 #include <linux/io-mapping.h> 90 #include <linux/poll.h> 91 #include <linux/smp.h> 92 #include <linux/wait_bit.h> 93 #include <linux/rcupdate.h> 94 #include <linux/interval_tree.h> 95 #include <linux/interval_tree_generic.h> 96 97 #if defined(__i386__) || defined(__amd64__) 98 #include <asm/smp.h> 99 #include <asm/processor.h> 100 #endif 101 102 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 103 "LinuxKPI parameters"); 104 105 int linuxkpi_debug; 106 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, 
CTLFLAG_RWTUN, 107 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 108 109 int linuxkpi_warn_dump_stack = 0; 110 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, 111 &linuxkpi_warn_dump_stack, 0, 112 "Set to enable stack traces from WARN_ON(). Clear to disable."); 113 114 static struct timeval lkpi_net_lastlog; 115 static int lkpi_net_curpps; 116 static int lkpi_net_maxpps = 99; 117 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 118 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 119 120 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 121 122 #include <linux/rbtree.h> 123 /* Undo Linux compat changes. */ 124 #undef RB_ROOT 125 #undef file 126 #undef cdev 127 #define RB_ROOT(head) (head)->rbh_root 128 129 static void linux_destroy_dev(struct linux_cdev *); 130 static void linux_cdev_deref(struct linux_cdev *ldev); 131 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 132 133 cpumask_t cpu_online_mask; 134 static cpumask_t static_single_cpu_mask[MAXCPU]; 135 struct kobject linux_class_root; 136 struct device linux_root_device; 137 struct class linux_class_misc; 138 struct list_head pci_drivers; 139 struct list_head pci_devices; 140 spinlock_t pci_lock; 141 struct uts_namespace init_uts_ns; 142 143 unsigned long linux_timer_hz_mask; 144 145 wait_queue_head_t linux_bit_waitq; 146 wait_queue_head_t linux_var_waitq; 147 148 int 149 panic_cmp(struct rb_node *one, struct rb_node *two) 150 { 151 panic("no cmp"); 152 } 153 154 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 155 156 #define START(node) ((node)->start) 157 #define LAST(node) ((node)->last) 158 159 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, 160 LAST,, lkpi_interval_tree) 161 162 static void 163 linux_device_release(struct device *dev) 164 { 165 pr_debug("linux_device_release: %s\n", dev_name(dev)); 166 kfree(dev); 167 } 168 169 static ssize_t 170 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 171 { 172 struct class_attribute *dattr; 173 ssize_t error; 174 175 dattr = container_of(attr, struct class_attribute, attr); 176 error = -EIO; 177 if (dattr->show) 178 error = dattr->show(container_of(kobj, struct class, kobj), 179 dattr, buf); 180 return (error); 181 } 182 183 static ssize_t 184 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 185 size_t count) 186 { 187 struct class_attribute *dattr; 188 ssize_t error; 189 190 dattr = container_of(attr, struct class_attribute, attr); 191 error = -EIO; 192 if (dattr->store) 193 error = dattr->store(container_of(kobj, struct class, kobj), 194 dattr, buf, count); 195 return (error); 196 } 197 198 static void 199 linux_class_release(struct kobject *kobj) 200 { 201 struct class *class; 202 203 class = container_of(kobj, struct class, kobj); 204 if (class->class_release) 205 class->class_release(class); 206 } 207 208 static const struct sysfs_ops linux_class_sysfs = { 209 .show = linux_class_show, 210 .store = linux_class_store, 211 }; 212 213 const struct kobj_type linux_class_ktype = { 214 .release = linux_class_release, 215 .sysfs_ops = &linux_class_sysfs 216 }; 217 218 static void 219 linux_dev_release(struct kobject *kobj) 220 { 221 struct device *dev; 222 223 dev = container_of(kobj, struct device, kobj); 224 /* This is the precedence defined by linux. 
*/ 225 if (dev->release) 226 dev->release(dev); 227 else if (dev->class && dev->class->dev_release) 228 dev->class->dev_release(dev); 229 } 230 231 static ssize_t 232 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 233 { 234 struct device_attribute *dattr; 235 ssize_t error; 236 237 dattr = container_of(attr, struct device_attribute, attr); 238 error = -EIO; 239 if (dattr->show) 240 error = dattr->show(container_of(kobj, struct device, kobj), 241 dattr, buf); 242 return (error); 243 } 244 245 static ssize_t 246 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 247 size_t count) 248 { 249 struct device_attribute *dattr; 250 ssize_t error; 251 252 dattr = container_of(attr, struct device_attribute, attr); 253 error = -EIO; 254 if (dattr->store) 255 error = dattr->store(container_of(kobj, struct device, kobj), 256 dattr, buf, count); 257 return (error); 258 } 259 260 static const struct sysfs_ops linux_dev_sysfs = { 261 .show = linux_dev_show, 262 .store = linux_dev_store, 263 }; 264 265 const struct kobj_type linux_dev_ktype = { 266 .release = linux_dev_release, 267 .sysfs_ops = &linux_dev_sysfs 268 }; 269 270 struct device * 271 device_create(struct class *class, struct device *parent, dev_t devt, 272 void *drvdata, const char *fmt, ...) 273 { 274 struct device *dev; 275 va_list args; 276 277 dev = kzalloc(sizeof(*dev), M_WAITOK); 278 dev->parent = parent; 279 dev->class = class; 280 dev->devt = devt; 281 dev->driver_data = drvdata; 282 dev->release = linux_device_release; 283 va_start(args, fmt); 284 kobject_set_name_vargs(&dev->kobj, fmt, args); 285 va_end(args); 286 device_register(dev); 287 288 return (dev); 289 } 290 291 struct device * 292 device_create_groups_vargs(struct class *class, struct device *parent, 293 dev_t devt, void *drvdata, const struct attribute_group **groups, 294 const char *fmt, va_list args) 295 { 296 struct device *dev = NULL; 297 int retval = -ENODEV; 298 299 if (class == NULL || IS_ERR(class)) 300 goto error; 301 302 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 303 if (!dev) { 304 retval = -ENOMEM; 305 goto error; 306 } 307 308 dev->devt = devt; 309 dev->class = class; 310 dev->parent = parent; 311 dev->groups = groups; 312 dev->release = device_create_release; 313 /* device_initialize() needs the class and parent to be set */ 314 device_initialize(dev); 315 dev_set_drvdata(dev, drvdata); 316 317 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 318 if (retval) 319 goto error; 320 321 retval = device_add(dev); 322 if (retval) 323 goto error; 324 325 return dev; 326 327 error: 328 put_device(dev); 329 return ERR_PTR(retval); 330 } 331 332 struct class * 333 class_create(struct module *owner, const char *name) 334 { 335 struct class *class; 336 int error; 337 338 class = kzalloc(sizeof(*class), M_WAITOK); 339 class->owner = owner; 340 class->name = name; 341 class->class_release = linux_class_kfree; 342 error = class_register(class); 343 if (error) { 344 kfree(class); 345 return (NULL); 346 } 347 348 return (class); 349 } 350 351 static void 352 linux_kq_lock(void *arg) 353 { 354 spinlock_t *s = arg; 355 356 spin_lock(s); 357 } 358 static void 359 linux_kq_unlock(void *arg) 360 { 361 spinlock_t *s = arg; 362 363 spin_unlock(s); 364 } 365 366 static void 367 linux_kq_assert_lock(void *arg, int what) 368 { 369 #ifdef INVARIANTS 370 spinlock_t *s = arg; 371 372 if (what == LA_LOCKED) 373 mtx_assert(&s->m, MA_OWNED); 374 else 375 mtx_assert(&s->m, MA_NOTOWNED); 376 #endif 377 } 378 379 static void 380 
linux_file_kqfilter_poll(struct linux_file *, int); 381 382 struct linux_file * 383 linux_file_alloc(void) 384 { 385 struct linux_file *filp; 386 387 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 388 389 /* set initial refcount */ 390 filp->f_count = 1; 391 392 /* setup fields needed by kqueue support */ 393 spin_lock_init(&filp->f_kqlock); 394 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 395 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 396 397 return (filp); 398 } 399 400 void 401 linux_file_free(struct linux_file *filp) 402 { 403 if (filp->_file == NULL) { 404 if (filp->f_op != NULL && filp->f_op->release != NULL) 405 filp->f_op->release(filp->f_vnode, filp); 406 if (filp->f_shmem != NULL) 407 vm_object_deallocate(filp->f_shmem); 408 kfree_rcu(filp, rcu); 409 } else { 410 /* 411 * The close method of the character device or file 412 * will free the linux_file structure: 413 */ 414 _fdrop(filp->_file, curthread); 415 } 416 } 417 418 struct linux_cdev * 419 cdev_alloc(void) 420 { 421 struct linux_cdev *cdev; 422 423 cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); 424 kobject_init(&cdev->kobj, &linux_cdev_ktype); 425 cdev->refs = 1; 426 return (cdev); 427 } 428 429 static int 430 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 431 vm_page_t *mres) 432 { 433 struct vm_area_struct *vmap; 434 435 vmap = linux_cdev_handle_find(vm_obj->handle); 436 437 MPASS(vmap != NULL); 438 MPASS(vmap->vm_private_data == vm_obj->handle); 439 440 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 441 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 442 vm_page_t page; 443 444 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 445 /* 446 * If the passed in result page is a fake 447 * page, update it with the new physical 448 * address. 449 */ 450 page = *mres; 451 vm_page_updatefake(page, paddr, vm_obj->memattr); 452 } else { 453 /* 454 * Replace the passed in "mres" page with our 455 * own fake page and free up the all of the 456 * original pages. 457 */ 458 VM_OBJECT_WUNLOCK(vm_obj); 459 page = vm_page_getfake(paddr, vm_obj->memattr); 460 VM_OBJECT_WLOCK(vm_obj); 461 462 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 463 *mres = page; 464 } 465 vm_page_valid(page); 466 return (VM_PAGER_OK); 467 } 468 return (VM_PAGER_FAIL); 469 } 470 471 static int 472 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 473 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 474 { 475 struct vm_area_struct *vmap; 476 int err; 477 478 /* get VM area structure */ 479 vmap = linux_cdev_handle_find(vm_obj->handle); 480 MPASS(vmap != NULL); 481 MPASS(vmap->vm_private_data == vm_obj->handle); 482 483 VM_OBJECT_WUNLOCK(vm_obj); 484 485 linux_set_current(curthread); 486 487 down_write(&vmap->vm_mm->mmap_sem); 488 if (unlikely(vmap->vm_ops == NULL)) { 489 err = VM_FAULT_SIGBUS; 490 } else { 491 struct vm_fault vmf; 492 493 /* fill out VM fault structure */ 494 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 495 vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; 496 vmf.pgoff = 0; 497 vmf.page = NULL; 498 vmf.vma = vmap; 499 500 vmap->vm_pfn_count = 0; 501 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 502 vmap->vm_obj = vm_obj; 503 504 err = vmap->vm_ops->fault(&vmf); 505 506 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 507 kern_yield(PRI_USER); 508 err = vmap->vm_ops->fault(&vmf); 509 } 510 } 511 512 /* translate return code */ 513 switch (err) { 514 case VM_FAULT_OOM: 515 err = VM_PAGER_AGAIN; 516 break; 517 case VM_FAULT_SIGBUS: 518 err = VM_PAGER_BAD; 519 break; 520 case VM_FAULT_NOPAGE: 521 /* 522 * By contract the fault handler will return having 523 * busied all the pages itself. If pidx is already 524 * found in the object, it will simply xbusy the first 525 * page and return with vm_pfn_count set to 1. 526 */ 527 *first = vmap->vm_pfn_first; 528 *last = *first + vmap->vm_pfn_count - 1; 529 err = VM_PAGER_OK; 530 break; 531 default: 532 err = VM_PAGER_ERROR; 533 break; 534 } 535 up_write(&vmap->vm_mm->mmap_sem); 536 VM_OBJECT_WLOCK(vm_obj); 537 return (err); 538 } 539 540 static struct rwlock linux_vma_lock; 541 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 542 TAILQ_HEAD_INITIALIZER(linux_vma_head); 543 544 static void 545 linux_cdev_handle_free(struct vm_area_struct *vmap) 546 { 547 /* Drop reference on vm_file */ 548 if (vmap->vm_file != NULL) 549 fput(vmap->vm_file); 550 551 /* Drop reference on mm_struct */ 552 mmput(vmap->vm_mm); 553 554 kfree(vmap); 555 } 556 557 static void 558 linux_cdev_handle_remove(struct vm_area_struct *vmap) 559 { 560 rw_wlock(&linux_vma_lock); 561 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 562 rw_wunlock(&linux_vma_lock); 563 } 564 565 static struct vm_area_struct * 566 linux_cdev_handle_find(void *handle) 567 { 568 struct vm_area_struct *vmap; 569 570 rw_rlock(&linux_vma_lock); 571 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 572 if (vmap->vm_private_data == handle) 573 break; 574 } 575 rw_runlock(&linux_vma_lock); 576 return (vmap); 577 } 578 579 static int 580 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 581 vm_ooffset_t foff, struct ucred *cred, u_short *color) 582 { 583 584 MPASS(linux_cdev_handle_find(handle) != NULL); 585 *color = 0; 586 return (0); 587 } 588 589 static void 590 linux_cdev_pager_dtor(void *handle) 591 { 592 const struct vm_operations_struct *vm_ops; 593 struct vm_area_struct *vmap; 594 595 vmap = linux_cdev_handle_find(handle); 596 MPASS(vmap != NULL); 597 598 /* 599 * Remove handle before calling close operation to prevent 600 * other threads from reusing the handle pointer. 
601 */ 602 linux_cdev_handle_remove(vmap); 603 604 down_write(&vmap->vm_mm->mmap_sem); 605 vm_ops = vmap->vm_ops; 606 if (likely(vm_ops != NULL)) 607 vm_ops->close(vmap); 608 up_write(&vmap->vm_mm->mmap_sem); 609 610 linux_cdev_handle_free(vmap); 611 } 612 613 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 614 { 615 /* OBJT_MGTDEVICE */ 616 .cdev_pg_populate = linux_cdev_pager_populate, 617 .cdev_pg_ctor = linux_cdev_pager_ctor, 618 .cdev_pg_dtor = linux_cdev_pager_dtor 619 }, 620 { 621 /* OBJT_DEVICE */ 622 .cdev_pg_fault = linux_cdev_pager_fault, 623 .cdev_pg_ctor = linux_cdev_pager_ctor, 624 .cdev_pg_dtor = linux_cdev_pager_dtor 625 }, 626 }; 627 628 int 629 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 630 unsigned long size) 631 { 632 vm_object_t obj; 633 vm_page_t m; 634 635 obj = vma->vm_obj; 636 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 637 return (-ENOTSUP); 638 VM_OBJECT_RLOCK(obj); 639 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 640 m != NULL && m->pindex < OFF_TO_IDX(address + size); 641 m = TAILQ_NEXT(m, listq)) 642 pmap_remove_all(m); 643 VM_OBJECT_RUNLOCK(obj); 644 return (0); 645 } 646 647 void 648 vma_set_file(struct vm_area_struct *vma, struct linux_file *file) 649 { 650 struct linux_file *tmp; 651 652 /* Changing an anonymous vma with this is illegal */ 653 get_file(file); 654 tmp = vma->vm_file; 655 vma->vm_file = file; 656 fput(tmp); 657 } 658 659 static struct file_operations dummy_ldev_ops = { 660 /* XXXKIB */ 661 }; 662 663 static struct linux_cdev dummy_ldev = { 664 .ops = &dummy_ldev_ops, 665 }; 666 667 #define LDEV_SI_DTR 0x0001 668 #define LDEV_SI_REF 0x0002 669 670 static void 671 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 672 struct linux_cdev **dev) 673 { 674 struct linux_cdev *ldev; 675 u_int siref; 676 677 ldev = filp->f_cdev; 678 *fop = filp->f_op; 679 if (ldev != NULL) { 680 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 681 refcount_acquire(&ldev->refs); 682 } else { 683 for (siref = ldev->siref;;) { 684 if ((siref & LDEV_SI_DTR) != 0) { 685 ldev = &dummy_ldev; 686 *fop = ldev->ops; 687 siref = ldev->siref; 688 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 689 } else if (atomic_fcmpset_int(&ldev->siref, 690 &siref, siref + LDEV_SI_REF)) { 691 break; 692 } 693 } 694 } 695 } 696 *dev = ldev; 697 } 698 699 static void 700 linux_drop_fop(struct linux_cdev *ldev) 701 { 702 703 if (ldev == NULL) 704 return; 705 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 706 linux_cdev_deref(ldev); 707 } else { 708 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 709 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 710 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 711 } 712 } 713 714 #define OPW(fp,td,code) ({ \ 715 struct file *__fpop; \ 716 __typeof(code) __retval; \ 717 \ 718 __fpop = (td)->td_fpop; \ 719 (td)->td_fpop = (fp); \ 720 __retval = (code); \ 721 (td)->td_fpop = __fpop; \ 722 __retval; \ 723 }) 724 725 static int 726 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 727 struct file *file) 728 { 729 struct linux_cdev *ldev; 730 struct linux_file *filp; 731 const struct file_operations *fop; 732 int error; 733 734 ldev = dev->si_drv1; 735 736 filp = linux_file_alloc(); 737 filp->f_dentry = &filp->f_dentry_store; 738 filp->f_op = ldev->ops; 739 filp->f_mode = file->f_flag; 740 filp->f_flags = file->f_flag; 741 filp->f_vnode = file->f_vnode; 742 filp->_file = file; 743 refcount_acquire(&ldev->refs); 744 filp->f_cdev = ldev; 745 746 linux_set_current(td); 747 
linux_get_fop(filp, &fop, &ldev); 748 749 if (fop->open != NULL) { 750 error = -fop->open(file->f_vnode, filp); 751 if (error != 0) { 752 linux_drop_fop(ldev); 753 linux_cdev_deref(filp->f_cdev); 754 kfree(filp); 755 return (error); 756 } 757 } 758 759 /* hold on to the vnode - used for fstat() */ 760 vhold(filp->f_vnode); 761 762 /* release the file from devfs */ 763 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 764 linux_drop_fop(ldev); 765 return (ENXIO); 766 } 767 768 #define LINUX_IOCTL_MIN_PTR 0x10000UL 769 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 770 771 static inline int 772 linux_remap_address(void **uaddr, size_t len) 773 { 774 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 775 776 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 777 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 778 struct task_struct *pts = current; 779 if (pts == NULL) { 780 *uaddr = NULL; 781 return (1); 782 } 783 784 /* compute data offset */ 785 uaddr_val -= LINUX_IOCTL_MIN_PTR; 786 787 /* check that length is within bounds */ 788 if ((len > IOCPARM_MAX) || 789 (uaddr_val + len) > pts->bsd_ioctl_len) { 790 *uaddr = NULL; 791 return (1); 792 } 793 794 /* re-add kernel buffer address */ 795 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 796 797 /* update address location */ 798 *uaddr = (void *)uaddr_val; 799 return (1); 800 } 801 return (0); 802 } 803 804 int 805 linux_copyin(const void *uaddr, void *kaddr, size_t len) 806 { 807 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 808 if (uaddr == NULL) 809 return (-EFAULT); 810 memcpy(kaddr, uaddr, len); 811 return (0); 812 } 813 return (-copyin(uaddr, kaddr, len)); 814 } 815 816 int 817 linux_copyout(const void *kaddr, void *uaddr, size_t len) 818 { 819 if (linux_remap_address(&uaddr, len)) { 820 if (uaddr == NULL) 821 return (-EFAULT); 822 memcpy(uaddr, kaddr, len); 823 return (0); 824 } 825 return (-copyout(kaddr, uaddr, len)); 826 } 827 828 size_t 829 linux_clear_user(void *_uaddr, size_t _len) 830 { 831 uint8_t *uaddr = _uaddr; 832 size_t len = _len; 833 834 /* make sure uaddr is aligned before going into the fast loop */ 835 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 836 if (subyte(uaddr, 0)) 837 return (_len); 838 uaddr++; 839 len--; 840 } 841 842 /* zero 8 bytes at a time */ 843 while (len > 7) { 844 #ifdef __LP64__ 845 if (suword64(uaddr, 0)) 846 return (_len); 847 #else 848 if (suword32(uaddr, 0)) 849 return (_len); 850 if (suword32(uaddr + 4, 0)) 851 return (_len); 852 #endif 853 uaddr += 8; 854 len -= 8; 855 } 856 857 /* zero fill end, if any */ 858 while (len > 0) { 859 if (subyte(uaddr, 0)) 860 return (_len); 861 uaddr++; 862 len--; 863 } 864 return (0); 865 } 866 867 int 868 linux_access_ok(const void *uaddr, size_t len) 869 { 870 uintptr_t saddr; 871 uintptr_t eaddr; 872 873 /* get start and end address */ 874 saddr = (uintptr_t)uaddr; 875 eaddr = (uintptr_t)uaddr + len; 876 877 /* verify addresses are valid for userspace */ 878 return ((saddr == eaddr) || 879 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 880 } 881 882 /* 883 * This function should return either EINTR or ERESTART depending on 884 * the signal type sent to this thread: 885 */ 886 static int 887 linux_get_error(struct task_struct *task, int error) 888 { 889 /* check for signal type interrupt code */ 890 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 891 error = -linux_schedule_get_interrupt_value(task); 892 if (error == 0) 893 error = EINTR; 894 } 895 return (error); 896 } 897 898 static int 899 linux_file_ioctl_sub(struct 
file *fp, struct linux_file *filp, 900 const struct file_operations *fop, u_long cmd, caddr_t data, 901 struct thread *td) 902 { 903 struct task_struct *task = current; 904 unsigned size; 905 int error; 906 907 size = IOCPARM_LEN(cmd); 908 /* refer to logic in sys_ioctl() */ 909 if (size > 0) { 910 /* 911 * Setup hint for linux_copyin() and linux_copyout(). 912 * 913 * Background: Linux code expects a user-space address 914 * while FreeBSD supplies a kernel-space address. 915 */ 916 task->bsd_ioctl_data = data; 917 task->bsd_ioctl_len = size; 918 data = (void *)LINUX_IOCTL_MIN_PTR; 919 } else { 920 /* fetch user-space pointer */ 921 data = *(void **)data; 922 } 923 #ifdef COMPAT_FREEBSD32 924 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 925 /* try the compat IOCTL handler first */ 926 if (fop->compat_ioctl != NULL) { 927 error = -OPW(fp, td, fop->compat_ioctl(filp, 928 cmd, (u_long)data)); 929 } else { 930 error = ENOTTY; 931 } 932 933 /* fallback to the regular IOCTL handler, if any */ 934 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 935 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 936 cmd, (u_long)data)); 937 } 938 } else 939 #endif 940 { 941 if (fop->unlocked_ioctl != NULL) { 942 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 943 cmd, (u_long)data)); 944 } else { 945 error = ENOTTY; 946 } 947 } 948 if (size > 0) { 949 task->bsd_ioctl_data = NULL; 950 task->bsd_ioctl_len = 0; 951 } 952 953 if (error == EWOULDBLOCK) { 954 /* update kqfilter status, if any */ 955 linux_file_kqfilter_poll(filp, 956 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 957 } else { 958 error = linux_get_error(task, error); 959 } 960 return (error); 961 } 962 963 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 964 965 /* 966 * This function atomically updates the poll wakeup state and returns 967 * the previous state at the time of update. 
968 */ 969 static uint8_t 970 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 971 { 972 int c, old; 973 974 c = v->counter; 975 976 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 977 c = old; 978 979 return (c); 980 } 981 982 static int 983 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 984 { 985 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 986 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 987 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 988 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 989 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 990 }; 991 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 992 993 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 994 case LINUX_FWQ_STATE_QUEUED: 995 linux_poll_wakeup(filp); 996 return (1); 997 default: 998 return (0); 999 } 1000 } 1001 1002 void 1003 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1004 { 1005 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1006 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1007 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1008 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1009 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1010 }; 1011 1012 /* check if we are called inside the select system call */ 1013 if (p == LINUX_POLL_TABLE_NORMAL) 1014 selrecord(curthread, &filp->f_selinfo); 1015 1016 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1017 case LINUX_FWQ_STATE_INIT: 1018 /* NOTE: file handles can only belong to one wait-queue */ 1019 filp->f_wait_queue.wqh = wqh; 1020 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1021 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1022 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1023 break; 1024 default: 1025 break; 1026 } 1027 } 1028 1029 static void 1030 linux_poll_wait_dequeue(struct linux_file *filp) 1031 { 1032 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1033 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1034 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1035 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1036 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1037 }; 1038 1039 seldrain(&filp->f_selinfo); 1040 1041 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1042 case LINUX_FWQ_STATE_NOT_READY: 1043 case LINUX_FWQ_STATE_QUEUED: 1044 case LINUX_FWQ_STATE_READY: 1045 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1046 break; 1047 default: 1048 break; 1049 } 1050 } 1051 1052 void 1053 linux_poll_wakeup(struct linux_file *filp) 1054 { 1055 /* this function should be NULL-safe */ 1056 if (filp == NULL) 1057 return; 1058 1059 selwakeup(&filp->f_selinfo); 1060 1061 spin_lock(&filp->f_kqlock); 1062 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1063 LINUX_KQ_FLAG_NEED_WRITE; 1064 1065 /* make sure the "knote" gets woken up */ 1066 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1067 spin_unlock(&filp->f_kqlock); 1068 } 1069 1070 static void 1071 linux_file_kqfilter_detach(struct knote *kn) 1072 { 1073 struct linux_file *filp = kn->kn_hook; 1074 1075 spin_lock(&filp->f_kqlock); 1076 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1077 spin_unlock(&filp->f_kqlock); 1078 } 1079 1080 static int 1081 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1082 { 1083 struct linux_file *filp = kn->kn_hook; 1084 1085 
mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1086 1087 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1088 } 1089 1090 static int 1091 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1092 { 1093 struct linux_file *filp = kn->kn_hook; 1094 1095 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1096 1097 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1098 } 1099 1100 static struct filterops linux_dev_kqfiltops_read = { 1101 .f_isfd = 1, 1102 .f_detach = linux_file_kqfilter_detach, 1103 .f_event = linux_file_kqfilter_read_event, 1104 }; 1105 1106 static struct filterops linux_dev_kqfiltops_write = { 1107 .f_isfd = 1, 1108 .f_detach = linux_file_kqfilter_detach, 1109 .f_event = linux_file_kqfilter_write_event, 1110 }; 1111 1112 static void 1113 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1114 { 1115 struct thread *td; 1116 const struct file_operations *fop; 1117 struct linux_cdev *ldev; 1118 int temp; 1119 1120 if ((filp->f_kqflags & kqflags) == 0) 1121 return; 1122 1123 td = curthread; 1124 1125 linux_get_fop(filp, &fop, &ldev); 1126 /* get the latest polling state */ 1127 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1128 linux_drop_fop(ldev); 1129 1130 spin_lock(&filp->f_kqlock); 1131 /* clear kqflags */ 1132 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1133 LINUX_KQ_FLAG_NEED_WRITE); 1134 /* update kqflags */ 1135 if ((temp & (POLLIN | POLLOUT)) != 0) { 1136 if ((temp & POLLIN) != 0) 1137 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1138 if ((temp & POLLOUT) != 0) 1139 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1140 1141 /* make sure the "knote" gets woken up */ 1142 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1143 } 1144 spin_unlock(&filp->f_kqlock); 1145 } 1146 1147 static int 1148 linux_file_kqfilter(struct file *file, struct knote *kn) 1149 { 1150 struct linux_file *filp; 1151 struct thread *td; 1152 int error; 1153 1154 td = curthread; 1155 filp = (struct linux_file *)file->f_data; 1156 filp->f_flags = file->f_flag; 1157 if (filp->f_op->poll == NULL) 1158 return (EINVAL); 1159 1160 spin_lock(&filp->f_kqlock); 1161 switch (kn->kn_filter) { 1162 case EVFILT_READ: 1163 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1164 kn->kn_fop = &linux_dev_kqfiltops_read; 1165 kn->kn_hook = filp; 1166 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1167 error = 0; 1168 break; 1169 case EVFILT_WRITE: 1170 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1171 kn->kn_fop = &linux_dev_kqfiltops_write; 1172 kn->kn_hook = filp; 1173 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1174 error = 0; 1175 break; 1176 default: 1177 error = EINVAL; 1178 break; 1179 } 1180 spin_unlock(&filp->f_kqlock); 1181 1182 if (error == 0) { 1183 linux_set_current(td); 1184 1185 /* update kqfilter status, if any */ 1186 linux_file_kqfilter_poll(filp, 1187 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1188 } 1189 return (error); 1190 } 1191 1192 static int 1193 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1194 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1195 int nprot, bool is_shared, struct thread *td) 1196 { 1197 struct task_struct *task; 1198 struct vm_area_struct *vmap; 1199 struct mm_struct *mm; 1200 struct linux_file *filp; 1201 vm_memattr_t attr; 1202 int error; 1203 1204 filp = (struct linux_file *)fp->f_data; 1205 filp->f_flags = fp->f_flag; 1206 1207 if (fop->mmap == NULL) 1208 return (EOPNOTSUPP); 1209 1210 linux_set_current(td); 1211 1212 /* 1213 * The same VM object might be shared by multiple processes 1214 * and 
the mm_struct is usually freed when a process exits. 1215 * 1216 * The atomic reference below makes sure the mm_struct is 1217 * available as long as the vmap is in the linux_vma_head. 1218 */ 1219 task = current; 1220 mm = task->mm; 1221 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1222 return (EINVAL); 1223 1224 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1225 vmap->vm_start = 0; 1226 vmap->vm_end = size; 1227 vmap->vm_pgoff = *offset / PAGE_SIZE; 1228 vmap->vm_pfn = 0; 1229 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1230 if (is_shared) 1231 vmap->vm_flags |= VM_SHARED; 1232 vmap->vm_ops = NULL; 1233 vmap->vm_file = get_file(filp); 1234 vmap->vm_mm = mm; 1235 1236 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1237 error = linux_get_error(task, EINTR); 1238 } else { 1239 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1240 error = linux_get_error(task, error); 1241 up_write(&vmap->vm_mm->mmap_sem); 1242 } 1243 1244 if (error != 0) { 1245 linux_cdev_handle_free(vmap); 1246 return (error); 1247 } 1248 1249 attr = pgprot2cachemode(vmap->vm_page_prot); 1250 1251 if (vmap->vm_ops != NULL) { 1252 struct vm_area_struct *ptr; 1253 void *vm_private_data; 1254 bool vm_no_fault; 1255 1256 if (vmap->vm_ops->open == NULL || 1257 vmap->vm_ops->close == NULL || 1258 vmap->vm_private_data == NULL) { 1259 /* free allocated VM area struct */ 1260 linux_cdev_handle_free(vmap); 1261 return (EINVAL); 1262 } 1263 1264 vm_private_data = vmap->vm_private_data; 1265 1266 rw_wlock(&linux_vma_lock); 1267 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1268 if (ptr->vm_private_data == vm_private_data) 1269 break; 1270 } 1271 /* check if there is an existing VM area struct */ 1272 if (ptr != NULL) { 1273 /* check if the VM area structure is invalid */ 1274 if (ptr->vm_ops == NULL || 1275 ptr->vm_ops->open == NULL || 1276 ptr->vm_ops->close == NULL) { 1277 error = ESTALE; 1278 vm_no_fault = 1; 1279 } else { 1280 error = EEXIST; 1281 vm_no_fault = (ptr->vm_ops->fault == NULL); 1282 } 1283 } else { 1284 /* insert VM area structure into list */ 1285 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1286 error = 0; 1287 vm_no_fault = (vmap->vm_ops->fault == NULL); 1288 } 1289 rw_wunlock(&linux_vma_lock); 1290 1291 if (error != 0) { 1292 /* free allocated VM area struct */ 1293 linux_cdev_handle_free(vmap); 1294 /* check for stale VM area struct */ 1295 if (error != EEXIST) 1296 return (error); 1297 } 1298 1299 /* check if there is no fault handler */ 1300 if (vm_no_fault) { 1301 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1302 &linux_cdev_pager_ops[1], size, nprot, *offset, 1303 td->td_ucred); 1304 } else { 1305 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1306 &linux_cdev_pager_ops[0], size, nprot, *offset, 1307 td->td_ucred); 1308 } 1309 1310 /* check if allocating the VM object failed */ 1311 if (*object == NULL) { 1312 if (error == 0) { 1313 /* remove VM area struct from list */ 1314 linux_cdev_handle_remove(vmap); 1315 /* free allocated VM area struct */ 1316 linux_cdev_handle_free(vmap); 1317 } 1318 return (EINVAL); 1319 } 1320 } else { 1321 struct sglist *sg; 1322 1323 sg = sglist_alloc(1, M_WAITOK); 1324 sglist_append_phys(sg, 1325 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1326 1327 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1328 nprot, 0, td->td_ucred); 1329 1330 linux_cdev_handle_free(vmap); 1331 1332 if (*object == NULL) { 1333 sglist_free(sg); 1334 return (EINVAL); 1335 } 1336 } 1337 1338 if (attr != VM_MEMATTR_DEFAULT) { 
1339 VM_OBJECT_WLOCK(*object); 1340 vm_object_set_memattr(*object, attr); 1341 VM_OBJECT_WUNLOCK(*object); 1342 } 1343 *offset = 0; 1344 return (0); 1345 } 1346 1347 struct cdevsw linuxcdevsw = { 1348 .d_version = D_VERSION, 1349 .d_fdopen = linux_dev_fdopen, 1350 .d_name = "lkpidev", 1351 }; 1352 1353 static int 1354 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1355 int flags, struct thread *td) 1356 { 1357 struct linux_file *filp; 1358 const struct file_operations *fop; 1359 struct linux_cdev *ldev; 1360 ssize_t bytes; 1361 int error; 1362 1363 error = 0; 1364 filp = (struct linux_file *)file->f_data; 1365 filp->f_flags = file->f_flag; 1366 /* XXX no support for I/O vectors currently */ 1367 if (uio->uio_iovcnt != 1) 1368 return (EOPNOTSUPP); 1369 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1370 return (EINVAL); 1371 linux_set_current(td); 1372 linux_get_fop(filp, &fop, &ldev); 1373 if (fop->read != NULL) { 1374 bytes = OPW(file, td, fop->read(filp, 1375 uio->uio_iov->iov_base, 1376 uio->uio_iov->iov_len, &uio->uio_offset)); 1377 if (bytes >= 0) { 1378 uio->uio_iov->iov_base = 1379 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1380 uio->uio_iov->iov_len -= bytes; 1381 uio->uio_resid -= bytes; 1382 } else { 1383 error = linux_get_error(current, -bytes); 1384 } 1385 } else 1386 error = ENXIO; 1387 1388 /* update kqfilter status, if any */ 1389 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1390 linux_drop_fop(ldev); 1391 1392 return (error); 1393 } 1394 1395 static int 1396 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1397 int flags, struct thread *td) 1398 { 1399 struct linux_file *filp; 1400 const struct file_operations *fop; 1401 struct linux_cdev *ldev; 1402 ssize_t bytes; 1403 int error; 1404 1405 filp = (struct linux_file *)file->f_data; 1406 filp->f_flags = file->f_flag; 1407 /* XXX no support for I/O vectors currently */ 1408 if (uio->uio_iovcnt != 1) 1409 return (EOPNOTSUPP); 1410 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1411 return (EINVAL); 1412 linux_set_current(td); 1413 linux_get_fop(filp, &fop, &ldev); 1414 if (fop->write != NULL) { 1415 bytes = OPW(file, td, fop->write(filp, 1416 uio->uio_iov->iov_base, 1417 uio->uio_iov->iov_len, &uio->uio_offset)); 1418 if (bytes >= 0) { 1419 uio->uio_iov->iov_base = 1420 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1421 uio->uio_iov->iov_len -= bytes; 1422 uio->uio_resid -= bytes; 1423 error = 0; 1424 } else { 1425 error = linux_get_error(current, -bytes); 1426 } 1427 } else 1428 error = ENXIO; 1429 1430 /* update kqfilter status, if any */ 1431 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1432 1433 linux_drop_fop(ldev); 1434 1435 return (error); 1436 } 1437 1438 static int 1439 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1440 struct thread *td) 1441 { 1442 struct linux_file *filp; 1443 const struct file_operations *fop; 1444 struct linux_cdev *ldev; 1445 int revents; 1446 1447 filp = (struct linux_file *)file->f_data; 1448 filp->f_flags = file->f_flag; 1449 linux_set_current(td); 1450 linux_get_fop(filp, &fop, &ldev); 1451 if (fop->poll != NULL) { 1452 revents = OPW(file, td, fop->poll(filp, 1453 LINUX_POLL_TABLE_NORMAL)) & events; 1454 } else { 1455 revents = 0; 1456 } 1457 linux_drop_fop(ldev); 1458 return (revents); 1459 } 1460 1461 static int 1462 linux_file_close(struct file *file, struct thread *td) 1463 { 1464 struct linux_file *filp; 1465 int (*release)(struct inode *, struct linux_file *); 1466 const struct 
file_operations *fop; 1467 struct linux_cdev *ldev; 1468 int error; 1469 1470 filp = (struct linux_file *)file->f_data; 1471 1472 KASSERT(file_count(filp) == 0, 1473 ("File refcount(%d) is not zero", file_count(filp))); 1474 1475 if (td == NULL) 1476 td = curthread; 1477 1478 error = 0; 1479 filp->f_flags = file->f_flag; 1480 linux_set_current(td); 1481 linux_poll_wait_dequeue(filp); 1482 linux_get_fop(filp, &fop, &ldev); 1483 /* 1484 * Always use the real release function, if any, to avoid 1485 * leaking device resources: 1486 */ 1487 release = filp->f_op->release; 1488 if (release != NULL) 1489 error = -OPW(file, td, release(filp->f_vnode, filp)); 1490 funsetown(&filp->f_sigio); 1491 if (filp->f_vnode != NULL) 1492 vdrop(filp->f_vnode); 1493 linux_drop_fop(ldev); 1494 ldev = filp->f_cdev; 1495 if (ldev != NULL) 1496 linux_cdev_deref(ldev); 1497 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1498 kfree(filp); 1499 1500 return (error); 1501 } 1502 1503 static int 1504 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1505 struct thread *td) 1506 { 1507 struct linux_file *filp; 1508 const struct file_operations *fop; 1509 struct linux_cdev *ldev; 1510 struct fiodgname_arg *fgn; 1511 const char *p; 1512 int error, i; 1513 1514 error = 0; 1515 filp = (struct linux_file *)fp->f_data; 1516 filp->f_flags = fp->f_flag; 1517 linux_get_fop(filp, &fop, &ldev); 1518 1519 linux_set_current(td); 1520 switch (cmd) { 1521 case FIONBIO: 1522 break; 1523 case FIOASYNC: 1524 if (fop->fasync == NULL) 1525 break; 1526 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1527 break; 1528 case FIOSETOWN: 1529 error = fsetown(*(int *)data, &filp->f_sigio); 1530 if (error == 0) { 1531 if (fop->fasync == NULL) 1532 break; 1533 error = -OPW(fp, td, fop->fasync(0, filp, 1534 fp->f_flag & FASYNC)); 1535 } 1536 break; 1537 case FIOGETOWN: 1538 *(int *)data = fgetown(&filp->f_sigio); 1539 break; 1540 case FIODGNAME: 1541 #ifdef COMPAT_FREEBSD32 1542 case FIODGNAME_32: 1543 #endif 1544 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1545 error = ENXIO; 1546 break; 1547 } 1548 fgn = data; 1549 p = devtoname(filp->f_cdev->cdev); 1550 i = strlen(p) + 1; 1551 if (i > fgn->len) { 1552 error = EINVAL; 1553 break; 1554 } 1555 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1556 break; 1557 default: 1558 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1559 break; 1560 } 1561 linux_drop_fop(ldev); 1562 return (error); 1563 } 1564 1565 static int 1566 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1567 vm_prot_t maxprot, int flags, struct file *fp, 1568 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1569 { 1570 /* 1571 * Character devices do not provide private mappings 1572 * of any kind: 1573 */ 1574 if ((maxprot & VM_PROT_WRITE) == 0 && 1575 (prot & VM_PROT_WRITE) != 0) 1576 return (EACCES); 1577 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1578 return (EINVAL); 1579 1580 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1581 (int)prot, (flags & MAP_SHARED) ? 
true : false, td)); 1582 } 1583 1584 static int 1585 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1586 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1587 struct thread *td) 1588 { 1589 struct linux_file *filp; 1590 const struct file_operations *fop; 1591 struct linux_cdev *ldev; 1592 struct mount *mp; 1593 struct vnode *vp; 1594 vm_object_t object; 1595 vm_prot_t maxprot; 1596 int error; 1597 1598 filp = (struct linux_file *)fp->f_data; 1599 1600 vp = filp->f_vnode; 1601 if (vp == NULL) 1602 return (EOPNOTSUPP); 1603 1604 /* 1605 * Ensure that file and memory protections are 1606 * compatible. 1607 */ 1608 mp = vp->v_mount; 1609 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1610 maxprot = VM_PROT_NONE; 1611 if ((prot & VM_PROT_EXECUTE) != 0) 1612 return (EACCES); 1613 } else 1614 maxprot = VM_PROT_EXECUTE; 1615 if ((fp->f_flag & FREAD) != 0) 1616 maxprot |= VM_PROT_READ; 1617 else if ((prot & VM_PROT_READ) != 0) 1618 return (EACCES); 1619 1620 /* 1621 * If we are sharing potential changes via MAP_SHARED and we 1622 * are trying to get write permission although we opened it 1623 * without asking for it, bail out. 1624 * 1625 * Note that most character devices always share mappings. 1626 * 1627 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1628 * requests rather than doing it here. 1629 */ 1630 if ((flags & MAP_SHARED) != 0) { 1631 if ((fp->f_flag & FWRITE) != 0) 1632 maxprot |= VM_PROT_WRITE; 1633 else if ((prot & VM_PROT_WRITE) != 0) 1634 return (EACCES); 1635 } 1636 maxprot &= cap_maxprot; 1637 1638 linux_get_fop(filp, &fop, &ldev); 1639 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1640 &foff, fop, &object); 1641 if (error != 0) 1642 goto out; 1643 1644 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1645 foff, FALSE, td); 1646 if (error != 0) 1647 vm_object_deallocate(object); 1648 out: 1649 linux_drop_fop(ldev); 1650 return (error); 1651 } 1652 1653 static int 1654 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 1655 { 1656 struct linux_file *filp; 1657 struct vnode *vp; 1658 int error; 1659 1660 filp = (struct linux_file *)fp->f_data; 1661 if (filp->f_vnode == NULL) 1662 return (EOPNOTSUPP); 1663 1664 vp = filp->f_vnode; 1665 1666 vn_lock(vp, LK_SHARED | LK_RETRY); 1667 error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); 1668 VOP_UNLOCK(vp); 1669 1670 return (error); 1671 } 1672 1673 static int 1674 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1675 struct filedesc *fdp) 1676 { 1677 struct linux_file *filp; 1678 struct vnode *vp; 1679 int error; 1680 1681 filp = fp->f_data; 1682 vp = filp->f_vnode; 1683 if (vp == NULL) { 1684 error = 0; 1685 kif->kf_type = KF_TYPE_DEV; 1686 } else { 1687 vref(vp); 1688 FILEDESC_SUNLOCK(fdp); 1689 error = vn_fill_kinfo_vnode(vp, kif); 1690 vrele(vp); 1691 kif->kf_type = KF_TYPE_VNODE; 1692 FILEDESC_SLOCK(fdp); 1693 } 1694 return (error); 1695 } 1696 1697 unsigned int 1698 linux_iminor(struct inode *inode) 1699 { 1700 struct linux_cdev *ldev; 1701 1702 if (inode == NULL || inode->v_rdev == NULL || 1703 inode->v_rdev->si_devsw != &linuxcdevsw) 1704 return (-1U); 1705 ldev = inode->v_rdev->si_drv1; 1706 if (ldev == NULL) 1707 return (-1U); 1708 1709 return (minor(ldev->dev)); 1710 } 1711 1712 struct fileops linuxfileops = { 1713 .fo_read = linux_file_read, 1714 .fo_write = linux_file_write, 1715 .fo_truncate = invfo_truncate, 1716 .fo_kqfilter = linux_file_kqfilter, 1717 .fo_stat = 
linux_file_stat, 1718 .fo_fill_kinfo = linux_file_fill_kinfo, 1719 .fo_poll = linux_file_poll, 1720 .fo_close = linux_file_close, 1721 .fo_ioctl = linux_file_ioctl, 1722 .fo_mmap = linux_file_mmap, 1723 .fo_chmod = invfo_chmod, 1724 .fo_chown = invfo_chown, 1725 .fo_sendfile = invfo_sendfile, 1726 .fo_flags = DFLAG_PASSABLE, 1727 }; 1728 1729 /* 1730 * Hash of vmmap addresses. This is infrequently accessed and does not 1731 * need to be particularly large. This is done because we must store the 1732 * caller's idea of the map size to properly unmap. 1733 */ 1734 struct vmmap { 1735 LIST_ENTRY(vmmap) vm_next; 1736 void *vm_addr; 1737 unsigned long vm_size; 1738 }; 1739 1740 struct vmmaphd { 1741 struct vmmap *lh_first; 1742 }; 1743 #define VMMAP_HASH_SIZE 64 1744 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1745 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1746 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1747 static struct mtx vmmaplock; 1748 1749 static void 1750 vmmap_add(void *addr, unsigned long size) 1751 { 1752 struct vmmap *vmmap; 1753 1754 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1755 mtx_lock(&vmmaplock); 1756 vmmap->vm_size = size; 1757 vmmap->vm_addr = addr; 1758 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1759 mtx_unlock(&vmmaplock); 1760 } 1761 1762 static struct vmmap * 1763 vmmap_remove(void *addr) 1764 { 1765 struct vmmap *vmmap; 1766 1767 mtx_lock(&vmmaplock); 1768 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1769 if (vmmap->vm_addr == addr) 1770 break; 1771 if (vmmap) 1772 LIST_REMOVE(vmmap, vm_next); 1773 mtx_unlock(&vmmaplock); 1774 1775 return (vmmap); 1776 } 1777 1778 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1779 void * 1780 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1781 { 1782 void *addr; 1783 1784 addr = pmap_mapdev_attr(phys_addr, size, attr); 1785 if (addr == NULL) 1786 return (NULL); 1787 vmmap_add(addr, size); 1788 1789 return (addr); 1790 } 1791 #endif 1792 1793 void 1794 iounmap(void *addr) 1795 { 1796 struct vmmap *vmmap; 1797 1798 vmmap = vmmap_remove(addr); 1799 if (vmmap == NULL) 1800 return; 1801 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1802 pmap_unmapdev(addr, vmmap->vm_size); 1803 #endif 1804 kfree(vmmap); 1805 } 1806 1807 void * 1808 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1809 { 1810 vm_offset_t off; 1811 size_t size; 1812 1813 size = count * PAGE_SIZE; 1814 off = kva_alloc(size); 1815 if (off == 0) 1816 return (NULL); 1817 vmmap_add((void *)off, size); 1818 pmap_qenter(off, pages, count); 1819 1820 return ((void *)off); 1821 } 1822 1823 void 1824 vunmap(void *addr) 1825 { 1826 struct vmmap *vmmap; 1827 1828 vmmap = vmmap_remove(addr); 1829 if (vmmap == NULL) 1830 return; 1831 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1832 kva_free((vm_offset_t)addr, vmmap->vm_size); 1833 kfree(vmmap); 1834 } 1835 1836 static char * 1837 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1838 { 1839 unsigned int len; 1840 char *p; 1841 va_list aq; 1842 1843 va_copy(aq, ap); 1844 len = vsnprintf(NULL, 0, fmt, aq); 1845 va_end(aq); 1846 1847 if (dev != NULL) 1848 p = devm_kmalloc(dev, len + 1, gfp); 1849 else 1850 p = kmalloc(len + 1, gfp); 1851 if (p != NULL) 1852 vsnprintf(p, len + 1, fmt, ap); 1853 1854 return (p); 1855 } 1856 1857 char * 1858 
kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1859 { 1860 1861 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1862 } 1863 1864 char * 1865 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1866 { 1867 va_list ap; 1868 char *p; 1869 1870 va_start(ap, fmt); 1871 p = devm_kvasprintf(dev, gfp, fmt, ap); 1872 va_end(ap); 1873 1874 return (p); 1875 } 1876 1877 char * 1878 kasprintf(gfp_t gfp, const char *fmt, ...) 1879 { 1880 va_list ap; 1881 char *p; 1882 1883 va_start(ap, fmt); 1884 p = kvasprintf(gfp, fmt, ap); 1885 va_end(ap); 1886 1887 return (p); 1888 } 1889 1890 static void 1891 linux_timer_callback_wrapper(void *context) 1892 { 1893 struct timer_list *timer; 1894 1895 timer = context; 1896 1897 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1898 /* try again later */ 1899 callout_reset(&timer->callout, 1, 1900 &linux_timer_callback_wrapper, timer); 1901 return; 1902 } 1903 1904 timer->function(timer->data); 1905 } 1906 1907 int 1908 mod_timer(struct timer_list *timer, int expires) 1909 { 1910 int ret; 1911 1912 timer->expires = expires; 1913 ret = callout_reset(&timer->callout, 1914 linux_timer_jiffies_until(expires), 1915 &linux_timer_callback_wrapper, timer); 1916 1917 MPASS(ret == 0 || ret == 1); 1918 1919 return (ret == 1); 1920 } 1921 1922 void 1923 add_timer(struct timer_list *timer) 1924 { 1925 1926 callout_reset(&timer->callout, 1927 linux_timer_jiffies_until(timer->expires), 1928 &linux_timer_callback_wrapper, timer); 1929 } 1930 1931 void 1932 add_timer_on(struct timer_list *timer, int cpu) 1933 { 1934 1935 callout_reset_on(&timer->callout, 1936 linux_timer_jiffies_until(timer->expires), 1937 &linux_timer_callback_wrapper, timer, cpu); 1938 } 1939 1940 int 1941 del_timer(struct timer_list *timer) 1942 { 1943 1944 if (callout_stop(&(timer)->callout) == -1) 1945 return (0); 1946 return (1); 1947 } 1948 1949 int 1950 del_timer_sync(struct timer_list *timer) 1951 { 1952 1953 if (callout_drain(&(timer)->callout) == -1) 1954 return (0); 1955 return (1); 1956 } 1957 1958 int 1959 timer_delete_sync(struct timer_list *timer) 1960 { 1961 1962 return (del_timer_sync(timer)); 1963 } 1964 1965 int 1966 timer_shutdown_sync(struct timer_list *timer) 1967 { 1968 1969 return (del_timer_sync(timer)); 1970 } 1971 1972 /* greatest common divisor, Euclid equation */ 1973 static uint64_t 1974 lkpi_gcd_64(uint64_t a, uint64_t b) 1975 { 1976 uint64_t an; 1977 uint64_t bn; 1978 1979 while (b != 0) { 1980 an = b; 1981 bn = a % b; 1982 a = an; 1983 b = bn; 1984 } 1985 return (a); 1986 } 1987 1988 uint64_t lkpi_nsec2hz_rem; 1989 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 1990 uint64_t lkpi_nsec2hz_max; 1991 1992 uint64_t lkpi_usec2hz_rem; 1993 uint64_t lkpi_usec2hz_div = 1000000ULL; 1994 uint64_t lkpi_usec2hz_max; 1995 1996 uint64_t lkpi_msec2hz_rem; 1997 uint64_t lkpi_msec2hz_div = 1000ULL; 1998 uint64_t lkpi_msec2hz_max; 1999 2000 static void 2001 linux_timer_init(void *arg) 2002 { 2003 uint64_t gcd; 2004 2005 /* 2006 * Compute an internal HZ value which can divide 2**32 to 2007 * avoid timer rounding problems when the tick value wraps 2008 * around 2**32: 2009 */ 2010 linux_timer_hz_mask = 1; 2011 while (linux_timer_hz_mask < (unsigned long)hz) 2012 linux_timer_hz_mask *= 2; 2013 linux_timer_hz_mask--; 2014 2015 /* compute some internal constants */ 2016 2017 lkpi_nsec2hz_rem = hz; 2018 lkpi_usec2hz_rem = hz; 2019 lkpi_msec2hz_rem = hz; 2020 2021 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2022 lkpi_nsec2hz_rem /= gcd; 2023 lkpi_nsec2hz_div /= gcd; 2024 
lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2025 2026 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2027 lkpi_usec2hz_rem /= gcd; 2028 lkpi_usec2hz_div /= gcd; 2029 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2030 2031 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2032 lkpi_msec2hz_rem /= gcd; 2033 lkpi_msec2hz_div /= gcd; 2034 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2035 } 2036 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2037 2038 void 2039 linux_complete_common(struct completion *c, int all) 2040 { 2041 int wakeup_swapper; 2042 2043 sleepq_lock(c); 2044 if (all) { 2045 c->done = UINT_MAX; 2046 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2047 } else { 2048 if (c->done != UINT_MAX) 2049 c->done++; 2050 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2051 } 2052 sleepq_release(c); 2053 if (wakeup_swapper) 2054 kick_proc0(); 2055 } 2056 2057 /* 2058 * Indefinite wait for done != 0 with or without signals. 2059 */ 2060 int 2061 linux_wait_for_common(struct completion *c, int flags) 2062 { 2063 struct task_struct *task; 2064 int error; 2065 2066 if (SCHEDULER_STOPPED()) 2067 return (0); 2068 2069 task = current; 2070 2071 if (flags != 0) 2072 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2073 else 2074 flags = SLEEPQ_SLEEP; 2075 error = 0; 2076 for (;;) { 2077 sleepq_lock(c); 2078 if (c->done) 2079 break; 2080 sleepq_add(c, NULL, "completion", flags, 0); 2081 if (flags & SLEEPQ_INTERRUPTIBLE) { 2082 DROP_GIANT(); 2083 error = -sleepq_wait_sig(c, 0); 2084 PICKUP_GIANT(); 2085 if (error != 0) { 2086 linux_schedule_save_interrupt_value(task, error); 2087 error = -ERESTARTSYS; 2088 goto intr; 2089 } 2090 } else { 2091 DROP_GIANT(); 2092 sleepq_wait(c, 0); 2093 PICKUP_GIANT(); 2094 } 2095 } 2096 if (c->done != UINT_MAX) 2097 c->done--; 2098 sleepq_release(c); 2099 2100 intr: 2101 return (error); 2102 } 2103 2104 /* 2105 * Time limited wait for done != 0 with or without signals. 
2106 */ 2107 int 2108 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2109 { 2110 struct task_struct *task; 2111 int end = jiffies + timeout; 2112 int error; 2113 2114 if (SCHEDULER_STOPPED()) 2115 return (0); 2116 2117 task = current; 2118 2119 if (flags != 0) 2120 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2121 else 2122 flags = SLEEPQ_SLEEP; 2123 2124 for (;;) { 2125 sleepq_lock(c); 2126 if (c->done) 2127 break; 2128 sleepq_add(c, NULL, "completion", flags, 0); 2129 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2130 2131 DROP_GIANT(); 2132 if (flags & SLEEPQ_INTERRUPTIBLE) 2133 error = -sleepq_timedwait_sig(c, 0); 2134 else 2135 error = -sleepq_timedwait(c, 0); 2136 PICKUP_GIANT(); 2137 2138 if (error != 0) { 2139 /* check for timeout */ 2140 if (error == -EWOULDBLOCK) { 2141 error = 0; /* timeout */ 2142 } else { 2143 /* signal happened */ 2144 linux_schedule_save_interrupt_value(task, error); 2145 error = -ERESTARTSYS; 2146 } 2147 goto done; 2148 } 2149 } 2150 if (c->done != UINT_MAX) 2151 c->done--; 2152 sleepq_release(c); 2153 2154 /* return how many jiffies are left */ 2155 error = linux_timer_jiffies_until(end); 2156 done: 2157 return (error); 2158 } 2159 2160 int 2161 linux_try_wait_for_completion(struct completion *c) 2162 { 2163 int isdone; 2164 2165 sleepq_lock(c); 2166 isdone = (c->done != 0); 2167 if (c->done != 0 && c->done != UINT_MAX) 2168 c->done--; 2169 sleepq_release(c); 2170 return (isdone); 2171 } 2172 2173 int 2174 linux_completion_done(struct completion *c) 2175 { 2176 int isdone; 2177 2178 sleepq_lock(c); 2179 isdone = (c->done != 0); 2180 sleepq_release(c); 2181 return (isdone); 2182 } 2183 2184 static void 2185 linux_cdev_deref(struct linux_cdev *ldev) 2186 { 2187 if (refcount_release(&ldev->refs) && 2188 ldev->kobj.ktype == &linux_cdev_ktype) 2189 kfree(ldev); 2190 } 2191 2192 static void 2193 linux_cdev_release(struct kobject *kobj) 2194 { 2195 struct linux_cdev *cdev; 2196 struct kobject *parent; 2197 2198 cdev = container_of(kobj, struct linux_cdev, kobj); 2199 parent = kobj->parent; 2200 linux_destroy_dev(cdev); 2201 linux_cdev_deref(cdev); 2202 kobject_put(parent); 2203 } 2204 2205 static void 2206 linux_cdev_static_release(struct kobject *kobj) 2207 { 2208 struct cdev *cdev; 2209 struct linux_cdev *ldev; 2210 2211 ldev = container_of(kobj, struct linux_cdev, kobj); 2212 cdev = ldev->cdev; 2213 if (cdev != NULL) { 2214 destroy_dev(cdev); 2215 ldev->cdev = NULL; 2216 } 2217 kobject_put(kobj->parent); 2218 } 2219 2220 int 2221 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2222 { 2223 int ret; 2224 2225 if (dev->devt != 0) { 2226 /* Set parent kernel object. 
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux, we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function.
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}
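/*
 * Illustrative sketch (not compiled): a driver-side consumer of
 * register_netdevice_notifier() above.  The callback follows the usual Linux
 * notifier convention and only touches fields that the handlers above fill
 * in; the driver names themselves are hypothetical.
 */
#if 0
static int
example_netdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct netdev_notifier_info *ni = data;

	if (event == NETDEV_UP)
		pr_debug("interface %p is up\n", ni->ifp);
	return (NOTIFY_DONE);
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/*
 * At attach: register_netdevice_notifier(&example_netdev_nb);
 * At detach: unregister_netdevice_notifier(&example_netdev_nb);
 */
#endif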
int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(const void *d1, const void *d2, void *priv)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}
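/*
 * Illustrative sketch (not compiled): sorting a driver-private list with the
 * list_sort() shim above.  The element type and key are hypothetical; the
 * comparison follows the negative/zero/positive convention that the
 * qsort_r() backend expects.
 */
#if 0
struct example_elem {
	struct list_head entry;
	int key;
};

static int
example_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct example_elem *ea = list_entry(a, struct example_elem, entry);
	struct example_elem *eb = list_entry(b, struct example_elem, entry);

	return (ea->key - eb->key);
}

static void
example_sort(struct list_head *head)
{
	/* No private comparison state is needed here, so pass NULL. */
	list_sort(NULL, head, example_cmp);
}
#endif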
#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
	struct io_mapping *mapping;

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (mapping == NULL)
		return (NULL);
	return (io_mapping_init_wc(mapping, base, size));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
struct cpuinfo_x86 boot_cpu_data;
struct cpuinfo_x86 __cpu_data[MAXCPU];
#endif

cpumask_t *
lkpi_get_static_single_cpu_mask(int cpuid)
{

	KASSERT((cpuid >= 0 && cpuid < MAXCPU), ("%s: invalid cpuid %d\n",
	    __func__, cpuid));

	return (&static_single_cpu_mask[cpuid]);
}
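/*
 * Illustrative sketch (not compiled): the per-CPU masks handed out above are
 * what cpumask_of() is expected to return, e.g. when a driver pins an
 * interrupt to a single CPU.  The irq_set_affinity_hint() call and the IRQ
 * number are assumptions used purely for illustration, following the usage
 * hinted at in linux_compat_init() below.
 */
#if 0
static void
example_pin_irq(unsigned int irq, int cpu)
{
	/* cpumask_of(cpu) yields a mask containing only "cpu". */
	irq_set_affinity_hint(irq, cpumask_of(cpu));
}
#endif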
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
	boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
	boot_cpu_data.x86_max_cores = mp_ncpus;
	boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
	boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);

	for (i = 0; i < MAXCPU; i++) {
		__cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
		__cpu_data[i].x86_max_cores = mp_ncpus;
		__cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
		__cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
	}
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);

	CPU_COPY(&all_cpus, &cpu_online_mask);
	/*
	 * Generate a single-CPU cpumask_t for each CPU (possibly) in the
	 * system.  CPUs are indexed from 0..(MAXCPU-1).  The entry for
	 * cpuid 0 has only itself set in mask 0, cpuid 1 only itself in
	 * mask 1, and so on.  This is used by cpumask_of() (and possibly
	 * others in the future) for, e.g., drivers to pass hints to
	 * irq_set_affinity_hint().
	 */
	for (i = 0; i < MAXCPU; i++)
		CPU_SET(i, &static_single_cpu_mask[i]);

	strlcpy(init_uts_ns.name.release, osrelease,
	    sizeof(init_uts_ns.name.release));
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where FreeBSD would use "uintptr_t".
 * Assert that these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
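/*
 * Illustrative sketch (not compiled): the pointer <-> "unsigned long"
 * round-trip that the CTASSERT above guards.  Linux code routinely stashes
 * pointers in unsigned long variables; that is only lossless when the two
 * types have the same width, which the assertion enforces at build time.
 * The helper name is hypothetical.
 */
#if 0
static void *
example_roundtrip(void *p)
{
	unsigned long cookie;

	cookie = (unsigned long)p;	/* would truncate if the sizes differed */
	return ((void *)cookie);
}
#endif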