1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/refcount.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/bus.h> 47 #include <sys/eventhandler.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/filio.h> 51 #include <sys/rwlock.h> 52 #include <sys/mman.h> 53 #include <sys/stack.h> 54 #include <sys/sysent.h> 55 #include <sys/time.h> 56 #include <sys/user.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_object.h> 61 #include <vm/vm_page.h> 62 #include <vm/vm_pager.h> 63 64 #include <machine/stdarg.h> 65 66 #if defined(__i386__) || defined(__amd64__) 67 #include <machine/md_var.h> 68 #endif 69 70 #include <linux/kobject.h> 71 #include <linux/cpu.h> 72 #include <linux/device.h> 73 #include <linux/slab.h> 74 #include <linux/module.h> 75 #include <linux/moduleparam.h> 76 #include <linux/cdev.h> 77 #include <linux/file.h> 78 #include <linux/sysfs.h> 79 #include <linux/mm.h> 80 #include <linux/io.h> 81 #include <linux/vmalloc.h> 82 #include <linux/netdevice.h> 83 #include <linux/timer.h> 84 #include <linux/interrupt.h> 85 #include <linux/uaccess.h> 86 #include <linux/list.h> 87 #include <linux/kthread.h> 88 #include <linux/kernel.h> 89 #include <linux/compat.h> 90 #include <linux/poll.h> 91 #include <linux/smp.h> 92 #include <linux/wait_bit.h> 93 #include <linux/rcupdate.h> 94 #include <linux/interval_tree.h> 95 #include <linux/interval_tree_generic.h> 96 97 #if defined(__i386__) || defined(__amd64__) 98 #include <asm/smp.h> 99 #endif 100 101 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 102 "LinuxKPI parameters"); 103 104 int linuxkpi_debug; 105 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 106 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 107 108 int linuxkpi_warn_dump_stack = 0; 109 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, 110 &linuxkpi_warn_dump_stack, 0, 111 "Set to enable stack traces from WARN_ON(). Clear to disable."); 112 113 static struct timeval lkpi_net_lastlog; 114 static int lkpi_net_curpps; 115 static int lkpi_net_maxpps = 99; 116 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 117 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 118 119 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 120 121 #include <linux/rbtree.h> 122 /* Undo Linux compat changes. */ 123 #undef RB_ROOT 124 #undef file 125 #undef cdev 126 #define RB_ROOT(head) (head)->rbh_root 127 128 static void linux_destroy_dev(struct linux_cdev *); 129 static void linux_cdev_deref(struct linux_cdev *ldev); 130 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 131 132 cpumask_t cpu_online_mask; 133 struct kobject linux_class_root; 134 struct device linux_root_device; 135 struct class linux_class_misc; 136 struct list_head pci_drivers; 137 struct list_head pci_devices; 138 spinlock_t pci_lock; 139 140 unsigned long linux_timer_hz_mask; 141 142 wait_queue_head_t linux_bit_waitq; 143 wait_queue_head_t linux_var_waitq; 144 145 int 146 panic_cmp(struct rb_node *one, struct rb_node *two) 147 { 148 panic("no cmp"); 149 } 150 151 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 152 153 #define START(node) ((node)->start) 154 #define LAST(node) ((node)->last) 155 156 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, 157 LAST,, lkpi_interval_tree) 158 159 int 160 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 161 { 162 va_list tmp_va; 163 int len; 164 char *old; 165 char *name; 166 char dummy; 167 168 old = kobj->name; 169 170 if (old && fmt == NULL) 171 return (0); 172 173 /* compute length of string */ 174 va_copy(tmp_va, args); 175 len = vsnprintf(&dummy, 0, fmt, tmp_va); 176 va_end(tmp_va); 177 178 /* account for zero termination */ 179 len++; 180 181 /* check for error */ 182 if (len < 1) 183 return (-EINVAL); 184 185 /* allocate memory for string */ 186 name = kzalloc(len, GFP_KERNEL); 187 if (name == NULL) 188 return (-ENOMEM); 189 vsnprintf(name, len, fmt, args); 190 kobj->name = name; 191 192 /* free old string */ 193 kfree(old); 194 195 /* filter new string */ 196 for (; *name != '\0'; name++) 197 if (*name == '/') 198 *name = '!'; 199 return (0); 200 } 201 202 int 203 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 204 { 205 va_list args; 206 int error; 207 208 va_start(args, fmt); 209 error = kobject_set_name_vargs(kobj, fmt, args); 210 va_end(args); 211 212 return (error); 213 } 214 215 static int 216 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 217 { 218 const struct kobj_type *t; 219 int error; 220 221 kobj->parent = parent; 222 error = sysfs_create_dir(kobj); 223 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 224 struct attribute **attr; 225 t = kobj->ktype; 226 227 for (attr = t->default_attrs; *attr != NULL; attr++) { 228 error = sysfs_create_file(kobj, *attr); 229 if (error) 230 break; 231 } 232 if (error) 233 sysfs_remove_dir(kobj); 234 } 235 return (error); 236 } 237 238 int 239 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 240 { 241 va_list args; 242 int error; 243 244 va_start(args, fmt); 245 error = kobject_set_name_vargs(kobj, fmt, args); 246 va_end(args); 247 if (error) 248 return (error); 249 250 return kobject_add_complete(kobj, parent); 251 } 252 253 void 254 linux_kobject_release(struct kref *kref) 255 { 256 struct kobject *kobj; 257 char *name; 258 259 kobj = container_of(kref, struct kobject, kref); 260 sysfs_remove_dir(kobj); 261 name = kobj->name; 262 if (kobj->ktype && kobj->ktype->release) 263 kobj->ktype->release(kobj); 264 kfree(name); 265 } 266 267 static void 268 linux_kobject_kfree(struct kobject *kobj) 269 { 270 kfree(kobj); 271 } 272 273 static void 274 linux_kobject_kfree_name(struct kobject *kobj) 275 { 276 if (kobj) { 277 kfree(kobj->name); 278 } 279 } 280 281 const struct kobj_type linux_kfree_type = { 282 .release = linux_kobject_kfree 283 }; 284 285 static ssize_t 286 lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) 287 { 288 struct kobj_attribute *ka = 289 container_of(attr, struct kobj_attribute, attr); 290 291 if (ka->show == NULL) 292 return (-EIO); 293 294 return (ka->show(kobj, ka, buf)); 295 } 296 297 static ssize_t 298 lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr, 299 const char *buf, size_t count) 300 { 301 struct kobj_attribute *ka = 302 container_of(attr, struct kobj_attribute, attr); 303 304 if (ka->store == NULL) 305 return (-EIO); 306 307 return (ka->store(kobj, ka, buf, count)); 308 } 309 310 const struct sysfs_ops kobj_sysfs_ops = { 311 .show = lkpi_kobj_attr_show, 312 .store = lkpi_kobj_attr_store, 313 }; 314 315 static void 316 linux_device_release(struct device *dev) 317 { 318 pr_debug("linux_device_release: %s\n", dev_name(dev)); 319 kfree(dev); 320 } 321 322 static ssize_t 323 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 324 { 325 struct class_attribute *dattr; 326 ssize_t error; 327 328 dattr = container_of(attr, struct class_attribute, attr); 329 error = -EIO; 330 if (dattr->show) 331 error = dattr->show(container_of(kobj, struct class, kobj), 332 dattr, buf); 333 return (error); 334 } 335 336 static ssize_t 337 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 338 size_t count) 339 { 340 struct class_attribute *dattr; 341 ssize_t error; 342 343 dattr = container_of(attr, struct class_attribute, attr); 344 error = -EIO; 345 if (dattr->store) 346 error = dattr->store(container_of(kobj, struct class, kobj), 347 dattr, buf, count); 348 return (error); 349 } 350 351 static void 352 linux_class_release(struct kobject *kobj) 353 { 354 struct class *class; 355 356 class = container_of(kobj, struct class, kobj); 357 if (class->class_release) 358 class->class_release(class); 359 } 360 361 static const struct sysfs_ops linux_class_sysfs = { 362 .show = linux_class_show, 363 .store = linux_class_store, 364 }; 365 366 const struct kobj_type linux_class_ktype = { 367 .release = linux_class_release, 368 .sysfs_ops = &linux_class_sysfs 369 }; 370 371 static void 372 linux_dev_release(struct kobject *kobj) 373 { 374 struct device *dev; 375 376 dev = container_of(kobj, struct device, kobj); 377 /* This is the precedence defined by linux. */ 378 if (dev->release) 379 dev->release(dev); 380 else if (dev->class && dev->class->dev_release) 381 dev->class->dev_release(dev); 382 } 383 384 static ssize_t 385 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 386 { 387 struct device_attribute *dattr; 388 ssize_t error; 389 390 dattr = container_of(attr, struct device_attribute, attr); 391 error = -EIO; 392 if (dattr->show) 393 error = dattr->show(container_of(kobj, struct device, kobj), 394 dattr, buf); 395 return (error); 396 } 397 398 static ssize_t 399 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 400 size_t count) 401 { 402 struct device_attribute *dattr; 403 ssize_t error; 404 405 dattr = container_of(attr, struct device_attribute, attr); 406 error = -EIO; 407 if (dattr->store) 408 error = dattr->store(container_of(kobj, struct device, kobj), 409 dattr, buf, count); 410 return (error); 411 } 412 413 static const struct sysfs_ops linux_dev_sysfs = { 414 .show = linux_dev_show, 415 .store = linux_dev_store, 416 }; 417 418 const struct kobj_type linux_dev_ktype = { 419 .release = linux_dev_release, 420 .sysfs_ops = &linux_dev_sysfs 421 }; 422 423 struct device * 424 device_create(struct class *class, struct device *parent, dev_t devt, 425 void *drvdata, const char *fmt, ...) 426 { 427 struct device *dev; 428 va_list args; 429 430 dev = kzalloc(sizeof(*dev), M_WAITOK); 431 dev->parent = parent; 432 dev->class = class; 433 dev->devt = devt; 434 dev->driver_data = drvdata; 435 dev->release = linux_device_release; 436 va_start(args, fmt); 437 kobject_set_name_vargs(&dev->kobj, fmt, args); 438 va_end(args); 439 device_register(dev); 440 441 return (dev); 442 } 443 444 int 445 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 446 struct kobject *parent, const char *fmt, ...) 447 { 448 va_list args; 449 int error; 450 451 kobject_init(kobj, ktype); 452 kobj->ktype = ktype; 453 kobj->parent = parent; 454 kobj->name = NULL; 455 456 va_start(args, fmt); 457 error = kobject_set_name_vargs(kobj, fmt, args); 458 va_end(args); 459 if (error) 460 return (error); 461 return kobject_add_complete(kobj, parent); 462 } 463 464 static void 465 linux_kq_lock(void *arg) 466 { 467 spinlock_t *s = arg; 468 469 spin_lock(s); 470 } 471 static void 472 linux_kq_unlock(void *arg) 473 { 474 spinlock_t *s = arg; 475 476 spin_unlock(s); 477 } 478 479 static void 480 linux_kq_assert_lock(void *arg, int what) 481 { 482 #ifdef INVARIANTS 483 spinlock_t *s = arg; 484 485 if (what == LA_LOCKED) 486 mtx_assert(&s->m, MA_OWNED); 487 else 488 mtx_assert(&s->m, MA_NOTOWNED); 489 #endif 490 } 491 492 static void 493 linux_file_kqfilter_poll(struct linux_file *, int); 494 495 struct linux_file * 496 linux_file_alloc(void) 497 { 498 struct linux_file *filp; 499 500 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 501 502 /* set initial refcount */ 503 filp->f_count = 1; 504 505 /* setup fields needed by kqueue support */ 506 spin_lock_init(&filp->f_kqlock); 507 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 508 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 509 510 return (filp); 511 } 512 513 void 514 linux_file_free(struct linux_file *filp) 515 { 516 if (filp->_file == NULL) { 517 if (filp->f_op != NULL && filp->f_op->release != NULL) 518 filp->f_op->release(filp->f_vnode, filp); 519 if (filp->f_shmem != NULL) 520 vm_object_deallocate(filp->f_shmem); 521 kfree_rcu(filp, rcu); 522 } else { 523 /* 524 * The close method of the character device or file 525 * will free the linux_file structure: 526 */ 527 _fdrop(filp->_file, curthread); 528 } 529 } 530 531 static int 532 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 533 vm_page_t *mres) 534 { 535 struct vm_area_struct *vmap; 536 537 vmap = linux_cdev_handle_find(vm_obj->handle); 538 539 MPASS(vmap != NULL); 540 MPASS(vmap->vm_private_data == vm_obj->handle); 541 542 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 543 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 544 vm_page_t page; 545 546 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 547 /* 548 * If the passed in result page is a fake 549 * page, update it with the new physical 550 * address. 551 */ 552 page = *mres; 553 vm_page_updatefake(page, paddr, vm_obj->memattr); 554 } else { 555 /* 556 * Replace the passed in "mres" page with our 557 * own fake page and free up the all of the 558 * original pages. 559 */ 560 VM_OBJECT_WUNLOCK(vm_obj); 561 page = vm_page_getfake(paddr, vm_obj->memattr); 562 VM_OBJECT_WLOCK(vm_obj); 563 564 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 565 *mres = page; 566 } 567 vm_page_valid(page); 568 return (VM_PAGER_OK); 569 } 570 return (VM_PAGER_FAIL); 571 } 572 573 static int 574 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 575 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 576 { 577 struct vm_area_struct *vmap; 578 int err; 579 580 /* get VM area structure */ 581 vmap = linux_cdev_handle_find(vm_obj->handle); 582 MPASS(vmap != NULL); 583 MPASS(vmap->vm_private_data == vm_obj->handle); 584 585 VM_OBJECT_WUNLOCK(vm_obj); 586 587 linux_set_current(curthread); 588 589 down_write(&vmap->vm_mm->mmap_sem); 590 if (unlikely(vmap->vm_ops == NULL)) { 591 err = VM_FAULT_SIGBUS; 592 } else { 593 struct vm_fault vmf; 594 595 /* fill out VM fault structure */ 596 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 597 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 598 vmf.pgoff = 0; 599 vmf.page = NULL; 600 vmf.vma = vmap; 601 602 vmap->vm_pfn_count = 0; 603 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 604 vmap->vm_obj = vm_obj; 605 606 err = vmap->vm_ops->fault(&vmf); 607 608 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 609 kern_yield(PRI_USER); 610 err = vmap->vm_ops->fault(&vmf); 611 } 612 } 613 614 /* translate return code */ 615 switch (err) { 616 case VM_FAULT_OOM: 617 err = VM_PAGER_AGAIN; 618 break; 619 case VM_FAULT_SIGBUS: 620 err = VM_PAGER_BAD; 621 break; 622 case VM_FAULT_NOPAGE: 623 /* 624 * By contract the fault handler will return having 625 * busied all the pages itself. If pidx is already 626 * found in the object, it will simply xbusy the first 627 * page and return with vm_pfn_count set to 1. 628 */ 629 *first = vmap->vm_pfn_first; 630 *last = *first + vmap->vm_pfn_count - 1; 631 err = VM_PAGER_OK; 632 break; 633 default: 634 err = VM_PAGER_ERROR; 635 break; 636 } 637 up_write(&vmap->vm_mm->mmap_sem); 638 VM_OBJECT_WLOCK(vm_obj); 639 return (err); 640 } 641 642 static struct rwlock linux_vma_lock; 643 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 644 TAILQ_HEAD_INITIALIZER(linux_vma_head); 645 646 static void 647 linux_cdev_handle_free(struct vm_area_struct *vmap) 648 { 649 /* Drop reference on vm_file */ 650 if (vmap->vm_file != NULL) 651 fput(vmap->vm_file); 652 653 /* Drop reference on mm_struct */ 654 mmput(vmap->vm_mm); 655 656 kfree(vmap); 657 } 658 659 static void 660 linux_cdev_handle_remove(struct vm_area_struct *vmap) 661 { 662 rw_wlock(&linux_vma_lock); 663 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 664 rw_wunlock(&linux_vma_lock); 665 } 666 667 static struct vm_area_struct * 668 linux_cdev_handle_find(void *handle) 669 { 670 struct vm_area_struct *vmap; 671 672 rw_rlock(&linux_vma_lock); 673 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 674 if (vmap->vm_private_data == handle) 675 break; 676 } 677 rw_runlock(&linux_vma_lock); 678 return (vmap); 679 } 680 681 static int 682 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 683 vm_ooffset_t foff, struct ucred *cred, u_short *color) 684 { 685 686 MPASS(linux_cdev_handle_find(handle) != NULL); 687 *color = 0; 688 return (0); 689 } 690 691 static void 692 linux_cdev_pager_dtor(void *handle) 693 { 694 const struct vm_operations_struct *vm_ops; 695 struct vm_area_struct *vmap; 696 697 vmap = linux_cdev_handle_find(handle); 698 MPASS(vmap != NULL); 699 700 /* 701 * Remove handle before calling close operation to prevent 702 * other threads from reusing the handle pointer. 703 */ 704 linux_cdev_handle_remove(vmap); 705 706 down_write(&vmap->vm_mm->mmap_sem); 707 vm_ops = vmap->vm_ops; 708 if (likely(vm_ops != NULL)) 709 vm_ops->close(vmap); 710 up_write(&vmap->vm_mm->mmap_sem); 711 712 linux_cdev_handle_free(vmap); 713 } 714 715 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 716 { 717 /* OBJT_MGTDEVICE */ 718 .cdev_pg_populate = linux_cdev_pager_populate, 719 .cdev_pg_ctor = linux_cdev_pager_ctor, 720 .cdev_pg_dtor = linux_cdev_pager_dtor 721 }, 722 { 723 /* OBJT_DEVICE */ 724 .cdev_pg_fault = linux_cdev_pager_fault, 725 .cdev_pg_ctor = linux_cdev_pager_ctor, 726 .cdev_pg_dtor = linux_cdev_pager_dtor 727 }, 728 }; 729 730 int 731 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 732 unsigned long size) 733 { 734 vm_object_t obj; 735 vm_page_t m; 736 737 obj = vma->vm_obj; 738 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 739 return (-ENOTSUP); 740 VM_OBJECT_RLOCK(obj); 741 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 742 m != NULL && m->pindex < OFF_TO_IDX(address + size); 743 m = TAILQ_NEXT(m, listq)) 744 pmap_remove_all(m); 745 VM_OBJECT_RUNLOCK(obj); 746 return (0); 747 } 748 749 static struct file_operations dummy_ldev_ops = { 750 /* XXXKIB */ 751 }; 752 753 static struct linux_cdev dummy_ldev = { 754 .ops = &dummy_ldev_ops, 755 }; 756 757 #define LDEV_SI_DTR 0x0001 758 #define LDEV_SI_REF 0x0002 759 760 static void 761 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 762 struct linux_cdev **dev) 763 { 764 struct linux_cdev *ldev; 765 u_int siref; 766 767 ldev = filp->f_cdev; 768 *fop = filp->f_op; 769 if (ldev != NULL) { 770 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 771 refcount_acquire(&ldev->refs); 772 } else { 773 for (siref = ldev->siref;;) { 774 if ((siref & LDEV_SI_DTR) != 0) { 775 ldev = &dummy_ldev; 776 *fop = ldev->ops; 777 siref = ldev->siref; 778 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 779 } else if (atomic_fcmpset_int(&ldev->siref, 780 &siref, siref + LDEV_SI_REF)) { 781 break; 782 } 783 } 784 } 785 } 786 *dev = ldev; 787 } 788 789 static void 790 linux_drop_fop(struct linux_cdev *ldev) 791 { 792 793 if (ldev == NULL) 794 return; 795 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 796 linux_cdev_deref(ldev); 797 } else { 798 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 799 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 800 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 801 } 802 } 803 804 #define OPW(fp,td,code) ({ \ 805 struct file *__fpop; \ 806 __typeof(code) __retval; \ 807 \ 808 __fpop = (td)->td_fpop; \ 809 (td)->td_fpop = (fp); \ 810 __retval = (code); \ 811 (td)->td_fpop = __fpop; \ 812 __retval; \ 813 }) 814 815 static int 816 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 817 struct file *file) 818 { 819 struct linux_cdev *ldev; 820 struct linux_file *filp; 821 const struct file_operations *fop; 822 int error; 823 824 ldev = dev->si_drv1; 825 826 filp = linux_file_alloc(); 827 filp->f_dentry = &filp->f_dentry_store; 828 filp->f_op = ldev->ops; 829 filp->f_mode = file->f_flag; 830 filp->f_flags = file->f_flag; 831 filp->f_vnode = file->f_vnode; 832 filp->_file = file; 833 refcount_acquire(&ldev->refs); 834 filp->f_cdev = ldev; 835 836 linux_set_current(td); 837 linux_get_fop(filp, &fop, &ldev); 838 839 if (fop->open != NULL) { 840 error = -fop->open(file->f_vnode, filp); 841 if (error != 0) { 842 linux_drop_fop(ldev); 843 linux_cdev_deref(filp->f_cdev); 844 kfree(filp); 845 return (error); 846 } 847 } 848 849 /* hold on to the vnode - used for fstat() */ 850 vhold(filp->f_vnode); 851 852 /* release the file from devfs */ 853 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 854 linux_drop_fop(ldev); 855 return (ENXIO); 856 } 857 858 #define LINUX_IOCTL_MIN_PTR 0x10000UL 859 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 860 861 static inline int 862 linux_remap_address(void **uaddr, size_t len) 863 { 864 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 865 866 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 867 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 868 struct task_struct *pts = current; 869 if (pts == NULL) { 870 *uaddr = NULL; 871 return (1); 872 } 873 874 /* compute data offset */ 875 uaddr_val -= LINUX_IOCTL_MIN_PTR; 876 877 /* check that length is within bounds */ 878 if ((len > IOCPARM_MAX) || 879 (uaddr_val + len) > pts->bsd_ioctl_len) { 880 *uaddr = NULL; 881 return (1); 882 } 883 884 /* re-add kernel buffer address */ 885 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 886 887 /* update address location */ 888 *uaddr = (void *)uaddr_val; 889 return (1); 890 } 891 return (0); 892 } 893 894 int 895 linux_copyin(const void *uaddr, void *kaddr, size_t len) 896 { 897 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 898 if (uaddr == NULL) 899 return (-EFAULT); 900 memcpy(kaddr, uaddr, len); 901 return (0); 902 } 903 return (-copyin(uaddr, kaddr, len)); 904 } 905 906 int 907 linux_copyout(const void *kaddr, void *uaddr, size_t len) 908 { 909 if (linux_remap_address(&uaddr, len)) { 910 if (uaddr == NULL) 911 return (-EFAULT); 912 memcpy(uaddr, kaddr, len); 913 return (0); 914 } 915 return (-copyout(kaddr, uaddr, len)); 916 } 917 918 size_t 919 linux_clear_user(void *_uaddr, size_t _len) 920 { 921 uint8_t *uaddr = _uaddr; 922 size_t len = _len; 923 924 /* make sure uaddr is aligned before going into the fast loop */ 925 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 926 if (subyte(uaddr, 0)) 927 return (_len); 928 uaddr++; 929 len--; 930 } 931 932 /* zero 8 bytes at a time */ 933 while (len > 7) { 934 #ifdef __LP64__ 935 if (suword64(uaddr, 0)) 936 return (_len); 937 #else 938 if (suword32(uaddr, 0)) 939 return (_len); 940 if (suword32(uaddr + 4, 0)) 941 return (_len); 942 #endif 943 uaddr += 8; 944 len -= 8; 945 } 946 947 /* zero fill end, if any */ 948 while (len > 0) { 949 if (subyte(uaddr, 0)) 950 return (_len); 951 uaddr++; 952 len--; 953 } 954 return (0); 955 } 956 957 int 958 linux_access_ok(const void *uaddr, size_t len) 959 { 960 uintptr_t saddr; 961 uintptr_t eaddr; 962 963 /* get start and end address */ 964 saddr = (uintptr_t)uaddr; 965 eaddr = (uintptr_t)uaddr + len; 966 967 /* verify addresses are valid for userspace */ 968 return ((saddr == eaddr) || 969 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 970 } 971 972 /* 973 * This function should return either EINTR or ERESTART depending on 974 * the signal type sent to this thread: 975 */ 976 static int 977 linux_get_error(struct task_struct *task, int error) 978 { 979 /* check for signal type interrupt code */ 980 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 981 error = -linux_schedule_get_interrupt_value(task); 982 if (error == 0) 983 error = EINTR; 984 } 985 return (error); 986 } 987 988 static int 989 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 990 const struct file_operations *fop, u_long cmd, caddr_t data, 991 struct thread *td) 992 { 993 struct task_struct *task = current; 994 unsigned size; 995 int error; 996 997 size = IOCPARM_LEN(cmd); 998 /* refer to logic in sys_ioctl() */ 999 if (size > 0) { 1000 /* 1001 * Setup hint for linux_copyin() and linux_copyout(). 1002 * 1003 * Background: Linux code expects a user-space address 1004 * while FreeBSD supplies a kernel-space address. 1005 */ 1006 task->bsd_ioctl_data = data; 1007 task->bsd_ioctl_len = size; 1008 data = (void *)LINUX_IOCTL_MIN_PTR; 1009 } else { 1010 /* fetch user-space pointer */ 1011 data = *(void **)data; 1012 } 1013 #ifdef COMPAT_FREEBSD32 1014 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 1015 /* try the compat IOCTL handler first */ 1016 if (fop->compat_ioctl != NULL) { 1017 error = -OPW(fp, td, fop->compat_ioctl(filp, 1018 cmd, (u_long)data)); 1019 } else { 1020 error = ENOTTY; 1021 } 1022 1023 /* fallback to the regular IOCTL handler, if any */ 1024 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 1025 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 1026 cmd, (u_long)data)); 1027 } 1028 } else 1029 #endif 1030 { 1031 if (fop->unlocked_ioctl != NULL) { 1032 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 1033 cmd, (u_long)data)); 1034 } else { 1035 error = ENOTTY; 1036 } 1037 } 1038 if (size > 0) { 1039 task->bsd_ioctl_data = NULL; 1040 task->bsd_ioctl_len = 0; 1041 } 1042 1043 if (error == EWOULDBLOCK) { 1044 /* update kqfilter status, if any */ 1045 linux_file_kqfilter_poll(filp, 1046 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1047 } else { 1048 error = linux_get_error(task, error); 1049 } 1050 return (error); 1051 } 1052 1053 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 1054 1055 /* 1056 * This function atomically updates the poll wakeup state and returns 1057 * the previous state at the time of update. 1058 */ 1059 static uint8_t 1060 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 1061 { 1062 int c, old; 1063 1064 c = v->counter; 1065 1066 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 1067 c = old; 1068 1069 return (c); 1070 } 1071 1072 static int 1073 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1074 { 1075 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1076 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1077 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1078 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1079 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1080 }; 1081 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1082 1083 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1084 case LINUX_FWQ_STATE_QUEUED: 1085 linux_poll_wakeup(filp); 1086 return (1); 1087 default: 1088 return (0); 1089 } 1090 } 1091 1092 void 1093 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1094 { 1095 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1096 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1097 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1098 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1099 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1100 }; 1101 1102 /* check if we are called inside the select system call */ 1103 if (p == LINUX_POLL_TABLE_NORMAL) 1104 selrecord(curthread, &filp->f_selinfo); 1105 1106 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1107 case LINUX_FWQ_STATE_INIT: 1108 /* NOTE: file handles can only belong to one wait-queue */ 1109 filp->f_wait_queue.wqh = wqh; 1110 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1111 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1112 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1113 break; 1114 default: 1115 break; 1116 } 1117 } 1118 1119 static void 1120 linux_poll_wait_dequeue(struct linux_file *filp) 1121 { 1122 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1123 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1124 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1125 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1126 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1127 }; 1128 1129 seldrain(&filp->f_selinfo); 1130 1131 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1132 case LINUX_FWQ_STATE_NOT_READY: 1133 case LINUX_FWQ_STATE_QUEUED: 1134 case LINUX_FWQ_STATE_READY: 1135 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1136 break; 1137 default: 1138 break; 1139 } 1140 } 1141 1142 void 1143 linux_poll_wakeup(struct linux_file *filp) 1144 { 1145 /* this function should be NULL-safe */ 1146 if (filp == NULL) 1147 return; 1148 1149 selwakeup(&filp->f_selinfo); 1150 1151 spin_lock(&filp->f_kqlock); 1152 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1153 LINUX_KQ_FLAG_NEED_WRITE; 1154 1155 /* make sure the "knote" gets woken up */ 1156 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1157 spin_unlock(&filp->f_kqlock); 1158 } 1159 1160 static void 1161 linux_file_kqfilter_detach(struct knote *kn) 1162 { 1163 struct linux_file *filp = kn->kn_hook; 1164 1165 spin_lock(&filp->f_kqlock); 1166 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1167 spin_unlock(&filp->f_kqlock); 1168 } 1169 1170 static int 1171 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1172 { 1173 struct linux_file *filp = kn->kn_hook; 1174 1175 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1176 1177 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1178 } 1179 1180 static int 1181 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1182 { 1183 struct linux_file *filp = kn->kn_hook; 1184 1185 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1186 1187 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1188 } 1189 1190 static struct filterops linux_dev_kqfiltops_read = { 1191 .f_isfd = 1, 1192 .f_detach = linux_file_kqfilter_detach, 1193 .f_event = linux_file_kqfilter_read_event, 1194 }; 1195 1196 static struct filterops linux_dev_kqfiltops_write = { 1197 .f_isfd = 1, 1198 .f_detach = linux_file_kqfilter_detach, 1199 .f_event = linux_file_kqfilter_write_event, 1200 }; 1201 1202 static void 1203 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1204 { 1205 struct thread *td; 1206 const struct file_operations *fop; 1207 struct linux_cdev *ldev; 1208 int temp; 1209 1210 if ((filp->f_kqflags & kqflags) == 0) 1211 return; 1212 1213 td = curthread; 1214 1215 linux_get_fop(filp, &fop, &ldev); 1216 /* get the latest polling state */ 1217 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1218 linux_drop_fop(ldev); 1219 1220 spin_lock(&filp->f_kqlock); 1221 /* clear kqflags */ 1222 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1223 LINUX_KQ_FLAG_NEED_WRITE); 1224 /* update kqflags */ 1225 if ((temp & (POLLIN | POLLOUT)) != 0) { 1226 if ((temp & POLLIN) != 0) 1227 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1228 if ((temp & POLLOUT) != 0) 1229 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1230 1231 /* make sure the "knote" gets woken up */ 1232 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1233 } 1234 spin_unlock(&filp->f_kqlock); 1235 } 1236 1237 static int 1238 linux_file_kqfilter(struct file *file, struct knote *kn) 1239 { 1240 struct linux_file *filp; 1241 struct thread *td; 1242 int error; 1243 1244 td = curthread; 1245 filp = (struct linux_file *)file->f_data; 1246 filp->f_flags = file->f_flag; 1247 if (filp->f_op->poll == NULL) 1248 return (EINVAL); 1249 1250 spin_lock(&filp->f_kqlock); 1251 switch (kn->kn_filter) { 1252 case EVFILT_READ: 1253 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1254 kn->kn_fop = &linux_dev_kqfiltops_read; 1255 kn->kn_hook = filp; 1256 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1257 error = 0; 1258 break; 1259 case EVFILT_WRITE: 1260 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1261 kn->kn_fop = &linux_dev_kqfiltops_write; 1262 kn->kn_hook = filp; 1263 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1264 error = 0; 1265 break; 1266 default: 1267 error = EINVAL; 1268 break; 1269 } 1270 spin_unlock(&filp->f_kqlock); 1271 1272 if (error == 0) { 1273 linux_set_current(td); 1274 1275 /* update kqfilter status, if any */ 1276 linux_file_kqfilter_poll(filp, 1277 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1278 } 1279 return (error); 1280 } 1281 1282 static int 1283 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1284 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1285 int nprot, bool is_shared, struct thread *td) 1286 { 1287 struct task_struct *task; 1288 struct vm_area_struct *vmap; 1289 struct mm_struct *mm; 1290 struct linux_file *filp; 1291 vm_memattr_t attr; 1292 int error; 1293 1294 filp = (struct linux_file *)fp->f_data; 1295 filp->f_flags = fp->f_flag; 1296 1297 if (fop->mmap == NULL) 1298 return (EOPNOTSUPP); 1299 1300 linux_set_current(td); 1301 1302 /* 1303 * The same VM object might be shared by multiple processes 1304 * and the mm_struct is usually freed when a process exits. 1305 * 1306 * The atomic reference below makes sure the mm_struct is 1307 * available as long as the vmap is in the linux_vma_head. 1308 */ 1309 task = current; 1310 mm = task->mm; 1311 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1312 return (EINVAL); 1313 1314 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1315 vmap->vm_start = 0; 1316 vmap->vm_end = size; 1317 vmap->vm_pgoff = *offset / PAGE_SIZE; 1318 vmap->vm_pfn = 0; 1319 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1320 if (is_shared) 1321 vmap->vm_flags |= VM_SHARED; 1322 vmap->vm_ops = NULL; 1323 vmap->vm_file = get_file(filp); 1324 vmap->vm_mm = mm; 1325 1326 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1327 error = linux_get_error(task, EINTR); 1328 } else { 1329 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1330 error = linux_get_error(task, error); 1331 up_write(&vmap->vm_mm->mmap_sem); 1332 } 1333 1334 if (error != 0) { 1335 linux_cdev_handle_free(vmap); 1336 return (error); 1337 } 1338 1339 attr = pgprot2cachemode(vmap->vm_page_prot); 1340 1341 if (vmap->vm_ops != NULL) { 1342 struct vm_area_struct *ptr; 1343 void *vm_private_data; 1344 bool vm_no_fault; 1345 1346 if (vmap->vm_ops->open == NULL || 1347 vmap->vm_ops->close == NULL || 1348 vmap->vm_private_data == NULL) { 1349 /* free allocated VM area struct */ 1350 linux_cdev_handle_free(vmap); 1351 return (EINVAL); 1352 } 1353 1354 vm_private_data = vmap->vm_private_data; 1355 1356 rw_wlock(&linux_vma_lock); 1357 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1358 if (ptr->vm_private_data == vm_private_data) 1359 break; 1360 } 1361 /* check if there is an existing VM area struct */ 1362 if (ptr != NULL) { 1363 /* check if the VM area structure is invalid */ 1364 if (ptr->vm_ops == NULL || 1365 ptr->vm_ops->open == NULL || 1366 ptr->vm_ops->close == NULL) { 1367 error = ESTALE; 1368 vm_no_fault = 1; 1369 } else { 1370 error = EEXIST; 1371 vm_no_fault = (ptr->vm_ops->fault == NULL); 1372 } 1373 } else { 1374 /* insert VM area structure into list */ 1375 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1376 error = 0; 1377 vm_no_fault = (vmap->vm_ops->fault == NULL); 1378 } 1379 rw_wunlock(&linux_vma_lock); 1380 1381 if (error != 0) { 1382 /* free allocated VM area struct */ 1383 linux_cdev_handle_free(vmap); 1384 /* check for stale VM area struct */ 1385 if (error != EEXIST) 1386 return (error); 1387 } 1388 1389 /* check if there is no fault handler */ 1390 if (vm_no_fault) { 1391 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1392 &linux_cdev_pager_ops[1], size, nprot, *offset, 1393 td->td_ucred); 1394 } else { 1395 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1396 &linux_cdev_pager_ops[0], size, nprot, *offset, 1397 td->td_ucred); 1398 } 1399 1400 /* check if allocating the VM object failed */ 1401 if (*object == NULL) { 1402 if (error == 0) { 1403 /* remove VM area struct from list */ 1404 linux_cdev_handle_remove(vmap); 1405 /* free allocated VM area struct */ 1406 linux_cdev_handle_free(vmap); 1407 } 1408 return (EINVAL); 1409 } 1410 } else { 1411 struct sglist *sg; 1412 1413 sg = sglist_alloc(1, M_WAITOK); 1414 sglist_append_phys(sg, 1415 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1416 1417 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1418 nprot, 0, td->td_ucred); 1419 1420 linux_cdev_handle_free(vmap); 1421 1422 if (*object == NULL) { 1423 sglist_free(sg); 1424 return (EINVAL); 1425 } 1426 } 1427 1428 if (attr != VM_MEMATTR_DEFAULT) { 1429 VM_OBJECT_WLOCK(*object); 1430 vm_object_set_memattr(*object, attr); 1431 VM_OBJECT_WUNLOCK(*object); 1432 } 1433 *offset = 0; 1434 return (0); 1435 } 1436 1437 struct cdevsw linuxcdevsw = { 1438 .d_version = D_VERSION, 1439 .d_fdopen = linux_dev_fdopen, 1440 .d_name = "lkpidev", 1441 }; 1442 1443 static int 1444 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1445 int flags, struct thread *td) 1446 { 1447 struct linux_file *filp; 1448 const struct file_operations *fop; 1449 struct linux_cdev *ldev; 1450 ssize_t bytes; 1451 int error; 1452 1453 error = 0; 1454 filp = (struct linux_file *)file->f_data; 1455 filp->f_flags = file->f_flag; 1456 /* XXX no support for I/O vectors currently */ 1457 if (uio->uio_iovcnt != 1) 1458 return (EOPNOTSUPP); 1459 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1460 return (EINVAL); 1461 linux_set_current(td); 1462 linux_get_fop(filp, &fop, &ldev); 1463 if (fop->read != NULL) { 1464 bytes = OPW(file, td, fop->read(filp, 1465 uio->uio_iov->iov_base, 1466 uio->uio_iov->iov_len, &uio->uio_offset)); 1467 if (bytes >= 0) { 1468 uio->uio_iov->iov_base = 1469 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1470 uio->uio_iov->iov_len -= bytes; 1471 uio->uio_resid -= bytes; 1472 } else { 1473 error = linux_get_error(current, -bytes); 1474 } 1475 } else 1476 error = ENXIO; 1477 1478 /* update kqfilter status, if any */ 1479 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1480 linux_drop_fop(ldev); 1481 1482 return (error); 1483 } 1484 1485 static int 1486 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1487 int flags, struct thread *td) 1488 { 1489 struct linux_file *filp; 1490 const struct file_operations *fop; 1491 struct linux_cdev *ldev; 1492 ssize_t bytes; 1493 int error; 1494 1495 filp = (struct linux_file *)file->f_data; 1496 filp->f_flags = file->f_flag; 1497 /* XXX no support for I/O vectors currently */ 1498 if (uio->uio_iovcnt != 1) 1499 return (EOPNOTSUPP); 1500 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1501 return (EINVAL); 1502 linux_set_current(td); 1503 linux_get_fop(filp, &fop, &ldev); 1504 if (fop->write != NULL) { 1505 bytes = OPW(file, td, fop->write(filp, 1506 uio->uio_iov->iov_base, 1507 uio->uio_iov->iov_len, &uio->uio_offset)); 1508 if (bytes >= 0) { 1509 uio->uio_iov->iov_base = 1510 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1511 uio->uio_iov->iov_len -= bytes; 1512 uio->uio_resid -= bytes; 1513 error = 0; 1514 } else { 1515 error = linux_get_error(current, -bytes); 1516 } 1517 } else 1518 error = ENXIO; 1519 1520 /* update kqfilter status, if any */ 1521 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1522 1523 linux_drop_fop(ldev); 1524 1525 return (error); 1526 } 1527 1528 static int 1529 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1530 struct thread *td) 1531 { 1532 struct linux_file *filp; 1533 const struct file_operations *fop; 1534 struct linux_cdev *ldev; 1535 int revents; 1536 1537 filp = (struct linux_file *)file->f_data; 1538 filp->f_flags = file->f_flag; 1539 linux_set_current(td); 1540 linux_get_fop(filp, &fop, &ldev); 1541 if (fop->poll != NULL) { 1542 revents = OPW(file, td, fop->poll(filp, 1543 LINUX_POLL_TABLE_NORMAL)) & events; 1544 } else { 1545 revents = 0; 1546 } 1547 linux_drop_fop(ldev); 1548 return (revents); 1549 } 1550 1551 static int 1552 linux_file_close(struct file *file, struct thread *td) 1553 { 1554 struct linux_file *filp; 1555 int (*release)(struct inode *, struct linux_file *); 1556 const struct file_operations *fop; 1557 struct linux_cdev *ldev; 1558 int error; 1559 1560 filp = (struct linux_file *)file->f_data; 1561 1562 KASSERT(file_count(filp) == 0, 1563 ("File refcount(%d) is not zero", file_count(filp))); 1564 1565 if (td == NULL) 1566 td = curthread; 1567 1568 error = 0; 1569 filp->f_flags = file->f_flag; 1570 linux_set_current(td); 1571 linux_poll_wait_dequeue(filp); 1572 linux_get_fop(filp, &fop, &ldev); 1573 /* 1574 * Always use the real release function, if any, to avoid 1575 * leaking device resources: 1576 */ 1577 release = filp->f_op->release; 1578 if (release != NULL) 1579 error = -OPW(file, td, release(filp->f_vnode, filp)); 1580 funsetown(&filp->f_sigio); 1581 if (filp->f_vnode != NULL) 1582 vdrop(filp->f_vnode); 1583 linux_drop_fop(ldev); 1584 ldev = filp->f_cdev; 1585 if (ldev != NULL) 1586 linux_cdev_deref(ldev); 1587 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1588 kfree(filp); 1589 1590 return (error); 1591 } 1592 1593 static int 1594 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1595 struct thread *td) 1596 { 1597 struct linux_file *filp; 1598 const struct file_operations *fop; 1599 struct linux_cdev *ldev; 1600 struct fiodgname_arg *fgn; 1601 const char *p; 1602 int error, i; 1603 1604 error = 0; 1605 filp = (struct linux_file *)fp->f_data; 1606 filp->f_flags = fp->f_flag; 1607 linux_get_fop(filp, &fop, &ldev); 1608 1609 linux_set_current(td); 1610 switch (cmd) { 1611 case FIONBIO: 1612 break; 1613 case FIOASYNC: 1614 if (fop->fasync == NULL) 1615 break; 1616 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1617 break; 1618 case FIOSETOWN: 1619 error = fsetown(*(int *)data, &filp->f_sigio); 1620 if (error == 0) { 1621 if (fop->fasync == NULL) 1622 break; 1623 error = -OPW(fp, td, fop->fasync(0, filp, 1624 fp->f_flag & FASYNC)); 1625 } 1626 break; 1627 case FIOGETOWN: 1628 *(int *)data = fgetown(&filp->f_sigio); 1629 break; 1630 case FIODGNAME: 1631 #ifdef COMPAT_FREEBSD32 1632 case FIODGNAME_32: 1633 #endif 1634 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1635 error = ENXIO; 1636 break; 1637 } 1638 fgn = data; 1639 p = devtoname(filp->f_cdev->cdev); 1640 i = strlen(p) + 1; 1641 if (i > fgn->len) { 1642 error = EINVAL; 1643 break; 1644 } 1645 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1646 break; 1647 default: 1648 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1649 break; 1650 } 1651 linux_drop_fop(ldev); 1652 return (error); 1653 } 1654 1655 static int 1656 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1657 vm_prot_t maxprot, int flags, struct file *fp, 1658 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1659 { 1660 /* 1661 * Character devices do not provide private mappings 1662 * of any kind: 1663 */ 1664 if ((maxprot & VM_PROT_WRITE) == 0 && 1665 (prot & VM_PROT_WRITE) != 0) 1666 return (EACCES); 1667 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1668 return (EINVAL); 1669 1670 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1671 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1672 } 1673 1674 static int 1675 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1676 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1677 struct thread *td) 1678 { 1679 struct linux_file *filp; 1680 const struct file_operations *fop; 1681 struct linux_cdev *ldev; 1682 struct mount *mp; 1683 struct vnode *vp; 1684 vm_object_t object; 1685 vm_prot_t maxprot; 1686 int error; 1687 1688 filp = (struct linux_file *)fp->f_data; 1689 1690 vp = filp->f_vnode; 1691 if (vp == NULL) 1692 return (EOPNOTSUPP); 1693 1694 /* 1695 * Ensure that file and memory protections are 1696 * compatible. 1697 */ 1698 mp = vp->v_mount; 1699 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1700 maxprot = VM_PROT_NONE; 1701 if ((prot & VM_PROT_EXECUTE) != 0) 1702 return (EACCES); 1703 } else 1704 maxprot = VM_PROT_EXECUTE; 1705 if ((fp->f_flag & FREAD) != 0) 1706 maxprot |= VM_PROT_READ; 1707 else if ((prot & VM_PROT_READ) != 0) 1708 return (EACCES); 1709 1710 /* 1711 * If we are sharing potential changes via MAP_SHARED and we 1712 * are trying to get write permission although we opened it 1713 * without asking for it, bail out. 1714 * 1715 * Note that most character devices always share mappings. 1716 * 1717 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1718 * requests rather than doing it here. 1719 */ 1720 if ((flags & MAP_SHARED) != 0) { 1721 if ((fp->f_flag & FWRITE) != 0) 1722 maxprot |= VM_PROT_WRITE; 1723 else if ((prot & VM_PROT_WRITE) != 0) 1724 return (EACCES); 1725 } 1726 maxprot &= cap_maxprot; 1727 1728 linux_get_fop(filp, &fop, &ldev); 1729 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1730 &foff, fop, &object); 1731 if (error != 0) 1732 goto out; 1733 1734 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1735 foff, FALSE, td); 1736 if (error != 0) 1737 vm_object_deallocate(object); 1738 out: 1739 linux_drop_fop(ldev); 1740 return (error); 1741 } 1742 1743 static int 1744 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 1745 { 1746 struct linux_file *filp; 1747 struct vnode *vp; 1748 int error; 1749 1750 filp = (struct linux_file *)fp->f_data; 1751 if (filp->f_vnode == NULL) 1752 return (EOPNOTSUPP); 1753 1754 vp = filp->f_vnode; 1755 1756 vn_lock(vp, LK_SHARED | LK_RETRY); 1757 error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); 1758 VOP_UNLOCK(vp); 1759 1760 return (error); 1761 } 1762 1763 static int 1764 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1765 struct filedesc *fdp) 1766 { 1767 struct linux_file *filp; 1768 struct vnode *vp; 1769 int error; 1770 1771 filp = fp->f_data; 1772 vp = filp->f_vnode; 1773 if (vp == NULL) { 1774 error = 0; 1775 kif->kf_type = KF_TYPE_DEV; 1776 } else { 1777 vref(vp); 1778 FILEDESC_SUNLOCK(fdp); 1779 error = vn_fill_kinfo_vnode(vp, kif); 1780 vrele(vp); 1781 kif->kf_type = KF_TYPE_VNODE; 1782 FILEDESC_SLOCK(fdp); 1783 } 1784 return (error); 1785 } 1786 1787 unsigned int 1788 linux_iminor(struct inode *inode) 1789 { 1790 struct linux_cdev *ldev; 1791 1792 if (inode == NULL || inode->v_rdev == NULL || 1793 inode->v_rdev->si_devsw != &linuxcdevsw) 1794 return (-1U); 1795 ldev = inode->v_rdev->si_drv1; 1796 if (ldev == NULL) 1797 return (-1U); 1798 1799 return (minor(ldev->dev)); 1800 } 1801 1802 struct fileops linuxfileops = { 1803 .fo_read = linux_file_read, 1804 .fo_write = linux_file_write, 1805 .fo_truncate = invfo_truncate, 1806 .fo_kqfilter = linux_file_kqfilter, 1807 .fo_stat = linux_file_stat, 1808 .fo_fill_kinfo = linux_file_fill_kinfo, 1809 .fo_poll = linux_file_poll, 1810 .fo_close = linux_file_close, 1811 .fo_ioctl = linux_file_ioctl, 1812 .fo_mmap = linux_file_mmap, 1813 .fo_chmod = invfo_chmod, 1814 .fo_chown = invfo_chown, 1815 .fo_sendfile = invfo_sendfile, 1816 .fo_flags = DFLAG_PASSABLE, 1817 }; 1818 1819 /* 1820 * Hash of vmmap addresses. This is infrequently accessed and does not 1821 * need to be particularly large. This is done because we must store the 1822 * caller's idea of the map size to properly unmap. 1823 */ 1824 struct vmmap { 1825 LIST_ENTRY(vmmap) vm_next; 1826 void *vm_addr; 1827 unsigned long vm_size; 1828 }; 1829 1830 struct vmmaphd { 1831 struct vmmap *lh_first; 1832 }; 1833 #define VMMAP_HASH_SIZE 64 1834 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1835 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1836 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1837 static struct mtx vmmaplock; 1838 1839 static void 1840 vmmap_add(void *addr, unsigned long size) 1841 { 1842 struct vmmap *vmmap; 1843 1844 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1845 mtx_lock(&vmmaplock); 1846 vmmap->vm_size = size; 1847 vmmap->vm_addr = addr; 1848 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1849 mtx_unlock(&vmmaplock); 1850 } 1851 1852 static struct vmmap * 1853 vmmap_remove(void *addr) 1854 { 1855 struct vmmap *vmmap; 1856 1857 mtx_lock(&vmmaplock); 1858 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1859 if (vmmap->vm_addr == addr) 1860 break; 1861 if (vmmap) 1862 LIST_REMOVE(vmmap, vm_next); 1863 mtx_unlock(&vmmaplock); 1864 1865 return (vmmap); 1866 } 1867 1868 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1869 void * 1870 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1871 { 1872 void *addr; 1873 1874 addr = pmap_mapdev_attr(phys_addr, size, attr); 1875 if (addr == NULL) 1876 return (NULL); 1877 vmmap_add(addr, size); 1878 1879 return (addr); 1880 } 1881 #endif 1882 1883 void 1884 iounmap(void *addr) 1885 { 1886 struct vmmap *vmmap; 1887 1888 vmmap = vmmap_remove(addr); 1889 if (vmmap == NULL) 1890 return; 1891 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1892 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1893 #endif 1894 kfree(vmmap); 1895 } 1896 1897 void * 1898 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1899 { 1900 vm_offset_t off; 1901 size_t size; 1902 1903 size = count * PAGE_SIZE; 1904 off = kva_alloc(size); 1905 if (off == 0) 1906 return (NULL); 1907 vmmap_add((void *)off, size); 1908 pmap_qenter(off, pages, count); 1909 1910 return ((void *)off); 1911 } 1912 1913 void 1914 vunmap(void *addr) 1915 { 1916 struct vmmap *vmmap; 1917 1918 vmmap = vmmap_remove(addr); 1919 if (vmmap == NULL) 1920 return; 1921 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1922 kva_free((vm_offset_t)addr, vmmap->vm_size); 1923 kfree(vmmap); 1924 } 1925 1926 static char * 1927 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1928 { 1929 unsigned int len; 1930 char *p; 1931 va_list aq; 1932 1933 va_copy(aq, ap); 1934 len = vsnprintf(NULL, 0, fmt, aq); 1935 va_end(aq); 1936 1937 if (dev != NULL) 1938 p = devm_kmalloc(dev, len + 1, gfp); 1939 else 1940 p = kmalloc(len + 1, gfp); 1941 if (p != NULL) 1942 vsnprintf(p, len + 1, fmt, ap); 1943 1944 return (p); 1945 } 1946 1947 char * 1948 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1949 { 1950 1951 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1952 } 1953 1954 char * 1955 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1956 { 1957 va_list ap; 1958 char *p; 1959 1960 va_start(ap, fmt); 1961 p = devm_kvasprintf(dev, gfp, fmt, ap); 1962 va_end(ap); 1963 1964 return (p); 1965 } 1966 1967 char * 1968 kasprintf(gfp_t gfp, const char *fmt, ...) 1969 { 1970 va_list ap; 1971 char *p; 1972 1973 va_start(ap, fmt); 1974 p = kvasprintf(gfp, fmt, ap); 1975 va_end(ap); 1976 1977 return (p); 1978 } 1979 1980 static void 1981 linux_timer_callback_wrapper(void *context) 1982 { 1983 struct timer_list *timer; 1984 1985 timer = context; 1986 1987 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1988 /* try again later */ 1989 callout_reset(&timer->callout, 1, 1990 &linux_timer_callback_wrapper, timer); 1991 return; 1992 } 1993 1994 timer->function(timer->data); 1995 } 1996 1997 int 1998 mod_timer(struct timer_list *timer, int expires) 1999 { 2000 int ret; 2001 2002 timer->expires = expires; 2003 ret = callout_reset(&timer->callout, 2004 linux_timer_jiffies_until(expires), 2005 &linux_timer_callback_wrapper, timer); 2006 2007 MPASS(ret == 0 || ret == 1); 2008 2009 return (ret == 1); 2010 } 2011 2012 void 2013 add_timer(struct timer_list *timer) 2014 { 2015 2016 callout_reset(&timer->callout, 2017 linux_timer_jiffies_until(timer->expires), 2018 &linux_timer_callback_wrapper, timer); 2019 } 2020 2021 void 2022 add_timer_on(struct timer_list *timer, int cpu) 2023 { 2024 2025 callout_reset_on(&timer->callout, 2026 linux_timer_jiffies_until(timer->expires), 2027 &linux_timer_callback_wrapper, timer, cpu); 2028 } 2029 2030 int 2031 del_timer(struct timer_list *timer) 2032 { 2033 2034 if (callout_stop(&(timer)->callout) == -1) 2035 return (0); 2036 return (1); 2037 } 2038 2039 int 2040 del_timer_sync(struct timer_list *timer) 2041 { 2042 2043 if (callout_drain(&(timer)->callout) == -1) 2044 return (0); 2045 return (1); 2046 } 2047 2048 /* greatest common divisor, Euclid equation */ 2049 static uint64_t 2050 lkpi_gcd_64(uint64_t a, uint64_t b) 2051 { 2052 uint64_t an; 2053 uint64_t bn; 2054 2055 while (b != 0) { 2056 an = b; 2057 bn = a % b; 2058 a = an; 2059 b = bn; 2060 } 2061 return (a); 2062 } 2063 2064 uint64_t lkpi_nsec2hz_rem; 2065 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2066 uint64_t lkpi_nsec2hz_max; 2067 2068 uint64_t lkpi_usec2hz_rem; 2069 uint64_t lkpi_usec2hz_div = 1000000ULL; 2070 uint64_t lkpi_usec2hz_max; 2071 2072 uint64_t lkpi_msec2hz_rem; 2073 uint64_t lkpi_msec2hz_div = 1000ULL; 2074 uint64_t lkpi_msec2hz_max; 2075 2076 static void 2077 linux_timer_init(void *arg) 2078 { 2079 uint64_t gcd; 2080 2081 /* 2082 * Compute an internal HZ value which can divide 2**32 to 2083 * avoid timer rounding problems when the tick value wraps 2084 * around 2**32: 2085 */ 2086 linux_timer_hz_mask = 1; 2087 while (linux_timer_hz_mask < (unsigned long)hz) 2088 linux_timer_hz_mask *= 2; 2089 linux_timer_hz_mask--; 2090 2091 /* compute some internal constants */ 2092 2093 lkpi_nsec2hz_rem = hz; 2094 lkpi_usec2hz_rem = hz; 2095 lkpi_msec2hz_rem = hz; 2096 2097 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2098 lkpi_nsec2hz_rem /= gcd; 2099 lkpi_nsec2hz_div /= gcd; 2100 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2101 2102 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2103 lkpi_usec2hz_rem /= gcd; 2104 lkpi_usec2hz_div /= gcd; 2105 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2106 2107 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2108 lkpi_msec2hz_rem /= gcd; 2109 lkpi_msec2hz_div /= gcd; 2110 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2111 } 2112 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2113 2114 void 2115 linux_complete_common(struct completion *c, int all) 2116 { 2117 int wakeup_swapper; 2118 2119 sleepq_lock(c); 2120 if (all) { 2121 c->done = UINT_MAX; 2122 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2123 } else { 2124 if (c->done != UINT_MAX) 2125 c->done++; 2126 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2127 } 2128 sleepq_release(c); 2129 if (wakeup_swapper) 2130 kick_proc0(); 2131 } 2132 2133 /* 2134 * Indefinite wait for done != 0 with or without signals. 2135 */ 2136 int 2137 linux_wait_for_common(struct completion *c, int flags) 2138 { 2139 struct task_struct *task; 2140 int error; 2141 2142 if (SCHEDULER_STOPPED()) 2143 return (0); 2144 2145 task = current; 2146 2147 if (flags != 0) 2148 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2149 else 2150 flags = SLEEPQ_SLEEP; 2151 error = 0; 2152 for (;;) { 2153 sleepq_lock(c); 2154 if (c->done) 2155 break; 2156 sleepq_add(c, NULL, "completion", flags, 0); 2157 if (flags & SLEEPQ_INTERRUPTIBLE) { 2158 DROP_GIANT(); 2159 error = -sleepq_wait_sig(c, 0); 2160 PICKUP_GIANT(); 2161 if (error != 0) { 2162 linux_schedule_save_interrupt_value(task, error); 2163 error = -ERESTARTSYS; 2164 goto intr; 2165 } 2166 } else { 2167 DROP_GIANT(); 2168 sleepq_wait(c, 0); 2169 PICKUP_GIANT(); 2170 } 2171 } 2172 if (c->done != UINT_MAX) 2173 c->done--; 2174 sleepq_release(c); 2175 2176 intr: 2177 return (error); 2178 } 2179 2180 /* 2181 * Time limited wait for done != 0 with or without signals. 2182 */ 2183 int 2184 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2185 { 2186 struct task_struct *task; 2187 int end = jiffies + timeout; 2188 int error; 2189 2190 if (SCHEDULER_STOPPED()) 2191 return (0); 2192 2193 task = current; 2194 2195 if (flags != 0) 2196 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2197 else 2198 flags = SLEEPQ_SLEEP; 2199 2200 for (;;) { 2201 sleepq_lock(c); 2202 if (c->done) 2203 break; 2204 sleepq_add(c, NULL, "completion", flags, 0); 2205 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2206 2207 DROP_GIANT(); 2208 if (flags & SLEEPQ_INTERRUPTIBLE) 2209 error = -sleepq_timedwait_sig(c, 0); 2210 else 2211 error = -sleepq_timedwait(c, 0); 2212 PICKUP_GIANT(); 2213 2214 if (error != 0) { 2215 /* check for timeout */ 2216 if (error == -EWOULDBLOCK) { 2217 error = 0; /* timeout */ 2218 } else { 2219 /* signal happened */ 2220 linux_schedule_save_interrupt_value(task, error); 2221 error = -ERESTARTSYS; 2222 } 2223 goto done; 2224 } 2225 } 2226 if (c->done != UINT_MAX) 2227 c->done--; 2228 sleepq_release(c); 2229 2230 /* return how many jiffies are left */ 2231 error = linux_timer_jiffies_until(end); 2232 done: 2233 return (error); 2234 } 2235 2236 int 2237 linux_try_wait_for_completion(struct completion *c) 2238 { 2239 int isdone; 2240 2241 sleepq_lock(c); 2242 isdone = (c->done != 0); 2243 if (c->done != 0 && c->done != UINT_MAX) 2244 c->done--; 2245 sleepq_release(c); 2246 return (isdone); 2247 } 2248 2249 int 2250 linux_completion_done(struct completion *c) 2251 { 2252 int isdone; 2253 2254 sleepq_lock(c); 2255 isdone = (c->done != 0); 2256 sleepq_release(c); 2257 return (isdone); 2258 } 2259 2260 static void 2261 linux_cdev_deref(struct linux_cdev *ldev) 2262 { 2263 if (refcount_release(&ldev->refs) && 2264 ldev->kobj.ktype == &linux_cdev_ktype) 2265 kfree(ldev); 2266 } 2267 2268 static void 2269 linux_cdev_release(struct kobject *kobj) 2270 { 2271 struct linux_cdev *cdev; 2272 struct kobject *parent; 2273 2274 cdev = container_of(kobj, struct linux_cdev, kobj); 2275 parent = kobj->parent; 2276 linux_destroy_dev(cdev); 2277 linux_cdev_deref(cdev); 2278 kobject_put(parent); 2279 } 2280 2281 static void 2282 linux_cdev_static_release(struct kobject *kobj) 2283 { 2284 struct cdev *cdev; 2285 struct linux_cdev *ldev; 2286 2287 ldev = container_of(kobj, struct linux_cdev, kobj); 2288 cdev = ldev->cdev; 2289 if (cdev != NULL) { 2290 destroy_dev(cdev); 2291 ldev->cdev = NULL; 2292 } 2293 kobject_put(kobj->parent); 2294 } 2295 2296 int 2297 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) 2298 { 2299 int ret; 2300 2301 if (dev->devt != 0) { 2302 /* Set parent kernel object. */ 2303 ldev->kobj.parent = &dev->kobj; 2304 2305 /* 2306 * Unlike Linux we require the kobject of the 2307 * character device structure to have a valid name 2308 * before calling this function: 2309 */ 2310 if (ldev->kobj.name == NULL) 2311 return (-EINVAL); 2312 2313 ret = cdev_add(ldev, dev->devt, 1); 2314 if (ret) 2315 return (ret); 2316 } 2317 ret = device_add(dev); 2318 if (ret != 0 && dev->devt != 0) 2319 cdev_del(ldev); 2320 return (ret); 2321 } 2322 2323 void 2324 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) 2325 { 2326 device_del(dev); 2327 2328 if (dev->devt != 0) 2329 cdev_del(ldev); 2330 } 2331 2332 static void 2333 linux_destroy_dev(struct linux_cdev *ldev) 2334 { 2335 2336 if (ldev->cdev == NULL) 2337 return; 2338 2339 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 2340 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 2341 2342 atomic_set_int(&ldev->siref, LDEV_SI_DTR); 2343 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) 2344 pause("ldevdtr", hz / 4); 2345 2346 destroy_dev(ldev->cdev); 2347 ldev->cdev = NULL; 2348 } 2349 2350 const struct kobj_type linux_cdev_ktype = { 2351 .release = linux_cdev_release, 2352 }; 2353 2354 const struct kobj_type linux_cdev_static_ktype = { 2355 .release = linux_cdev_static_release, 2356 }; 2357 2358 static void 2359 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2360 { 2361 struct notifier_block *nb; 2362 struct netdev_notifier_info ni; 2363 2364 nb = arg; 2365 ni.ifp = ifp; 2366 ni.dev = (struct net_device *)ifp; 2367 if (linkstate == LINK_STATE_UP) 2368 nb->notifier_call(nb, NETDEV_UP, &ni); 2369 else 2370 nb->notifier_call(nb, NETDEV_DOWN, &ni); 2371 } 2372 2373 static void 2374 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2375 { 2376 struct notifier_block *nb; 2377 struct netdev_notifier_info ni; 2378 2379 nb = arg; 2380 ni.ifp = ifp; 2381 ni.dev = (struct net_device *)ifp; 2382 nb->notifier_call(nb, NETDEV_REGISTER, &ni); 2383 } 2384 2385 static void 2386 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2387 { 2388 struct notifier_block *nb; 2389 struct netdev_notifier_info ni; 2390 2391 nb = arg; 2392 ni.ifp = ifp; 2393 ni.dev = (struct net_device *)ifp; 2394 nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); 2395 } 2396 2397 static void 2398 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2399 { 2400 struct notifier_block *nb; 2401 struct netdev_notifier_info ni; 2402 2403 nb = arg; 2404 ni.ifp = ifp; 2405 ni.dev = (struct net_device *)ifp; 2406 nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); 2407 } 2408 2409 static void 2410 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2411 { 2412 struct notifier_block *nb; 2413 struct netdev_notifier_info ni; 2414 2415 nb = arg; 2416 ni.ifp = ifp; 2417 ni.dev = (struct net_device *)ifp; 2418 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); 2419 } 2420 2421 int 2422 register_netdevice_notifier(struct notifier_block *nb) 2423 { 2424 2425 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2426 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2427 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2428 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2429 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2430 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2431 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2432 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2433 2434 return (0); 2435 } 2436 2437 int 2438 register_inetaddr_notifier(struct notifier_block *nb) 2439 { 2440 2441 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2442 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2443 return (0); 2444 } 2445 2446 int 2447 unregister_netdevice_notifier(struct notifier_block *nb) 2448 { 2449 2450 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2451 nb->tags[NETDEV_UP]); 2452 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2453 nb->tags[NETDEV_REGISTER]); 2454 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2455 nb->tags[NETDEV_UNREGISTER]); 2456 EVENTHANDLER_DEREGISTER(iflladdr_event, 2457 nb->tags[NETDEV_CHANGEADDR]); 2458 2459 return (0); 2460 } 2461 2462 int 2463 unregister_inetaddr_notifier(struct notifier_block *nb) 2464 { 2465 2466 EVENTHANDLER_DEREGISTER(ifaddr_event, 2467 nb->tags[NETDEV_CHANGEIFADDR]); 2468 2469 return (0); 2470 } 2471 2472 struct list_sort_thunk { 2473 int (*cmp)(void *, struct list_head *, struct list_head *); 2474 void *priv; 2475 }; 2476 2477 static inline int 2478 linux_le_cmp(void *priv, const void *d1, const void *d2) 2479 { 2480 struct list_head *le1, *le2; 2481 struct list_sort_thunk *thunk; 2482 2483 thunk = priv; 2484 le1 = *(__DECONST(struct list_head **, d1)); 2485 le2 = *(__DECONST(struct list_head **, d2)); 2486 return ((thunk->cmp)(thunk->priv, le1, le2)); 2487 } 2488 2489 void 2490 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2491 struct list_head *a, struct list_head *b)) 2492 { 2493 struct list_sort_thunk thunk; 2494 struct list_head **ar, *le; 2495 size_t count, i; 2496 2497 count = 0; 2498 list_for_each(le, head) 2499 count++; 2500 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2501 i = 0; 2502 list_for_each(le, head) 2503 ar[i++] = le; 2504 thunk.cmp = cmp; 2505 thunk.priv = priv; 2506 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2507 INIT_LIST_HEAD(head); 2508 for (i = 0; i < count; i++) 2509 list_add_tail(ar[i], head); 2510 free(ar, M_KMALLOC); 2511 } 2512 2513 #if defined(__i386__) || defined(__amd64__) 2514 int 2515 linux_wbinvd_on_all_cpus(void) 2516 { 2517 2518 pmap_invalidate_cache(); 2519 return (0); 2520 } 2521 #endif 2522 2523 int 2524 linux_on_each_cpu(void callback(void *), void *data) 2525 { 2526 2527 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2528 smp_no_rendezvous_barrier, data); 2529 return (0); 2530 } 2531 2532 int 2533 linux_in_atomic(void) 2534 { 2535 2536 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2537 } 2538 2539 struct linux_cdev * 2540 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2541 { 2542 dev_t dev = MKDEV(major, minor); 2543 struct cdev *cdev; 2544 2545 dev_lock(); 2546 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2547 struct linux_cdev *ldev = cdev->si_drv1; 2548 if (ldev->dev == dev && 2549 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2550 break; 2551 } 2552 } 2553 dev_unlock(); 2554 2555 return (cdev != NULL ? cdev->si_drv1 : NULL); 2556 } 2557 2558 int 2559 __register_chrdev(unsigned int major, unsigned int baseminor, 2560 unsigned int count, const char *name, 2561 const struct file_operations *fops) 2562 { 2563 struct linux_cdev *cdev; 2564 int ret = 0; 2565 int i; 2566 2567 for (i = baseminor; i < baseminor + count; i++) { 2568 cdev = cdev_alloc(); 2569 cdev->ops = fops; 2570 kobject_set_name(&cdev->kobj, name); 2571 2572 ret = cdev_add(cdev, makedev(major, i), 1); 2573 if (ret != 0) 2574 break; 2575 } 2576 return (ret); 2577 } 2578 2579 int 2580 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2581 unsigned int count, const char *name, 2582 const struct file_operations *fops, uid_t uid, 2583 gid_t gid, int mode) 2584 { 2585 struct linux_cdev *cdev; 2586 int ret = 0; 2587 int i; 2588 2589 for (i = baseminor; i < baseminor + count; i++) { 2590 cdev = cdev_alloc(); 2591 cdev->ops = fops; 2592 kobject_set_name(&cdev->kobj, name); 2593 2594 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2595 if (ret != 0) 2596 break; 2597 } 2598 return (ret); 2599 } 2600 2601 void 2602 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2603 unsigned int count, const char *name) 2604 { 2605 struct linux_cdev *cdevp; 2606 int i; 2607 2608 for (i = baseminor; i < baseminor + count; i++) { 2609 cdevp = linux_find_cdev(name, major, i); 2610 if (cdevp != NULL) 2611 cdev_del(cdevp); 2612 } 2613 } 2614 2615 void 2616 linux_dump_stack(void) 2617 { 2618 #ifdef STACK 2619 struct stack st; 2620 2621 stack_save(&st); 2622 stack_print(&st); 2623 #endif 2624 } 2625 2626 int 2627 linuxkpi_net_ratelimit(void) 2628 { 2629 2630 return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, 2631 lkpi_net_maxpps)); 2632 } 2633 2634 #if defined(__i386__) || defined(__amd64__) 2635 bool linux_cpu_has_clflush; 2636 #endif 2637 2638 static void 2639 linux_compat_init(void *arg) 2640 { 2641 struct sysctl_oid *rootoid; 2642 int i; 2643 2644 #if defined(__i386__) || defined(__amd64__) 2645 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2646 #endif 2647 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2648 2649 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2650 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2651 kobject_init(&linux_class_root, &linux_class_ktype); 2652 kobject_set_name(&linux_class_root, "class"); 2653 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2654 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2655 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2656 kobject_set_name(&linux_root_device.kobj, "device"); 2657 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2658 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2659 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2660 linux_root_device.bsddev = root_bus; 2661 linux_class_misc.name = "misc"; 2662 class_register(&linux_class_misc); 2663 INIT_LIST_HEAD(&pci_drivers); 2664 INIT_LIST_HEAD(&pci_devices); 2665 spin_lock_init(&pci_lock); 2666 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2667 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2668 LIST_INIT(&vmmaphead[i]); 2669 init_waitqueue_head(&linux_bit_waitq); 2670 init_waitqueue_head(&linux_var_waitq); 2671 2672 CPU_COPY(&all_cpus, &cpu_online_mask); 2673 } 2674 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2675 2676 static void 2677 linux_compat_uninit(void *arg) 2678 { 2679 linux_kobject_kfree_name(&linux_class_root); 2680 linux_kobject_kfree_name(&linux_root_device.kobj); 2681 linux_kobject_kfree_name(&linux_class_misc.kobj); 2682 2683 mtx_destroy(&vmmaplock); 2684 spin_lock_destroy(&pci_lock); 2685 rw_destroy(&linux_vma_lock); 2686 } 2687 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2688 2689 /* 2690 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2691 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2692 * used. Assert these types have the same size, else some parts of the 2693 * LinuxKPI may not work like expected: 2694 */ 2695 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2696