1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 #include "opt_global.h" 32 #include "opt_stack.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/malloc.h> 37 #include <sys/kernel.h> 38 #include <sys/sysctl.h> 39 #include <sys/proc.h> 40 #include <sys/sglist.h> 41 #include <sys/sleepqueue.h> 42 #include <sys/refcount.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/bus.h> 46 #include <sys/eventhandler.h> 47 #include <sys/fcntl.h> 48 #include <sys/file.h> 49 #include <sys/filio.h> 50 #include <sys/rwlock.h> 51 #include <sys/mman.h> 52 #include <sys/stack.h> 53 #include <sys/sysent.h> 54 #include <sys/time.h> 55 #include <sys/user.h> 56 57 #include <vm/vm.h> 58 #include <vm/pmap.h> 59 #include <vm/vm_object.h> 60 #include <vm/vm_page.h> 61 #include <vm/vm_pager.h> 62 63 #include <machine/stdarg.h> 64 65 #if defined(__i386__) || defined(__amd64__) 66 #include <machine/cputypes.h> 67 #include <machine/md_var.h> 68 #endif 69 70 #include <linux/kobject.h> 71 #include <linux/cpu.h> 72 #include <linux/device.h> 73 #include <linux/slab.h> 74 #include <linux/module.h> 75 #include <linux/moduleparam.h> 76 #include <linux/cdev.h> 77 #include <linux/file.h> 78 #include <linux/fs.h> 79 #include <linux/sysfs.h> 80 #include <linux/mm.h> 81 #include <linux/io.h> 82 #include <linux/vmalloc.h> 83 #include <linux/netdevice.h> 84 #include <linux/timer.h> 85 #include <linux/interrupt.h> 86 #include <linux/uaccess.h> 87 #include <linux/utsname.h> 88 #include <linux/list.h> 89 #include <linux/kthread.h> 90 #include <linux/kernel.h> 91 #include <linux/compat.h> 92 #include <linux/io-mapping.h> 93 #include <linux/poll.h> 94 #include <linux/smp.h> 95 #include <linux/wait_bit.h> 96 #include <linux/rcupdate.h> 97 #include <linux/interval_tree.h> 98 #include <linux/interval_tree_generic.h> 99 100 #if defined(__i386__) || defined(__amd64__) 101 #include <asm/smp.h> 102 #include <asm/processor.h> 103 #endif 104 105 #include <xen/xen.h> 106 #ifdef XENHVM 107 #undef xen_pv_domain 108 #undef xen_initial_domain 109 
/* xen/xen-os.h redefines __must_check */ 110 #undef __must_check 111 #include <xen/xen-os.h> 112 #endif 113 114 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 115 "LinuxKPI parameters"); 116 117 int linuxkpi_debug; 118 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, 119 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); 120 121 int linuxkpi_rcu_debug; 122 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, rcu_debug, CTLFLAG_RWTUN, 123 &linuxkpi_rcu_debug, 0, "Set to enable RCU warning. Clear to disable."); 124 125 int linuxkpi_warn_dump_stack = 0; 126 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, 127 &linuxkpi_warn_dump_stack, 0, 128 "Set to enable stack traces from WARN_ON(). Clear to disable."); 129 130 static struct timeval lkpi_net_lastlog; 131 static int lkpi_net_curpps; 132 static int lkpi_net_maxpps = 99; 133 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, 134 &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); 135 136 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); 137 138 #include <linux/rbtree.h> 139 /* Undo Linux compat changes. */ 140 #undef RB_ROOT 141 #undef file 142 #undef cdev 143 #define RB_ROOT(head) (head)->rbh_root 144 145 static void linux_destroy_dev(struct linux_cdev *); 146 static void linux_cdev_deref(struct linux_cdev *ldev); 147 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 148 149 cpumask_t cpu_online_mask; 150 static cpumask_t **static_single_cpu_mask; 151 static cpumask_t *static_single_cpu_mask_lcs; 152 struct kobject linux_class_root; 153 struct device linux_root_device; 154 struct class linux_class_misc; 155 struct list_head pci_drivers; 156 struct list_head pci_devices; 157 spinlock_t pci_lock; 158 struct uts_namespace init_uts_ns; 159 160 unsigned long linux_timer_hz_mask; 161 162 wait_queue_head_t linux_bit_waitq; 163 wait_queue_head_t linux_var_waitq; 164 165 int 166 panic_cmp(struct rb_node *one, struct rb_node *two) 167 { 168 panic("no cmp"); 169 } 170 171 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 172 173 #define START(node) ((node)->start) 174 #define LAST(node) ((node)->last) 175 176 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, 177 LAST,, lkpi_interval_tree) 178 179 static void 180 linux_device_release(struct device *dev) 181 { 182 pr_debug("linux_device_release: %s\n", dev_name(dev)); 183 kfree(dev); 184 } 185 186 static ssize_t 187 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 188 { 189 struct class_attribute *dattr; 190 ssize_t error; 191 192 dattr = container_of(attr, struct class_attribute, attr); 193 error = -EIO; 194 if (dattr->show) 195 error = dattr->show(container_of(kobj, struct class, kobj), 196 dattr, buf); 197 return (error); 198 } 199 200 static ssize_t 201 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 202 size_t count) 203 { 204 struct class_attribute *dattr; 205 ssize_t error; 206 207 dattr = container_of(attr, struct class_attribute, attr); 208 error = -EIO; 209 if (dattr->store) 210 error = dattr->store(container_of(kobj, struct class, kobj), 211 dattr, buf, count); 212 return (error); 213 } 214 215 static void 216 linux_class_release(struct kobject *kobj) 217 { 218 struct class *class; 219 220 class = container_of(kobj, struct class, kobj); 221 if (class->class_release) 222 class->class_release(class); 223 } 224 225 static const struct sysfs_ops linux_class_sysfs = { 
226 .show = linux_class_show, 227 .store = linux_class_store, 228 }; 229 230 const struct kobj_type linux_class_ktype = { 231 .release = linux_class_release, 232 .sysfs_ops = &linux_class_sysfs 233 }; 234 235 static void 236 linux_dev_release(struct kobject *kobj) 237 { 238 struct device *dev; 239 240 dev = container_of(kobj, struct device, kobj); 241 /* This is the precedence defined by linux. */ 242 if (dev->release) 243 dev->release(dev); 244 else if (dev->class && dev->class->dev_release) 245 dev->class->dev_release(dev); 246 } 247 248 static ssize_t 249 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 250 { 251 struct device_attribute *dattr; 252 ssize_t error; 253 254 dattr = container_of(attr, struct device_attribute, attr); 255 error = -EIO; 256 if (dattr->show) 257 error = dattr->show(container_of(kobj, struct device, kobj), 258 dattr, buf); 259 return (error); 260 } 261 262 static ssize_t 263 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 264 size_t count) 265 { 266 struct device_attribute *dattr; 267 ssize_t error; 268 269 dattr = container_of(attr, struct device_attribute, attr); 270 error = -EIO; 271 if (dattr->store) 272 error = dattr->store(container_of(kobj, struct device, kobj), 273 dattr, buf, count); 274 return (error); 275 } 276 277 static const struct sysfs_ops linux_dev_sysfs = { 278 .show = linux_dev_show, 279 .store = linux_dev_store, 280 }; 281 282 const struct kobj_type linux_dev_ktype = { 283 .release = linux_dev_release, 284 .sysfs_ops = &linux_dev_sysfs 285 }; 286 287 struct device * 288 device_create(struct class *class, struct device *parent, dev_t devt, 289 void *drvdata, const char *fmt, ...) 290 { 291 struct device *dev; 292 va_list args; 293 294 dev = kzalloc(sizeof(*dev), M_WAITOK); 295 dev->parent = parent; 296 dev->class = class; 297 dev->devt = devt; 298 dev->driver_data = drvdata; 299 dev->release = linux_device_release; 300 va_start(args, fmt); 301 kobject_set_name_vargs(&dev->kobj, fmt, args); 302 va_end(args); 303 device_register(dev); 304 305 return (dev); 306 } 307 308 struct device * 309 device_create_groups_vargs(struct class *class, struct device *parent, 310 dev_t devt, void *drvdata, const struct attribute_group **groups, 311 const char *fmt, va_list args) 312 { 313 struct device *dev = NULL; 314 int retval = -ENODEV; 315 316 if (class == NULL || IS_ERR(class)) 317 goto error; 318 319 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 320 if (!dev) { 321 retval = -ENOMEM; 322 goto error; 323 } 324 325 dev->devt = devt; 326 dev->class = class; 327 dev->parent = parent; 328 dev->groups = groups; 329 dev->release = device_create_release; 330 /* device_initialize() needs the class and parent to be set */ 331 device_initialize(dev); 332 dev_set_drvdata(dev, drvdata); 333 334 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 335 if (retval) 336 goto error; 337 338 retval = device_add(dev); 339 if (retval) 340 goto error; 341 342 return dev; 343 344 error: 345 put_device(dev); 346 return ERR_PTR(retval); 347 } 348 349 struct class * 350 lkpi_class_create(const char *name) 351 { 352 struct class *class; 353 int error; 354 355 class = kzalloc(sizeof(*class), M_WAITOK); 356 class->name = name; 357 class->class_release = linux_class_kfree; 358 error = class_register(class); 359 if (error) { 360 kfree(class); 361 return (NULL); 362 } 363 364 return (class); 365 } 366 367 static void 368 linux_kq_lock(void *arg) 369 { 370 spinlock_t *s = arg; 371 372 spin_lock(s); 373 } 374 static void 375 
linux_kq_unlock(void *arg) 376 { 377 spinlock_t *s = arg; 378 379 spin_unlock(s); 380 } 381 382 static void 383 linux_kq_assert_lock(void *arg, int what) 384 { 385 #ifdef INVARIANTS 386 spinlock_t *s = arg; 387 388 if (what == LA_LOCKED) 389 mtx_assert(s, MA_OWNED); 390 else 391 mtx_assert(s, MA_NOTOWNED); 392 #endif 393 } 394 395 static void 396 linux_file_kqfilter_poll(struct linux_file *, int); 397 398 struct linux_file * 399 linux_file_alloc(void) 400 { 401 struct linux_file *filp; 402 403 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 404 405 /* set initial refcount */ 406 filp->f_count = 1; 407 408 /* setup fields needed by kqueue support */ 409 spin_lock_init(&filp->f_kqlock); 410 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 411 linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); 412 413 return (filp); 414 } 415 416 void 417 linux_file_free(struct linux_file *filp) 418 { 419 if (filp->_file == NULL) { 420 if (filp->f_op != NULL && filp->f_op->release != NULL) 421 filp->f_op->release(filp->f_vnode, filp); 422 if (filp->f_shmem != NULL) 423 vm_object_deallocate(filp->f_shmem); 424 kfree_rcu(filp, rcu); 425 } else { 426 /* 427 * The close method of the character device or file 428 * will free the linux_file structure: 429 */ 430 _fdrop(filp->_file, curthread); 431 } 432 } 433 434 struct linux_cdev * 435 cdev_alloc(void) 436 { 437 struct linux_cdev *cdev; 438 439 cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); 440 kobject_init(&cdev->kobj, &linux_cdev_ktype); 441 cdev->refs = 1; 442 return (cdev); 443 } 444 445 static int 446 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 447 vm_page_t *mres) 448 { 449 struct vm_area_struct *vmap; 450 451 vmap = linux_cdev_handle_find(vm_obj->handle); 452 453 MPASS(vmap != NULL); 454 MPASS(vmap->vm_private_data == vm_obj->handle); 455 456 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 457 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 458 vm_page_t page; 459 460 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 461 /* 462 * If the passed in result page is a fake 463 * page, update it with the new physical 464 * address. 465 */ 466 page = *mres; 467 vm_page_updatefake(page, paddr, vm_obj->memattr); 468 } else { 469 /* 470 * Replace the passed in "mres" page with our 471 * own fake page and free up the all of the 472 * original pages. 473 */ 474 VM_OBJECT_WUNLOCK(vm_obj); 475 page = vm_page_getfake(paddr, vm_obj->memattr); 476 VM_OBJECT_WLOCK(vm_obj); 477 478 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 479 *mres = page; 480 } 481 vm_page_valid(page); 482 return (VM_PAGER_OK); 483 } 484 return (VM_PAGER_FAIL); 485 } 486 487 static int 488 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 489 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 490 { 491 struct vm_area_struct *vmap; 492 int err; 493 494 /* get VM area structure */ 495 vmap = linux_cdev_handle_find(vm_obj->handle); 496 MPASS(vmap != NULL); 497 MPASS(vmap->vm_private_data == vm_obj->handle); 498 499 VM_OBJECT_WUNLOCK(vm_obj); 500 501 linux_set_current(curthread); 502 503 down_write(&vmap->vm_mm->mmap_sem); 504 if (unlikely(vmap->vm_ops == NULL)) { 505 err = VM_FAULT_SIGBUS; 506 } else { 507 struct vm_fault vmf; 508 509 /* fill out VM fault structure */ 510 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 511 vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; 512 vmf.pgoff = 0; 513 vmf.page = NULL; 514 vmf.vma = vmap; 515 516 vmap->vm_pfn_count = 0; 517 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 518 vmap->vm_obj = vm_obj; 519 520 err = vmap->vm_ops->fault(&vmf); 521 522 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 523 kern_yield(PRI_USER); 524 err = vmap->vm_ops->fault(&vmf); 525 } 526 } 527 528 /* translate return code */ 529 switch (err) { 530 case VM_FAULT_OOM: 531 err = VM_PAGER_AGAIN; 532 break; 533 case VM_FAULT_SIGBUS: 534 err = VM_PAGER_BAD; 535 break; 536 case VM_FAULT_NOPAGE: 537 /* 538 * By contract the fault handler will return having 539 * busied all the pages itself. If pidx is already 540 * found in the object, it will simply xbusy the first 541 * page and return with vm_pfn_count set to 1. 542 */ 543 *first = vmap->vm_pfn_first; 544 *last = *first + vmap->vm_pfn_count - 1; 545 err = VM_PAGER_OK; 546 break; 547 default: 548 err = VM_PAGER_ERROR; 549 break; 550 } 551 up_write(&vmap->vm_mm->mmap_sem); 552 VM_OBJECT_WLOCK(vm_obj); 553 return (err); 554 } 555 556 static struct rwlock linux_vma_lock; 557 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 558 TAILQ_HEAD_INITIALIZER(linux_vma_head); 559 560 static void 561 linux_cdev_handle_free(struct vm_area_struct *vmap) 562 { 563 /* Drop reference on vm_file */ 564 if (vmap->vm_file != NULL) 565 fput(vmap->vm_file); 566 567 /* Drop reference on mm_struct */ 568 mmput(vmap->vm_mm); 569 570 kfree(vmap); 571 } 572 573 static void 574 linux_cdev_handle_remove(struct vm_area_struct *vmap) 575 { 576 rw_wlock(&linux_vma_lock); 577 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 578 rw_wunlock(&linux_vma_lock); 579 } 580 581 static struct vm_area_struct * 582 linux_cdev_handle_find(void *handle) 583 { 584 struct vm_area_struct *vmap; 585 586 rw_rlock(&linux_vma_lock); 587 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 588 if (vmap->vm_private_data == handle) 589 break; 590 } 591 rw_runlock(&linux_vma_lock); 592 return (vmap); 593 } 594 595 static int 596 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 597 vm_ooffset_t foff, struct ucred *cred, u_short *color) 598 { 599 600 MPASS(linux_cdev_handle_find(handle) != NULL); 601 *color = 0; 602 return (0); 603 } 604 605 static void 606 linux_cdev_pager_dtor(void *handle) 607 { 608 const struct vm_operations_struct *vm_ops; 609 struct vm_area_struct *vmap; 610 611 vmap = linux_cdev_handle_find(handle); 612 MPASS(vmap != NULL); 613 614 /* 615 * Remove handle before calling close operation to prevent 616 * other threads from reusing the handle pointer. 
617 */ 618 linux_cdev_handle_remove(vmap); 619 620 down_write(&vmap->vm_mm->mmap_sem); 621 vm_ops = vmap->vm_ops; 622 if (likely(vm_ops != NULL)) 623 vm_ops->close(vmap); 624 up_write(&vmap->vm_mm->mmap_sem); 625 626 linux_cdev_handle_free(vmap); 627 } 628 629 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 630 { 631 /* OBJT_MGTDEVICE */ 632 .cdev_pg_populate = linux_cdev_pager_populate, 633 .cdev_pg_ctor = linux_cdev_pager_ctor, 634 .cdev_pg_dtor = linux_cdev_pager_dtor 635 }, 636 { 637 /* OBJT_DEVICE */ 638 .cdev_pg_fault = linux_cdev_pager_fault, 639 .cdev_pg_ctor = linux_cdev_pager_ctor, 640 .cdev_pg_dtor = linux_cdev_pager_dtor 641 }, 642 }; 643 644 int 645 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 646 unsigned long size) 647 { 648 vm_object_t obj; 649 vm_page_t m; 650 651 obj = vma->vm_obj; 652 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) 653 return (-ENOTSUP); 654 VM_OBJECT_RLOCK(obj); 655 for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); 656 m != NULL && m->pindex < OFF_TO_IDX(address + size); 657 m = TAILQ_NEXT(m, listq)) 658 pmap_remove_all(m); 659 VM_OBJECT_RUNLOCK(obj); 660 return (0); 661 } 662 663 void 664 vma_set_file(struct vm_area_struct *vma, struct linux_file *file) 665 { 666 struct linux_file *tmp; 667 668 /* Changing an anonymous vma with this is illegal */ 669 get_file(file); 670 tmp = vma->vm_file; 671 vma->vm_file = file; 672 fput(tmp); 673 } 674 675 static struct file_operations dummy_ldev_ops = { 676 /* XXXKIB */ 677 }; 678 679 static struct linux_cdev dummy_ldev = { 680 .ops = &dummy_ldev_ops, 681 }; 682 683 #define LDEV_SI_DTR 0x0001 684 #define LDEV_SI_REF 0x0002 685 686 static void 687 linux_get_fop(struct linux_file *filp, const struct file_operations **fop, 688 struct linux_cdev **dev) 689 { 690 struct linux_cdev *ldev; 691 u_int siref; 692 693 ldev = filp->f_cdev; 694 *fop = filp->f_op; 695 if (ldev != NULL) { 696 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 697 refcount_acquire(&ldev->refs); 698 } else { 699 for (siref = ldev->siref;;) { 700 if ((siref & LDEV_SI_DTR) != 0) { 701 ldev = &dummy_ldev; 702 *fop = ldev->ops; 703 siref = ldev->siref; 704 MPASS((ldev->siref & LDEV_SI_DTR) == 0); 705 } else if (atomic_fcmpset_int(&ldev->siref, 706 &siref, siref + LDEV_SI_REF)) { 707 break; 708 } 709 } 710 } 711 } 712 *dev = ldev; 713 } 714 715 static void 716 linux_drop_fop(struct linux_cdev *ldev) 717 { 718 719 if (ldev == NULL) 720 return; 721 if (ldev->kobj.ktype == &linux_cdev_static_ktype) { 722 linux_cdev_deref(ldev); 723 } else { 724 MPASS(ldev->kobj.ktype == &linux_cdev_ktype); 725 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); 726 atomic_subtract_int(&ldev->siref, LDEV_SI_REF); 727 } 728 } 729 730 #define OPW(fp,td,code) ({ \ 731 struct file *__fpop; \ 732 __typeof(code) __retval; \ 733 \ 734 __fpop = (td)->td_fpop; \ 735 (td)->td_fpop = (fp); \ 736 __retval = (code); \ 737 (td)->td_fpop = __fpop; \ 738 __retval; \ 739 }) 740 741 static int 742 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, 743 struct file *file) 744 { 745 struct linux_cdev *ldev; 746 struct linux_file *filp; 747 const struct file_operations *fop; 748 int error; 749 750 ldev = dev->si_drv1; 751 752 filp = linux_file_alloc(); 753 filp->f_dentry = &filp->f_dentry_store; 754 filp->f_op = ldev->ops; 755 filp->f_mode = file->f_flag; 756 filp->f_flags = file->f_flag; 757 filp->f_vnode = file->f_vnode; 758 filp->_file = file; 759 refcount_acquire(&ldev->refs); 760 filp->f_cdev = ldev; 761 762 linux_set_current(td); 763 
linux_get_fop(filp, &fop, &ldev); 764 765 if (fop->open != NULL) { 766 error = -fop->open(file->f_vnode, filp); 767 if (error != 0) { 768 linux_drop_fop(ldev); 769 linux_cdev_deref(filp->f_cdev); 770 kfree(filp); 771 return (error); 772 } 773 } 774 775 /* hold on to the vnode - used for fstat() */ 776 vref(filp->f_vnode); 777 778 /* release the file from devfs */ 779 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 780 linux_drop_fop(ldev); 781 return (ENXIO); 782 } 783 784 #define LINUX_IOCTL_MIN_PTR 0x10000UL 785 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 786 787 static inline int 788 linux_remap_address(void **uaddr, size_t len) 789 { 790 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 791 792 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 793 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 794 struct task_struct *pts = current; 795 if (pts == NULL) { 796 *uaddr = NULL; 797 return (1); 798 } 799 800 /* compute data offset */ 801 uaddr_val -= LINUX_IOCTL_MIN_PTR; 802 803 /* check that length is within bounds */ 804 if ((len > IOCPARM_MAX) || 805 (uaddr_val + len) > pts->bsd_ioctl_len) { 806 *uaddr = NULL; 807 return (1); 808 } 809 810 /* re-add kernel buffer address */ 811 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 812 813 /* update address location */ 814 *uaddr = (void *)uaddr_val; 815 return (1); 816 } 817 return (0); 818 } 819 820 int 821 linux_copyin(const void *uaddr, void *kaddr, size_t len) 822 { 823 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 824 if (uaddr == NULL) 825 return (-EFAULT); 826 memcpy(kaddr, uaddr, len); 827 return (0); 828 } 829 return (-copyin(uaddr, kaddr, len)); 830 } 831 832 int 833 linux_copyout(const void *kaddr, void *uaddr, size_t len) 834 { 835 if (linux_remap_address(&uaddr, len)) { 836 if (uaddr == NULL) 837 return (-EFAULT); 838 memcpy(uaddr, kaddr, len); 839 return (0); 840 } 841 return (-copyout(kaddr, uaddr, len)); 842 } 843 844 size_t 845 linux_clear_user(void *_uaddr, size_t _len) 846 { 847 uint8_t *uaddr = _uaddr; 848 size_t len = _len; 849 850 /* make sure uaddr is aligned before going into the fast loop */ 851 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 852 if (subyte(uaddr, 0)) 853 return (_len); 854 uaddr++; 855 len--; 856 } 857 858 /* zero 8 bytes at a time */ 859 while (len > 7) { 860 #ifdef __LP64__ 861 if (suword64(uaddr, 0)) 862 return (_len); 863 #else 864 if (suword32(uaddr, 0)) 865 return (_len); 866 if (suword32(uaddr + 4, 0)) 867 return (_len); 868 #endif 869 uaddr += 8; 870 len -= 8; 871 } 872 873 /* zero fill end, if any */ 874 while (len > 0) { 875 if (subyte(uaddr, 0)) 876 return (_len); 877 uaddr++; 878 len--; 879 } 880 return (0); 881 } 882 883 int 884 linux_access_ok(const void *uaddr, size_t len) 885 { 886 uintptr_t saddr; 887 uintptr_t eaddr; 888 889 /* get start and end address */ 890 saddr = (uintptr_t)uaddr; 891 eaddr = (uintptr_t)uaddr + len; 892 893 /* verify addresses are valid for userspace */ 894 return ((saddr == eaddr) || 895 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 896 } 897 898 /* 899 * This function should return either EINTR or ERESTART depending on 900 * the signal type sent to this thread: 901 */ 902 static int 903 linux_get_error(struct task_struct *task, int error) 904 { 905 /* check for signal type interrupt code */ 906 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 907 error = -linux_schedule_get_interrupt_value(task); 908 if (error == 0) 909 error = EINTR; 910 } 911 return (error); 912 } 913 914 static int 915 linux_file_ioctl_sub(struct 
file *fp, struct linux_file *filp, 916 const struct file_operations *fop, u_long cmd, caddr_t data, 917 struct thread *td) 918 { 919 struct task_struct *task = current; 920 unsigned size; 921 int error; 922 923 size = IOCPARM_LEN(cmd); 924 /* refer to logic in sys_ioctl() */ 925 if (size > 0) { 926 /* 927 * Setup hint for linux_copyin() and linux_copyout(). 928 * 929 * Background: Linux code expects a user-space address 930 * while FreeBSD supplies a kernel-space address. 931 */ 932 task->bsd_ioctl_data = data; 933 task->bsd_ioctl_len = size; 934 data = (void *)LINUX_IOCTL_MIN_PTR; 935 } else { 936 /* fetch user-space pointer */ 937 data = *(void **)data; 938 } 939 #ifdef COMPAT_FREEBSD32 940 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { 941 /* try the compat IOCTL handler first */ 942 if (fop->compat_ioctl != NULL) { 943 error = -OPW(fp, td, fop->compat_ioctl(filp, 944 cmd, (u_long)data)); 945 } else { 946 error = ENOTTY; 947 } 948 949 /* fallback to the regular IOCTL handler, if any */ 950 if (error == ENOTTY && fop->unlocked_ioctl != NULL) { 951 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 952 cmd, (u_long)data)); 953 } 954 } else 955 #endif 956 { 957 if (fop->unlocked_ioctl != NULL) { 958 error = -OPW(fp, td, fop->unlocked_ioctl(filp, 959 cmd, (u_long)data)); 960 } else { 961 error = ENOTTY; 962 } 963 } 964 if (size > 0) { 965 task->bsd_ioctl_data = NULL; 966 task->bsd_ioctl_len = 0; 967 } 968 969 if (error == EWOULDBLOCK) { 970 /* update kqfilter status, if any */ 971 linux_file_kqfilter_poll(filp, 972 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 973 } else { 974 error = linux_get_error(task, error); 975 } 976 return (error); 977 } 978 979 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 980 981 /* 982 * This function atomically updates the poll wakeup state and returns 983 * the previous state at the time of update. 
984 */ 985 static uint8_t 986 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 987 { 988 int c, old; 989 990 c = v->counter; 991 992 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 993 c = old; 994 995 return (c); 996 } 997 998 static int 999 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 1000 { 1001 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1002 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1003 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1004 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 1005 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 1006 }; 1007 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 1008 1009 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1010 case LINUX_FWQ_STATE_QUEUED: 1011 linux_poll_wakeup(filp); 1012 return (1); 1013 default: 1014 return (0); 1015 } 1016 } 1017 1018 void 1019 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 1020 { 1021 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1022 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 1023 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 1024 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 1025 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 1026 }; 1027 1028 /* check if we are called inside the select system call */ 1029 if (p == LINUX_POLL_TABLE_NORMAL) 1030 selrecord(curthread, &filp->f_selinfo); 1031 1032 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1033 case LINUX_FWQ_STATE_INIT: 1034 /* NOTE: file handles can only belong to one wait-queue */ 1035 filp->f_wait_queue.wqh = wqh; 1036 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 1037 add_wait_queue(wqh, &filp->f_wait_queue.wq); 1038 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 1039 break; 1040 default: 1041 break; 1042 } 1043 } 1044 1045 static void 1046 linux_poll_wait_dequeue(struct linux_file *filp) 1047 { 1048 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 1049 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 1050 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 1051 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 1052 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 1053 }; 1054 1055 seldrain(&filp->f_selinfo); 1056 1057 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 1058 case LINUX_FWQ_STATE_NOT_READY: 1059 case LINUX_FWQ_STATE_QUEUED: 1060 case LINUX_FWQ_STATE_READY: 1061 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 1062 break; 1063 default: 1064 break; 1065 } 1066 } 1067 1068 void 1069 linux_poll_wakeup(struct linux_file *filp) 1070 { 1071 /* this function should be NULL-safe */ 1072 if (filp == NULL) 1073 return; 1074 1075 selwakeup(&filp->f_selinfo); 1076 1077 spin_lock(&filp->f_kqlock); 1078 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1079 LINUX_KQ_FLAG_NEED_WRITE; 1080 1081 /* make sure the "knote" gets woken up */ 1082 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1083 spin_unlock(&filp->f_kqlock); 1084 } 1085 1086 static struct linux_file * 1087 __get_file_rcu(struct linux_file **f) 1088 { 1089 struct linux_file *file1, *file2; 1090 1091 file1 = READ_ONCE(*f); 1092 if (file1 == NULL) 1093 return (NULL); 1094 1095 if (!refcount_acquire_if_not_zero( 1096 file1->_file == NULL ? 
&file1->f_count : &file1->_file->f_count)) 1097 return (ERR_PTR(-EAGAIN)); 1098 1099 file2 = READ_ONCE(*f); 1100 if (file2 == file1) 1101 return (file2); 1102 1103 fput(file1); 1104 return (ERR_PTR(-EAGAIN)); 1105 } 1106 1107 struct linux_file * 1108 linux_get_file_rcu(struct linux_file **f) 1109 { 1110 struct linux_file *file1; 1111 1112 for (;;) { 1113 file1 = __get_file_rcu(f); 1114 if (file1 == NULL) 1115 return (NULL); 1116 1117 if (IS_ERR(file1)) 1118 continue; 1119 1120 return (file1); 1121 } 1122 } 1123 1124 struct linux_file * 1125 get_file_active(struct linux_file **f) 1126 { 1127 struct linux_file *file1; 1128 1129 rcu_read_lock(); 1130 file1 = __get_file_rcu(f); 1131 rcu_read_unlock(); 1132 if (IS_ERR(file1)) 1133 file1 = NULL; 1134 1135 return (file1); 1136 } 1137 1138 static void 1139 linux_file_kqfilter_detach(struct knote *kn) 1140 { 1141 struct linux_file *filp = kn->kn_hook; 1142 1143 spin_lock(&filp->f_kqlock); 1144 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1145 spin_unlock(&filp->f_kqlock); 1146 } 1147 1148 static int 1149 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1150 { 1151 struct linux_file *filp = kn->kn_hook; 1152 1153 mtx_assert(&filp->f_kqlock, MA_OWNED); 1154 1155 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1156 } 1157 1158 static int 1159 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1160 { 1161 struct linux_file *filp = kn->kn_hook; 1162 1163 mtx_assert(&filp->f_kqlock, MA_OWNED); 1164 1165 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1166 } 1167 1168 static const struct filterops linux_dev_kqfiltops_read = { 1169 .f_isfd = 1, 1170 .f_detach = linux_file_kqfilter_detach, 1171 .f_event = linux_file_kqfilter_read_event, 1172 }; 1173 1174 static const struct filterops linux_dev_kqfiltops_write = { 1175 .f_isfd = 1, 1176 .f_detach = linux_file_kqfilter_detach, 1177 .f_event = linux_file_kqfilter_write_event, 1178 }; 1179 1180 static void 1181 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1182 { 1183 struct thread *td; 1184 const struct file_operations *fop; 1185 struct linux_cdev *ldev; 1186 int temp; 1187 1188 if ((filp->f_kqflags & kqflags) == 0) 1189 return; 1190 1191 td = curthread; 1192 1193 linux_get_fop(filp, &fop, &ldev); 1194 /* get the latest polling state */ 1195 temp = OPW(filp->_file, td, fop->poll(filp, NULL)); 1196 linux_drop_fop(ldev); 1197 1198 spin_lock(&filp->f_kqlock); 1199 /* clear kqflags */ 1200 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1201 LINUX_KQ_FLAG_NEED_WRITE); 1202 /* update kqflags */ 1203 if ((temp & (POLLIN | POLLOUT)) != 0) { 1204 if ((temp & POLLIN) != 0) 1205 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1206 if ((temp & POLLOUT) != 0) 1207 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1208 1209 /* make sure the "knote" gets woken up */ 1210 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1211 } 1212 spin_unlock(&filp->f_kqlock); 1213 } 1214 1215 static int 1216 linux_file_kqfilter(struct file *file, struct knote *kn) 1217 { 1218 struct linux_file *filp; 1219 struct thread *td; 1220 int error; 1221 1222 td = curthread; 1223 filp = (struct linux_file *)file->f_data; 1224 filp->f_flags = file->f_flag; 1225 if (filp->f_op->poll == NULL) 1226 return (EINVAL); 1227 1228 spin_lock(&filp->f_kqlock); 1229 switch (kn->kn_filter) { 1230 case EVFILT_READ: 1231 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1232 kn->kn_fop = &linux_dev_kqfiltops_read; 1233 kn->kn_hook = filp; 1234 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1235 error = 0; 1236 break; 
1237 case EVFILT_WRITE: 1238 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1239 kn->kn_fop = &linux_dev_kqfiltops_write; 1240 kn->kn_hook = filp; 1241 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1242 error = 0; 1243 break; 1244 default: 1245 error = EINVAL; 1246 break; 1247 } 1248 spin_unlock(&filp->f_kqlock); 1249 1250 if (error == 0) { 1251 linux_set_current(td); 1252 1253 /* update kqfilter status, if any */ 1254 linux_file_kqfilter_poll(filp, 1255 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1256 } 1257 return (error); 1258 } 1259 1260 static int 1261 linux_file_mmap_single(struct file *fp, const struct file_operations *fop, 1262 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, 1263 int nprot, bool is_shared, struct thread *td) 1264 { 1265 struct task_struct *task; 1266 struct vm_area_struct *vmap; 1267 struct mm_struct *mm; 1268 struct linux_file *filp; 1269 vm_memattr_t attr; 1270 int error; 1271 1272 filp = (struct linux_file *)fp->f_data; 1273 filp->f_flags = fp->f_flag; 1274 1275 if (fop->mmap == NULL) 1276 return (EOPNOTSUPP); 1277 1278 linux_set_current(td); 1279 1280 /* 1281 * The same VM object might be shared by multiple processes 1282 * and the mm_struct is usually freed when a process exits. 1283 * 1284 * The atomic reference below makes sure the mm_struct is 1285 * available as long as the vmap is in the linux_vma_head. 1286 */ 1287 task = current; 1288 mm = task->mm; 1289 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1290 return (EINVAL); 1291 1292 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1293 vmap->vm_start = 0; 1294 vmap->vm_end = size; 1295 vmap->vm_pgoff = *offset / PAGE_SIZE; 1296 vmap->vm_pfn = 0; 1297 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1298 if (is_shared) 1299 vmap->vm_flags |= VM_SHARED; 1300 vmap->vm_ops = NULL; 1301 vmap->vm_file = get_file(filp); 1302 vmap->vm_mm = mm; 1303 1304 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1305 error = linux_get_error(task, EINTR); 1306 } else { 1307 error = -OPW(fp, td, fop->mmap(filp, vmap)); 1308 error = linux_get_error(task, error); 1309 up_write(&vmap->vm_mm->mmap_sem); 1310 } 1311 1312 if (error != 0) { 1313 linux_cdev_handle_free(vmap); 1314 return (error); 1315 } 1316 1317 attr = pgprot2cachemode(vmap->vm_page_prot); 1318 1319 if (vmap->vm_ops != NULL) { 1320 struct vm_area_struct *ptr; 1321 void *vm_private_data; 1322 bool vm_no_fault; 1323 1324 if (vmap->vm_ops->open == NULL || 1325 vmap->vm_ops->close == NULL || 1326 vmap->vm_private_data == NULL) { 1327 /* free allocated VM area struct */ 1328 linux_cdev_handle_free(vmap); 1329 return (EINVAL); 1330 } 1331 1332 vm_private_data = vmap->vm_private_data; 1333 1334 rw_wlock(&linux_vma_lock); 1335 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1336 if (ptr->vm_private_data == vm_private_data) 1337 break; 1338 } 1339 /* check if there is an existing VM area struct */ 1340 if (ptr != NULL) { 1341 /* check if the VM area structure is invalid */ 1342 if (ptr->vm_ops == NULL || 1343 ptr->vm_ops->open == NULL || 1344 ptr->vm_ops->close == NULL) { 1345 error = ESTALE; 1346 vm_no_fault = 1; 1347 } else { 1348 error = EEXIST; 1349 vm_no_fault = (ptr->vm_ops->fault == NULL); 1350 } 1351 } else { 1352 /* insert VM area structure into list */ 1353 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1354 error = 0; 1355 vm_no_fault = (vmap->vm_ops->fault == NULL); 1356 } 1357 rw_wunlock(&linux_vma_lock); 1358 1359 if (error != 0) { 1360 /* free allocated VM area struct */ 1361 linux_cdev_handle_free(vmap); 1362 /* check 
for stale VM area struct */ 1363 if (error != EEXIST) 1364 return (error); 1365 } 1366 1367 /* check if there is no fault handler */ 1368 if (vm_no_fault) { 1369 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1370 &linux_cdev_pager_ops[1], size, nprot, *offset, 1371 td->td_ucred); 1372 } else { 1373 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1374 &linux_cdev_pager_ops[0], size, nprot, *offset, 1375 td->td_ucred); 1376 } 1377 1378 /* check if allocating the VM object failed */ 1379 if (*object == NULL) { 1380 if (error == 0) { 1381 /* remove VM area struct from list */ 1382 linux_cdev_handle_remove(vmap); 1383 /* free allocated VM area struct */ 1384 linux_cdev_handle_free(vmap); 1385 } 1386 return (EINVAL); 1387 } 1388 } else { 1389 struct sglist *sg; 1390 1391 sg = sglist_alloc(1, M_WAITOK); 1392 sglist_append_phys(sg, 1393 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1394 1395 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1396 nprot, 0, td->td_ucred); 1397 1398 linux_cdev_handle_free(vmap); 1399 1400 if (*object == NULL) { 1401 sglist_free(sg); 1402 return (EINVAL); 1403 } 1404 } 1405 1406 if (attr != VM_MEMATTR_DEFAULT) { 1407 VM_OBJECT_WLOCK(*object); 1408 vm_object_set_memattr(*object, attr); 1409 VM_OBJECT_WUNLOCK(*object); 1410 } 1411 *offset = 0; 1412 return (0); 1413 } 1414 1415 struct cdevsw linuxcdevsw = { 1416 .d_version = D_VERSION, 1417 .d_fdopen = linux_dev_fdopen, 1418 .d_name = "lkpidev", 1419 }; 1420 1421 static int 1422 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1423 int flags, struct thread *td) 1424 { 1425 struct linux_file *filp; 1426 const struct file_operations *fop; 1427 struct linux_cdev *ldev; 1428 ssize_t bytes; 1429 int error; 1430 1431 error = 0; 1432 filp = (struct linux_file *)file->f_data; 1433 filp->f_flags = file->f_flag; 1434 /* XXX no support for I/O vectors currently */ 1435 if (uio->uio_iovcnt != 1) 1436 return (EOPNOTSUPP); 1437 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1438 return (EINVAL); 1439 linux_set_current(td); 1440 linux_get_fop(filp, &fop, &ldev); 1441 if (fop->read != NULL) { 1442 bytes = OPW(file, td, fop->read(filp, 1443 uio->uio_iov->iov_base, 1444 uio->uio_iov->iov_len, &uio->uio_offset)); 1445 if (bytes >= 0) { 1446 uio->uio_iov->iov_base = 1447 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1448 uio->uio_iov->iov_len -= bytes; 1449 uio->uio_resid -= bytes; 1450 } else { 1451 error = linux_get_error(current, -bytes); 1452 } 1453 } else 1454 error = ENXIO; 1455 1456 /* update kqfilter status, if any */ 1457 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1458 linux_drop_fop(ldev); 1459 1460 return (error); 1461 } 1462 1463 static int 1464 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1465 int flags, struct thread *td) 1466 { 1467 struct linux_file *filp; 1468 const struct file_operations *fop; 1469 struct linux_cdev *ldev; 1470 ssize_t bytes; 1471 int error; 1472 1473 filp = (struct linux_file *)file->f_data; 1474 filp->f_flags = file->f_flag; 1475 /* XXX no support for I/O vectors currently */ 1476 if (uio->uio_iovcnt != 1) 1477 return (EOPNOTSUPP); 1478 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1479 return (EINVAL); 1480 linux_set_current(td); 1481 linux_get_fop(filp, &fop, &ldev); 1482 if (fop->write != NULL) { 1483 bytes = OPW(file, td, fop->write(filp, 1484 uio->uio_iov->iov_base, 1485 uio->uio_iov->iov_len, &uio->uio_offset)); 1486 if (bytes >= 0) { 1487 uio->uio_iov->iov_base = 1488 ((uint8_t 
*)uio->uio_iov->iov_base) + bytes; 1489 uio->uio_iov->iov_len -= bytes; 1490 uio->uio_resid -= bytes; 1491 error = 0; 1492 } else { 1493 error = linux_get_error(current, -bytes); 1494 } 1495 } else 1496 error = ENXIO; 1497 1498 /* update kqfilter status, if any */ 1499 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1500 1501 linux_drop_fop(ldev); 1502 1503 return (error); 1504 } 1505 1506 static int 1507 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1508 struct thread *td) 1509 { 1510 struct linux_file *filp; 1511 const struct file_operations *fop; 1512 struct linux_cdev *ldev; 1513 int revents; 1514 1515 filp = (struct linux_file *)file->f_data; 1516 filp->f_flags = file->f_flag; 1517 linux_set_current(td); 1518 linux_get_fop(filp, &fop, &ldev); 1519 if (fop->poll != NULL) { 1520 revents = OPW(file, td, fop->poll(filp, 1521 LINUX_POLL_TABLE_NORMAL)) & events; 1522 } else { 1523 revents = 0; 1524 } 1525 linux_drop_fop(ldev); 1526 return (revents); 1527 } 1528 1529 static int 1530 linux_file_close(struct file *file, struct thread *td) 1531 { 1532 struct linux_file *filp; 1533 int (*release)(struct inode *, struct linux_file *); 1534 const struct file_operations *fop; 1535 struct linux_cdev *ldev; 1536 int error; 1537 1538 filp = (struct linux_file *)file->f_data; 1539 1540 KASSERT(file_count(filp) == 0, 1541 ("File refcount(%d) is not zero", file_count(filp))); 1542 1543 if (td == NULL) 1544 td = curthread; 1545 1546 error = 0; 1547 filp->f_flags = file->f_flag; 1548 linux_set_current(td); 1549 linux_poll_wait_dequeue(filp); 1550 linux_get_fop(filp, &fop, &ldev); 1551 /* 1552 * Always use the real release function, if any, to avoid 1553 * leaking device resources: 1554 */ 1555 release = filp->f_op->release; 1556 if (release != NULL) 1557 error = -OPW(file, td, release(filp->f_vnode, filp)); 1558 funsetown(&filp->f_sigio); 1559 if (filp->f_vnode != NULL) 1560 vrele(filp->f_vnode); 1561 linux_drop_fop(ldev); 1562 ldev = filp->f_cdev; 1563 if (ldev != NULL) 1564 linux_cdev_deref(ldev); 1565 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1566 kfree(filp); 1567 1568 return (error); 1569 } 1570 1571 static int 1572 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1573 struct thread *td) 1574 { 1575 struct linux_file *filp; 1576 const struct file_operations *fop; 1577 struct linux_cdev *ldev; 1578 struct fiodgname_arg *fgn; 1579 const char *p; 1580 int error, i; 1581 1582 error = 0; 1583 filp = (struct linux_file *)fp->f_data; 1584 filp->f_flags = fp->f_flag; 1585 linux_get_fop(filp, &fop, &ldev); 1586 1587 linux_set_current(td); 1588 switch (cmd) { 1589 case FIONBIO: 1590 break; 1591 case FIOASYNC: 1592 if (fop->fasync == NULL) 1593 break; 1594 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1595 break; 1596 case FIOSETOWN: 1597 error = fsetown(*(int *)data, &filp->f_sigio); 1598 if (error == 0) { 1599 if (fop->fasync == NULL) 1600 break; 1601 error = -OPW(fp, td, fop->fasync(0, filp, 1602 fp->f_flag & FASYNC)); 1603 } 1604 break; 1605 case FIOGETOWN: 1606 *(int *)data = fgetown(&filp->f_sigio); 1607 break; 1608 case FIODGNAME: 1609 #ifdef COMPAT_FREEBSD32 1610 case FIODGNAME_32: 1611 #endif 1612 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1613 error = ENXIO; 1614 break; 1615 } 1616 fgn = data; 1617 p = devtoname(filp->f_cdev->cdev); 1618 i = strlen(p) + 1; 1619 if (i > fgn->len) { 1620 error = EINVAL; 1621 break; 1622 } 1623 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1624 break; 1625 default: 
1626 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1627 break; 1628 } 1629 linux_drop_fop(ldev); 1630 return (error); 1631 } 1632 1633 static int 1634 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1635 vm_prot_t maxprot, int flags, struct file *fp, 1636 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1637 { 1638 /* 1639 * Character devices do not provide private mappings 1640 * of any kind: 1641 */ 1642 if ((maxprot & VM_PROT_WRITE) == 0 && 1643 (prot & VM_PROT_WRITE) != 0) 1644 return (EACCES); 1645 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1646 return (EINVAL); 1647 1648 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1649 (int)prot, (flags & MAP_SHARED) ? true : false, td)); 1650 } 1651 1652 static int 1653 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1654 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1655 struct thread *td) 1656 { 1657 struct linux_file *filp; 1658 const struct file_operations *fop; 1659 struct linux_cdev *ldev; 1660 struct mount *mp; 1661 struct vnode *vp; 1662 vm_object_t object; 1663 vm_prot_t maxprot; 1664 int error; 1665 1666 filp = (struct linux_file *)fp->f_data; 1667 1668 vp = filp->f_vnode; 1669 if (vp == NULL) 1670 return (EOPNOTSUPP); 1671 1672 /* 1673 * Ensure that file and memory protections are 1674 * compatible. 1675 */ 1676 mp = vp->v_mount; 1677 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1678 maxprot = VM_PROT_NONE; 1679 if ((prot & VM_PROT_EXECUTE) != 0) 1680 return (EACCES); 1681 } else 1682 maxprot = VM_PROT_EXECUTE; 1683 if ((fp->f_flag & FREAD) != 0) 1684 maxprot |= VM_PROT_READ; 1685 else if ((prot & VM_PROT_READ) != 0) 1686 return (EACCES); 1687 1688 /* 1689 * If we are sharing potential changes via MAP_SHARED and we 1690 * are trying to get write permission although we opened it 1691 * without asking for it, bail out. 1692 * 1693 * Note that most character devices always share mappings. 1694 * 1695 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1696 * requests rather than doing it here. 
1697 */ 1698 if ((flags & MAP_SHARED) != 0) { 1699 if ((fp->f_flag & FWRITE) != 0) 1700 maxprot |= VM_PROT_WRITE; 1701 else if ((prot & VM_PROT_WRITE) != 0) 1702 return (EACCES); 1703 } 1704 maxprot &= cap_maxprot; 1705 1706 linux_get_fop(filp, &fop, &ldev); 1707 error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, 1708 &foff, fop, &object); 1709 if (error != 0) 1710 goto out; 1711 1712 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1713 foff, FALSE, td); 1714 if (error != 0) 1715 vm_object_deallocate(object); 1716 out: 1717 linux_drop_fop(ldev); 1718 return (error); 1719 } 1720 1721 static int 1722 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) 1723 { 1724 struct linux_file *filp; 1725 struct vnode *vp; 1726 int error; 1727 1728 filp = (struct linux_file *)fp->f_data; 1729 if (filp->f_vnode == NULL) 1730 return (EOPNOTSUPP); 1731 1732 vp = filp->f_vnode; 1733 1734 vn_lock(vp, LK_SHARED | LK_RETRY); 1735 error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); 1736 VOP_UNLOCK(vp); 1737 1738 return (error); 1739 } 1740 1741 static int 1742 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1743 struct filedesc *fdp) 1744 { 1745 struct linux_file *filp; 1746 struct vnode *vp; 1747 int error; 1748 1749 filp = fp->f_data; 1750 vp = filp->f_vnode; 1751 if (vp == NULL) { 1752 error = 0; 1753 kif->kf_type = KF_TYPE_DEV; 1754 } else { 1755 vref(vp); 1756 FILEDESC_SUNLOCK(fdp); 1757 error = vn_fill_kinfo_vnode(vp, kif); 1758 vrele(vp); 1759 kif->kf_type = KF_TYPE_VNODE; 1760 FILEDESC_SLOCK(fdp); 1761 } 1762 return (error); 1763 } 1764 1765 unsigned int 1766 linux_iminor(struct inode *inode) 1767 { 1768 struct linux_cdev *ldev; 1769 1770 if (inode == NULL || inode->v_rdev == NULL || 1771 inode->v_rdev->si_devsw != &linuxcdevsw) 1772 return (-1U); 1773 ldev = inode->v_rdev->si_drv1; 1774 if (ldev == NULL) 1775 return (-1U); 1776 1777 return (minor(ldev->dev)); 1778 } 1779 1780 static int 1781 linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td) 1782 { 1783 struct linux_file *filp1, *filp2; 1784 1785 if (fp2->f_type != DTYPE_DEV) 1786 return (3); 1787 1788 filp1 = fp1->f_data; 1789 filp2 = fp2->f_data; 1790 return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev)); 1791 } 1792 1793 const struct fileops linuxfileops = { 1794 .fo_read = linux_file_read, 1795 .fo_write = linux_file_write, 1796 .fo_truncate = invfo_truncate, 1797 .fo_kqfilter = linux_file_kqfilter, 1798 .fo_stat = linux_file_stat, 1799 .fo_fill_kinfo = linux_file_fill_kinfo, 1800 .fo_poll = linux_file_poll, 1801 .fo_close = linux_file_close, 1802 .fo_ioctl = linux_file_ioctl, 1803 .fo_mmap = linux_file_mmap, 1804 .fo_chmod = invfo_chmod, 1805 .fo_chown = invfo_chown, 1806 .fo_sendfile = invfo_sendfile, 1807 .fo_cmp = linux_file_kcmp, 1808 .fo_flags = DFLAG_PASSABLE, 1809 }; 1810 1811 /* 1812 * Hash of vmmap addresses. This is infrequently accessed and does not 1813 * need to be particularly large. This is done because we must store the 1814 * caller's idea of the map size to properly unmap. 
1815 */ 1816 struct vmmap { 1817 LIST_ENTRY(vmmap) vm_next; 1818 void *vm_addr; 1819 unsigned long vm_size; 1820 }; 1821 1822 struct vmmaphd { 1823 struct vmmap *lh_first; 1824 }; 1825 #define VMMAP_HASH_SIZE 64 1826 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1827 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1828 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1829 static struct mtx vmmaplock; 1830 1831 static void 1832 vmmap_add(void *addr, unsigned long size) 1833 { 1834 struct vmmap *vmmap; 1835 1836 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1837 mtx_lock(&vmmaplock); 1838 vmmap->vm_size = size; 1839 vmmap->vm_addr = addr; 1840 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1841 mtx_unlock(&vmmaplock); 1842 } 1843 1844 static struct vmmap * 1845 vmmap_remove(void *addr) 1846 { 1847 struct vmmap *vmmap; 1848 1849 mtx_lock(&vmmaplock); 1850 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1851 if (vmmap->vm_addr == addr) 1852 break; 1853 if (vmmap) 1854 LIST_REMOVE(vmmap, vm_next); 1855 mtx_unlock(&vmmaplock); 1856 1857 return (vmmap); 1858 } 1859 1860 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1861 void * 1862 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1863 { 1864 void *addr; 1865 1866 addr = pmap_mapdev_attr(phys_addr, size, attr); 1867 if (addr == NULL) 1868 return (NULL); 1869 vmmap_add(addr, size); 1870 1871 return (addr); 1872 } 1873 #endif 1874 1875 void 1876 iounmap(void *addr) 1877 { 1878 struct vmmap *vmmap; 1879 1880 vmmap = vmmap_remove(addr); 1881 if (vmmap == NULL) 1882 return; 1883 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) 1884 pmap_unmapdev(addr, vmmap->vm_size); 1885 #endif 1886 kfree(vmmap); 1887 } 1888 1889 void * 1890 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1891 { 1892 vm_offset_t off; 1893 size_t size; 1894 1895 size = count * PAGE_SIZE; 1896 off = kva_alloc(size); 1897 if (off == 0) 1898 return (NULL); 1899 vmmap_add((void *)off, size); 1900 pmap_qenter(off, pages, count); 1901 1902 return ((void *)off); 1903 } 1904 1905 void 1906 vunmap(void *addr) 1907 { 1908 struct vmmap *vmmap; 1909 1910 vmmap = vmmap_remove(addr); 1911 if (vmmap == NULL) 1912 return; 1913 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1914 kva_free((vm_offset_t)addr, vmmap->vm_size); 1915 kfree(vmmap); 1916 } 1917 1918 static char * 1919 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) 1920 { 1921 unsigned int len; 1922 char *p; 1923 va_list aq; 1924 1925 va_copy(aq, ap); 1926 len = vsnprintf(NULL, 0, fmt, aq); 1927 va_end(aq); 1928 1929 if (dev != NULL) 1930 p = devm_kmalloc(dev, len + 1, gfp); 1931 else 1932 p = kmalloc(len + 1, gfp); 1933 if (p != NULL) 1934 vsnprintf(p, len + 1, fmt, ap); 1935 1936 return (p); 1937 } 1938 1939 char * 1940 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1941 { 1942 1943 return (devm_kvasprintf(NULL, gfp, fmt, ap)); 1944 } 1945 1946 char * 1947 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 1948 { 1949 va_list ap; 1950 char *p; 1951 1952 va_start(ap, fmt); 1953 p = devm_kvasprintf(dev, gfp, fmt, ap); 1954 va_end(ap); 1955 1956 return (p); 1957 } 1958 1959 char * 1960 kasprintf(gfp_t gfp, const char *fmt, ...) 
1961 { 1962 va_list ap; 1963 char *p; 1964 1965 va_start(ap, fmt); 1966 p = kvasprintf(gfp, fmt, ap); 1967 va_end(ap); 1968 1969 return (p); 1970 } 1971 1972 static void 1973 linux_timer_callback_wrapper(void *context) 1974 { 1975 struct timer_list *timer; 1976 1977 timer = context; 1978 1979 /* the timer is about to be shutdown permanently */ 1980 if (timer->function == NULL) 1981 return; 1982 1983 if (linux_set_current_flags(curthread, M_NOWAIT)) { 1984 /* try again later */ 1985 callout_reset(&timer->callout, 1, 1986 &linux_timer_callback_wrapper, timer); 1987 return; 1988 } 1989 1990 timer->function(timer->data); 1991 } 1992 1993 int 1994 mod_timer(struct timer_list *timer, int expires) 1995 { 1996 int ret; 1997 1998 timer->expires = expires; 1999 ret = callout_reset(&timer->callout, 2000 linux_timer_jiffies_until(expires), 2001 &linux_timer_callback_wrapper, timer); 2002 2003 MPASS(ret == 0 || ret == 1); 2004 2005 return (ret == 1); 2006 } 2007 2008 void 2009 add_timer(struct timer_list *timer) 2010 { 2011 2012 callout_reset(&timer->callout, 2013 linux_timer_jiffies_until(timer->expires), 2014 &linux_timer_callback_wrapper, timer); 2015 } 2016 2017 void 2018 add_timer_on(struct timer_list *timer, int cpu) 2019 { 2020 2021 callout_reset_on(&timer->callout, 2022 linux_timer_jiffies_until(timer->expires), 2023 &linux_timer_callback_wrapper, timer, cpu); 2024 } 2025 2026 int 2027 del_timer(struct timer_list *timer) 2028 { 2029 2030 if (callout_stop(&(timer)->callout) == -1) 2031 return (0); 2032 return (1); 2033 } 2034 2035 int 2036 del_timer_sync(struct timer_list *timer) 2037 { 2038 2039 if (callout_drain(&(timer)->callout) == -1) 2040 return (0); 2041 return (1); 2042 } 2043 2044 int 2045 timer_delete_sync(struct timer_list *timer) 2046 { 2047 2048 return (del_timer_sync(timer)); 2049 } 2050 2051 int 2052 timer_shutdown_sync(struct timer_list *timer) 2053 { 2054 2055 timer->function = NULL; 2056 return (del_timer_sync(timer)); 2057 } 2058 2059 /* greatest common divisor, Euclid equation */ 2060 static uint64_t 2061 lkpi_gcd_64(uint64_t a, uint64_t b) 2062 { 2063 uint64_t an; 2064 uint64_t bn; 2065 2066 while (b != 0) { 2067 an = b; 2068 bn = a % b; 2069 a = an; 2070 b = bn; 2071 } 2072 return (a); 2073 } 2074 2075 uint64_t lkpi_nsec2hz_rem; 2076 uint64_t lkpi_nsec2hz_div = 1000000000ULL; 2077 uint64_t lkpi_nsec2hz_max; 2078 2079 uint64_t lkpi_usec2hz_rem; 2080 uint64_t lkpi_usec2hz_div = 1000000ULL; 2081 uint64_t lkpi_usec2hz_max; 2082 2083 uint64_t lkpi_msec2hz_rem; 2084 uint64_t lkpi_msec2hz_div = 1000ULL; 2085 uint64_t lkpi_msec2hz_max; 2086 2087 static void 2088 linux_timer_init(void *arg) 2089 { 2090 uint64_t gcd; 2091 2092 /* 2093 * Compute an internal HZ value which can divide 2**32 to 2094 * avoid timer rounding problems when the tick value wraps 2095 * around 2**32: 2096 */ 2097 linux_timer_hz_mask = 1; 2098 while (linux_timer_hz_mask < (unsigned long)hz) 2099 linux_timer_hz_mask *= 2; 2100 linux_timer_hz_mask--; 2101 2102 /* compute some internal constants */ 2103 2104 lkpi_nsec2hz_rem = hz; 2105 lkpi_usec2hz_rem = hz; 2106 lkpi_msec2hz_rem = hz; 2107 2108 gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); 2109 lkpi_nsec2hz_rem /= gcd; 2110 lkpi_nsec2hz_div /= gcd; 2111 lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; 2112 2113 gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); 2114 lkpi_usec2hz_rem /= gcd; 2115 lkpi_usec2hz_div /= gcd; 2116 lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; 2117 2118 gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); 2119 lkpi_msec2hz_rem 
/= gcd; 2120 lkpi_msec2hz_div /= gcd; 2121 lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; 2122 } 2123 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 2124 2125 void 2126 linux_complete_common(struct completion *c, int all) 2127 { 2128 sleepq_lock(c); 2129 if (all) { 2130 c->done = UINT_MAX; 2131 sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 2132 } else { 2133 if (c->done != UINT_MAX) 2134 c->done++; 2135 sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 2136 } 2137 sleepq_release(c); 2138 } 2139 2140 /* 2141 * Indefinite wait for done != 0 with or without signals. 2142 */ 2143 int 2144 linux_wait_for_common(struct completion *c, int flags) 2145 { 2146 struct task_struct *task; 2147 int error; 2148 2149 if (SCHEDULER_STOPPED()) 2150 return (0); 2151 2152 task = current; 2153 2154 if (flags != 0) 2155 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2156 else 2157 flags = SLEEPQ_SLEEP; 2158 error = 0; 2159 for (;;) { 2160 sleepq_lock(c); 2161 if (c->done) 2162 break; 2163 sleepq_add(c, NULL, "completion", flags, 0); 2164 if (flags & SLEEPQ_INTERRUPTIBLE) { 2165 DROP_GIANT(); 2166 error = -sleepq_wait_sig(c, 0); 2167 PICKUP_GIANT(); 2168 if (error != 0) { 2169 linux_schedule_save_interrupt_value(task, error); 2170 error = -ERESTARTSYS; 2171 goto intr; 2172 } 2173 } else { 2174 DROP_GIANT(); 2175 sleepq_wait(c, 0); 2176 PICKUP_GIANT(); 2177 } 2178 } 2179 if (c->done != UINT_MAX) 2180 c->done--; 2181 sleepq_release(c); 2182 2183 intr: 2184 return (error); 2185 } 2186 2187 /* 2188 * Time limited wait for done != 0 with or without signals. 2189 */ 2190 int 2191 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 2192 { 2193 struct task_struct *task; 2194 int end = jiffies + timeout; 2195 int error; 2196 2197 if (SCHEDULER_STOPPED()) 2198 return (0); 2199 2200 task = current; 2201 2202 if (flags != 0) 2203 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 2204 else 2205 flags = SLEEPQ_SLEEP; 2206 2207 for (;;) { 2208 sleepq_lock(c); 2209 if (c->done) 2210 break; 2211 sleepq_add(c, NULL, "completion", flags, 0); 2212 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 2213 2214 DROP_GIANT(); 2215 if (flags & SLEEPQ_INTERRUPTIBLE) 2216 error = -sleepq_timedwait_sig(c, 0); 2217 else 2218 error = -sleepq_timedwait(c, 0); 2219 PICKUP_GIANT(); 2220 2221 if (error != 0) { 2222 /* check for timeout */ 2223 if (error == -EWOULDBLOCK) { 2224 error = 0; /* timeout */ 2225 } else { 2226 /* signal happened */ 2227 linux_schedule_save_interrupt_value(task, error); 2228 error = -ERESTARTSYS; 2229 } 2230 goto done; 2231 } 2232 } 2233 if (c->done != UINT_MAX) 2234 c->done--; 2235 sleepq_release(c); 2236 2237 /* return how many jiffies are left */ 2238 error = linux_timer_jiffies_until(end); 2239 done: 2240 return (error); 2241 } 2242 2243 int 2244 linux_try_wait_for_completion(struct completion *c) 2245 { 2246 int isdone; 2247 2248 sleepq_lock(c); 2249 isdone = (c->done != 0); 2250 if (c->done != 0 && c->done != UINT_MAX) 2251 c->done--; 2252 sleepq_release(c); 2253 return (isdone); 2254 } 2255 2256 int 2257 linux_completion_done(struct completion *c) 2258 { 2259 int isdone; 2260 2261 sleepq_lock(c); 2262 isdone = (c->done != 0); 2263 sleepq_release(c); 2264 return (isdone); 2265 } 2266 2267 static void 2268 linux_cdev_deref(struct linux_cdev *ldev) 2269 { 2270 if (refcount_release(&ldev->refs) && 2271 ldev->kobj.ktype == &linux_cdev_ktype) 2272 kfree(ldev); 2273 } 2274 2275 static void 2276 linux_cdev_release(struct kobject *kobj) 2277 { 2278 struct linux_cdev *cdev; 2279 struct 
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}
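
/*
 * Linux netdevice/inetaddr notifier chains are emulated with FreeBSD event
 * handlers: one EVENTHANDLER_REGISTER() per NETDEV_* event, with the
 * returned tag stored in nb->tags[] so the handler can be deregistered
 * again later.
 */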
int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(const void *d1, const void *d2, void *priv)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}
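
/*
 * Look up a LinuxKPI character device by <major, minor> and kobject name
 * by walking the devfs nodes attached to linuxcdevsw under dev_lock();
 * __unregister_chrdev() below relies on this to find the devices it has
 * to delete.
 */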
struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
	struct io_mapping *mapping;

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (mapping == NULL)
		return (NULL);
	return (io_mapping_init_wc(mapping, base, size));
}

/* We likely want a linuxkpi_device.c at some point. */
bool
device_can_wakeup(struct device *dev)
{

	if (dev == NULL)
		return (false);
	/*
	 * XXX-BZ iwlwifi queries it as part of enabling WoWLAN.
	 * Normally this would be based on a bool in dev->power.XXX.
	 * Check something such as the PCI PCIM_PCAP_*PME capability bits.
	 * We have no way to enable this yet.  We may get away with calling
	 * directly into bsddev for as long as we can assume PCI only,
	 * avoiding changes to struct device that would break the KBI.
	 */
	pr_debug("%s:%d: not enabled; see comment.\n", __func__, __LINE__);
	return (false);
}

static void
devm_device_group_remove(struct device *dev, void *p)
{
	const struct attribute_group **dr = p;
	const struct attribute_group *group = *dr;

	sysfs_remove_group(&dev->kobj, group);
}

int
lkpi_devm_device_add_group(struct device *dev,
    const struct attribute_group *group)
{
	const struct attribute_group **dr;
	int ret;

	dr = devres_alloc(devm_device_group_remove, sizeof(*dr), GFP_KERNEL);
	if (dr == NULL)
		return (-ENOMEM);

	ret = sysfs_create_group(&dev->kobj, group);
	if (ret == 0) {
		*dr = group;
		devres_add(dev, dr);
	} else
		devres_free(dr);

	return (ret);
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
struct cpuinfo_x86 boot_cpu_data;
struct cpuinfo_x86 *__cpu_data;
#endif

cpumask_t *
lkpi_get_static_single_cpu_mask(int cpuid)
{

	KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n",
	    __func__, cpuid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n",
	    __func__, cpuid));

	return (static_single_cpu_mask[cpuid]);
}

bool
lkpi_xen_initial_domain(void)
{
#ifdef XENHVM
	return (xen_initial_domain());
#else
	return (false);
#endif
}

bool
lkpi_xen_pv_domain(void)
{
#ifdef XENHVM
	return (xen_pv_domain());
#else
	return (false);
#endif
}

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	static const uint32_t x86_vendors[X86_VENDOR_NUM] = {
		[X86_VENDOR_INTEL] = CPU_VENDOR_INTEL,
		[X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX,
		[X86_VENDOR_AMD] = CPU_VENDOR_AMD,
		[X86_VENDOR_UMC] = CPU_VENDOR_UMC,
		[X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR,
		[X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA,
		[X86_VENDOR_NSC] = CPU_VENDOR_NSC,
		[X86_VENDOR_HYGON] = CPU_VENDOR_HYGON,
	};
	uint8_t x86_vendor = X86_VENDOR_UNKNOWN;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) {
			x86_vendor = i;
			break;
		}
	}
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
	boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
	boot_cpu_data.x86_max_cores = mp_ncpus;
	boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
	boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);
	boot_cpu_data.x86_vendor = x86_vendor;

	__cpu_data = mallocarray(mp_maxid + 1,
	    sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO);
	CPU_FOREACH(i) {
		__cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
		__cpu_data[i].x86_max_cores = mp_ncpus;
		__cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
		__cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
		__cpu_data[i].x86_vendor = x86_vendor;
	}
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
"class"); 2788 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2789 kobject_set_name(&linux_root_device.kobj, "device"); 2790 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2791 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2792 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2793 linux_root_device.bsddev = root_bus; 2794 linux_class_misc.name = "misc"; 2795 class_register(&linux_class_misc); 2796 INIT_LIST_HEAD(&pci_drivers); 2797 INIT_LIST_HEAD(&pci_devices); 2798 spin_lock_init(&pci_lock); 2799 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2800 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2801 LIST_INIT(&vmmaphead[i]); 2802 init_waitqueue_head(&linux_bit_waitq); 2803 init_waitqueue_head(&linux_var_waitq); 2804 2805 CPU_COPY(&all_cpus, &cpu_online_mask); 2806 /* 2807 * Generate a single-CPU cpumask_t for each CPU (possibly) in the system. 2808 * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only 2809 * have itself in the cpumask, cupid 1 only itself on entry 1, and so on. 2810 * This is used by cpumask_of() (and possibly others in the future) for, 2811 * e.g., drivers to pass hints to irq_set_affinity_hint(). 2812 */ 2813 static_single_cpu_mask = mallocarray(mp_maxid + 1, 2814 sizeof(static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO); 2815 2816 /* 2817 * When the number of CPUs reach a threshold, we start to save memory 2818 * given the sets are static by overlapping those having their single 2819 * bit set at same position in a bitset word. Asymptotically, this 2820 * regular scheme is in O(n²) whereas the overlapping one is in O(n) 2821 * only with n being the maximum number of CPUs, so the gain will become 2822 * huge quite quickly. The threshold for 64-bit architectures is 128 2823 * CPUs. 2824 */ 2825 if (mp_ncpus < (2 * _BITSET_BITS)) { 2826 cpumask_t *sscm_ptr; 2827 2828 /* 2829 * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) * 2830 * (_BITSET_BITS / 8)' bytes (for comparison with the 2831 * overlapping scheme). 2832 */ 2833 static_single_cpu_mask_lcs = mallocarray(mp_ncpus, 2834 sizeof(*static_single_cpu_mask_lcs), 2835 M_KMALLOC, M_WAITOK | M_ZERO); 2836 2837 sscm_ptr = static_single_cpu_mask_lcs; 2838 CPU_FOREACH(i) { 2839 static_single_cpu_mask[i] = sscm_ptr++; 2840 CPU_SET(i, static_single_cpu_mask[i]); 2841 } 2842 } else { 2843 /* Pointer to a bitset word. */ 2844 __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp; 2845 2846 /* 2847 * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t' 2848 * really) with a single bit set that can be reused for all 2849 * single CPU masks by making them start at different offsets. 2850 * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before 2851 * the word having its single bit set, and the same amount 2852 * after. 2853 */ 2854 static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS, 2855 (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8), 2856 M_KMALLOC, M_WAITOK | M_ZERO); 2857 2858 /* 2859 * We rely below on cpuset_t and the bitset generic 2860 * implementation assigning words in the '__bits' array in the 2861 * same order of bits (i.e., little-endian ordering, not to be 2862 * confused with machine endianness, which concerns bits in 2863 * words and other integers). This is an imperfect test, but it 2864 * will detect a change to big-endian ordering. 2865 */ 2866 _Static_assert( 2867 __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1, 2868 "Assumes a bitset implementation that is little-endian " 2869 "on its words"); 2870 2871 /* Initialize the single bit of each static span. 
		/* Initialize the single bit of each static span. */
		bwp = (__typeof(bwp))static_single_cpu_mask_lcs +
		    (__bitset_words(CPU_SETSIZE) - 1);
		for (i = 0; i < _BITSET_BITS; i++) {
			CPU_SET(i, (cpuset_t *)bwp);
			bwp += (2 * __bitset_words(CPU_SETSIZE) - 1);
		}

		/*
		 * Finally set all CPU masks to the proper word in their
		 * relevant span.
		 */
		CPU_FOREACH(i) {
			bwp = (__typeof(bwp))static_single_cpu_mask_lcs;
			/* Find the non-zero word of the relevant span. */
			bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) *
			    (i % _BITSET_BITS) +
			    __bitset_words(CPU_SETSIZE) - 1;
			/* Shift to find the CPU mask start. */
			bwp -= (i / _BITSET_BITS);
			static_single_cpu_mask[i] = (cpuset_t *)bwp;
		}
	}

	strlcpy(init_uts_ns.name.release, osrelease,
	    sizeof(init_uts_ns.name.release));
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	free(static_single_cpu_mask_lcs, M_KMALLOC);
	free(static_single_cpu_mask, M_KMALLOC);
#if defined(__i386__) || defined(__amd64__)
	free(__cpu_data, M_KMALLOC);
#endif

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));