/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_global.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/cputypes.h>
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/io-mapping.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>
#include <linux/rcupdate.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#include <asm/processor.h>
#endif
#include <xen/xen.h>
#ifdef XENHVM
#undef xen_pv_domain
#undef xen_initial_domain
/* xen/xen-os.h redefines __must_check */
#undef __must_check
#include <xen/xen-os.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

int linuxkpi_rcu_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, rcu_debug, CTLFLAG_RWTUN,
    &linuxkpi_rcu_debug, 0, "Set to enable RCU warning. Clear to disable.");

int linuxkpi_warn_dump_stack = 0;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
    &linuxkpi_warn_dump_stack, 0,
    "Set to enable stack traces from WARN_ON(). Clear to disable.");

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
static cpumask_t **static_single_cpu_mask;
static cpumask_t *static_single_cpu_mask_lcs;
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;
struct uts_namespace init_uts_ns;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

#define	START(node)	((node)->start)
#define	LAST(node)	((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
    LAST,, lkpi_interval_tree)

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}
static const struct sysfs_ops linux_class_sysfs = {
	.show  = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show  = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
    dev_t devt, void *drvdata, const struct attribute_group **groups,
    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	/* device_initialize() needs the class and parent to be set */
	device_initialize(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct class *
lkpi_class_create(const char *name)
{
	struct class *class;
	int error;

	class = kzalloc(sizeof(*class), M_WAITOK);
	class->name = name;
	class->class_release = linux_class_kfree;
	error = class_register(class);
	if (error) {
		kfree(class);
		return (NULL);
	}

	return (class);
}
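/*
 * Example (illustrative only, not part of this file's API surface): a
 * LinuxKPI consumer typically pairs the helpers above roughly like this,
 * assuming a hypothetical driver name "mydrv":
 *
 *	struct class *cls = lkpi_class_create("mydrv");
 *	struct device *dev;
 *
 *	if (cls != NULL)
 *		dev = device_create(cls, NULL, MKDEV(0, 0), NULL, "mydrv%d", 0);
 *
 * device_create() allocates the device, names its kobject from the format
 * string and registers it, so the caller only keeps the returned pointer.
 */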
static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(s, MA_OWNED);
	else
		mtx_assert(s, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_op != NULL && filp->f_op->release != NULL)
			filp->f_op->release(filp->f_vnode, filp);
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree_rcu(filp, rcu);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

struct linux_cdev *
cdev_alloc(void)
{
	struct linux_cdev *cdev;

	cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
	kobject_init(&cdev->kobj, &linux_cdev_ktype);
	cdev->refs = 1;
	return (cdev);
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}

static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(&vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(&vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}
static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  {
	/* OBJT_MGTDEVICE */
	.cdev_pg_populate = linux_cdev_pager_populate,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
  {
	/* OBJT_DEVICE */
	.cdev_pg_fault = linux_cdev_pager_fault,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

void
vma_set_file(struct vm_area_struct *vma, struct linux_file *file)
{
	struct linux_file *tmp;

	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	tmp = vma->vm_file;
	vma->vm_file = file;
	fput(tmp);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})
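/*
 * OPW() ("operation wrapper") temporarily points td_fpop at the FreeBSD
 * file backing the Linux file while a file_operations callback runs, so
 * code reached from the callback can find the current struct file, and
 * then restores the previous value.  Illustrative use (a sketch only):
 *
 *	error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, arg));
 *
 * The surrounding minus sign converts the Linux-style negative errno
 * returned by the callback into a positive FreeBSD error code.
 */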
static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vref(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}
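/*
 * How the ioctl argument remapping above fits together (informational
 * sketch of the existing mechanism): before calling a driver's ioctl
 * handler, linux_file_ioctl_sub() below stashes the kernel copy of the
 * ioctl buffer in task->bsd_ioctl_data/bsd_ioctl_len and hands the driver
 * the fake user pointer LINUX_IOCTL_MIN_PTR.  When the driver later calls
 * copy_from_user()/copy_to_user(), linux_remap_address() recognizes any
 * pointer inside [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR) and translates
 * it back into the kernel buffer.  For example, with a 64-byte argument,
 * a driver access at LINUX_IOCTL_MIN_PTR + 8 with length 4 passes the
 * bounds check (8 + 4 <= 64) and resolves to bsd_ioctl_data + 8.
 */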
static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}
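/*
 * Poll wake-up bookkeeping (overview of the state tables used below):
 * a file's wait-queue entry moves between INIT, NOT_READY, QUEUED and
 * READY.  linux_poll_wait() queues the entry the first time a driver's
 * poll method calls it (INIT -> QUEUED, READY -> QUEUED), the wake-up
 * callback promotes QUEUED -> READY and notifies select/kqueue, and
 * linux_poll_wait_dequeue() drops everything back to INIT when the file
 * is torn down.  Each transition is performed atomically by
 * linux_poll_wakeup_state() so wake-ups and pollers may race safely.
 */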
#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY,	/* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}

static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}
static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static const struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static const struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, bool is_shared, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);
	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	if (is_shared)
		vmap->vm_flags |= VM_SHARED;
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}
	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}
static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vrele(filp->f_vnode);
	linux_drop_fop(ldev);
	ldev = filp->f_cdev;
	if (ldev != NULL)
		linux_cdev_deref(ldev);
	linux_synchronize_rcu(RCU_TYPE_REGULAR);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct fiodgname_arg *fgn;
	const char *p;
	int error, i;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	case FIODGNAME:
#ifdef	COMPAT_FREEBSD32
	case FIODGNAME_32:
#endif
		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
			error = ENXIO;
			break;
		}
		fgn = data;
		p = devtoname(filp->f_cdev->cdev);
		i = strlen(p) + 1;
		if (i > fgn->len) {
			error = EINVAL;
			break;
		}
		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}
static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t maxprot, int flags, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, (flags & MAP_SHARED) ? true : false, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}
static int
linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td)
{
	struct linux_file *filp1, *filp2;

	if (fp2->f_type != DTYPE_DEV)
		return (3);

	filp1 = fp1->f_data;
	filp2 = fp2->f_data;
	return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev));
}

const struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_cmp = linux_file_kcmp,
	.fo_flags = DFLAG_PASSABLE,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}
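/*
 * Usage sketch (illustrative only, not part of this file): the vmmap hash
 * above lets callers use the Linux-style one-argument teardown functions,
 * for example
 *
 *	void *regs = ioremap(bar_paddr, 0x1000);
 *	...
 *	iounmap(regs);
 *
 *	void *va = vmap(pages, npages, 0, 0);
 *	...
 *	vunmap(va);
 *
 * because the mapped size is looked up from the hash by address instead
 * of being passed in again by the caller ("bar_paddr", "pages" and
 * "npages" are hypothetical here).
 */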
static char *
devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	if (dev != NULL)
		p = devm_kmalloc(dev, len + 1, gfp);
	else
		p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{

	return (devm_kvasprintf(NULL, gfp, fmt, ap));
}

char *
lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;

	/* the timer is about to be shutdown permanently */
	if (timer->function == NULL)
		return;

	if (linux_set_current_flags(curthread, M_NOWAIT)) {
		/* try again later */
		callout_reset(&timer->callout, 1,
		    &linux_timer_callback_wrapper, timer);
		return;
	}

	timer->function(timer->data);
}

int
mod_timer(struct timer_list *timer, int expires)
{
	int ret;

	timer->expires = expires;
	ret = callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);

	MPASS(ret == 0 || ret == 1);

	return (ret == 1);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
del_timer_sync(struct timer_list *timer)
{

	if (callout_drain(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
timer_delete_sync(struct timer_list *timer)
{

	return (del_timer_sync(timer));
}

int
timer_shutdown_sync(struct timer_list *timer)
{

	timer->function = NULL;
	return (del_timer_sync(timer));
}

/* greatest common divisor, Euclid's algorithm */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;
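/*
 * Worked example for the conversion constants above, which
 * linux_timer_init() below fills in (assuming hz = 1000): hz and 10^9 are
 * reduced by their greatest common divisor (1000), so lkpi_nsec2hz_rem = 1
 * and lkpi_nsec2hz_div = 1000000, meaning one tick corresponds to
 * 1000000 ns, and lkpi_nsec2hz_max = UINT64_MAX / 1 is the largest
 * nanosecond count that can be scaled without 64-bit overflow.
 */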
static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
	lkpi_usec2hz_rem /= gcd;
	lkpi_usec2hz_div /= gcd;
	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
	lkpi_msec2hz_rem /= gcd;
	lkpi_msec2hz_div /= gcd;
	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}
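/*
 * Completion usage sketch (illustrative only): a LinuxKPI consumer reaches
 * the helpers above and below through the usual Linux API, roughly:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	... start asynchronous work that ends with complete(&done) ...
 *	wait_for_completion(&done);
 *	wait_for_completion_timeout(&done, hz);
 *
 * "done" counts outstanding completions and UINT_MAX marks complete_all(),
 * which is why the waiters here only decrement when done != UINT_MAX.
 */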
/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
	struct task_struct *task;
	int end = jiffies + timeout;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));

		DROP_GIANT();
		if (flags & SLEEPQ_INTERRUPTIBLE)
			error = -sleepq_timedwait_sig(c, 0);
		else
			error = -sleepq_timedwait(c, 0);
		PICKUP_GIANT();

		if (error != 0) {
			/* check for timeout */
			if (error == -EWOULDBLOCK) {
				error = 0;	/* timeout */
			} else {
				/* signal happened */
				linux_schedule_save_interrupt_value(task, error);
				error = -ERESTARTSYS;
			}
			goto done;
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	error = linux_timer_jiffies_until(end);
done:
	return (error);
}

int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	if (c->done != 0 && c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	sleepq_release(c);
	return (isdone);
}

static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}
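/*
 * Note on the two release paths above (summary of existing behaviour, no
 * new semantics): dynamically allocated linux_cdev structures use
 * linux_cdev_ktype and are freed by linux_cdev_deref() once the last
 * reference is dropped, while statically embedded ones use
 * linux_cdev_static_ktype, which only destroys the backing FreeBSD cdev
 * and never frees the containing structure.
 */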
int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}
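
/*
 * Example usage sketch for the netdevice notifier bridge above (illustrative
 * only; "my_netdev_event" and "my_nb" are hypothetical driver-side names):
 *
 *	static int
 *	my_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *data)
 *	{
 *		struct netdev_notifier_info *ni = data;
 *
 *		if (event == NETDEV_UP)
 *			printf("%s came up\n", if_name(ni->ifp));
 *		return (0);
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *	...
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */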
int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(const void *d1, const void *d2, void *priv)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}
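
/*
 * Example usage sketch for list_sort() (illustrative only; "struct my_item",
 * "my_cmp" and "my_list" are hypothetical driver-side names):
 *
 *	struct my_item {
 *		int key;
 *		struct list_head entry;
 *	};
 *
 *	static int
 *	my_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct my_item *ia = container_of(a, struct my_item, entry);
 *		struct my_item *ib = container_of(b, struct my_item, entry);
 *
 *		return ((ia->key > ib->key) - (ia->key < ib->key));
 *	}
 *	...
 *	list_sort(NULL, &my_list, my_cmp);	// ascending order of "key"
 */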
#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}
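
/*
 * Example usage sketch for the chrdev helpers above (illustrative only;
 * "MY_MAJOR" and "my_fops" are hypothetical driver-side names):
 *
 *	int error;
 *
 *	// Create minors 0..3 of a character device region named "mydrv".
 *	error = __register_chrdev(MY_MAJOR, 0, 4, "mydrv", &my_fops);
 *	...
 *	__unregister_chrdev(MY_MAJOR, 0, 4, "mydrv");
 */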
2612 */ 2613 pr_debug("%s:%d: not enabled; see comment.\n", __func__, __LINE__); 2614 return (false); 2615 } 2616 2617 static void 2618 devm_device_group_remove(struct device *dev, void *p) 2619 { 2620 const struct attribute_group **dr = p; 2621 const struct attribute_group *group = *dr; 2622 2623 sysfs_remove_group(&dev->kobj, group); 2624 } 2625 2626 int 2627 lkpi_devm_device_add_group(struct device *dev, 2628 const struct attribute_group *group) 2629 { 2630 const struct attribute_group **dr; 2631 int ret; 2632 2633 dr = devres_alloc(devm_device_group_remove, sizeof(*dr), GFP_KERNEL); 2634 if (dr == NULL) 2635 return (-ENOMEM); 2636 2637 ret = sysfs_create_group(&dev->kobj, group); 2638 if (ret == 0) { 2639 *dr = group; 2640 devres_add(dev, dr); 2641 } else 2642 devres_free(dr); 2643 2644 return (ret); 2645 } 2646 2647 #if defined(__i386__) || defined(__amd64__) 2648 bool linux_cpu_has_clflush; 2649 struct cpuinfo_x86 boot_cpu_data; 2650 struct cpuinfo_x86 *__cpu_data; 2651 #endif 2652 2653 cpumask_t * 2654 lkpi_get_static_single_cpu_mask(int cpuid) 2655 { 2656 2657 KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n", 2658 __func__, cpuid)); 2659 KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n", 2660 __func__, cpuid)); 2661 2662 return (static_single_cpu_mask[cpuid]); 2663 } 2664 2665 bool 2666 lkpi_xen_initial_domain(void) 2667 { 2668 #ifdef XENHVM 2669 return (xen_initial_domain()); 2670 #else 2671 return (false); 2672 #endif 2673 } 2674 2675 bool 2676 lkpi_xen_pv_domain(void) 2677 { 2678 #ifdef XENHVM 2679 return (xen_pv_domain()); 2680 #else 2681 return (false); 2682 #endif 2683 } 2684 2685 static void 2686 linux_compat_init(void *arg) 2687 { 2688 struct sysctl_oid *rootoid; 2689 int i; 2690 2691 #if defined(__i386__) || defined(__amd64__) 2692 static const uint32_t x86_vendors[X86_VENDOR_NUM] = { 2693 [X86_VENDOR_INTEL] = CPU_VENDOR_INTEL, 2694 [X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX, 2695 [X86_VENDOR_AMD] = CPU_VENDOR_AMD, 2696 [X86_VENDOR_UMC] = CPU_VENDOR_UMC, 2697 [X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR, 2698 [X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA, 2699 [X86_VENDOR_NSC] = CPU_VENDOR_NSC, 2700 [X86_VENDOR_HYGON] = CPU_VENDOR_HYGON, 2701 }; 2702 uint8_t x86_vendor = X86_VENDOR_UNKNOWN; 2703 2704 for (i = 0; i < X86_VENDOR_NUM; i++) { 2705 if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) { 2706 x86_vendor = i; 2707 break; 2708 } 2709 } 2710 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2711 boot_cpu_data.x86_clflush_size = cpu_clflush_line_size; 2712 boot_cpu_data.x86_max_cores = mp_ncpus; 2713 boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id); 2714 boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id); 2715 boot_cpu_data.x86_vendor = x86_vendor; 2716 2717 __cpu_data = mallocarray(mp_maxid + 1, 2718 sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO); 2719 CPU_FOREACH(i) { 2720 __cpu_data[i].x86_clflush_size = cpu_clflush_line_size; 2721 __cpu_data[i].x86_max_cores = mp_ncpus; 2722 __cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id); 2723 __cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id); 2724 __cpu_data[i].x86_vendor = x86_vendor; 2725 } 2726 #endif 2727 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2728 2729 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2730 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2731 kobject_init(&linux_class_root, &linux_class_ktype); 2732 kobject_set_name(&linux_class_root, "class"); 2733 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2734 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, 
"class"); 2735 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2736 kobject_set_name(&linux_root_device.kobj, "device"); 2737 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2738 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", 2739 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); 2740 linux_root_device.bsddev = root_bus; 2741 linux_class_misc.name = "misc"; 2742 class_register(&linux_class_misc); 2743 INIT_LIST_HEAD(&pci_drivers); 2744 INIT_LIST_HEAD(&pci_devices); 2745 spin_lock_init(&pci_lock); 2746 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2747 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2748 LIST_INIT(&vmmaphead[i]); 2749 init_waitqueue_head(&linux_bit_waitq); 2750 init_waitqueue_head(&linux_var_waitq); 2751 2752 CPU_COPY(&all_cpus, &cpu_online_mask); 2753 /* 2754 * Generate a single-CPU cpumask_t for each CPU (possibly) in the system. 2755 * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only 2756 * have itself in the cpumask, cupid 1 only itself on entry 1, and so on. 2757 * This is used by cpumask_of() (and possibly others in the future) for, 2758 * e.g., drivers to pass hints to irq_set_affinity_hint(). 2759 */ 2760 static_single_cpu_mask = mallocarray(mp_maxid + 1, 2761 sizeof(static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO); 2762 2763 /* 2764 * When the number of CPUs reach a threshold, we start to save memory 2765 * given the sets are static by overlapping those having their single 2766 * bit set at same position in a bitset word. Asymptotically, this 2767 * regular scheme is in O(n²) whereas the overlapping one is in O(n) 2768 * only with n being the maximum number of CPUs, so the gain will become 2769 * huge quite quickly. The threshold for 64-bit architectures is 128 2770 * CPUs. 2771 */ 2772 if (mp_ncpus < (2 * _BITSET_BITS)) { 2773 cpumask_t *sscm_ptr; 2774 2775 /* 2776 * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) * 2777 * (_BITSET_BITS / 8)' bytes (for comparison with the 2778 * overlapping scheme). 2779 */ 2780 static_single_cpu_mask_lcs = mallocarray(mp_ncpus, 2781 sizeof(*static_single_cpu_mask_lcs), 2782 M_KMALLOC, M_WAITOK | M_ZERO); 2783 2784 sscm_ptr = static_single_cpu_mask_lcs; 2785 CPU_FOREACH(i) { 2786 static_single_cpu_mask[i] = sscm_ptr++; 2787 CPU_SET(i, static_single_cpu_mask[i]); 2788 } 2789 } else { 2790 /* Pointer to a bitset word. */ 2791 __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp; 2792 2793 /* 2794 * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t' 2795 * really) with a single bit set that can be reused for all 2796 * single CPU masks by making them start at different offsets. 2797 * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before 2798 * the word having its single bit set, and the same amount 2799 * after. 2800 */ 2801 static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS, 2802 (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8), 2803 M_KMALLOC, M_WAITOK | M_ZERO); 2804 2805 /* 2806 * We rely below on cpuset_t and the bitset generic 2807 * implementation assigning words in the '__bits' array in the 2808 * same order of bits (i.e., little-endian ordering, not to be 2809 * confused with machine endianness, which concerns bits in 2810 * words and other integers). This is an imperfect test, but it 2811 * will detect a change to big-endian ordering. 2812 */ 2813 _Static_assert( 2814 __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1, 2815 "Assumes a bitset implementation that is little-endian " 2816 "on its words"); 2817 2818 /* Initialize the single bit of each static span. 
		/* Initialize the single bit of each static span. */
		bwp = (__typeof(bwp))static_single_cpu_mask_lcs +
		    (__bitset_words(CPU_SETSIZE) - 1);
		for (i = 0; i < _BITSET_BITS; i++) {
			CPU_SET(i, (cpuset_t *)bwp);
			bwp += (2 * __bitset_words(CPU_SETSIZE) - 1);
		}

		/*
		 * Finally set all CPU masks to the proper word in their
		 * relevant span.
		 */
		CPU_FOREACH(i) {
			bwp = (__typeof(bwp))static_single_cpu_mask_lcs;
			/* Find the non-zero word of the relevant span. */
			bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) *
			    (i % _BITSET_BITS) +
			    __bitset_words(CPU_SETSIZE) - 1;
			/* Shift to find the CPU mask start. */
			bwp -= (i / _BITSET_BITS);
			static_single_cpu_mask[i] = (cpuset_t *)bwp;
		}
	}

	strlcpy(init_uts_ns.name.release, osrelease,
	    sizeof(init_uts_ns.name.release));
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	free(static_single_cpu_mask_lcs, M_KMALLOC);
	free(static_single_cpu_mask, M_KMALLOC);
#if defined(__i386__) || defined(__amd64__)
	free(__cpu_data, M_KMALLOC);
#endif

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
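
/*
 * Illustrative consequence of the assertion above: code ported through the
 * LinuxKPI may round-trip pointers through "unsigned long", e.g.
 *
 *	unsigned long handle = (unsigned long)ptr;
 *	void *same = (void *)handle;	// same == ptr
 *
 * which is only safe because both types are pointer-sized here.
 */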