/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}
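/*
 * kobject_add() parallels the Linux function of the same name: format
 * the kobject's name from "fmt" and register it below "parent" in
 * sysfs, creating the default attribute files, if any.
 */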
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show  = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};
static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show  = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
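/*
 * kobject_init_and_add() mirrors the Linux helper of the same name:
 * initialize the kobject, set its name and register it below "parent"
 * in a single call.
 */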
int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_file_dtor(void *cdp)
{
	struct linux_file *filp;

	linux_set_current(curthread);
	filp = cdp;
	filp->f_op->release(filp->f_vnode, filp);
	vdrop(filp->f_vnode);
	kfree(filp);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}

static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_dev_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock,
	    linux_kq_lock_owned, linux_kq_lock_unowned);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

/*
 * The pager callbacks below bridge Linux vm_operations_struct semantics
 * to FreeBSD's cdev pager.  VM areas whose fault handler inserts pages
 * itself are backed by an OBJT_MGTDEVICE object and serviced through
 * the populate callback, while simple physical mappings are backed by
 * an OBJT_DEVICE object using fake pages; see linux_cdev_pager_ops[]
 * below.
 */
static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace_checked(page, vm_obj,
			    (*mres)->pindex, *mres);

			vm_page_lock(*mres);
			vm_page_free(*mres);
			vm_page_unlock(*mres);
			*mres = page;
		}
		page->valid = VM_PAGE_BITS_ALL;
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}

/*
 * OBJT_MGTDEVICE populate callback: run the Linux fault handler and
 * translate its result into a pager return code.  The fault handler
 * records the range of pages it busied in vm_pfn_first/vm_pfn_count,
 * which is passed back through *first and *last.
 */
static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	linux_set_current(curthread);

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(vmap, &vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(vmap, &vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
	{
		/* OBJT_MGTDEVICE */
		.cdev_pg_populate = linux_cdev_pager_populate,
		.cdev_pg_ctor = linux_cdev_pager_ctor,
		.cdev_pg_dtor = linux_cdev_pager_dtor
	},
	{
		/* OBJT_DEVICE */
		.cdev_pg_fault = linux_cdev_pager_fault,
		.cdev_pg_ctor = linux_cdev_pager_ctor,
		.cdev_pg_dtor = linux_cdev_pager_dtor
	},
};

static int
linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	int error;

	file = td->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (ENODEV);

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_flags = file->f_flag;
	vhold(file->f_vnode);
	filp->f_vnode = file->f_vnode;
	filp->_file = file;

	linux_set_current(td);

	if (filp->f_op->open) {
		error = -filp->f_op->open(file->f_vnode, filp);
		if (error) {
			vdrop(filp->f_vnode);
			kfree(filp);
			goto done;
		}
	}
	error = devfs_set_cdevpriv(filp, linux_file_dtor);
	if (error) {
		filp->f_op->release(file->f_vnode, filp);
		vdrop(filp->f_vnode);
		kfree(filp);
	}
done:
	return (error);
}

static int
linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct linux_file *filp;
	struct file *file;
	int error;

	file = td->td_fpop;
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	devfs_clear_cdevpriv();

	return (0);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}
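/*
 * linux_clear_user() reports the number of bytes that could not be
 * cleared, matching Linux semantics: subyte()/suword*() return non-zero
 * on fault, in which case the entire requested length is reported back.
 */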
size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(int rw, const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

static int
linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct linux_file *filp;
	struct file *file;
	unsigned size;
	int error;

	file = td->td_fpop;
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;

	/* the LinuxKPI supports blocking and non-blocking I/O */
	if (cmd == FIONBIO || cmd == FIOASYNC)
		return (0);

	linux_set_current(td);
	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		current->bsd_ioctl_data = data;
		current->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#if defined(__amd64__)
	if (td->td_proc->p_elf_machine == EM_386) {
		/* try the compat IOCTL handler first */
		if (filp->f_op->compat_ioctl != NULL)
			error = -filp->f_op->compat_ioctl(filp, cmd, (u_long)data);
		else
			error = ENOTTY;

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && filp->f_op->unlocked_ioctl != NULL)
			error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
	} else
#endif
	if (filp->f_op->unlocked_ioctl != NULL)
		error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
	else
		error = ENOTTY;
	if (size > 0) {
		current->bsd_ioctl_data = NULL;
		current->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_dev_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else if (error == ERESTARTSYS)
		error = ERESTART;
	return (error);
}
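/*
 * The cdev read and write methods below pass a flat buffer through the
 * Linux file operations; only a single iovec is supported and
 * scatter/gather requests fail with EOPNOTSUPP.
 */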
static int
linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	ssize_t bytes;
	int error;

	td = curthread;
	file = td->td_fpop;
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	linux_set_current(td);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = -bytes;
			if (error == ERESTARTSYS)
				error = ERESTART;
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);

	return (error);
}

static int
linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	ssize_t bytes;
	int error;

	td = curthread;
	file = td->td_fpop;
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	linux_set_current(td);
	if (filp->f_op->write) {
		bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = -bytes;
			if (error == ERESTARTSYS)
				error = ERESTART;
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

static int
linux_dev_poll(struct cdev *dev, int events, struct thread *td)
{
	struct linux_file *filp;
	struct file *file;
	int revents;

	if (devfs_get_cdevpriv((void **)&filp) != 0)
		goto error;

	file = td->td_fpop;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	if (filp->f_op->poll != NULL)
		revents = filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL) & events;
	else
		revents = 0;

	return (revents);
error:
	return (events & (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
}

/*
 * The poll wait-queue glue tracks its registration state in the atomic
 * f_wait_queue.state field using the LINUX_FWQ_STATE_* values.  Each
 * helper below encodes its transitions as a small table indexed by the
 * current state, which linux_poll_wakeup_state() applies atomically.
 */

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}

static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}
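/*
 * kqueue glue: EVFILT_READ and EVFILT_WRITE knotes hang off f_selinfo
 * under f_kqlock, and the LINUX_KQ_FLAG_NEED_{READ,WRITE} bits cache
 * the most recent poll state so the event filters can answer without
 * calling back into the driver.
 */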
static void
linux_dev_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_dev_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_dev_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_dev_kqfilter_detach,
	.f_event = linux_dev_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_dev_kqfilter_detach,
	.f_event = linux_dev_kqfilter_write_event,
};

static void
linux_dev_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	int temp;

	if (filp->f_kqflags & kqflags) {
		/* get the latest polling state */
		temp = filp->f_op->poll(filp, NULL);

		spin_lock(&filp->f_kqlock);
		/* clear kqflags */
		filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
		    LINUX_KQ_FLAG_NEED_WRITE);
		/* update kqflags */
		if (temp & (POLLIN | POLLOUT)) {
			if (temp & POLLIN)
				filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
			if (temp & POLLOUT)
				filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

			/* make sure the "knote" gets woken up */
			KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
		}
		spin_unlock(&filp->f_kqlock);
	}
}

static int
linux_dev_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct linux_file *filp;
	struct file *file;
	struct thread *td;
	int error;

	td = curthread;
	file = td->td_fpop;
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_dev_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}
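/*
 * d_mmap_single() implementation: build a Linux vm_area_struct for the
 * request, run the file's mmap method with mmap_sem held and wrap the
 * result in a VM object.  VM areas with vm_ops are tracked in
 * linux_vma_head and backed by a cdev pager; plain physical mappings
 * are handed to an OBJT_SG object instead.
 */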
static int
linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	vm_memattr_t attr;
	int error;

	td = curthread;
	file = td->td_fpop;
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;

	if (filp->f_op->mmap == NULL)
		return (ENODEV);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	mm = current->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = EINTR;
	} else {
		error = -filp->f_op->mmap(filp, vmap);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    curthread->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    curthread->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, curthread->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}
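/*
 * Character-device switch used for all cdevs created through the
 * LinuxKPI; the d_* methods above adapt the devfs entry points to the
 * Linux file_operations installed at open time.
 */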
struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = linux_dev_open,
	.d_close = linux_dev_close,
	.d_read = linux_dev_read,
	.d_write = linux_dev_write,
	.d_ioctl = linux_dev_ioctl,
	.d_mmap_single = linux_dev_mmap_single,
	.d_poll = linux_dev_poll,
	.d_kqfilter = linux_dev_kqfilter,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	linux_set_current(td);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	if (filp->f_op->poll != NULL)
		revents = filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL) & events;
	else
		revents = 0;

	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	error = -filp->f_op->release(NULL, filp);
	funsetown(&filp->f_sigio);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	error = 0;

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (filp->f_op->fasync == NULL)
			break;
		error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0)
			error = filp->f_op->fasync(0, filp,
			    fp->f_flag & FASYNC);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

	return (0);
}
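/*
 * linux_iminor() recovers the Linux minor number from an inode (vnode),
 * returning -1U for nodes that were not created through linuxcdevsw.
 */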
unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}
char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	linux_set_current(curthread);

	timer = context;
	timer->function(timer->data);
}

void
mod_timer(struct timer_list *timer, int expires)
{

	timer->expires = expires;
	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

static void
linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	c->done++;
	if (all)
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	else
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Completion wait helpers.  The Linux-facing wait_for_completion*()
 * macros map onto the functions below; an interruptible wait returns
 * -ERESTARTSYS when a signal is caught, matching Linux behaviour.
 */

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	DROP_GIANT();

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			if (sleepq_wait_sig(c, 0) != 0) {
				error = -ERESTARTSYS;
				goto intr;
			}
		} else
			sleepq_wait(c, 0);
	}
	c->done--;
	sleepq_release(c);

intr:
	PICKUP_GIANT();

	return (error);
}

/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
	int end = jiffies + timeout;
	int error;
	int ret;

	if (SCHEDULER_STOPPED())
		return (0);

	DROP_GIANT();

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	error = 0;
	ret = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
		if (flags & SLEEPQ_INTERRUPTIBLE)
			ret = sleepq_timedwait_sig(c, 0);
		else
			ret = sleepq_timedwait(c, 0);
		if (ret != 0) {
			/* check for timeout or signal */
			if (ret == EWOULDBLOCK)
				error = 0;
			else
				error = -ERESTARTSYS;
			goto intr;
		}
	}
	c->done--;
	sleepq_release(c);

intr:
	PICKUP_GIANT();

	/* return how many jiffies are left */
	return (ret != 0 ? error : linux_timer_jiffies_until(end));
}

int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done)
		c->done--;
	else
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done == 0)
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	if (cdev->cdev)
		destroy_dev(cdev->cdev);
	kfree(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	if (cdev->cdev)
		destroy_dev(cdev->cdev);
	kobject_put(parent);
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;

	nb = arg;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, ifp);
	else
		nb->notifier_call(nb, NETDEV_DOWN, ifp);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_REGISTER, ifp);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
}
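/*
 * Netdevice notifier emulation: each Linux notifier_block is backed by
 * one FreeBSD EVENTHANDLER registration per NETDEV_* event, with the
 * handler tags stored in nb->tags[] so they can be deregistered again.
 */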
int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

/*
 * list_sort() flattens the list into a temporary array, sorts it with
 * qsort_r() using the caller's comparison function and relinks the
 * elements in sorted order.
 */
void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}
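/*
 * Character device major/minor emulation: look up a LinuxKPI cdev by
 * name and dev_t among the devices created through linuxcdevsw.
 */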
struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));