1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/sysctl.h> 38 #include <sys/proc.h> 39 #include <sys/sglist.h> 40 #include <sys/sleepqueue.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/bus.h> 44 #include <sys/fcntl.h> 45 #include <sys/file.h> 46 #include <sys/filio.h> 47 #include <sys/rwlock.h> 48 #include <sys/mman.h> 49 50 #include <vm/vm.h> 51 #include <vm/pmap.h> 52 #include <vm/vm_object.h> 53 #include <vm/vm_page.h> 54 #include <vm/vm_pager.h> 55 56 #include <machine/stdarg.h> 57 58 #if defined(__i386__) || defined(__amd64__) 59 #include <machine/md_var.h> 60 #endif 61 62 #include <linux/kobject.h> 63 #include <linux/device.h> 64 #include <linux/slab.h> 65 #include <linux/module.h> 66 #include <linux/moduleparam.h> 67 #include <linux/cdev.h> 68 #include <linux/file.h> 69 #include <linux/sysfs.h> 70 #include <linux/mm.h> 71 #include <linux/io.h> 72 #include <linux/vmalloc.h> 73 #include <linux/netdevice.h> 74 #include <linux/timer.h> 75 #include <linux/interrupt.h> 76 #include <linux/uaccess.h> 77 #include <linux/list.h> 78 #include <linux/kthread.h> 79 #include <linux/kernel.h> 80 #include <linux/compat.h> 81 #include <linux/poll.h> 82 #include <linux/smp.h> 83 84 #if defined(__i386__) || defined(__amd64__) 85 #include <asm/smp.h> 86 #endif 87 88 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); 89 90 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 91 92 #include <linux/rbtree.h> 93 /* Undo Linux compat changes. 
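 * The LinuxKPI headers included above redefine the RB_ROOT, file and
 * cdev identifiers; the #undef/#define sequence below restores the
 * native FreeBSD meanings before this file uses them.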
*/ 94 #undef RB_ROOT 95 #undef file 96 #undef cdev 97 #define RB_ROOT(head) (head)->rbh_root 98 99 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 100 101 struct kobject linux_class_root; 102 struct device linux_root_device; 103 struct class linux_class_misc; 104 struct list_head pci_drivers; 105 struct list_head pci_devices; 106 spinlock_t pci_lock; 107 108 unsigned long linux_timer_hz_mask; 109 110 int 111 panic_cmp(struct rb_node *one, struct rb_node *two) 112 { 113 panic("no cmp"); 114 } 115 116 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 117 118 int 119 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 120 { 121 va_list tmp_va; 122 int len; 123 char *old; 124 char *name; 125 char dummy; 126 127 old = kobj->name; 128 129 if (old && fmt == NULL) 130 return (0); 131 132 /* compute length of string */ 133 va_copy(tmp_va, args); 134 len = vsnprintf(&dummy, 0, fmt, tmp_va); 135 va_end(tmp_va); 136 137 /* account for zero termination */ 138 len++; 139 140 /* check for error */ 141 if (len < 1) 142 return (-EINVAL); 143 144 /* allocate memory for string */ 145 name = kzalloc(len, GFP_KERNEL); 146 if (name == NULL) 147 return (-ENOMEM); 148 vsnprintf(name, len, fmt, args); 149 kobj->name = name; 150 151 /* free old string */ 152 kfree(old); 153 154 /* filter new string */ 155 for (; *name != '\0'; name++) 156 if (*name == '/') 157 *name = '!'; 158 return (0); 159 } 160 161 int 162 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 163 { 164 va_list args; 165 int error; 166 167 va_start(args, fmt); 168 error = kobject_set_name_vargs(kobj, fmt, args); 169 va_end(args); 170 171 return (error); 172 } 173 174 static int 175 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 176 { 177 const struct kobj_type *t; 178 int error; 179 180 kobj->parent = parent; 181 error = sysfs_create_dir(kobj); 182 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 183 struct attribute **attr; 184 t = kobj->ktype; 185 186 for (attr = t->default_attrs; *attr != NULL; attr++) { 187 error = sysfs_create_file(kobj, *attr); 188 if (error) 189 break; 190 } 191 if (error) 192 sysfs_remove_dir(kobj); 193 194 } 195 return (error); 196 } 197 198 int 199 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
200 { 201 va_list args; 202 int error; 203 204 va_start(args, fmt); 205 error = kobject_set_name_vargs(kobj, fmt, args); 206 va_end(args); 207 if (error) 208 return (error); 209 210 return kobject_add_complete(kobj, parent); 211 } 212 213 void 214 linux_kobject_release(struct kref *kref) 215 { 216 struct kobject *kobj; 217 char *name; 218 219 kobj = container_of(kref, struct kobject, kref); 220 sysfs_remove_dir(kobj); 221 name = kobj->name; 222 if (kobj->ktype && kobj->ktype->release) 223 kobj->ktype->release(kobj); 224 kfree(name); 225 } 226 227 static void 228 linux_kobject_kfree(struct kobject *kobj) 229 { 230 kfree(kobj); 231 } 232 233 static void 234 linux_kobject_kfree_name(struct kobject *kobj) 235 { 236 if (kobj) { 237 kfree(kobj->name); 238 } 239 } 240 241 const struct kobj_type linux_kfree_type = { 242 .release = linux_kobject_kfree 243 }; 244 245 static void 246 linux_device_release(struct device *dev) 247 { 248 pr_debug("linux_device_release: %s\n", dev_name(dev)); 249 kfree(dev); 250 } 251 252 static ssize_t 253 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 254 { 255 struct class_attribute *dattr; 256 ssize_t error; 257 258 dattr = container_of(attr, struct class_attribute, attr); 259 error = -EIO; 260 if (dattr->show) 261 error = dattr->show(container_of(kobj, struct class, kobj), 262 dattr, buf); 263 return (error); 264 } 265 266 static ssize_t 267 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 268 size_t count) 269 { 270 struct class_attribute *dattr; 271 ssize_t error; 272 273 dattr = container_of(attr, struct class_attribute, attr); 274 error = -EIO; 275 if (dattr->store) 276 error = dattr->store(container_of(kobj, struct class, kobj), 277 dattr, buf, count); 278 return (error); 279 } 280 281 static void 282 linux_class_release(struct kobject *kobj) 283 { 284 struct class *class; 285 286 class = container_of(kobj, struct class, kobj); 287 if (class->class_release) 288 class->class_release(class); 289 } 290 291 static const struct sysfs_ops linux_class_sysfs = { 292 .show = linux_class_show, 293 .store = linux_class_store, 294 }; 295 296 const struct kobj_type linux_class_ktype = { 297 .release = linux_class_release, 298 .sysfs_ops = &linux_class_sysfs 299 }; 300 301 static void 302 linux_dev_release(struct kobject *kobj) 303 { 304 struct device *dev; 305 306 dev = container_of(kobj, struct device, kobj); 307 /* This is the precedence defined by linux. 
*/ 308 if (dev->release) 309 dev->release(dev); 310 else if (dev->class && dev->class->dev_release) 311 dev->class->dev_release(dev); 312 } 313 314 static ssize_t 315 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 316 { 317 struct device_attribute *dattr; 318 ssize_t error; 319 320 dattr = container_of(attr, struct device_attribute, attr); 321 error = -EIO; 322 if (dattr->show) 323 error = dattr->show(container_of(kobj, struct device, kobj), 324 dattr, buf); 325 return (error); 326 } 327 328 static ssize_t 329 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 330 size_t count) 331 { 332 struct device_attribute *dattr; 333 ssize_t error; 334 335 dattr = container_of(attr, struct device_attribute, attr); 336 error = -EIO; 337 if (dattr->store) 338 error = dattr->store(container_of(kobj, struct device, kobj), 339 dattr, buf, count); 340 return (error); 341 } 342 343 static const struct sysfs_ops linux_dev_sysfs = { 344 .show = linux_dev_show, 345 .store = linux_dev_store, 346 }; 347 348 const struct kobj_type linux_dev_ktype = { 349 .release = linux_dev_release, 350 .sysfs_ops = &linux_dev_sysfs 351 }; 352 353 struct device * 354 device_create(struct class *class, struct device *parent, dev_t devt, 355 void *drvdata, const char *fmt, ...) 356 { 357 struct device *dev; 358 va_list args; 359 360 dev = kzalloc(sizeof(*dev), M_WAITOK); 361 dev->parent = parent; 362 dev->class = class; 363 dev->devt = devt; 364 dev->driver_data = drvdata; 365 dev->release = linux_device_release; 366 va_start(args, fmt); 367 kobject_set_name_vargs(&dev->kobj, fmt, args); 368 va_end(args); 369 device_register(dev); 370 371 return (dev); 372 } 373 374 int 375 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 376 struct kobject *parent, const char *fmt, ...) 
377 { 378 va_list args; 379 int error; 380 381 kobject_init(kobj, ktype); 382 kobj->ktype = ktype; 383 kobj->parent = parent; 384 kobj->name = NULL; 385 386 va_start(args, fmt); 387 error = kobject_set_name_vargs(kobj, fmt, args); 388 va_end(args); 389 if (error) 390 return (error); 391 return kobject_add_complete(kobj, parent); 392 } 393 394 static void 395 linux_kq_lock(void *arg) 396 { 397 spinlock_t *s = arg; 398 399 spin_lock(s); 400 } 401 static void 402 linux_kq_unlock(void *arg) 403 { 404 spinlock_t *s = arg; 405 406 spin_unlock(s); 407 } 408 409 static void 410 linux_kq_lock_owned(void *arg) 411 { 412 #ifdef INVARIANTS 413 spinlock_t *s = arg; 414 415 mtx_assert(&s->m, MA_OWNED); 416 #endif 417 } 418 419 static void 420 linux_kq_lock_unowned(void *arg) 421 { 422 #ifdef INVARIANTS 423 spinlock_t *s = arg; 424 425 mtx_assert(&s->m, MA_NOTOWNED); 426 #endif 427 } 428 429 static void 430 linux_file_kqfilter_poll(struct linux_file *, int); 431 432 struct linux_file * 433 linux_file_alloc(void) 434 { 435 struct linux_file *filp; 436 437 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 438 439 /* set initial refcount */ 440 filp->f_count = 1; 441 442 /* setup fields needed by kqueue support */ 443 spin_lock_init(&filp->f_kqlock); 444 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 445 linux_kq_lock, linux_kq_unlock, 446 linux_kq_lock_owned, linux_kq_lock_unowned); 447 448 return (filp); 449 } 450 451 void 452 linux_file_free(struct linux_file *filp) 453 { 454 if (filp->_file == NULL) { 455 if (filp->f_shmem != NULL) 456 vm_object_deallocate(filp->f_shmem); 457 kfree(filp); 458 } else { 459 /* 460 * The close method of the character device or file 461 * will free the linux_file structure: 462 */ 463 _fdrop(filp->_file, curthread); 464 } 465 } 466 467 static int 468 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 469 vm_page_t *mres) 470 { 471 struct vm_area_struct *vmap; 472 473 vmap = linux_cdev_handle_find(vm_obj->handle); 474 475 MPASS(vmap != NULL); 476 MPASS(vmap->vm_private_data == vm_obj->handle); 477 478 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 479 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 480 vm_page_t page; 481 482 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 483 /* 484 * If the passed in result page is a fake 485 * page, update it with the new physical 486 * address. 487 */ 488 page = *mres; 489 vm_page_updatefake(page, paddr, vm_obj->memattr); 490 } else { 491 /* 492 * Replace the passed in "mres" page with our 493 * own fake page and free up the all of the 494 * original pages. 
495 */ 496 VM_OBJECT_WUNLOCK(vm_obj); 497 page = vm_page_getfake(paddr, vm_obj->memattr); 498 VM_OBJECT_WLOCK(vm_obj); 499 500 vm_page_replace_checked(page, vm_obj, 501 (*mres)->pindex, *mres); 502 503 vm_page_lock(*mres); 504 vm_page_free(*mres); 505 vm_page_unlock(*mres); 506 *mres = page; 507 } 508 page->valid = VM_PAGE_BITS_ALL; 509 return (VM_PAGER_OK); 510 } 511 return (VM_PAGER_FAIL); 512 } 513 514 static int 515 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 516 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 517 { 518 struct vm_area_struct *vmap; 519 int err; 520 521 linux_set_current(curthread); 522 523 /* get VM area structure */ 524 vmap = linux_cdev_handle_find(vm_obj->handle); 525 MPASS(vmap != NULL); 526 MPASS(vmap->vm_private_data == vm_obj->handle); 527 528 VM_OBJECT_WUNLOCK(vm_obj); 529 530 down_write(&vmap->vm_mm->mmap_sem); 531 if (unlikely(vmap->vm_ops == NULL)) { 532 err = VM_FAULT_SIGBUS; 533 } else { 534 struct vm_fault vmf; 535 536 /* fill out VM fault structure */ 537 vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); 538 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 539 vmf.pgoff = 0; 540 vmf.page = NULL; 541 vmf.vma = vmap; 542 543 vmap->vm_pfn_count = 0; 544 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 545 vmap->vm_obj = vm_obj; 546 547 err = vmap->vm_ops->fault(vmap, &vmf); 548 549 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 550 kern_yield(PRI_USER); 551 err = vmap->vm_ops->fault(vmap, &vmf); 552 } 553 } 554 555 /* translate return code */ 556 switch (err) { 557 case VM_FAULT_OOM: 558 err = VM_PAGER_AGAIN; 559 break; 560 case VM_FAULT_SIGBUS: 561 err = VM_PAGER_BAD; 562 break; 563 case VM_FAULT_NOPAGE: 564 /* 565 * By contract the fault handler will return having 566 * busied all the pages itself. If pidx is already 567 * found in the object, it will simply xbusy the first 568 * page and return with vm_pfn_count set to 1. 
569 */ 570 *first = vmap->vm_pfn_first; 571 *last = *first + vmap->vm_pfn_count - 1; 572 err = VM_PAGER_OK; 573 break; 574 default: 575 err = VM_PAGER_ERROR; 576 break; 577 } 578 up_write(&vmap->vm_mm->mmap_sem); 579 VM_OBJECT_WLOCK(vm_obj); 580 return (err); 581 } 582 583 static struct rwlock linux_vma_lock; 584 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 585 TAILQ_HEAD_INITIALIZER(linux_vma_head); 586 587 static void 588 linux_cdev_handle_free(struct vm_area_struct *vmap) 589 { 590 /* Drop reference on vm_file */ 591 if (vmap->vm_file != NULL) 592 fput(vmap->vm_file); 593 594 /* Drop reference on mm_struct */ 595 mmput(vmap->vm_mm); 596 597 kfree(vmap); 598 } 599 600 static void 601 linux_cdev_handle_remove(struct vm_area_struct *vmap) 602 { 603 rw_wlock(&linux_vma_lock); 604 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 605 rw_wunlock(&linux_vma_lock); 606 } 607 608 static struct vm_area_struct * 609 linux_cdev_handle_find(void *handle) 610 { 611 struct vm_area_struct *vmap; 612 613 rw_rlock(&linux_vma_lock); 614 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 615 if (vmap->vm_private_data == handle) 616 break; 617 } 618 rw_runlock(&linux_vma_lock); 619 return (vmap); 620 } 621 622 static int 623 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 624 vm_ooffset_t foff, struct ucred *cred, u_short *color) 625 { 626 627 MPASS(linux_cdev_handle_find(handle) != NULL); 628 *color = 0; 629 return (0); 630 } 631 632 static void 633 linux_cdev_pager_dtor(void *handle) 634 { 635 const struct vm_operations_struct *vm_ops; 636 struct vm_area_struct *vmap; 637 638 vmap = linux_cdev_handle_find(handle); 639 MPASS(vmap != NULL); 640 641 /* 642 * Remove handle before calling close operation to prevent 643 * other threads from reusing the handle pointer. 
644 */ 645 linux_cdev_handle_remove(vmap); 646 647 down_write(&vmap->vm_mm->mmap_sem); 648 vm_ops = vmap->vm_ops; 649 if (likely(vm_ops != NULL)) 650 vm_ops->close(vmap); 651 up_write(&vmap->vm_mm->mmap_sem); 652 653 linux_cdev_handle_free(vmap); 654 } 655 656 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 657 { 658 /* OBJT_MGTDEVICE */ 659 .cdev_pg_populate = linux_cdev_pager_populate, 660 .cdev_pg_ctor = linux_cdev_pager_ctor, 661 .cdev_pg_dtor = linux_cdev_pager_dtor 662 }, 663 { 664 /* OBJT_DEVICE */ 665 .cdev_pg_fault = linux_cdev_pager_fault, 666 .cdev_pg_ctor = linux_cdev_pager_ctor, 667 .cdev_pg_dtor = linux_cdev_pager_dtor 668 }, 669 }; 670 671 #define OPW(fp,td,code) ({ \ 672 struct file *__fpop; \ 673 __typeof(code) __retval; \ 674 \ 675 __fpop = (td)->td_fpop; \ 676 (td)->td_fpop = (fp); \ 677 __retval = (code); \ 678 (td)->td_fpop = __fpop; \ 679 __retval; \ 680 }) 681 682 static int 683 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) 684 { 685 struct linux_cdev *ldev; 686 struct linux_file *filp; 687 int error; 688 689 ldev = dev->si_drv1; 690 691 filp = linux_file_alloc(); 692 filp->f_dentry = &filp->f_dentry_store; 693 filp->f_op = ldev->ops; 694 filp->f_mode = file->f_flag; 695 filp->f_flags = file->f_flag; 696 filp->f_vnode = file->f_vnode; 697 filp->_file = file; 698 699 linux_set_current(td); 700 701 if (filp->f_op->open) { 702 error = -filp->f_op->open(file->f_vnode, filp); 703 if (error) { 704 kfree(filp); 705 return (error); 706 } 707 } 708 709 /* hold on to the vnode - used for fstat() */ 710 vhold(filp->f_vnode); 711 712 /* release the file from devfs */ 713 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 714 return (ENXIO); 715 } 716 717 #define LINUX_IOCTL_MIN_PTR 0x10000UL 718 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 719 720 static inline int 721 linux_remap_address(void **uaddr, size_t len) 722 { 723 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 724 725 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 726 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 727 struct task_struct *pts = current; 728 if (pts == NULL) { 729 *uaddr = NULL; 730 return (1); 731 } 732 733 /* compute data offset */ 734 uaddr_val -= LINUX_IOCTL_MIN_PTR; 735 736 /* check that length is within bounds */ 737 if ((len > IOCPARM_MAX) || 738 (uaddr_val + len) > pts->bsd_ioctl_len) { 739 *uaddr = NULL; 740 return (1); 741 } 742 743 /* re-add kernel buffer address */ 744 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 745 746 /* update address location */ 747 *uaddr = (void *)uaddr_val; 748 return (1); 749 } 750 return (0); 751 } 752 753 int 754 linux_copyin(const void *uaddr, void *kaddr, size_t len) 755 { 756 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 757 if (uaddr == NULL) 758 return (-EFAULT); 759 memcpy(kaddr, uaddr, len); 760 return (0); 761 } 762 return (-copyin(uaddr, kaddr, len)); 763 } 764 765 int 766 linux_copyout(const void *kaddr, void *uaddr, size_t len) 767 { 768 if (linux_remap_address(&uaddr, len)) { 769 if (uaddr == NULL) 770 return (-EFAULT); 771 memcpy(uaddr, kaddr, len); 772 return (0); 773 } 774 return (-copyout(kaddr, uaddr, len)); 775 } 776 777 size_t 778 linux_clear_user(void *_uaddr, size_t _len) 779 { 780 uint8_t *uaddr = _uaddr; 781 size_t len = _len; 782 783 /* make sure uaddr is aligned before going into the fast loop */ 784 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 785 if (subyte(uaddr, 0)) 786 return (_len); 787 uaddr++; 788 len--; 789 } 790 791 /* zero 8 bytes at a time */ 792 
while (len > 7) { 793 #ifdef __LP64__ 794 if (suword64(uaddr, 0)) 795 return (_len); 796 #else 797 if (suword32(uaddr, 0)) 798 return (_len); 799 if (suword32(uaddr + 4, 0)) 800 return (_len); 801 #endif 802 uaddr += 8; 803 len -= 8; 804 } 805 806 /* zero fill end, if any */ 807 while (len > 0) { 808 if (subyte(uaddr, 0)) 809 return (_len); 810 uaddr++; 811 len--; 812 } 813 return (0); 814 } 815 816 int 817 linux_access_ok(int rw, const void *uaddr, size_t len) 818 { 819 uintptr_t saddr; 820 uintptr_t eaddr; 821 822 /* get start and end address */ 823 saddr = (uintptr_t)uaddr; 824 eaddr = (uintptr_t)uaddr + len; 825 826 /* verify addresses are valid for userspace */ 827 return ((saddr == eaddr) || 828 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 829 } 830 831 /* 832 * This function should return either EINTR or ERESTART depending on 833 * the signal type sent to this thread: 834 */ 835 static int 836 linux_get_error(struct task_struct *task, int error) 837 { 838 /* check for signal type interrupt code */ 839 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 840 error = -linux_schedule_get_interrupt_value(task); 841 if (error == 0) 842 error = EINTR; 843 } 844 return (error); 845 } 846 847 static int 848 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 849 u_long cmd, caddr_t data, struct thread *td) 850 { 851 struct task_struct *task = current; 852 unsigned size; 853 int error; 854 855 size = IOCPARM_LEN(cmd); 856 /* refer to logic in sys_ioctl() */ 857 if (size > 0) { 858 /* 859 * Setup hint for linux_copyin() and linux_copyout(). 860 * 861 * Background: Linux code expects a user-space address 862 * while FreeBSD supplies a kernel-space address. 863 */ 864 task->bsd_ioctl_data = data; 865 task->bsd_ioctl_len = size; 866 data = (void *)LINUX_IOCTL_MIN_PTR; 867 } else { 868 /* fetch user-space pointer */ 869 data = *(void **)data; 870 } 871 #if defined(__amd64__) 872 if (td->td_proc->p_elf_machine == EM_386) { 873 /* try the compat IOCTL handler first */ 874 if (filp->f_op->compat_ioctl != NULL) 875 error = -OPW(fp, td, filp->f_op->compat_ioctl(filp, cmd, (u_long)data)); 876 else 877 error = ENOTTY; 878 879 /* fallback to the regular IOCTL handler, if any */ 880 if (error == ENOTTY && filp->f_op->unlocked_ioctl != NULL) 881 error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data)); 882 } else 883 #endif 884 if (filp->f_op->unlocked_ioctl != NULL) 885 error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data)); 886 else 887 error = ENOTTY; 888 if (size > 0) { 889 task->bsd_ioctl_data = NULL; 890 task->bsd_ioctl_len = 0; 891 } 892 893 if (error == EWOULDBLOCK) { 894 /* update kqfilter status, if any */ 895 linux_file_kqfilter_poll(filp, 896 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 897 } else { 898 error = linux_get_error(task, error); 899 } 900 return (error); 901 } 902 903 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 904 905 /* 906 * This function atomically updates the poll wakeup state and returns 907 * the previous state at the time of update. 
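 *
 * Sketch of how the callers below drive it: each caller passes a
 * transition table indexed by the current state, for example
 *
 *	[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
 *
 * with the remaining entries mapping a state to itself (NOP), and then
 * acts on the returned previous state; linux_poll_wakeup_callback()
 * treats a returned LINUX_FWQ_STATE_QUEUED as "this call performed the
 * QUEUED -> READY transition" and issues the wakeup.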
908 */ 909 static uint8_t 910 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 911 { 912 int c, old; 913 914 c = v->counter; 915 916 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 917 c = old; 918 919 return (c); 920 } 921 922 923 static int 924 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 925 { 926 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 927 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 928 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 929 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 930 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 931 }; 932 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 933 934 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 935 case LINUX_FWQ_STATE_QUEUED: 936 linux_poll_wakeup(filp); 937 return (1); 938 default: 939 return (0); 940 } 941 } 942 943 void 944 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 945 { 946 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 947 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 948 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 949 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 950 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 951 }; 952 953 /* check if we are called inside the select system call */ 954 if (p == LINUX_POLL_TABLE_NORMAL) 955 selrecord(curthread, &filp->f_selinfo); 956 957 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 958 case LINUX_FWQ_STATE_INIT: 959 /* NOTE: file handles can only belong to one wait-queue */ 960 filp->f_wait_queue.wqh = wqh; 961 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 962 add_wait_queue(wqh, &filp->f_wait_queue.wq); 963 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 964 break; 965 default: 966 break; 967 } 968 } 969 970 static void 971 linux_poll_wait_dequeue(struct linux_file *filp) 972 { 973 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 974 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 975 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 976 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 977 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 978 }; 979 980 seldrain(&filp->f_selinfo); 981 982 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 983 case LINUX_FWQ_STATE_NOT_READY: 984 case LINUX_FWQ_STATE_QUEUED: 985 case LINUX_FWQ_STATE_READY: 986 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 987 break; 988 default: 989 break; 990 } 991 } 992 993 void 994 linux_poll_wakeup(struct linux_file *filp) 995 { 996 /* this function should be NULL-safe */ 997 if (filp == NULL) 998 return; 999 1000 selwakeup(&filp->f_selinfo); 1001 1002 spin_lock(&filp->f_kqlock); 1003 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1004 LINUX_KQ_FLAG_NEED_WRITE; 1005 1006 /* make sure the "knote" gets woken up */ 1007 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1008 spin_unlock(&filp->f_kqlock); 1009 } 1010 1011 static void 1012 linux_file_kqfilter_detach(struct knote *kn) 1013 { 1014 struct linux_file *filp = kn->kn_hook; 1015 1016 spin_lock(&filp->f_kqlock); 1017 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1018 spin_unlock(&filp->f_kqlock); 1019 } 1020 1021 static int 1022 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1023 { 1024 struct linux_file *filp = kn->kn_hook; 1025 1026 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1027 1028 return 
((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1029 } 1030 1031 static int 1032 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1033 { 1034 struct linux_file *filp = kn->kn_hook; 1035 1036 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1037 1038 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1039 } 1040 1041 static struct filterops linux_dev_kqfiltops_read = { 1042 .f_isfd = 1, 1043 .f_detach = linux_file_kqfilter_detach, 1044 .f_event = linux_file_kqfilter_read_event, 1045 }; 1046 1047 static struct filterops linux_dev_kqfiltops_write = { 1048 .f_isfd = 1, 1049 .f_detach = linux_file_kqfilter_detach, 1050 .f_event = linux_file_kqfilter_write_event, 1051 }; 1052 1053 static void 1054 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1055 { 1056 int temp; 1057 1058 if (filp->f_kqflags & kqflags) { 1059 struct thread *td = curthread; 1060 1061 /* get the latest polling state */ 1062 temp = OPW(filp->_file, td, filp->f_op->poll(filp, NULL)); 1063 1064 spin_lock(&filp->f_kqlock); 1065 /* clear kqflags */ 1066 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1067 LINUX_KQ_FLAG_NEED_WRITE); 1068 /* update kqflags */ 1069 if (temp & (POLLIN | POLLOUT)) { 1070 if (temp & POLLIN) 1071 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1072 if (temp & POLLOUT) 1073 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1074 1075 /* make sure the "knote" gets woken up */ 1076 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1077 } 1078 spin_unlock(&filp->f_kqlock); 1079 } 1080 } 1081 1082 static int 1083 linux_file_kqfilter(struct file *file, struct knote *kn) 1084 { 1085 struct linux_file *filp; 1086 struct thread *td; 1087 int error; 1088 1089 td = curthread; 1090 filp = (struct linux_file *)file->f_data; 1091 filp->f_flags = file->f_flag; 1092 if (filp->f_op->poll == NULL) 1093 return (EINVAL); 1094 1095 spin_lock(&filp->f_kqlock); 1096 switch (kn->kn_filter) { 1097 case EVFILT_READ: 1098 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1099 kn->kn_fop = &linux_dev_kqfiltops_read; 1100 kn->kn_hook = filp; 1101 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1102 error = 0; 1103 break; 1104 case EVFILT_WRITE: 1105 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1106 kn->kn_fop = &linux_dev_kqfiltops_write; 1107 kn->kn_hook = filp; 1108 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1109 error = 0; 1110 break; 1111 default: 1112 error = EINVAL; 1113 break; 1114 } 1115 spin_unlock(&filp->f_kqlock); 1116 1117 if (error == 0) { 1118 linux_set_current(td); 1119 1120 /* update kqfilter status, if any */ 1121 linux_file_kqfilter_poll(filp, 1122 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1123 } 1124 return (error); 1125 } 1126 1127 static int 1128 linux_file_mmap_single(struct file *fp, vm_ooffset_t *offset, 1129 vm_size_t size, struct vm_object **object, int nprot, 1130 struct thread *td) 1131 { 1132 struct task_struct *task; 1133 struct vm_area_struct *vmap; 1134 struct mm_struct *mm; 1135 struct linux_file *filp; 1136 vm_memattr_t attr; 1137 int error; 1138 1139 filp = (struct linux_file *)fp->f_data; 1140 filp->f_flags = fp->f_flag; 1141 1142 if (filp->f_op->mmap == NULL) 1143 return (EOPNOTSUPP); 1144 1145 linux_set_current(td); 1146 1147 /* 1148 * The same VM object might be shared by multiple processes 1149 * and the mm_struct is usually freed when a process exits. 1150 * 1151 * The atomic reference below makes sure the mm_struct is 1152 * available as long as the vmap is in the linux_vma_head. 
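 * The reference taken below via atomic_inc_not_zero(&mm->mm_users) is
 * dropped again by the mmput() call in linux_cdev_handle_free().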
1153 */ 1154 task = current; 1155 mm = task->mm; 1156 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1157 return (EINVAL); 1158 1159 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1160 vmap->vm_start = 0; 1161 vmap->vm_end = size; 1162 vmap->vm_pgoff = *offset / PAGE_SIZE; 1163 vmap->vm_pfn = 0; 1164 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1165 vmap->vm_ops = NULL; 1166 vmap->vm_file = get_file(filp); 1167 vmap->vm_mm = mm; 1168 1169 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1170 error = linux_get_error(task, EINTR); 1171 } else { 1172 error = -OPW(fp, td, filp->f_op->mmap(filp, vmap)); 1173 error = linux_get_error(task, error); 1174 up_write(&vmap->vm_mm->mmap_sem); 1175 } 1176 1177 if (error != 0) { 1178 linux_cdev_handle_free(vmap); 1179 return (error); 1180 } 1181 1182 attr = pgprot2cachemode(vmap->vm_page_prot); 1183 1184 if (vmap->vm_ops != NULL) { 1185 struct vm_area_struct *ptr; 1186 void *vm_private_data; 1187 bool vm_no_fault; 1188 1189 if (vmap->vm_ops->open == NULL || 1190 vmap->vm_ops->close == NULL || 1191 vmap->vm_private_data == NULL) { 1192 /* free allocated VM area struct */ 1193 linux_cdev_handle_free(vmap); 1194 return (EINVAL); 1195 } 1196 1197 vm_private_data = vmap->vm_private_data; 1198 1199 rw_wlock(&linux_vma_lock); 1200 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1201 if (ptr->vm_private_data == vm_private_data) 1202 break; 1203 } 1204 /* check if there is an existing VM area struct */ 1205 if (ptr != NULL) { 1206 /* check if the VM area structure is invalid */ 1207 if (ptr->vm_ops == NULL || 1208 ptr->vm_ops->open == NULL || 1209 ptr->vm_ops->close == NULL) { 1210 error = ESTALE; 1211 vm_no_fault = 1; 1212 } else { 1213 error = EEXIST; 1214 vm_no_fault = (ptr->vm_ops->fault == NULL); 1215 } 1216 } else { 1217 /* insert VM area structure into list */ 1218 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1219 error = 0; 1220 vm_no_fault = (vmap->vm_ops->fault == NULL); 1221 } 1222 rw_wunlock(&linux_vma_lock); 1223 1224 if (error != 0) { 1225 /* free allocated VM area struct */ 1226 linux_cdev_handle_free(vmap); 1227 /* check for stale VM area struct */ 1228 if (error != EEXIST) 1229 return (error); 1230 } 1231 1232 /* check if there is no fault handler */ 1233 if (vm_no_fault) { 1234 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1235 &linux_cdev_pager_ops[1], size, nprot, *offset, 1236 td->td_ucred); 1237 } else { 1238 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1239 &linux_cdev_pager_ops[0], size, nprot, *offset, 1240 td->td_ucred); 1241 } 1242 1243 /* check if allocating the VM object failed */ 1244 if (*object == NULL) { 1245 if (error == 0) { 1246 /* remove VM area struct from list */ 1247 linux_cdev_handle_remove(vmap); 1248 /* free allocated VM area struct */ 1249 linux_cdev_handle_free(vmap); 1250 } 1251 return (EINVAL); 1252 } 1253 } else { 1254 struct sglist *sg; 1255 1256 sg = sglist_alloc(1, M_WAITOK); 1257 sglist_append_phys(sg, 1258 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1259 1260 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1261 nprot, 0, td->td_ucred); 1262 1263 linux_cdev_handle_free(vmap); 1264 1265 if (*object == NULL) { 1266 sglist_free(sg); 1267 return (EINVAL); 1268 } 1269 } 1270 1271 if (attr != VM_MEMATTR_DEFAULT) { 1272 VM_OBJECT_WLOCK(*object); 1273 vm_object_set_memattr(*object, attr); 1274 VM_OBJECT_WUNLOCK(*object); 1275 } 1276 *offset = 0; 1277 return (0); 1278 } 1279 1280 struct cdevsw linuxcdevsw = { 1281 .d_version = D_VERSION, 1282 
.d_fdopen = linux_dev_fdopen, 1283 .d_name = "lkpidev", 1284 }; 1285 1286 static int 1287 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1288 int flags, struct thread *td) 1289 { 1290 struct linux_file *filp; 1291 ssize_t bytes; 1292 int error; 1293 1294 error = 0; 1295 filp = (struct linux_file *)file->f_data; 1296 filp->f_flags = file->f_flag; 1297 /* XXX no support for I/O vectors currently */ 1298 if (uio->uio_iovcnt != 1) 1299 return (EOPNOTSUPP); 1300 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1301 return (EINVAL); 1302 linux_set_current(td); 1303 if (filp->f_op->read) { 1304 bytes = OPW(file, td, filp->f_op->read(filp, uio->uio_iov->iov_base, 1305 uio->uio_iov->iov_len, &uio->uio_offset)); 1306 if (bytes >= 0) { 1307 uio->uio_iov->iov_base = 1308 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1309 uio->uio_iov->iov_len -= bytes; 1310 uio->uio_resid -= bytes; 1311 } else { 1312 error = linux_get_error(current, -bytes); 1313 } 1314 } else 1315 error = ENXIO; 1316 1317 /* update kqfilter status, if any */ 1318 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1319 1320 return (error); 1321 } 1322 1323 static int 1324 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1325 int flags, struct thread *td) 1326 { 1327 struct linux_file *filp; 1328 ssize_t bytes; 1329 int error; 1330 1331 error = 0; 1332 filp = (struct linux_file *)file->f_data; 1333 filp->f_flags = file->f_flag; 1334 /* XXX no support for I/O vectors currently */ 1335 if (uio->uio_iovcnt != 1) 1336 return (EOPNOTSUPP); 1337 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1338 return (EINVAL); 1339 linux_set_current(td); 1340 if (filp->f_op->write) { 1341 bytes = OPW(file, td, filp->f_op->write(filp, uio->uio_iov->iov_base, 1342 uio->uio_iov->iov_len, &uio->uio_offset)); 1343 if (bytes >= 0) { 1344 uio->uio_iov->iov_base = 1345 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1346 uio->uio_iov->iov_len -= bytes; 1347 uio->uio_resid -= bytes; 1348 } else { 1349 error = linux_get_error(current, -bytes); 1350 } 1351 } else 1352 error = ENXIO; 1353 1354 /* update kqfilter status, if any */ 1355 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1356 1357 return (error); 1358 } 1359 1360 static int 1361 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1362 struct thread *td) 1363 { 1364 struct linux_file *filp; 1365 int revents; 1366 1367 filp = (struct linux_file *)file->f_data; 1368 filp->f_flags = file->f_flag; 1369 linux_set_current(td); 1370 if (filp->f_op->poll != NULL) 1371 revents = OPW(file, td, filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; 1372 else 1373 revents = 0; 1374 1375 return (revents); 1376 } 1377 1378 static int 1379 linux_file_close(struct file *file, struct thread *td) 1380 { 1381 struct linux_file *filp; 1382 int error; 1383 1384 filp = (struct linux_file *)file->f_data; 1385 1386 KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); 1387 1388 filp->f_flags = file->f_flag; 1389 linux_set_current(td); 1390 linux_poll_wait_dequeue(filp); 1391 error = -OPW(file, td, filp->f_op->release(filp->f_vnode, filp)); 1392 funsetown(&filp->f_sigio); 1393 if (filp->f_vnode != NULL) 1394 vdrop(filp->f_vnode); 1395 kfree(filp); 1396 1397 return (error); 1398 } 1399 1400 static int 1401 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1402 struct thread *td) 1403 { 1404 struct linux_file *filp; 1405 int error; 1406 1407 filp = (struct linux_file *)fp->f_data; 1408 
filp->f_flags = fp->f_flag; 1409 error = 0; 1410 1411 linux_set_current(td); 1412 switch (cmd) { 1413 case FIONBIO: 1414 break; 1415 case FIOASYNC: 1416 if (filp->f_op->fasync == NULL) 1417 break; 1418 error = -OPW(fp, td, filp->f_op->fasync(0, filp, fp->f_flag & FASYNC)); 1419 break; 1420 case FIOSETOWN: 1421 error = fsetown(*(int *)data, &filp->f_sigio); 1422 if (error == 0) { 1423 if (filp->f_op->fasync == NULL) 1424 break; 1425 error = -OPW(fp, td, filp->f_op->fasync(0, filp, 1426 fp->f_flag & FASYNC)); 1427 } 1428 break; 1429 case FIOGETOWN: 1430 *(int *)data = fgetown(&filp->f_sigio); 1431 break; 1432 default: 1433 error = linux_file_ioctl_sub(fp, filp, cmd, data, td); 1434 break; 1435 } 1436 return (error); 1437 } 1438 1439 static int 1440 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1441 vm_prot_t *maxprotp, int *flagsp, struct file *fp, 1442 vm_ooffset_t *foff, vm_object_t *objp) 1443 { 1444 /* 1445 * Character devices do not provide private mappings 1446 * of any kind: 1447 */ 1448 if ((*maxprotp & VM_PROT_WRITE) == 0 && 1449 (prot & VM_PROT_WRITE) != 0) 1450 return (EACCES); 1451 if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) 1452 return (EINVAL); 1453 1454 return (linux_file_mmap_single(fp, foff, objsize, objp, (int)prot, td)); 1455 } 1456 1457 static int 1458 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1459 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1460 struct thread *td) 1461 { 1462 struct linux_file *filp; 1463 struct mount *mp; 1464 struct vnode *vp; 1465 vm_object_t object; 1466 vm_prot_t maxprot; 1467 int error; 1468 1469 filp = (struct linux_file *)fp->f_data; 1470 1471 vp = filp->f_vnode; 1472 if (vp == NULL) 1473 return (EOPNOTSUPP); 1474 1475 /* 1476 * Ensure that file and memory protections are 1477 * compatible. 1478 */ 1479 mp = vp->v_mount; 1480 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1481 maxprot = VM_PROT_NONE; 1482 if ((prot & VM_PROT_EXECUTE) != 0) 1483 return (EACCES); 1484 } else 1485 maxprot = VM_PROT_EXECUTE; 1486 if ((fp->f_flag & FREAD) != 0) 1487 maxprot |= VM_PROT_READ; 1488 else if ((prot & VM_PROT_READ) != 0) 1489 return (EACCES); 1490 1491 /* 1492 * If we are sharing potential changes via MAP_SHARED and we 1493 * are trying to get write permission although we opened it 1494 * without asking for it, bail out. 1495 * 1496 * Note that most character devices always share mappings. 1497 * 1498 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1499 * requests rather than doing it here. 
1500 */ 1501 if ((flags & MAP_SHARED) != 0) { 1502 if ((fp->f_flag & FWRITE) != 0) 1503 maxprot |= VM_PROT_WRITE; 1504 else if ((prot & VM_PROT_WRITE) != 0) 1505 return (EACCES); 1506 } 1507 maxprot &= cap_maxprot; 1508 1509 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, &foff, 1510 &object); 1511 if (error != 0) 1512 return (error); 1513 1514 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1515 foff, FALSE, td); 1516 if (error != 0) 1517 vm_object_deallocate(object); 1518 return (error); 1519 } 1520 1521 static int 1522 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1523 struct thread *td) 1524 { 1525 struct linux_file *filp; 1526 struct vnode *vp; 1527 int error; 1528 1529 filp = (struct linux_file *)fp->f_data; 1530 if (filp->f_vnode == NULL) 1531 return (EOPNOTSUPP); 1532 1533 vp = filp->f_vnode; 1534 1535 vn_lock(vp, LK_SHARED | LK_RETRY); 1536 error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); 1537 VOP_UNLOCK(vp, 0); 1538 1539 return (error); 1540 } 1541 1542 static int 1543 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1544 struct filedesc *fdp) 1545 { 1546 1547 return (0); 1548 } 1549 1550 unsigned int 1551 linux_iminor(struct inode *inode) 1552 { 1553 struct linux_cdev *ldev; 1554 1555 if (inode == NULL || inode->v_rdev == NULL || 1556 inode->v_rdev->si_devsw != &linuxcdevsw) 1557 return (-1U); 1558 ldev = inode->v_rdev->si_drv1; 1559 if (ldev == NULL) 1560 return (-1U); 1561 1562 return (minor(ldev->dev)); 1563 } 1564 1565 struct fileops linuxfileops = { 1566 .fo_read = linux_file_read, 1567 .fo_write = linux_file_write, 1568 .fo_truncate = invfo_truncate, 1569 .fo_kqfilter = linux_file_kqfilter, 1570 .fo_stat = linux_file_stat, 1571 .fo_fill_kinfo = linux_file_fill_kinfo, 1572 .fo_poll = linux_file_poll, 1573 .fo_close = linux_file_close, 1574 .fo_ioctl = linux_file_ioctl, 1575 .fo_mmap = linux_file_mmap, 1576 .fo_chmod = invfo_chmod, 1577 .fo_chown = invfo_chown, 1578 .fo_sendfile = invfo_sendfile, 1579 .fo_flags = DFLAG_PASSABLE, 1580 }; 1581 1582 /* 1583 * Hash of vmmap addresses. This is infrequently accessed and does not 1584 * need to be particularly large. This is done because we must store the 1585 * caller's idea of the map size to properly unmap. 
1586 */ 1587 struct vmmap { 1588 LIST_ENTRY(vmmap) vm_next; 1589 void *vm_addr; 1590 unsigned long vm_size; 1591 }; 1592 1593 struct vmmaphd { 1594 struct vmmap *lh_first; 1595 }; 1596 #define VMMAP_HASH_SIZE 64 1597 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1598 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1599 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1600 static struct mtx vmmaplock; 1601 1602 static void 1603 vmmap_add(void *addr, unsigned long size) 1604 { 1605 struct vmmap *vmmap; 1606 1607 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1608 mtx_lock(&vmmaplock); 1609 vmmap->vm_size = size; 1610 vmmap->vm_addr = addr; 1611 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1612 mtx_unlock(&vmmaplock); 1613 } 1614 1615 static struct vmmap * 1616 vmmap_remove(void *addr) 1617 { 1618 struct vmmap *vmmap; 1619 1620 mtx_lock(&vmmaplock); 1621 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1622 if (vmmap->vm_addr == addr) 1623 break; 1624 if (vmmap) 1625 LIST_REMOVE(vmmap, vm_next); 1626 mtx_unlock(&vmmaplock); 1627 1628 return (vmmap); 1629 } 1630 1631 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1632 void * 1633 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1634 { 1635 void *addr; 1636 1637 addr = pmap_mapdev_attr(phys_addr, size, attr); 1638 if (addr == NULL) 1639 return (NULL); 1640 vmmap_add(addr, size); 1641 1642 return (addr); 1643 } 1644 #endif 1645 1646 void 1647 iounmap(void *addr) 1648 { 1649 struct vmmap *vmmap; 1650 1651 vmmap = vmmap_remove(addr); 1652 if (vmmap == NULL) 1653 return; 1654 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1655 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1656 #endif 1657 kfree(vmmap); 1658 } 1659 1660 1661 void * 1662 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1663 { 1664 vm_offset_t off; 1665 size_t size; 1666 1667 size = count * PAGE_SIZE; 1668 off = kva_alloc(size); 1669 if (off == 0) 1670 return (NULL); 1671 vmmap_add((void *)off, size); 1672 pmap_qenter(off, pages, count); 1673 1674 return ((void *)off); 1675 } 1676 1677 void 1678 vunmap(void *addr) 1679 { 1680 struct vmmap *vmmap; 1681 1682 vmmap = vmmap_remove(addr); 1683 if (vmmap == NULL) 1684 return; 1685 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1686 kva_free((vm_offset_t)addr, vmmap->vm_size); 1687 kfree(vmmap); 1688 } 1689 1690 char * 1691 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1692 { 1693 unsigned int len; 1694 char *p; 1695 va_list aq; 1696 1697 va_copy(aq, ap); 1698 len = vsnprintf(NULL, 0, fmt, aq); 1699 va_end(aq); 1700 1701 p = kmalloc(len + 1, gfp); 1702 if (p != NULL) 1703 vsnprintf(p, len + 1, fmt, ap); 1704 1705 return (p); 1706 } 1707 1708 char * 1709 kasprintf(gfp_t gfp, const char *fmt, ...) 
1710 { 1711 va_list ap; 1712 char *p; 1713 1714 va_start(ap, fmt); 1715 p = kvasprintf(gfp, fmt, ap); 1716 va_end(ap); 1717 1718 return (p); 1719 } 1720 1721 static void 1722 linux_timer_callback_wrapper(void *context) 1723 { 1724 struct timer_list *timer; 1725 1726 linux_set_current(curthread); 1727 1728 timer = context; 1729 timer->function(timer->data); 1730 } 1731 1732 void 1733 mod_timer(struct timer_list *timer, int expires) 1734 { 1735 1736 timer->expires = expires; 1737 callout_reset(&timer->callout, 1738 linux_timer_jiffies_until(expires), 1739 &linux_timer_callback_wrapper, timer); 1740 } 1741 1742 void 1743 add_timer(struct timer_list *timer) 1744 { 1745 1746 callout_reset(&timer->callout, 1747 linux_timer_jiffies_until(timer->expires), 1748 &linux_timer_callback_wrapper, timer); 1749 } 1750 1751 void 1752 add_timer_on(struct timer_list *timer, int cpu) 1753 { 1754 1755 callout_reset_on(&timer->callout, 1756 linux_timer_jiffies_until(timer->expires), 1757 &linux_timer_callback_wrapper, timer, cpu); 1758 } 1759 1760 static void 1761 linux_timer_init(void *arg) 1762 { 1763 1764 /* 1765 * Compute an internal HZ value which can divide 2**32 to 1766 * avoid timer rounding problems when the tick value wraps 1767 * around 2**32: 1768 */ 1769 linux_timer_hz_mask = 1; 1770 while (linux_timer_hz_mask < (unsigned long)hz) 1771 linux_timer_hz_mask *= 2; 1772 linux_timer_hz_mask--; 1773 } 1774 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 1775 1776 void 1777 linux_complete_common(struct completion *c, int all) 1778 { 1779 int wakeup_swapper; 1780 1781 sleepq_lock(c); 1782 if (all) { 1783 c->done = UINT_MAX; 1784 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 1785 } else { 1786 if (c->done != UINT_MAX) 1787 c->done++; 1788 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 1789 } 1790 sleepq_release(c); 1791 if (wakeup_swapper) 1792 kick_proc0(); 1793 } 1794 1795 /* 1796 * Indefinite wait for done != 0 with or without signals. 1797 */ 1798 int 1799 linux_wait_for_common(struct completion *c, int flags) 1800 { 1801 struct task_struct *task; 1802 int error; 1803 1804 if (SCHEDULER_STOPPED()) 1805 return (0); 1806 1807 task = current; 1808 1809 if (flags != 0) 1810 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1811 else 1812 flags = SLEEPQ_SLEEP; 1813 error = 0; 1814 for (;;) { 1815 sleepq_lock(c); 1816 if (c->done) 1817 break; 1818 sleepq_add(c, NULL, "completion", flags, 0); 1819 if (flags & SLEEPQ_INTERRUPTIBLE) { 1820 DROP_GIANT(); 1821 error = -sleepq_wait_sig(c, 0); 1822 PICKUP_GIANT(); 1823 if (error != 0) { 1824 linux_schedule_save_interrupt_value(task, error); 1825 error = -ERESTARTSYS; 1826 goto intr; 1827 } 1828 } else { 1829 DROP_GIANT(); 1830 sleepq_wait(c, 0); 1831 PICKUP_GIANT(); 1832 } 1833 } 1834 if (c->done != UINT_MAX) 1835 c->done--; 1836 sleepq_release(c); 1837 1838 intr: 1839 return (error); 1840 } 1841 1842 /* 1843 * Time limited wait for done != 0 with or without signals. 
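 *
 * As implemented below, the return value is the number of jiffies
 * remaining until the deadline on success, 0 if the wait timed out,
 * and -ERESTARTSYS if the wait was interrupted by a signal.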
1844 */ 1845 int 1846 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 1847 { 1848 struct task_struct *task; 1849 int end = jiffies + timeout; 1850 int error; 1851 1852 if (SCHEDULER_STOPPED()) 1853 return (0); 1854 1855 task = current; 1856 1857 if (flags != 0) 1858 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1859 else 1860 flags = SLEEPQ_SLEEP; 1861 1862 for (;;) { 1863 sleepq_lock(c); 1864 if (c->done) 1865 break; 1866 sleepq_add(c, NULL, "completion", flags, 0); 1867 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 1868 1869 DROP_GIANT(); 1870 if (flags & SLEEPQ_INTERRUPTIBLE) 1871 error = -sleepq_timedwait_sig(c, 0); 1872 else 1873 error = -sleepq_timedwait(c, 0); 1874 PICKUP_GIANT(); 1875 1876 if (error != 0) { 1877 /* check for timeout */ 1878 if (error == -EWOULDBLOCK) { 1879 error = 0; /* timeout */ 1880 } else { 1881 /* signal happened */ 1882 linux_schedule_save_interrupt_value(task, error); 1883 error = -ERESTARTSYS; 1884 } 1885 goto done; 1886 } 1887 } 1888 if (c->done != UINT_MAX) 1889 c->done--; 1890 sleepq_release(c); 1891 1892 /* return how many jiffies are left */ 1893 error = linux_timer_jiffies_until(end); 1894 done: 1895 return (error); 1896 } 1897 1898 int 1899 linux_try_wait_for_completion(struct completion *c) 1900 { 1901 int isdone; 1902 1903 sleepq_lock(c); 1904 isdone = (c->done != 0); 1905 if (c->done != 0 && c->done != UINT_MAX) 1906 c->done--; 1907 sleepq_release(c); 1908 return (isdone); 1909 } 1910 1911 int 1912 linux_completion_done(struct completion *c) 1913 { 1914 int isdone; 1915 1916 sleepq_lock(c); 1917 isdone = (c->done != 0); 1918 sleepq_release(c); 1919 return (isdone); 1920 } 1921 1922 static void 1923 linux_cdev_release(struct kobject *kobj) 1924 { 1925 struct linux_cdev *cdev; 1926 struct kobject *parent; 1927 1928 cdev = container_of(kobj, struct linux_cdev, kobj); 1929 parent = kobj->parent; 1930 if (cdev->cdev) 1931 destroy_dev(cdev->cdev); 1932 kfree(cdev); 1933 kobject_put(parent); 1934 } 1935 1936 static void 1937 linux_cdev_static_release(struct kobject *kobj) 1938 { 1939 struct linux_cdev *cdev; 1940 struct kobject *parent; 1941 1942 cdev = container_of(kobj, struct linux_cdev, kobj); 1943 parent = kobj->parent; 1944 if (cdev->cdev) 1945 destroy_dev(cdev->cdev); 1946 kobject_put(parent); 1947 } 1948 1949 const struct kobj_type linux_cdev_ktype = { 1950 .release = linux_cdev_release, 1951 }; 1952 1953 const struct kobj_type linux_cdev_static_ktype = { 1954 .release = linux_cdev_static_release, 1955 }; 1956 1957 static void 1958 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 1959 { 1960 struct notifier_block *nb; 1961 1962 nb = arg; 1963 if (linkstate == LINK_STATE_UP) 1964 nb->notifier_call(nb, NETDEV_UP, ifp); 1965 else 1966 nb->notifier_call(nb, NETDEV_DOWN, ifp); 1967 } 1968 1969 static void 1970 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 1971 { 1972 struct notifier_block *nb; 1973 1974 nb = arg; 1975 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 1976 } 1977 1978 static void 1979 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 1980 { 1981 struct notifier_block *nb; 1982 1983 nb = arg; 1984 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 1985 } 1986 1987 static void 1988 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 1989 { 1990 struct notifier_block *nb; 1991 1992 nb = arg; 1993 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 1994 } 1995 1996 static void 1997 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 1998 { 1999 
struct notifier_block *nb; 2000 2001 nb = arg; 2002 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 2003 } 2004 2005 int 2006 register_netdevice_notifier(struct notifier_block *nb) 2007 { 2008 2009 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2010 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2011 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2012 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2013 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2014 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2015 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2016 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2017 2018 return (0); 2019 } 2020 2021 int 2022 register_inetaddr_notifier(struct notifier_block *nb) 2023 { 2024 2025 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2026 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2027 return (0); 2028 } 2029 2030 int 2031 unregister_netdevice_notifier(struct notifier_block *nb) 2032 { 2033 2034 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2035 nb->tags[NETDEV_UP]); 2036 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2037 nb->tags[NETDEV_REGISTER]); 2038 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2039 nb->tags[NETDEV_UNREGISTER]); 2040 EVENTHANDLER_DEREGISTER(iflladdr_event, 2041 nb->tags[NETDEV_CHANGEADDR]); 2042 2043 return (0); 2044 } 2045 2046 int 2047 unregister_inetaddr_notifier(struct notifier_block *nb) 2048 { 2049 2050 EVENTHANDLER_DEREGISTER(ifaddr_event, 2051 nb->tags[NETDEV_CHANGEIFADDR]); 2052 2053 return (0); 2054 } 2055 2056 struct list_sort_thunk { 2057 int (*cmp)(void *, struct list_head *, struct list_head *); 2058 void *priv; 2059 }; 2060 2061 static inline int 2062 linux_le_cmp(void *priv, const void *d1, const void *d2) 2063 { 2064 struct list_head *le1, *le2; 2065 struct list_sort_thunk *thunk; 2066 2067 thunk = priv; 2068 le1 = *(__DECONST(struct list_head **, d1)); 2069 le2 = *(__DECONST(struct list_head **, d2)); 2070 return ((thunk->cmp)(thunk->priv, le1, le2)); 2071 } 2072 2073 void 2074 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2075 struct list_head *a, struct list_head *b)) 2076 { 2077 struct list_sort_thunk thunk; 2078 struct list_head **ar, *le; 2079 size_t count, i; 2080 2081 count = 0; 2082 list_for_each(le, head) 2083 count++; 2084 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2085 i = 0; 2086 list_for_each(le, head) 2087 ar[i++] = le; 2088 thunk.cmp = cmp; 2089 thunk.priv = priv; 2090 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2091 INIT_LIST_HEAD(head); 2092 for (i = 0; i < count; i++) 2093 list_add_tail(ar[i], head); 2094 free(ar, M_KMALLOC); 2095 } 2096 2097 void 2098 linux_irq_handler(void *ent) 2099 { 2100 struct irq_ent *irqe; 2101 2102 linux_set_current(curthread); 2103 2104 irqe = ent; 2105 irqe->handler(irqe->irq, irqe->arg); 2106 } 2107 2108 #if defined(__i386__) || defined(__amd64__) 2109 int 2110 linux_wbinvd_on_all_cpus(void) 2111 { 2112 2113 pmap_invalidate_cache(); 2114 return (0); 2115 } 2116 #endif 2117 2118 int 2119 linux_on_each_cpu(void callback(void *), void *data) 2120 { 2121 2122 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2123 smp_no_rendezvous_barrier, data); 2124 return (0); 2125 } 2126 2127 int 2128 linux_in_atomic(void) 2129 { 2130 2131 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2132 } 2133 2134 struct linux_cdev * 2135 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2136 { 2137 dev_t dev = MKDEV(major, 
minor); 2138 struct cdev *cdev; 2139 2140 dev_lock(); 2141 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2142 struct linux_cdev *ldev = cdev->si_drv1; 2143 if (ldev->dev == dev && 2144 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2145 break; 2146 } 2147 } 2148 dev_unlock(); 2149 2150 return (cdev != NULL ? cdev->si_drv1 : NULL); 2151 } 2152 2153 int 2154 __register_chrdev(unsigned int major, unsigned int baseminor, 2155 unsigned int count, const char *name, 2156 const struct file_operations *fops) 2157 { 2158 struct linux_cdev *cdev; 2159 int ret = 0; 2160 int i; 2161 2162 for (i = baseminor; i < baseminor + count; i++) { 2163 cdev = cdev_alloc(); 2164 cdev_init(cdev, fops); 2165 kobject_set_name(&cdev->kobj, name); 2166 2167 ret = cdev_add(cdev, makedev(major, i), 1); 2168 if (ret != 0) 2169 break; 2170 } 2171 return (ret); 2172 } 2173 2174 int 2175 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2176 unsigned int count, const char *name, 2177 const struct file_operations *fops, uid_t uid, 2178 gid_t gid, int mode) 2179 { 2180 struct linux_cdev *cdev; 2181 int ret = 0; 2182 int i; 2183 2184 for (i = baseminor; i < baseminor + count; i++) { 2185 cdev = cdev_alloc(); 2186 cdev_init(cdev, fops); 2187 kobject_set_name(&cdev->kobj, name); 2188 2189 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2190 if (ret != 0) 2191 break; 2192 } 2193 return (ret); 2194 } 2195 2196 void 2197 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2198 unsigned int count, const char *name) 2199 { 2200 struct linux_cdev *cdevp; 2201 int i; 2202 2203 for (i = baseminor; i < baseminor + count; i++) { 2204 cdevp = linux_find_cdev(name, major, i); 2205 if (cdevp != NULL) 2206 cdev_del(cdevp); 2207 } 2208 } 2209 2210 #if defined(__i386__) || defined(__amd64__) 2211 bool linux_cpu_has_clflush; 2212 #endif 2213 2214 static void 2215 linux_compat_init(void *arg) 2216 { 2217 struct sysctl_oid *rootoid; 2218 int i; 2219 2220 #if defined(__i386__) || defined(__amd64__) 2221 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2222 #endif 2223 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2224 2225 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2226 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2227 kobject_init(&linux_class_root, &linux_class_ktype); 2228 kobject_set_name(&linux_class_root, "class"); 2229 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2230 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2231 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2232 kobject_set_name(&linux_root_device.kobj, "device"); 2233 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2234 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, 2235 "device"); 2236 linux_root_device.bsddev = root_bus; 2237 linux_class_misc.name = "misc"; 2238 class_register(&linux_class_misc); 2239 INIT_LIST_HEAD(&pci_drivers); 2240 INIT_LIST_HEAD(&pci_devices); 2241 spin_lock_init(&pci_lock); 2242 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2243 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2244 LIST_INIT(&vmmaphead[i]); 2245 } 2246 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2247 2248 static void 2249 linux_compat_uninit(void *arg) 2250 { 2251 linux_kobject_kfree_name(&linux_class_root); 2252 linux_kobject_kfree_name(&linux_root_device.kobj); 2253 linux_kobject_kfree_name(&linux_class_misc.kobj); 2254 2255 mtx_destroy(&vmmaplock); 2256 spin_lock_destroy(&pci_lock); 2257 rw_destroy(&linux_vma_lock); 2258 } 2259 
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2260 2261 /* 2262 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2263 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2264 * used. Assert these types have the same size, else some parts of the 2265 * LinuxKPI may not work as expected: 2266 */ 2267 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2268
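/*
 * Illustrative sketch (not part of the original file; the "foo" names and
 * FOO_MAJOR are hypothetical): a LinuxKPI consumer would typically register
 * a character device on top of the hooks above along these lines:
 *
 *	static const struct file_operations foo_fops = {
 *		.open = foo_open,
 *		.release = foo_release,
 *		.unlocked_ioctl = foo_ioctl,
 *	};
 *
 *	error = __register_chrdev(FOO_MAJOR, 0, 1, "foo", &foo_fops);
 *	...
 *	__unregister_chrdev(FOO_MAJOR, 0, 1, "foo");
 *
 * Opens then arrive via linux_dev_fdopen() and subsequent reads, writes,
 * ioctls, polls and mmaps are dispatched through linuxfileops above.
 */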