1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_stack.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/proc.h> 41 #include <sys/sglist.h> 42 #include <sys/sleepqueue.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/bus.h> 46 #include <sys/fcntl.h> 47 #include <sys/file.h> 48 #include <sys/filio.h> 49 #include <sys/rwlock.h> 50 #include <sys/mman.h> 51 #include <sys/stack.h> 52 #include <sys/user.h> 53 54 #include <vm/vm.h> 55 #include <vm/pmap.h> 56 #include <vm/vm_object.h> 57 #include <vm/vm_page.h> 58 #include <vm/vm_pager.h> 59 60 #include <machine/stdarg.h> 61 62 #if defined(__i386__) || defined(__amd64__) 63 #include <machine/md_var.h> 64 #endif 65 66 #include <linux/kobject.h> 67 #include <linux/device.h> 68 #include <linux/slab.h> 69 #include <linux/module.h> 70 #include <linux/moduleparam.h> 71 #include <linux/cdev.h> 72 #include <linux/file.h> 73 #include <linux/sysfs.h> 74 #include <linux/mm.h> 75 #include <linux/io.h> 76 #include <linux/vmalloc.h> 77 #include <linux/netdevice.h> 78 #include <linux/timer.h> 79 #include <linux/interrupt.h> 80 #include <linux/uaccess.h> 81 #include <linux/list.h> 82 #include <linux/kthread.h> 83 #include <linux/kernel.h> 84 #include <linux/compat.h> 85 #include <linux/poll.h> 86 #include <linux/smp.h> 87 88 #if defined(__i386__) || defined(__amd64__) 89 #include <asm/smp.h> 90 #endif 91 92 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); 93 94 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 95 96 #include <linux/rbtree.h> 97 /* Undo Linux compat changes. 
*/ 98 #undef RB_ROOT 99 #undef file 100 #undef cdev 101 #define RB_ROOT(head) (head)->rbh_root 102 103 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 104 105 struct kobject linux_class_root; 106 struct device linux_root_device; 107 struct class linux_class_misc; 108 struct list_head pci_drivers; 109 struct list_head pci_devices; 110 spinlock_t pci_lock; 111 112 unsigned long linux_timer_hz_mask; 113 114 int 115 panic_cmp(struct rb_node *one, struct rb_node *two) 116 { 117 panic("no cmp"); 118 } 119 120 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 121 122 int 123 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 124 { 125 va_list tmp_va; 126 int len; 127 char *old; 128 char *name; 129 char dummy; 130 131 old = kobj->name; 132 133 if (old && fmt == NULL) 134 return (0); 135 136 /* compute length of string */ 137 va_copy(tmp_va, args); 138 len = vsnprintf(&dummy, 0, fmt, tmp_va); 139 va_end(tmp_va); 140 141 /* account for zero termination */ 142 len++; 143 144 /* check for error */ 145 if (len < 1) 146 return (-EINVAL); 147 148 /* allocate memory for string */ 149 name = kzalloc(len, GFP_KERNEL); 150 if (name == NULL) 151 return (-ENOMEM); 152 vsnprintf(name, len, fmt, args); 153 kobj->name = name; 154 155 /* free old string */ 156 kfree(old); 157 158 /* filter new string */ 159 for (; *name != '\0'; name++) 160 if (*name == '/') 161 *name = '!'; 162 return (0); 163 } 164 165 int 166 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 167 { 168 va_list args; 169 int error; 170 171 va_start(args, fmt); 172 error = kobject_set_name_vargs(kobj, fmt, args); 173 va_end(args); 174 175 return (error); 176 } 177 178 static int 179 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 180 { 181 const struct kobj_type *t; 182 int error; 183 184 kobj->parent = parent; 185 error = sysfs_create_dir(kobj); 186 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 187 struct attribute **attr; 188 t = kobj->ktype; 189 190 for (attr = t->default_attrs; *attr != NULL; attr++) { 191 error = sysfs_create_file(kobj, *attr); 192 if (error) 193 break; 194 } 195 if (error) 196 sysfs_remove_dir(kobj); 197 198 } 199 return (error); 200 } 201 202 int 203 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
204 { 205 va_list args; 206 int error; 207 208 va_start(args, fmt); 209 error = kobject_set_name_vargs(kobj, fmt, args); 210 va_end(args); 211 if (error) 212 return (error); 213 214 return kobject_add_complete(kobj, parent); 215 } 216 217 void 218 linux_kobject_release(struct kref *kref) 219 { 220 struct kobject *kobj; 221 char *name; 222 223 kobj = container_of(kref, struct kobject, kref); 224 sysfs_remove_dir(kobj); 225 name = kobj->name; 226 if (kobj->ktype && kobj->ktype->release) 227 kobj->ktype->release(kobj); 228 kfree(name); 229 } 230 231 static void 232 linux_kobject_kfree(struct kobject *kobj) 233 { 234 kfree(kobj); 235 } 236 237 static void 238 linux_kobject_kfree_name(struct kobject *kobj) 239 { 240 if (kobj) { 241 kfree(kobj->name); 242 } 243 } 244 245 const struct kobj_type linux_kfree_type = { 246 .release = linux_kobject_kfree 247 }; 248 249 static void 250 linux_device_release(struct device *dev) 251 { 252 pr_debug("linux_device_release: %s\n", dev_name(dev)); 253 kfree(dev); 254 } 255 256 static ssize_t 257 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 258 { 259 struct class_attribute *dattr; 260 ssize_t error; 261 262 dattr = container_of(attr, struct class_attribute, attr); 263 error = -EIO; 264 if (dattr->show) 265 error = dattr->show(container_of(kobj, struct class, kobj), 266 dattr, buf); 267 return (error); 268 } 269 270 static ssize_t 271 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 272 size_t count) 273 { 274 struct class_attribute *dattr; 275 ssize_t error; 276 277 dattr = container_of(attr, struct class_attribute, attr); 278 error = -EIO; 279 if (dattr->store) 280 error = dattr->store(container_of(kobj, struct class, kobj), 281 dattr, buf, count); 282 return (error); 283 } 284 285 static void 286 linux_class_release(struct kobject *kobj) 287 { 288 struct class *class; 289 290 class = container_of(kobj, struct class, kobj); 291 if (class->class_release) 292 class->class_release(class); 293 } 294 295 static const struct sysfs_ops linux_class_sysfs = { 296 .show = linux_class_show, 297 .store = linux_class_store, 298 }; 299 300 const struct kobj_type linux_class_ktype = { 301 .release = linux_class_release, 302 .sysfs_ops = &linux_class_sysfs 303 }; 304 305 static void 306 linux_dev_release(struct kobject *kobj) 307 { 308 struct device *dev; 309 310 dev = container_of(kobj, struct device, kobj); 311 /* This is the precedence defined by linux. 
*/ 312 if (dev->release) 313 dev->release(dev); 314 else if (dev->class && dev->class->dev_release) 315 dev->class->dev_release(dev); 316 } 317 318 static ssize_t 319 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 320 { 321 struct device_attribute *dattr; 322 ssize_t error; 323 324 dattr = container_of(attr, struct device_attribute, attr); 325 error = -EIO; 326 if (dattr->show) 327 error = dattr->show(container_of(kobj, struct device, kobj), 328 dattr, buf); 329 return (error); 330 } 331 332 static ssize_t 333 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 334 size_t count) 335 { 336 struct device_attribute *dattr; 337 ssize_t error; 338 339 dattr = container_of(attr, struct device_attribute, attr); 340 error = -EIO; 341 if (dattr->store) 342 error = dattr->store(container_of(kobj, struct device, kobj), 343 dattr, buf, count); 344 return (error); 345 } 346 347 static const struct sysfs_ops linux_dev_sysfs = { 348 .show = linux_dev_show, 349 .store = linux_dev_store, 350 }; 351 352 const struct kobj_type linux_dev_ktype = { 353 .release = linux_dev_release, 354 .sysfs_ops = &linux_dev_sysfs 355 }; 356 357 struct device * 358 device_create(struct class *class, struct device *parent, dev_t devt, 359 void *drvdata, const char *fmt, ...) 360 { 361 struct device *dev; 362 va_list args; 363 364 dev = kzalloc(sizeof(*dev), M_WAITOK); 365 dev->parent = parent; 366 dev->class = class; 367 dev->devt = devt; 368 dev->driver_data = drvdata; 369 dev->release = linux_device_release; 370 va_start(args, fmt); 371 kobject_set_name_vargs(&dev->kobj, fmt, args); 372 va_end(args); 373 device_register(dev); 374 375 return (dev); 376 } 377 378 int 379 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 380 struct kobject *parent, const char *fmt, ...) 
381 { 382 va_list args; 383 int error; 384 385 kobject_init(kobj, ktype); 386 kobj->ktype = ktype; 387 kobj->parent = parent; 388 kobj->name = NULL; 389 390 va_start(args, fmt); 391 error = kobject_set_name_vargs(kobj, fmt, args); 392 va_end(args); 393 if (error) 394 return (error); 395 return kobject_add_complete(kobj, parent); 396 } 397 398 static void 399 linux_kq_lock(void *arg) 400 { 401 spinlock_t *s = arg; 402 403 spin_lock(s); 404 } 405 static void 406 linux_kq_unlock(void *arg) 407 { 408 spinlock_t *s = arg; 409 410 spin_unlock(s); 411 } 412 413 static void 414 linux_kq_lock_owned(void *arg) 415 { 416 #ifdef INVARIANTS 417 spinlock_t *s = arg; 418 419 mtx_assert(&s->m, MA_OWNED); 420 #endif 421 } 422 423 static void 424 linux_kq_lock_unowned(void *arg) 425 { 426 #ifdef INVARIANTS 427 spinlock_t *s = arg; 428 429 mtx_assert(&s->m, MA_NOTOWNED); 430 #endif 431 } 432 433 static void 434 linux_file_kqfilter_poll(struct linux_file *, int); 435 436 struct linux_file * 437 linux_file_alloc(void) 438 { 439 struct linux_file *filp; 440 441 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 442 443 /* set initial refcount */ 444 filp->f_count = 1; 445 446 /* setup fields needed by kqueue support */ 447 spin_lock_init(&filp->f_kqlock); 448 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 449 linux_kq_lock, linux_kq_unlock, 450 linux_kq_lock_owned, linux_kq_lock_unowned); 451 452 return (filp); 453 } 454 455 void 456 linux_file_free(struct linux_file *filp) 457 { 458 if (filp->_file == NULL) { 459 if (filp->f_shmem != NULL) 460 vm_object_deallocate(filp->f_shmem); 461 kfree(filp); 462 } else { 463 /* 464 * The close method of the character device or file 465 * will free the linux_file structure: 466 */ 467 _fdrop(filp->_file, curthread); 468 } 469 } 470 471 static int 472 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 473 vm_page_t *mres) 474 { 475 struct vm_area_struct *vmap; 476 477 vmap = linux_cdev_handle_find(vm_obj->handle); 478 479 MPASS(vmap != NULL); 480 MPASS(vmap->vm_private_data == vm_obj->handle); 481 482 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { 483 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; 484 vm_page_t page; 485 486 if (((*mres)->flags & PG_FICTITIOUS) != 0) { 487 /* 488 * If the passed in result page is a fake 489 * page, update it with the new physical 490 * address. 491 */ 492 page = *mres; 493 vm_page_updatefake(page, paddr, vm_obj->memattr); 494 } else { 495 /* 496 * Replace the passed in "mres" page with our 497 * own fake page and free up the all of the 498 * original pages. 
499 */ 500 VM_OBJECT_WUNLOCK(vm_obj); 501 page = vm_page_getfake(paddr, vm_obj->memattr); 502 VM_OBJECT_WLOCK(vm_obj); 503 504 vm_page_replace_checked(page, vm_obj, 505 (*mres)->pindex, *mres); 506 507 vm_page_lock(*mres); 508 vm_page_free(*mres); 509 vm_page_unlock(*mres); 510 *mres = page; 511 } 512 page->valid = VM_PAGE_BITS_ALL; 513 return (VM_PAGER_OK); 514 } 515 return (VM_PAGER_FAIL); 516 } 517 518 static int 519 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 520 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 521 { 522 struct vm_area_struct *vmap; 523 int err; 524 525 linux_set_current(curthread); 526 527 /* get VM area structure */ 528 vmap = linux_cdev_handle_find(vm_obj->handle); 529 MPASS(vmap != NULL); 530 MPASS(vmap->vm_private_data == vm_obj->handle); 531 532 VM_OBJECT_WUNLOCK(vm_obj); 533 534 down_write(&vmap->vm_mm->mmap_sem); 535 if (unlikely(vmap->vm_ops == NULL)) { 536 err = VM_FAULT_SIGBUS; 537 } else { 538 struct vm_fault vmf; 539 540 /* fill out VM fault structure */ 541 vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); 542 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 543 vmf.pgoff = 0; 544 vmf.page = NULL; 545 vmf.vma = vmap; 546 547 vmap->vm_pfn_count = 0; 548 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 549 vmap->vm_obj = vm_obj; 550 551 err = vmap->vm_ops->fault(vmap, &vmf); 552 553 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 554 kern_yield(PRI_USER); 555 err = vmap->vm_ops->fault(vmap, &vmf); 556 } 557 } 558 559 /* translate return code */ 560 switch (err) { 561 case VM_FAULT_OOM: 562 err = VM_PAGER_AGAIN; 563 break; 564 case VM_FAULT_SIGBUS: 565 err = VM_PAGER_BAD; 566 break; 567 case VM_FAULT_NOPAGE: 568 /* 569 * By contract the fault handler will return having 570 * busied all the pages itself. If pidx is already 571 * found in the object, it will simply xbusy the first 572 * page and return with vm_pfn_count set to 1. 
573 */ 574 *first = vmap->vm_pfn_first; 575 *last = *first + vmap->vm_pfn_count - 1; 576 err = VM_PAGER_OK; 577 break; 578 default: 579 err = VM_PAGER_ERROR; 580 break; 581 } 582 up_write(&vmap->vm_mm->mmap_sem); 583 VM_OBJECT_WLOCK(vm_obj); 584 return (err); 585 } 586 587 static struct rwlock linux_vma_lock; 588 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 589 TAILQ_HEAD_INITIALIZER(linux_vma_head); 590 591 static void 592 linux_cdev_handle_free(struct vm_area_struct *vmap) 593 { 594 /* Drop reference on vm_file */ 595 if (vmap->vm_file != NULL) 596 fput(vmap->vm_file); 597 598 /* Drop reference on mm_struct */ 599 mmput(vmap->vm_mm); 600 601 kfree(vmap); 602 } 603 604 static void 605 linux_cdev_handle_remove(struct vm_area_struct *vmap) 606 { 607 rw_wlock(&linux_vma_lock); 608 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 609 rw_wunlock(&linux_vma_lock); 610 } 611 612 static struct vm_area_struct * 613 linux_cdev_handle_find(void *handle) 614 { 615 struct vm_area_struct *vmap; 616 617 rw_rlock(&linux_vma_lock); 618 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 619 if (vmap->vm_private_data == handle) 620 break; 621 } 622 rw_runlock(&linux_vma_lock); 623 return (vmap); 624 } 625 626 static int 627 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 628 vm_ooffset_t foff, struct ucred *cred, u_short *color) 629 { 630 631 MPASS(linux_cdev_handle_find(handle) != NULL); 632 *color = 0; 633 return (0); 634 } 635 636 static void 637 linux_cdev_pager_dtor(void *handle) 638 { 639 const struct vm_operations_struct *vm_ops; 640 struct vm_area_struct *vmap; 641 642 vmap = linux_cdev_handle_find(handle); 643 MPASS(vmap != NULL); 644 645 /* 646 * Remove handle before calling close operation to prevent 647 * other threads from reusing the handle pointer. 
648 */ 649 linux_cdev_handle_remove(vmap); 650 651 down_write(&vmap->vm_mm->mmap_sem); 652 vm_ops = vmap->vm_ops; 653 if (likely(vm_ops != NULL)) 654 vm_ops->close(vmap); 655 up_write(&vmap->vm_mm->mmap_sem); 656 657 linux_cdev_handle_free(vmap); 658 } 659 660 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 661 { 662 /* OBJT_MGTDEVICE */ 663 .cdev_pg_populate = linux_cdev_pager_populate, 664 .cdev_pg_ctor = linux_cdev_pager_ctor, 665 .cdev_pg_dtor = linux_cdev_pager_dtor 666 }, 667 { 668 /* OBJT_DEVICE */ 669 .cdev_pg_fault = linux_cdev_pager_fault, 670 .cdev_pg_ctor = linux_cdev_pager_ctor, 671 .cdev_pg_dtor = linux_cdev_pager_dtor 672 }, 673 }; 674 675 #define OPW(fp,td,code) ({ \ 676 struct file *__fpop; \ 677 __typeof(code) __retval; \ 678 \ 679 __fpop = (td)->td_fpop; \ 680 (td)->td_fpop = (fp); \ 681 __retval = (code); \ 682 (td)->td_fpop = __fpop; \ 683 __retval; \ 684 }) 685 686 static int 687 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) 688 { 689 struct linux_cdev *ldev; 690 struct linux_file *filp; 691 int error; 692 693 ldev = dev->si_drv1; 694 695 filp = linux_file_alloc(); 696 filp->f_dentry = &filp->f_dentry_store; 697 filp->f_op = ldev->ops; 698 filp->f_mode = file->f_flag; 699 filp->f_flags = file->f_flag; 700 filp->f_vnode = file->f_vnode; 701 filp->_file = file; 702 filp->f_cdev = ldev; 703 704 linux_set_current(td); 705 706 /* get a reference on the Linux character device */ 707 if (atomic_long_add_unless(&ldev->refs, 1, -1L) == 0) { 708 kfree(filp); 709 return (EINVAL); 710 } 711 712 if (filp->f_op->open) { 713 error = -filp->f_op->open(file->f_vnode, filp); 714 if (error) { 715 atomic_long_dec(&ldev->refs); 716 kfree(filp); 717 return (error); 718 } 719 } 720 721 /* hold on to the vnode - used for fstat() */ 722 vhold(filp->f_vnode); 723 724 /* release the file from devfs */ 725 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 726 return (ENXIO); 727 } 728 729 #define LINUX_IOCTL_MIN_PTR 0x10000UL 730 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 731 732 static inline int 733 linux_remap_address(void **uaddr, size_t len) 734 { 735 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 736 737 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 738 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 739 struct task_struct *pts = current; 740 if (pts == NULL) { 741 *uaddr = NULL; 742 return (1); 743 } 744 745 /* compute data offset */ 746 uaddr_val -= LINUX_IOCTL_MIN_PTR; 747 748 /* check that length is within bounds */ 749 if ((len > IOCPARM_MAX) || 750 (uaddr_val + len) > pts->bsd_ioctl_len) { 751 *uaddr = NULL; 752 return (1); 753 } 754 755 /* re-add kernel buffer address */ 756 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 757 758 /* update address location */ 759 *uaddr = (void *)uaddr_val; 760 return (1); 761 } 762 return (0); 763 } 764 765 int 766 linux_copyin(const void *uaddr, void *kaddr, size_t len) 767 { 768 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 769 if (uaddr == NULL) 770 return (-EFAULT); 771 memcpy(kaddr, uaddr, len); 772 return (0); 773 } 774 return (-copyin(uaddr, kaddr, len)); 775 } 776 777 int 778 linux_copyout(const void *kaddr, void *uaddr, size_t len) 779 { 780 if (linux_remap_address(&uaddr, len)) { 781 if (uaddr == NULL) 782 return (-EFAULT); 783 memcpy(uaddr, kaddr, len); 784 return (0); 785 } 786 return (-copyout(kaddr, uaddr, len)); 787 } 788 789 size_t 790 linux_clear_user(void *_uaddr, size_t _len) 791 { 792 uint8_t *uaddr = _uaddr; 793 size_t len = _len; 794 795 /* make sure 
uaddr is aligned before going into the fast loop */ 796 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 797 if (subyte(uaddr, 0)) 798 return (_len); 799 uaddr++; 800 len--; 801 } 802 803 /* zero 8 bytes at a time */ 804 while (len > 7) { 805 #ifdef __LP64__ 806 if (suword64(uaddr, 0)) 807 return (_len); 808 #else 809 if (suword32(uaddr, 0)) 810 return (_len); 811 if (suword32(uaddr + 4, 0)) 812 return (_len); 813 #endif 814 uaddr += 8; 815 len -= 8; 816 } 817 818 /* zero fill end, if any */ 819 while (len > 0) { 820 if (subyte(uaddr, 0)) 821 return (_len); 822 uaddr++; 823 len--; 824 } 825 return (0); 826 } 827 828 int 829 linux_access_ok(int rw, const void *uaddr, size_t len) 830 { 831 uintptr_t saddr; 832 uintptr_t eaddr; 833 834 /* get start and end address */ 835 saddr = (uintptr_t)uaddr; 836 eaddr = (uintptr_t)uaddr + len; 837 838 /* verify addresses are valid for userspace */ 839 return ((saddr == eaddr) || 840 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 841 } 842 843 /* 844 * This function should return either EINTR or ERESTART depending on 845 * the signal type sent to this thread: 846 */ 847 static int 848 linux_get_error(struct task_struct *task, int error) 849 { 850 /* check for signal type interrupt code */ 851 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { 852 error = -linux_schedule_get_interrupt_value(task); 853 if (error == 0) 854 error = EINTR; 855 } 856 return (error); 857 } 858 859 static int 860 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, 861 u_long cmd, caddr_t data, struct thread *td) 862 { 863 struct task_struct *task = current; 864 unsigned size; 865 int error; 866 867 size = IOCPARM_LEN(cmd); 868 /* refer to logic in sys_ioctl() */ 869 if (size > 0) { 870 /* 871 * Setup hint for linux_copyin() and linux_copyout(). 872 * 873 * Background: Linux code expects a user-space address 874 * while FreeBSD supplies a kernel-space address. 875 */ 876 task->bsd_ioctl_data = data; 877 task->bsd_ioctl_len = size; 878 data = (void *)LINUX_IOCTL_MIN_PTR; 879 } else { 880 /* fetch user-space pointer */ 881 data = *(void **)data; 882 } 883 #if defined(__amd64__) 884 if (td->td_proc->p_elf_machine == EM_386) { 885 /* try the compat IOCTL handler first */ 886 if (filp->f_op->compat_ioctl != NULL) 887 error = -OPW(fp, td, filp->f_op->compat_ioctl(filp, cmd, (u_long)data)); 888 else 889 error = ENOTTY; 890 891 /* fallback to the regular IOCTL handler, if any */ 892 if (error == ENOTTY && filp->f_op->unlocked_ioctl != NULL) 893 error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data)); 894 } else 895 #endif 896 if (filp->f_op->unlocked_ioctl != NULL) 897 error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data)); 898 else 899 error = ENOTTY; 900 if (size > 0) { 901 task->bsd_ioctl_data = NULL; 902 task->bsd_ioctl_len = 0; 903 } 904 905 if (error == EWOULDBLOCK) { 906 /* update kqfilter status, if any */ 907 linux_file_kqfilter_poll(filp, 908 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 909 } else { 910 error = linux_get_error(task, error); 911 } 912 return (error); 913 } 914 915 #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) 916 917 /* 918 * This function atomically updates the poll wakeup state and returns 919 * the previous state at the time of update. 
920 */ 921 static uint8_t 922 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 923 { 924 int c, old; 925 926 c = v->counter; 927 928 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 929 c = old; 930 931 return (c); 932 } 933 934 935 static int 936 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 937 { 938 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 939 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 940 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 941 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 942 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 943 }; 944 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 945 946 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 947 case LINUX_FWQ_STATE_QUEUED: 948 linux_poll_wakeup(filp); 949 return (1); 950 default: 951 return (0); 952 } 953 } 954 955 void 956 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 957 { 958 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 959 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 960 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 961 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 962 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 963 }; 964 965 /* check if we are called inside the select system call */ 966 if (p == LINUX_POLL_TABLE_NORMAL) 967 selrecord(curthread, &filp->f_selinfo); 968 969 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 970 case LINUX_FWQ_STATE_INIT: 971 /* NOTE: file handles can only belong to one wait-queue */ 972 filp->f_wait_queue.wqh = wqh; 973 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 974 add_wait_queue(wqh, &filp->f_wait_queue.wq); 975 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 976 break; 977 default: 978 break; 979 } 980 } 981 982 static void 983 linux_poll_wait_dequeue(struct linux_file *filp) 984 { 985 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 986 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 987 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 988 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 989 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 990 }; 991 992 seldrain(&filp->f_selinfo); 993 994 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 995 case LINUX_FWQ_STATE_NOT_READY: 996 case LINUX_FWQ_STATE_QUEUED: 997 case LINUX_FWQ_STATE_READY: 998 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 999 break; 1000 default: 1001 break; 1002 } 1003 } 1004 1005 void 1006 linux_poll_wakeup(struct linux_file *filp) 1007 { 1008 /* this function should be NULL-safe */ 1009 if (filp == NULL) 1010 return; 1011 1012 selwakeup(&filp->f_selinfo); 1013 1014 spin_lock(&filp->f_kqlock); 1015 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 1016 LINUX_KQ_FLAG_NEED_WRITE; 1017 1018 /* make sure the "knote" gets woken up */ 1019 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 1020 spin_unlock(&filp->f_kqlock); 1021 } 1022 1023 static void 1024 linux_file_kqfilter_detach(struct knote *kn) 1025 { 1026 struct linux_file *filp = kn->kn_hook; 1027 1028 spin_lock(&filp->f_kqlock); 1029 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1030 spin_unlock(&filp->f_kqlock); 1031 } 1032 1033 static int 1034 linux_file_kqfilter_read_event(struct knote *kn, long hint) 1035 { 1036 struct linux_file *filp = kn->kn_hook; 1037 1038 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1039 1040 return 
((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1041 } 1042 1043 static int 1044 linux_file_kqfilter_write_event(struct knote *kn, long hint) 1045 { 1046 struct linux_file *filp = kn->kn_hook; 1047 1048 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1049 1050 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1051 } 1052 1053 static struct filterops linux_dev_kqfiltops_read = { 1054 .f_isfd = 1, 1055 .f_detach = linux_file_kqfilter_detach, 1056 .f_event = linux_file_kqfilter_read_event, 1057 }; 1058 1059 static struct filterops linux_dev_kqfiltops_write = { 1060 .f_isfd = 1, 1061 .f_detach = linux_file_kqfilter_detach, 1062 .f_event = linux_file_kqfilter_write_event, 1063 }; 1064 1065 static void 1066 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) 1067 { 1068 int temp; 1069 1070 if (filp->f_kqflags & kqflags) { 1071 struct thread *td = curthread; 1072 1073 /* get the latest polling state */ 1074 temp = OPW(filp->_file, td, filp->f_op->poll(filp, NULL)); 1075 1076 spin_lock(&filp->f_kqlock); 1077 /* clear kqflags */ 1078 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1079 LINUX_KQ_FLAG_NEED_WRITE); 1080 /* update kqflags */ 1081 if (temp & (POLLIN | POLLOUT)) { 1082 if (temp & POLLIN) 1083 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1084 if (temp & POLLOUT) 1085 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1086 1087 /* make sure the "knote" gets woken up */ 1088 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1089 } 1090 spin_unlock(&filp->f_kqlock); 1091 } 1092 } 1093 1094 static int 1095 linux_file_kqfilter(struct file *file, struct knote *kn) 1096 { 1097 struct linux_file *filp; 1098 struct thread *td; 1099 int error; 1100 1101 td = curthread; 1102 filp = (struct linux_file *)file->f_data; 1103 filp->f_flags = file->f_flag; 1104 if (filp->f_op->poll == NULL) 1105 return (EINVAL); 1106 1107 spin_lock(&filp->f_kqlock); 1108 switch (kn->kn_filter) { 1109 case EVFILT_READ: 1110 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1111 kn->kn_fop = &linux_dev_kqfiltops_read; 1112 kn->kn_hook = filp; 1113 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1114 error = 0; 1115 break; 1116 case EVFILT_WRITE: 1117 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1118 kn->kn_fop = &linux_dev_kqfiltops_write; 1119 kn->kn_hook = filp; 1120 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1121 error = 0; 1122 break; 1123 default: 1124 error = EINVAL; 1125 break; 1126 } 1127 spin_unlock(&filp->f_kqlock); 1128 1129 if (error == 0) { 1130 linux_set_current(td); 1131 1132 /* update kqfilter status, if any */ 1133 linux_file_kqfilter_poll(filp, 1134 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1135 } 1136 return (error); 1137 } 1138 1139 static int 1140 linux_file_mmap_single(struct file *fp, vm_ooffset_t *offset, 1141 vm_size_t size, struct vm_object **object, int nprot, 1142 struct thread *td) 1143 { 1144 struct task_struct *task; 1145 struct vm_area_struct *vmap; 1146 struct mm_struct *mm; 1147 struct linux_file *filp; 1148 vm_memattr_t attr; 1149 int error; 1150 1151 filp = (struct linux_file *)fp->f_data; 1152 filp->f_flags = fp->f_flag; 1153 1154 if (filp->f_op->mmap == NULL) 1155 return (EOPNOTSUPP); 1156 1157 linux_set_current(td); 1158 1159 /* 1160 * The same VM object might be shared by multiple processes 1161 * and the mm_struct is usually freed when a process exits. 1162 * 1163 * The atomic reference below makes sure the mm_struct is 1164 * available as long as the vmap is in the linux_vma_head. 
1165 */ 1166 task = current; 1167 mm = task->mm; 1168 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1169 return (EINVAL); 1170 1171 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1172 vmap->vm_start = 0; 1173 vmap->vm_end = size; 1174 vmap->vm_pgoff = *offset / PAGE_SIZE; 1175 vmap->vm_pfn = 0; 1176 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1177 vmap->vm_ops = NULL; 1178 vmap->vm_file = get_file(filp); 1179 vmap->vm_mm = mm; 1180 1181 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1182 error = linux_get_error(task, EINTR); 1183 } else { 1184 error = -OPW(fp, td, filp->f_op->mmap(filp, vmap)); 1185 error = linux_get_error(task, error); 1186 up_write(&vmap->vm_mm->mmap_sem); 1187 } 1188 1189 if (error != 0) { 1190 linux_cdev_handle_free(vmap); 1191 return (error); 1192 } 1193 1194 attr = pgprot2cachemode(vmap->vm_page_prot); 1195 1196 if (vmap->vm_ops != NULL) { 1197 struct vm_area_struct *ptr; 1198 void *vm_private_data; 1199 bool vm_no_fault; 1200 1201 if (vmap->vm_ops->open == NULL || 1202 vmap->vm_ops->close == NULL || 1203 vmap->vm_private_data == NULL) { 1204 /* free allocated VM area struct */ 1205 linux_cdev_handle_free(vmap); 1206 return (EINVAL); 1207 } 1208 1209 vm_private_data = vmap->vm_private_data; 1210 1211 rw_wlock(&linux_vma_lock); 1212 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1213 if (ptr->vm_private_data == vm_private_data) 1214 break; 1215 } 1216 /* check if there is an existing VM area struct */ 1217 if (ptr != NULL) { 1218 /* check if the VM area structure is invalid */ 1219 if (ptr->vm_ops == NULL || 1220 ptr->vm_ops->open == NULL || 1221 ptr->vm_ops->close == NULL) { 1222 error = ESTALE; 1223 vm_no_fault = 1; 1224 } else { 1225 error = EEXIST; 1226 vm_no_fault = (ptr->vm_ops->fault == NULL); 1227 } 1228 } else { 1229 /* insert VM area structure into list */ 1230 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1231 error = 0; 1232 vm_no_fault = (vmap->vm_ops->fault == NULL); 1233 } 1234 rw_wunlock(&linux_vma_lock); 1235 1236 if (error != 0) { 1237 /* free allocated VM area struct */ 1238 linux_cdev_handle_free(vmap); 1239 /* check for stale VM area struct */ 1240 if (error != EEXIST) 1241 return (error); 1242 } 1243 1244 /* check if there is no fault handler */ 1245 if (vm_no_fault) { 1246 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1247 &linux_cdev_pager_ops[1], size, nprot, *offset, 1248 td->td_ucred); 1249 } else { 1250 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1251 &linux_cdev_pager_ops[0], size, nprot, *offset, 1252 td->td_ucred); 1253 } 1254 1255 /* check if allocating the VM object failed */ 1256 if (*object == NULL) { 1257 if (error == 0) { 1258 /* remove VM area struct from list */ 1259 linux_cdev_handle_remove(vmap); 1260 /* free allocated VM area struct */ 1261 linux_cdev_handle_free(vmap); 1262 } 1263 return (EINVAL); 1264 } 1265 } else { 1266 struct sglist *sg; 1267 1268 sg = sglist_alloc(1, M_WAITOK); 1269 sglist_append_phys(sg, 1270 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1271 1272 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1273 nprot, 0, td->td_ucred); 1274 1275 linux_cdev_handle_free(vmap); 1276 1277 if (*object == NULL) { 1278 sglist_free(sg); 1279 return (EINVAL); 1280 } 1281 } 1282 1283 if (attr != VM_MEMATTR_DEFAULT) { 1284 VM_OBJECT_WLOCK(*object); 1285 vm_object_set_memattr(*object, attr); 1286 VM_OBJECT_WUNLOCK(*object); 1287 } 1288 *offset = 0; 1289 return (0); 1290 } 1291 1292 struct cdevsw linuxcdevsw = { 1293 .d_version = D_VERSION, 1294 
.d_fdopen = linux_dev_fdopen, 1295 .d_name = "lkpidev", 1296 }; 1297 1298 static int 1299 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1300 int flags, struct thread *td) 1301 { 1302 struct linux_file *filp; 1303 ssize_t bytes; 1304 int error; 1305 1306 error = 0; 1307 filp = (struct linux_file *)file->f_data; 1308 filp->f_flags = file->f_flag; 1309 /* XXX no support for I/O vectors currently */ 1310 if (uio->uio_iovcnt != 1) 1311 return (EOPNOTSUPP); 1312 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1313 return (EINVAL); 1314 linux_set_current(td); 1315 if (filp->f_op->read) { 1316 bytes = OPW(file, td, filp->f_op->read(filp, uio->uio_iov->iov_base, 1317 uio->uio_iov->iov_len, &uio->uio_offset)); 1318 if (bytes >= 0) { 1319 uio->uio_iov->iov_base = 1320 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1321 uio->uio_iov->iov_len -= bytes; 1322 uio->uio_resid -= bytes; 1323 } else { 1324 error = linux_get_error(current, -bytes); 1325 } 1326 } else 1327 error = ENXIO; 1328 1329 /* update kqfilter status, if any */ 1330 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1331 1332 return (error); 1333 } 1334 1335 static int 1336 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1337 int flags, struct thread *td) 1338 { 1339 struct linux_file *filp; 1340 ssize_t bytes; 1341 int error; 1342 1343 error = 0; 1344 filp = (struct linux_file *)file->f_data; 1345 filp->f_flags = file->f_flag; 1346 /* XXX no support for I/O vectors currently */ 1347 if (uio->uio_iovcnt != 1) 1348 return (EOPNOTSUPP); 1349 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1350 return (EINVAL); 1351 linux_set_current(td); 1352 if (filp->f_op->write) { 1353 bytes = OPW(file, td, filp->f_op->write(filp, uio->uio_iov->iov_base, 1354 uio->uio_iov->iov_len, &uio->uio_offset)); 1355 if (bytes >= 0) { 1356 uio->uio_iov->iov_base = 1357 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1358 uio->uio_iov->iov_len -= bytes; 1359 uio->uio_resid -= bytes; 1360 } else { 1361 error = linux_get_error(current, -bytes); 1362 } 1363 } else 1364 error = ENXIO; 1365 1366 /* update kqfilter status, if any */ 1367 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1368 1369 return (error); 1370 } 1371 1372 static int 1373 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1374 struct thread *td) 1375 { 1376 struct linux_file *filp; 1377 int revents; 1378 1379 filp = (struct linux_file *)file->f_data; 1380 filp->f_flags = file->f_flag; 1381 linux_set_current(td); 1382 if (filp->f_op->poll != NULL) 1383 revents = OPW(file, td, filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; 1384 else 1385 revents = 0; 1386 1387 return (revents); 1388 } 1389 1390 static int 1391 linux_file_close(struct file *file, struct thread *td) 1392 { 1393 struct linux_file *filp; 1394 int error; 1395 1396 filp = (struct linux_file *)file->f_data; 1397 1398 KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); 1399 1400 filp->f_flags = file->f_flag; 1401 linux_set_current(td); 1402 linux_poll_wait_dequeue(filp); 1403 error = -OPW(file, td, filp->f_op->release(filp->f_vnode, filp)); 1404 funsetown(&filp->f_sigio); 1405 if (filp->f_vnode != NULL) 1406 vdrop(filp->f_vnode); 1407 if (filp->f_cdev != NULL) { 1408 /* put a reference on the Linux character device */ 1409 atomic_long_dec(&filp->f_cdev->refs); 1410 } 1411 kfree(filp); 1412 1413 return (error); 1414 } 1415 1416 static int 1417 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 
1418 struct thread *td) 1419 { 1420 struct linux_file *filp; 1421 int error; 1422 1423 filp = (struct linux_file *)fp->f_data; 1424 filp->f_flags = fp->f_flag; 1425 error = 0; 1426 1427 linux_set_current(td); 1428 switch (cmd) { 1429 case FIONBIO: 1430 break; 1431 case FIOASYNC: 1432 if (filp->f_op->fasync == NULL) 1433 break; 1434 error = -OPW(fp, td, filp->f_op->fasync(0, filp, fp->f_flag & FASYNC)); 1435 break; 1436 case FIOSETOWN: 1437 error = fsetown(*(int *)data, &filp->f_sigio); 1438 if (error == 0) { 1439 if (filp->f_op->fasync == NULL) 1440 break; 1441 error = -OPW(fp, td, filp->f_op->fasync(0, filp, 1442 fp->f_flag & FASYNC)); 1443 } 1444 break; 1445 case FIOGETOWN: 1446 *(int *)data = fgetown(&filp->f_sigio); 1447 break; 1448 default: 1449 error = linux_file_ioctl_sub(fp, filp, cmd, data, td); 1450 break; 1451 } 1452 return (error); 1453 } 1454 1455 static int 1456 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1457 vm_prot_t *maxprotp, int *flagsp, struct file *fp, 1458 vm_ooffset_t *foff, vm_object_t *objp) 1459 { 1460 /* 1461 * Character devices do not provide private mappings 1462 * of any kind: 1463 */ 1464 if ((*maxprotp & VM_PROT_WRITE) == 0 && 1465 (prot & VM_PROT_WRITE) != 0) 1466 return (EACCES); 1467 if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) 1468 return (EINVAL); 1469 1470 return (linux_file_mmap_single(fp, foff, objsize, objp, (int)prot, td)); 1471 } 1472 1473 static int 1474 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 1475 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 1476 struct thread *td) 1477 { 1478 struct linux_file *filp; 1479 struct mount *mp; 1480 struct vnode *vp; 1481 vm_object_t object; 1482 vm_prot_t maxprot; 1483 int error; 1484 1485 filp = (struct linux_file *)fp->f_data; 1486 1487 vp = filp->f_vnode; 1488 if (vp == NULL) 1489 return (EOPNOTSUPP); 1490 1491 /* 1492 * Ensure that file and memory protections are 1493 * compatible. 1494 */ 1495 mp = vp->v_mount; 1496 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 1497 maxprot = VM_PROT_NONE; 1498 if ((prot & VM_PROT_EXECUTE) != 0) 1499 return (EACCES); 1500 } else 1501 maxprot = VM_PROT_EXECUTE; 1502 if ((fp->f_flag & FREAD) != 0) 1503 maxprot |= VM_PROT_READ; 1504 else if ((prot & VM_PROT_READ) != 0) 1505 return (EACCES); 1506 1507 /* 1508 * If we are sharing potential changes via MAP_SHARED and we 1509 * are trying to get write permission although we opened it 1510 * without asking for it, bail out. 1511 * 1512 * Note that most character devices always share mappings. 1513 * 1514 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE 1515 * requests rather than doing it here. 
1516 */ 1517 if ((flags & MAP_SHARED) != 0) { 1518 if ((fp->f_flag & FWRITE) != 0) 1519 maxprot |= VM_PROT_WRITE; 1520 else if ((prot & VM_PROT_WRITE) != 0) 1521 return (EACCES); 1522 } 1523 maxprot &= cap_maxprot; 1524 1525 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, &foff, 1526 &object); 1527 if (error != 0) 1528 return (error); 1529 1530 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1531 foff, FALSE, td); 1532 if (error != 0) 1533 vm_object_deallocate(object); 1534 return (error); 1535 } 1536 1537 static int 1538 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1539 struct thread *td) 1540 { 1541 struct linux_file *filp; 1542 struct vnode *vp; 1543 int error; 1544 1545 filp = (struct linux_file *)fp->f_data; 1546 if (filp->f_vnode == NULL) 1547 return (EOPNOTSUPP); 1548 1549 vp = filp->f_vnode; 1550 1551 vn_lock(vp, LK_SHARED | LK_RETRY); 1552 error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); 1553 VOP_UNLOCK(vp, 0); 1554 1555 return (error); 1556 } 1557 1558 static int 1559 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1560 struct filedesc *fdp) 1561 { 1562 struct linux_file *filp; 1563 struct vnode *vp; 1564 int error; 1565 1566 filp = fp->f_data; 1567 vp = filp->f_vnode; 1568 if (vp == NULL) { 1569 error = 0; 1570 kif->kf_type = KF_TYPE_DEV; 1571 } else { 1572 vref(vp); 1573 FILEDESC_SUNLOCK(fdp); 1574 error = vn_fill_kinfo_vnode(vp, kif); 1575 vrele(vp); 1576 kif->kf_type = KF_TYPE_VNODE; 1577 FILEDESC_SLOCK(fdp); 1578 } 1579 return (error); 1580 } 1581 1582 unsigned int 1583 linux_iminor(struct inode *inode) 1584 { 1585 struct linux_cdev *ldev; 1586 1587 if (inode == NULL || inode->v_rdev == NULL || 1588 inode->v_rdev->si_devsw != &linuxcdevsw) 1589 return (-1U); 1590 ldev = inode->v_rdev->si_drv1; 1591 if (ldev == NULL) 1592 return (-1U); 1593 1594 return (minor(ldev->dev)); 1595 } 1596 1597 struct fileops linuxfileops = { 1598 .fo_read = linux_file_read, 1599 .fo_write = linux_file_write, 1600 .fo_truncate = invfo_truncate, 1601 .fo_kqfilter = linux_file_kqfilter, 1602 .fo_stat = linux_file_stat, 1603 .fo_fill_kinfo = linux_file_fill_kinfo, 1604 .fo_poll = linux_file_poll, 1605 .fo_close = linux_file_close, 1606 .fo_ioctl = linux_file_ioctl, 1607 .fo_mmap = linux_file_mmap, 1608 .fo_chmod = invfo_chmod, 1609 .fo_chown = invfo_chown, 1610 .fo_sendfile = invfo_sendfile, 1611 .fo_flags = DFLAG_PASSABLE, 1612 }; 1613 1614 /* 1615 * Hash of vmmap addresses. This is infrequently accessed and does not 1616 * need to be particularly large. This is done because we must store the 1617 * caller's idea of the map size to properly unmap. 
1618 */ 1619 struct vmmap { 1620 LIST_ENTRY(vmmap) vm_next; 1621 void *vm_addr; 1622 unsigned long vm_size; 1623 }; 1624 1625 struct vmmaphd { 1626 struct vmmap *lh_first; 1627 }; 1628 #define VMMAP_HASH_SIZE 64 1629 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1630 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1631 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1632 static struct mtx vmmaplock; 1633 1634 static void 1635 vmmap_add(void *addr, unsigned long size) 1636 { 1637 struct vmmap *vmmap; 1638 1639 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1640 mtx_lock(&vmmaplock); 1641 vmmap->vm_size = size; 1642 vmmap->vm_addr = addr; 1643 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1644 mtx_unlock(&vmmaplock); 1645 } 1646 1647 static struct vmmap * 1648 vmmap_remove(void *addr) 1649 { 1650 struct vmmap *vmmap; 1651 1652 mtx_lock(&vmmaplock); 1653 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1654 if (vmmap->vm_addr == addr) 1655 break; 1656 if (vmmap) 1657 LIST_REMOVE(vmmap, vm_next); 1658 mtx_unlock(&vmmaplock); 1659 1660 return (vmmap); 1661 } 1662 1663 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1664 void * 1665 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1666 { 1667 void *addr; 1668 1669 addr = pmap_mapdev_attr(phys_addr, size, attr); 1670 if (addr == NULL) 1671 return (NULL); 1672 vmmap_add(addr, size); 1673 1674 return (addr); 1675 } 1676 #endif 1677 1678 void 1679 iounmap(void *addr) 1680 { 1681 struct vmmap *vmmap; 1682 1683 vmmap = vmmap_remove(addr); 1684 if (vmmap == NULL) 1685 return; 1686 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1687 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1688 #endif 1689 kfree(vmmap); 1690 } 1691 1692 1693 void * 1694 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1695 { 1696 vm_offset_t off; 1697 size_t size; 1698 1699 size = count * PAGE_SIZE; 1700 off = kva_alloc(size); 1701 if (off == 0) 1702 return (NULL); 1703 vmmap_add((void *)off, size); 1704 pmap_qenter(off, pages, count); 1705 1706 return ((void *)off); 1707 } 1708 1709 void 1710 vunmap(void *addr) 1711 { 1712 struct vmmap *vmmap; 1713 1714 vmmap = vmmap_remove(addr); 1715 if (vmmap == NULL) 1716 return; 1717 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1718 kva_free((vm_offset_t)addr, vmmap->vm_size); 1719 kfree(vmmap); 1720 } 1721 1722 char * 1723 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1724 { 1725 unsigned int len; 1726 char *p; 1727 va_list aq; 1728 1729 va_copy(aq, ap); 1730 len = vsnprintf(NULL, 0, fmt, aq); 1731 va_end(aq); 1732 1733 p = kmalloc(len + 1, gfp); 1734 if (p != NULL) 1735 vsnprintf(p, len + 1, fmt, ap); 1736 1737 return (p); 1738 } 1739 1740 char * 1741 kasprintf(gfp_t gfp, const char *fmt, ...) 
1742 { 1743 va_list ap; 1744 char *p; 1745 1746 va_start(ap, fmt); 1747 p = kvasprintf(gfp, fmt, ap); 1748 va_end(ap); 1749 1750 return (p); 1751 } 1752 1753 static void 1754 linux_timer_callback_wrapper(void *context) 1755 { 1756 struct timer_list *timer; 1757 1758 linux_set_current(curthread); 1759 1760 timer = context; 1761 timer->function(timer->data); 1762 } 1763 1764 void 1765 mod_timer(struct timer_list *timer, int expires) 1766 { 1767 1768 timer->expires = expires; 1769 callout_reset(&timer->callout, 1770 linux_timer_jiffies_until(expires), 1771 &linux_timer_callback_wrapper, timer); 1772 } 1773 1774 void 1775 add_timer(struct timer_list *timer) 1776 { 1777 1778 callout_reset(&timer->callout, 1779 linux_timer_jiffies_until(timer->expires), 1780 &linux_timer_callback_wrapper, timer); 1781 } 1782 1783 void 1784 add_timer_on(struct timer_list *timer, int cpu) 1785 { 1786 1787 callout_reset_on(&timer->callout, 1788 linux_timer_jiffies_until(timer->expires), 1789 &linux_timer_callback_wrapper, timer, cpu); 1790 } 1791 1792 static void 1793 linux_timer_init(void *arg) 1794 { 1795 1796 /* 1797 * Compute an internal HZ value which can divide 2**32 to 1798 * avoid timer rounding problems when the tick value wraps 1799 * around 2**32: 1800 */ 1801 linux_timer_hz_mask = 1; 1802 while (linux_timer_hz_mask < (unsigned long)hz) 1803 linux_timer_hz_mask *= 2; 1804 linux_timer_hz_mask--; 1805 } 1806 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 1807 1808 void 1809 linux_complete_common(struct completion *c, int all) 1810 { 1811 int wakeup_swapper; 1812 1813 sleepq_lock(c); 1814 if (all) { 1815 c->done = UINT_MAX; 1816 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 1817 } else { 1818 if (c->done != UINT_MAX) 1819 c->done++; 1820 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 1821 } 1822 sleepq_release(c); 1823 if (wakeup_swapper) 1824 kick_proc0(); 1825 } 1826 1827 /* 1828 * Indefinite wait for done != 0 with or without signals. 1829 */ 1830 int 1831 linux_wait_for_common(struct completion *c, int flags) 1832 { 1833 struct task_struct *task; 1834 int error; 1835 1836 if (SCHEDULER_STOPPED()) 1837 return (0); 1838 1839 task = current; 1840 1841 if (flags != 0) 1842 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1843 else 1844 flags = SLEEPQ_SLEEP; 1845 error = 0; 1846 for (;;) { 1847 sleepq_lock(c); 1848 if (c->done) 1849 break; 1850 sleepq_add(c, NULL, "completion", flags, 0); 1851 if (flags & SLEEPQ_INTERRUPTIBLE) { 1852 DROP_GIANT(); 1853 error = -sleepq_wait_sig(c, 0); 1854 PICKUP_GIANT(); 1855 if (error != 0) { 1856 linux_schedule_save_interrupt_value(task, error); 1857 error = -ERESTARTSYS; 1858 goto intr; 1859 } 1860 } else { 1861 DROP_GIANT(); 1862 sleepq_wait(c, 0); 1863 PICKUP_GIANT(); 1864 } 1865 } 1866 if (c->done != UINT_MAX) 1867 c->done--; 1868 sleepq_release(c); 1869 1870 intr: 1871 return (error); 1872 } 1873 1874 /* 1875 * Time limited wait for done != 0 with or without signals. 
1876 */ 1877 int 1878 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 1879 { 1880 struct task_struct *task; 1881 int end = jiffies + timeout; 1882 int error; 1883 1884 if (SCHEDULER_STOPPED()) 1885 return (0); 1886 1887 task = current; 1888 1889 if (flags != 0) 1890 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1891 else 1892 flags = SLEEPQ_SLEEP; 1893 1894 for (;;) { 1895 sleepq_lock(c); 1896 if (c->done) 1897 break; 1898 sleepq_add(c, NULL, "completion", flags, 0); 1899 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 1900 1901 DROP_GIANT(); 1902 if (flags & SLEEPQ_INTERRUPTIBLE) 1903 error = -sleepq_timedwait_sig(c, 0); 1904 else 1905 error = -sleepq_timedwait(c, 0); 1906 PICKUP_GIANT(); 1907 1908 if (error != 0) { 1909 /* check for timeout */ 1910 if (error == -EWOULDBLOCK) { 1911 error = 0; /* timeout */ 1912 } else { 1913 /* signal happened */ 1914 linux_schedule_save_interrupt_value(task, error); 1915 error = -ERESTARTSYS; 1916 } 1917 goto done; 1918 } 1919 } 1920 if (c->done != UINT_MAX) 1921 c->done--; 1922 sleepq_release(c); 1923 1924 /* return how many jiffies are left */ 1925 error = linux_timer_jiffies_until(end); 1926 done: 1927 return (error); 1928 } 1929 1930 int 1931 linux_try_wait_for_completion(struct completion *c) 1932 { 1933 int isdone; 1934 1935 sleepq_lock(c); 1936 isdone = (c->done != 0); 1937 if (c->done != 0 && c->done != UINT_MAX) 1938 c->done--; 1939 sleepq_release(c); 1940 return (isdone); 1941 } 1942 1943 int 1944 linux_completion_done(struct completion *c) 1945 { 1946 int isdone; 1947 1948 sleepq_lock(c); 1949 isdone = (c->done != 0); 1950 sleepq_release(c); 1951 return (isdone); 1952 } 1953 1954 static void 1955 linux_cdev_release(struct kobject *kobj) 1956 { 1957 struct linux_cdev *cdev; 1958 struct kobject *parent; 1959 1960 cdev = container_of(kobj, struct linux_cdev, kobj); 1961 parent = kobj->parent; 1962 linux_destroy_dev(cdev); 1963 kfree(cdev); 1964 kobject_put(parent); 1965 } 1966 1967 static void 1968 linux_cdev_static_release(struct kobject *kobj) 1969 { 1970 struct linux_cdev *cdev; 1971 struct kobject *parent; 1972 1973 cdev = container_of(kobj, struct linux_cdev, kobj); 1974 parent = kobj->parent; 1975 linux_destroy_dev(cdev); 1976 kobject_put(parent); 1977 } 1978 1979 void 1980 linux_destroy_dev(struct linux_cdev *cdev) 1981 { 1982 1983 if (cdev->cdev == NULL) 1984 return; 1985 1986 atomic_long_dec(&cdev->refs); 1987 1988 /* wait for all open files to be closed */ 1989 while (atomic_long_read(&cdev->refs) != -1L) 1990 pause("ldevdrn", hz); 1991 1992 destroy_dev(cdev->cdev); 1993 cdev->cdev = NULL; 1994 } 1995 1996 const struct kobj_type linux_cdev_ktype = { 1997 .release = linux_cdev_release, 1998 }; 1999 2000 const struct kobj_type linux_cdev_static_ktype = { 2001 .release = linux_cdev_static_release, 2002 }; 2003 2004 static void 2005 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 2006 { 2007 struct notifier_block *nb; 2008 2009 nb = arg; 2010 if (linkstate == LINK_STATE_UP) 2011 nb->notifier_call(nb, NETDEV_UP, ifp); 2012 else 2013 nb->notifier_call(nb, NETDEV_DOWN, ifp); 2014 } 2015 2016 static void 2017 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 2018 { 2019 struct notifier_block *nb; 2020 2021 nb = arg; 2022 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 2023 } 2024 2025 static void 2026 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 2027 { 2028 struct notifier_block *nb; 2029 2030 nb = arg; 2031 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 
2032 } 2033 2034 static void 2035 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 2036 { 2037 struct notifier_block *nb; 2038 2039 nb = arg; 2040 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 2041 } 2042 2043 static void 2044 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 2045 { 2046 struct notifier_block *nb; 2047 2048 nb = arg; 2049 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 2050 } 2051 2052 int 2053 register_netdevice_notifier(struct notifier_block *nb) 2054 { 2055 2056 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 2057 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 2058 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 2059 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 2060 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 2061 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 2062 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 2063 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 2064 2065 return (0); 2066 } 2067 2068 int 2069 register_inetaddr_notifier(struct notifier_block *nb) 2070 { 2071 2072 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2073 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2074 return (0); 2075 } 2076 2077 int 2078 unregister_netdevice_notifier(struct notifier_block *nb) 2079 { 2080 2081 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2082 nb->tags[NETDEV_UP]); 2083 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2084 nb->tags[NETDEV_REGISTER]); 2085 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2086 nb->tags[NETDEV_UNREGISTER]); 2087 EVENTHANDLER_DEREGISTER(iflladdr_event, 2088 nb->tags[NETDEV_CHANGEADDR]); 2089 2090 return (0); 2091 } 2092 2093 int 2094 unregister_inetaddr_notifier(struct notifier_block *nb) 2095 { 2096 2097 EVENTHANDLER_DEREGISTER(ifaddr_event, 2098 nb->tags[NETDEV_CHANGEIFADDR]); 2099 2100 return (0); 2101 } 2102 2103 struct list_sort_thunk { 2104 int (*cmp)(void *, struct list_head *, struct list_head *); 2105 void *priv; 2106 }; 2107 2108 static inline int 2109 linux_le_cmp(void *priv, const void *d1, const void *d2) 2110 { 2111 struct list_head *le1, *le2; 2112 struct list_sort_thunk *thunk; 2113 2114 thunk = priv; 2115 le1 = *(__DECONST(struct list_head **, d1)); 2116 le2 = *(__DECONST(struct list_head **, d2)); 2117 return ((thunk->cmp)(thunk->priv, le1, le2)); 2118 } 2119 2120 void 2121 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2122 struct list_head *a, struct list_head *b)) 2123 { 2124 struct list_sort_thunk thunk; 2125 struct list_head **ar, *le; 2126 size_t count, i; 2127 2128 count = 0; 2129 list_for_each(le, head) 2130 count++; 2131 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2132 i = 0; 2133 list_for_each(le, head) 2134 ar[i++] = le; 2135 thunk.cmp = cmp; 2136 thunk.priv = priv; 2137 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2138 INIT_LIST_HEAD(head); 2139 for (i = 0; i < count; i++) 2140 list_add_tail(ar[i], head); 2141 free(ar, M_KMALLOC); 2142 } 2143 2144 void 2145 linux_irq_handler(void *ent) 2146 { 2147 struct irq_ent *irqe; 2148 2149 linux_set_current(curthread); 2150 2151 irqe = ent; 2152 irqe->handler(irqe->irq, irqe->arg); 2153 } 2154 2155 #if defined(__i386__) || defined(__amd64__) 2156 int 2157 linux_wbinvd_on_all_cpus(void) 2158 { 2159 2160 pmap_invalidate_cache(); 2161 return (0); 2162 } 2163 #endif 2164 2165 int 2166 linux_on_each_cpu(void callback(void *), void *data) 2167 { 2168 2169 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2170 
smp_no_rendezvous_barrier, data); 2171 return (0); 2172 } 2173 2174 int 2175 linux_in_atomic(void) 2176 { 2177 2178 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2179 } 2180 2181 struct linux_cdev * 2182 linux_find_cdev(const char *name, unsigned major, unsigned minor) 2183 { 2184 dev_t dev = MKDEV(major, minor); 2185 struct cdev *cdev; 2186 2187 dev_lock(); 2188 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2189 struct linux_cdev *ldev = cdev->si_drv1; 2190 if (ldev->dev == dev && 2191 strcmp(kobject_name(&ldev->kobj), name) == 0) { 2192 break; 2193 } 2194 } 2195 dev_unlock(); 2196 2197 return (cdev != NULL ? cdev->si_drv1 : NULL); 2198 } 2199 2200 int 2201 __register_chrdev(unsigned int major, unsigned int baseminor, 2202 unsigned int count, const char *name, 2203 const struct file_operations *fops) 2204 { 2205 struct linux_cdev *cdev; 2206 int ret = 0; 2207 int i; 2208 2209 for (i = baseminor; i < baseminor + count; i++) { 2210 cdev = cdev_alloc(); 2211 cdev_init(cdev, fops); 2212 kobject_set_name(&cdev->kobj, name); 2213 2214 ret = cdev_add(cdev, makedev(major, i), 1); 2215 if (ret != 0) 2216 break; 2217 } 2218 return (ret); 2219 } 2220 2221 int 2222 __register_chrdev_p(unsigned int major, unsigned int baseminor, 2223 unsigned int count, const char *name, 2224 const struct file_operations *fops, uid_t uid, 2225 gid_t gid, int mode) 2226 { 2227 struct linux_cdev *cdev; 2228 int ret = 0; 2229 int i; 2230 2231 for (i = baseminor; i < baseminor + count; i++) { 2232 cdev = cdev_alloc(); 2233 cdev_init(cdev, fops); 2234 kobject_set_name(&cdev->kobj, name); 2235 2236 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2237 if (ret != 0) 2238 break; 2239 } 2240 return (ret); 2241 } 2242 2243 void 2244 __unregister_chrdev(unsigned int major, unsigned int baseminor, 2245 unsigned int count, const char *name) 2246 { 2247 struct linux_cdev *cdevp; 2248 int i; 2249 2250 for (i = baseminor; i < baseminor + count; i++) { 2251 cdevp = linux_find_cdev(name, major, i); 2252 if (cdevp != NULL) 2253 cdev_del(cdevp); 2254 } 2255 } 2256 2257 void 2258 linux_dump_stack(void) 2259 { 2260 #ifdef STACK 2261 struct stack st; 2262 2263 stack_zero(&st); 2264 stack_save(&st); 2265 stack_print(&st); 2266 #endif 2267 } 2268 2269 #if defined(__i386__) || defined(__amd64__) 2270 bool linux_cpu_has_clflush; 2271 #endif 2272 2273 static void 2274 linux_compat_init(void *arg) 2275 { 2276 struct sysctl_oid *rootoid; 2277 int i; 2278 2279 #if defined(__i386__) || defined(__amd64__) 2280 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2281 #endif 2282 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2283 2284 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2285 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2286 kobject_init(&linux_class_root, &linux_class_ktype); 2287 kobject_set_name(&linux_class_root, "class"); 2288 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2289 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2290 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2291 kobject_set_name(&linux_root_device.kobj, "device"); 2292 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2293 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, 2294 "device"); 2295 linux_root_device.bsddev = root_bus; 2296 linux_class_misc.name = "misc"; 2297 class_register(&linux_class_misc); 2298 INIT_LIST_HEAD(&pci_drivers); 2299 INIT_LIST_HEAD(&pci_devices); 2300 spin_lock_init(&pci_lock); 2301 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2302 for (i = 0; i 
< VMMAP_HASH_SIZE; i++) 2303 LIST_INIT(&vmmaphead[i]); 2304 } 2305 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2306 2307 static void 2308 linux_compat_uninit(void *arg) 2309 { 2310 linux_kobject_kfree_name(&linux_class_root); 2311 linux_kobject_kfree_name(&linux_root_device.kobj); 2312 linux_kobject_kfree_name(&linux_class_misc.kobj); 2313 2314 mtx_destroy(&vmmaplock); 2315 spin_lock_destroy(&pci_lock); 2316 rw_destroy(&linux_vma_lock); 2317 } 2318 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2319 2320 /* 2321 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2322 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2323 * used. Assert these types have the same size, else some parts of the 2324 * LinuxKPI may not work like expected: 2325 */ 2326 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2327
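/*
 * Illustrative sketch only (compiled out): one plausible way a LinuxKPI
 * consumer could exercise the machinery implemented above -- a character
 * device registered through __register_chrdev() and served by
 * linuxcdevsw/linux_dev_fdopen(), a completion that resolves to
 * linux_complete_common()/linux_wait_for_common(), and a poll method that
 * registers through poll_wait()/linux_poll_wait().  Every name below
 * (example_*, EXAMPLE_MAJOR) is hypothetical and not part of this file.
 */
#if 0
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/completion.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#define	EXAMPLE_MAJOR	234	/* hypothetical, statically chosen major */

static struct completion example_done;
static wait_queue_head_t example_wq;
static const char example_msg[] = "hello from linuxkpi\n";

static ssize_t
example_read(struct linux_file *filp, char *buf, size_t len, loff_t *off)
{
	/* sleeps in linux_wait_for_common(); returns -ERESTARTSYS on signal */
	int error = wait_for_completion_interruptible(&example_done);

	if (error != 0)
		return (error);
	if (len > sizeof(example_msg))
		len = sizeof(example_msg);
	/* copy_to_user() ends up in linux_copyout() above */
	if (copy_to_user(buf, example_msg, len) != 0)
		return (-EFAULT);
	return (len);
}

static unsigned int
example_poll(struct linux_file *filp, struct poll_table_struct *p)
{
	/* hooks into the f_wait_queue state machine via linux_poll_wait() */
	poll_wait(filp, &example_wq, p);
	/* completion_done() maps to linux_completion_done() above */
	return (completion_done(&example_done) ? (POLLIN | POLLRDNORM) : 0);
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.read = example_read,
	.poll = example_poll,
};

static int
example_init(void)
{
	init_completion(&example_done);
	init_waitqueue_head(&example_wq);
	/* creates the /dev node backed by linuxcdevsw above */
	return (__register_chrdev(EXAMPLE_MAJOR, 0, 1, "lkpi_example",
	    &example_fops));
}

static void
example_exit(void)
{
	__unregister_chrdev(EXAMPLE_MAJOR, 0, 1, "lkpi_example");
}

module_init(example_init);
module_exit(example_exit);
#endif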
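/*
 * Illustrative sketch only (compiled out): what the user-pointer
 * remapping done by linux_file_ioctl_sub()/linux_remap_address() looks
 * like from a driver's unlocked_ioctl handler.  The "arg" received here
 * is the LINUX_IOCTL_MIN_PTR cookie installed above; copy_from_user()
 * and copy_to_user() resolve it back to the kernel buffer that the
 * FreeBSD ioctl path already staged.  The names example_ioctl and
 * struct example_args are hypothetical.
 */
#if 0
struct example_args {
	int	input;
	int	result;
};

static long
example_ioctl(struct linux_file *filp, unsigned int cmd, unsigned long arg)
{
	struct example_args a;

	/* resolves through linux_copyin() using the bsd_ioctl_data hint */
	if (copy_from_user(&a, (const void *)arg, sizeof(a)) != 0)
		return (-EFAULT);

	a.result = a.input * 2;

	/* resolves through linux_copyout() the same way */
	if (copy_to_user((void *)arg, &a, sizeof(a)) != 0)
		return (-EFAULT);
	return (0);
}
#endif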