/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}
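/*
 * Example (illustrative only): a consumer that has already called
 * kobject_init() can name and add its kobject in one step.  The
 * "foo" structure, "foo_parent" kobject and unit number below are
 * placeholders, not part of this file:
 *
 *	error = kobject_add(&foo->kobj, &foo_parent, "foo%d", unit);
 *	if (error != 0)
 *		kobject_put(&foo->kobj);
 *
 * Note that any '/' characters in the formatted name are replaced
 * by '!' in kobject_set_name_vargs() above, matching Linux sysfs
 * naming behaviour.
 */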
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}
static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
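/*
 * Example (illustrative only): device_create() is the usual way for
 * LinuxKPI consumers to instantiate a device under an existing class.
 * The "foo_class", "foo_major", "foo_softc" and unit values below are
 * placeholders:
 *
 *	dev = device_create(&foo_class, NULL,
 *	    MKDEV(foo_major, unit), foo_softc, "foo%d", unit);
 *
 * The returned device is registered immediately and is freed through
 * linux_device_release() once its last kobject reference is dropped.
 */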
int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_file_dtor(void *cdp)
{
	struct linux_file *filp;

	linux_set_current(curthread);
	filp = cdp;
	filp->f_op->release(filp->f_vnode, filp);
	vdrop(filp->f_vnode);
	kfree(filp);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_dev_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock,
	    linux_kq_lock_owned, linux_kq_lock_unowned);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}
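/*
 * The two pager callbacks below implement Linux-style memory mapping
 * of character devices on top of the FreeBSD cdev pager.  The fault
 * handler services OBJT_DEVICE objects by computing the physical
 * address behind the VMA and installing a fictitious ("fake") page,
 * while the populate handler services OBJT_MGTDEVICE objects by
 * invoking the Linux vm_ops->fault() method directly.
 */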
506 */ 507 VM_OBJECT_WUNLOCK(vm_obj); 508 page = vm_page_getfake(paddr, vm_obj->memattr); 509 VM_OBJECT_WLOCK(vm_obj); 510 511 vm_page_replace_checked(page, vm_obj, 512 (*mres)->pindex, *mres); 513 514 vm_page_lock(*mres); 515 vm_page_free(*mres); 516 vm_page_unlock(*mres); 517 *mres = page; 518 } 519 page->valid = VM_PAGE_BITS_ALL; 520 return (VM_PAGER_OK); 521 } 522 return (VM_PAGER_FAIL); 523 } 524 525 static int 526 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 527 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 528 { 529 struct vm_area_struct *vmap; 530 int err; 531 532 linux_set_current(curthread); 533 534 /* get VM area structure */ 535 vmap = linux_cdev_handle_find(vm_obj->handle); 536 MPASS(vmap != NULL); 537 MPASS(vmap->vm_private_data == vm_obj->handle); 538 539 VM_OBJECT_WUNLOCK(vm_obj); 540 541 down_write(&vmap->vm_mm->mmap_sem); 542 if (unlikely(vmap->vm_ops == NULL)) { 543 err = VM_FAULT_SIGBUS; 544 } else { 545 struct vm_fault vmf; 546 547 /* fill out VM fault structure */ 548 vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); 549 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 550 vmf.pgoff = 0; 551 vmf.page = NULL; 552 553 vmap->vm_pfn_count = 0; 554 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 555 vmap->vm_obj = vm_obj; 556 557 err = vmap->vm_ops->fault(vmap, &vmf); 558 559 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 560 kern_yield(PRI_USER); 561 err = vmap->vm_ops->fault(vmap, &vmf); 562 } 563 } 564 565 /* translate return code */ 566 switch (err) { 567 case VM_FAULT_OOM: 568 err = VM_PAGER_AGAIN; 569 break; 570 case VM_FAULT_SIGBUS: 571 err = VM_PAGER_BAD; 572 break; 573 case VM_FAULT_NOPAGE: 574 /* 575 * By contract the fault handler will return having 576 * busied all the pages itself. If pidx is already 577 * found in the object, it will simply xbusy the first 578 * page and return with vm_pfn_count set to 1. 
579 */ 580 *first = vmap->vm_pfn_first; 581 *last = *first + vmap->vm_pfn_count - 1; 582 err = VM_PAGER_OK; 583 break; 584 default: 585 err = VM_PAGER_ERROR; 586 break; 587 } 588 up_write(&vmap->vm_mm->mmap_sem); 589 VM_OBJECT_WLOCK(vm_obj); 590 return (err); 591 } 592 593 static struct rwlock linux_vma_lock; 594 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 595 TAILQ_HEAD_INITIALIZER(linux_vma_head); 596 597 static void 598 linux_cdev_handle_free(struct vm_area_struct *vmap) 599 { 600 /* Drop reference on vm_file */ 601 if (vmap->vm_file != NULL) 602 fput(vmap->vm_file); 603 604 /* Drop reference on mm_struct */ 605 mmput(vmap->vm_mm); 606 607 kfree(vmap); 608 } 609 610 static struct vm_area_struct * 611 linux_cdev_handle_insert(void *handle, struct vm_area_struct *vmap) 612 { 613 struct vm_area_struct *ptr; 614 615 rw_wlock(&linux_vma_lock); 616 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 617 if (ptr->vm_private_data == handle) { 618 rw_wunlock(&linux_vma_lock); 619 linux_cdev_handle_free(vmap); 620 return (NULL); 621 } 622 } 623 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 624 rw_wunlock(&linux_vma_lock); 625 return (vmap); 626 } 627 628 static void 629 linux_cdev_handle_remove(struct vm_area_struct *vmap) 630 { 631 rw_wlock(&linux_vma_lock); 632 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 633 rw_wunlock(&linux_vma_lock); 634 } 635 636 static struct vm_area_struct * 637 linux_cdev_handle_find(void *handle) 638 { 639 struct vm_area_struct *vmap; 640 641 rw_rlock(&linux_vma_lock); 642 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 643 if (vmap->vm_private_data == handle) 644 break; 645 } 646 rw_runlock(&linux_vma_lock); 647 return (vmap); 648 } 649 650 static int 651 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 652 vm_ooffset_t foff, struct ucred *cred, u_short *color) 653 { 654 655 MPASS(linux_cdev_handle_find(handle) != NULL); 656 *color = 0; 657 return (0); 658 } 659 660 static void 661 linux_cdev_pager_dtor(void *handle) 662 { 663 const struct vm_operations_struct *vm_ops; 664 struct vm_area_struct *vmap; 665 666 vmap = linux_cdev_handle_find(handle); 667 MPASS(vmap != NULL); 668 669 /* 670 * Remove handle before calling close operation to prevent 671 * other threads from reusing the handle pointer. 
672 */ 673 linux_cdev_handle_remove(vmap); 674 675 down_write(&vmap->vm_mm->mmap_sem); 676 vm_ops = vmap->vm_ops; 677 if (likely(vm_ops != NULL)) 678 vm_ops->close(vmap); 679 up_write(&vmap->vm_mm->mmap_sem); 680 681 linux_cdev_handle_free(vmap); 682 } 683 684 static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 685 { 686 /* OBJT_MGTDEVICE */ 687 .cdev_pg_populate = linux_cdev_pager_populate, 688 .cdev_pg_ctor = linux_cdev_pager_ctor, 689 .cdev_pg_dtor = linux_cdev_pager_dtor 690 }, 691 { 692 /* OBJT_DEVICE */ 693 .cdev_pg_fault = linux_cdev_pager_fault, 694 .cdev_pg_ctor = linux_cdev_pager_ctor, 695 .cdev_pg_dtor = linux_cdev_pager_dtor 696 }, 697 }; 698 699 static int 700 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 701 { 702 struct linux_cdev *ldev; 703 struct linux_file *filp; 704 struct file *file; 705 int error; 706 707 file = td->td_fpop; 708 ldev = dev->si_drv1; 709 if (ldev == NULL) 710 return (ENODEV); 711 712 filp = linux_file_alloc(); 713 filp->f_dentry = &filp->f_dentry_store; 714 filp->f_op = ldev->ops; 715 filp->f_flags = file->f_flag; 716 vhold(file->f_vnode); 717 filp->f_vnode = file->f_vnode; 718 filp->_file = file; 719 720 linux_set_current(td); 721 722 if (filp->f_op->open) { 723 error = -filp->f_op->open(file->f_vnode, filp); 724 if (error) { 725 vdrop(filp->f_vnode); 726 kfree(filp); 727 goto done; 728 } 729 } 730 error = devfs_set_cdevpriv(filp, linux_file_dtor); 731 if (error) { 732 filp->f_op->release(file->f_vnode, filp); 733 vdrop(filp->f_vnode); 734 kfree(filp); 735 } 736 done: 737 return (error); 738 } 739 740 static int 741 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 742 { 743 struct linux_file *filp; 744 struct file *file; 745 int error; 746 747 file = td->td_fpop; 748 if (dev->si_drv1 == NULL) 749 return (0); 750 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 751 return (error); 752 filp->f_flags = file->f_flag; 753 devfs_clear_cdevpriv(); 754 755 return (0); 756 } 757 758 #define LINUX_IOCTL_MIN_PTR 0x10000UL 759 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 760 761 static inline int 762 linux_remap_address(void **uaddr, size_t len) 763 { 764 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 765 766 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 767 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 768 struct task_struct *pts = current; 769 if (pts == NULL) { 770 *uaddr = NULL; 771 return (1); 772 } 773 774 /* compute data offset */ 775 uaddr_val -= LINUX_IOCTL_MIN_PTR; 776 777 /* check that length is within bounds */ 778 if ((len > IOCPARM_MAX) || 779 (uaddr_val + len) > pts->bsd_ioctl_len) { 780 *uaddr = NULL; 781 return (1); 782 } 783 784 /* re-add kernel buffer address */ 785 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 786 787 /* update address location */ 788 *uaddr = (void *)uaddr_val; 789 return (1); 790 } 791 return (0); 792 } 793 794 int 795 linux_copyin(const void *uaddr, void *kaddr, size_t len) 796 { 797 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 798 if (uaddr == NULL) 799 return (-EFAULT); 800 memcpy(kaddr, uaddr, len); 801 return (0); 802 } 803 return (-copyin(uaddr, kaddr, len)); 804 } 805 806 int 807 linux_copyout(const void *kaddr, void *uaddr, size_t len) 808 { 809 if (linux_remap_address(&uaddr, len)) { 810 if (uaddr == NULL) 811 return (-EFAULT); 812 memcpy(uaddr, kaddr, len); 813 return (0); 814 } 815 return (-copyout(kaddr, uaddr, len)); 816 } 817 818 size_t 819 linux_clear_user(void *_uaddr, size_t _len) 820 { 821 uint8_t *uaddr = _uaddr; 822 
size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(int rw, const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}
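/*
 * Note that linux_access_ok() does not consult the "rw" argument; it
 * only checks that the range does not wrap and ends at or below
 * VM_MAXUSER_ADDRESS.  A zero-length range is always accepted,
 * regardless of address.
 */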
901 */ 902 current->bsd_ioctl_data = data; 903 current->bsd_ioctl_len = size; 904 data = (void *)LINUX_IOCTL_MIN_PTR; 905 } else { 906 /* fetch user-space pointer */ 907 data = *(void **)data; 908 } 909 if (filp->f_op->unlocked_ioctl) 910 error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data); 911 else 912 error = ENOTTY; 913 if (size > 0) { 914 current->bsd_ioctl_data = NULL; 915 current->bsd_ioctl_len = 0; 916 } 917 918 if (error == EWOULDBLOCK) { 919 /* update kqfilter status, if any */ 920 linux_dev_kqfilter_poll(filp, 921 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 922 } else if (error == ERESTARTSYS) 923 error = ERESTART; 924 return (error); 925 } 926 927 static int 928 linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag) 929 { 930 struct linux_file *filp; 931 struct thread *td; 932 struct file *file; 933 ssize_t bytes; 934 int error; 935 936 td = curthread; 937 file = td->td_fpop; 938 if (dev->si_drv1 == NULL) 939 return (ENXIO); 940 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 941 return (error); 942 filp->f_flags = file->f_flag; 943 /* XXX no support for I/O vectors currently */ 944 if (uio->uio_iovcnt != 1) 945 return (EOPNOTSUPP); 946 linux_set_current(td); 947 if (filp->f_op->read) { 948 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 949 uio->uio_iov->iov_len, &uio->uio_offset); 950 if (bytes >= 0) { 951 uio->uio_iov->iov_base = 952 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 953 uio->uio_iov->iov_len -= bytes; 954 uio->uio_resid -= bytes; 955 } else { 956 error = -bytes; 957 if (error == ERESTARTSYS) 958 error = ERESTART; 959 } 960 } else 961 error = ENXIO; 962 963 /* update kqfilter status, if any */ 964 linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 965 966 return (error); 967 } 968 969 static int 970 linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag) 971 { 972 struct linux_file *filp; 973 struct thread *td; 974 struct file *file; 975 ssize_t bytes; 976 int error; 977 978 td = curthread; 979 file = td->td_fpop; 980 if (dev->si_drv1 == NULL) 981 return (ENXIO); 982 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 983 return (error); 984 filp->f_flags = file->f_flag; 985 /* XXX no support for I/O vectors currently */ 986 if (uio->uio_iovcnt != 1) 987 return (EOPNOTSUPP); 988 linux_set_current(td); 989 if (filp->f_op->write) { 990 bytes = filp->f_op->write(filp, uio->uio_iov->iov_base, 991 uio->uio_iov->iov_len, &uio->uio_offset); 992 if (bytes >= 0) { 993 uio->uio_iov->iov_base = 994 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 995 uio->uio_iov->iov_len -= bytes; 996 uio->uio_resid -= bytes; 997 } else { 998 error = -bytes; 999 if (error == ERESTARTSYS) 1000 error = ERESTART; 1001 } 1002 } else 1003 error = ENXIO; 1004 1005 /* update kqfilter status, if any */ 1006 linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1007 1008 return (error); 1009 } 1010 1011 static int 1012 linux_dev_poll(struct cdev *dev, int events, struct thread *td) 1013 { 1014 struct linux_file *filp; 1015 struct file *file; 1016 int revents; 1017 1018 if (dev->si_drv1 == NULL) 1019 goto error; 1020 if (devfs_get_cdevpriv((void **)&filp) != 0) 1021 goto error; 1022 1023 file = td->td_fpop; 1024 filp->f_flags = file->f_flag; 1025 linux_set_current(td); 1026 if (filp->f_op->poll != NULL) { 1027 selrecord(td, &filp->f_selinfo); 1028 revents = filp->f_op->poll(filp, NULL) & events; 1029 } else 1030 revents = 0; 1031 1032 return (revents); 1033 error: 1034 return (events & (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); 1035 } 1036 1037 
void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_dev_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_dev_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_dev_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_dev_kqfilter_detach,
	.f_event = linux_dev_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_dev_kqfilter_detach,
	.f_event = linux_dev_kqfilter_write_event,
};

static void
linux_dev_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	int temp;

	if (filp->f_kqflags & kqflags) {
		/* get the latest polling state */
		temp = filp->f_op->poll(filp, NULL);

		spin_lock(&filp->f_kqlock);
		/* clear kqflags */
		filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
		    LINUX_KQ_FLAG_NEED_WRITE);
		/* update kqflags */
		if (temp & (POLLIN | POLLOUT)) {
			if (temp & POLLIN)
				filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
			if (temp & POLLOUT)
				filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

			/* make sure the "knote" gets woken up */
			KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
		}
		spin_unlock(&filp->f_kqlock);
	}
}

static int
linux_dev_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct linux_file *filp;
	struct file *file;
	struct thread *td;
	int error;

	td = curthread;
	file = td->td_fpop;
	if (dev->si_drv1 == NULL)
		return (ENXIO);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_dev_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}
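/*
 * linux_dev_mmap_single() below supports two kinds of Linux mmap()
 * implementations.  If the driver's mmap method installs vm_ops, the
 * mapping is backed by a cdev pager object (managed when a fault
 * handler is present, unmanaged otherwise) keyed by vm_private_data.
 * If no vm_ops are installed, the driver is expected to have filled
 * in vm_pfn and the mapping is backed by a plain scatter-gather
 * pager object covering the physical range.
 */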
static int
linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	vm_memattr_t attr;
	int error;

	td = curthread;
	file = td->td_fpop;
	if (dev->si_drv1 == NULL)
		return (ENODEV);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;

	if (filp->f_op->mmap == NULL)
		return (ENODEV);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	mm = current->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = EINTR;
	} else {
		error = -filp->f_op->mmap(filp, vmap);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		void *vm_private_data;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		vmap = linux_cdev_handle_insert(vm_private_data, vmap);

		if (vmap->vm_ops->fault == NULL) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    curthread->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    curthread->td_ucred);
		}

		if (*object == NULL) {
			linux_cdev_handle_remove(vmap);
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, curthread->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = linux_dev_open,
	.d_close = linux_dev_close,
	.d_read = linux_dev_read,
	.d_write = linux_dev_write,
	.d_ioctl = linux_dev_ioctl,
	.d_mmap_single = linux_dev_mmap_single,
	.d_poll = linux_dev_poll,
	.d_kqfilter = linux_dev_kqfilter,
	.d_name = "lkpidev",
};
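/*
 * The fileops below back struct linux_file instances that are
 * installed directly as FreeBSD file descriptors rather than being
 * reached through a devfs character device, such as the shmem-backed
 * files handled in linux_file_free() above.  Operations the LinuxKPI
 * does not support on such files are rejected with the invfo_*()
 * stubs.
 */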
static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	linux_set_current(td);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	if (filp->f_op->poll != NULL) {
		selrecord(td, &filp->f_selinfo);
		revents = filp->f_op->poll(filp, NULL) & events;
	} else
		revents = 0;

	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	error = -filp->f_op->release(NULL, filp);
	funsetown(&filp->f_sigio);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	error = 0;

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (filp->f_op->fasync == NULL)
			break;
		error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0)
			error = filp->f_op->fasync(0, filp,
			    fp->f_flag & FASYNC);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

	return (0);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};
/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}
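/*
 * Worked example (illustrative address): with 4 KB pages, a mapping
 * at 0x12345000 hashes to bucket (0x12345000 >> 12) & 63 =
 * 0x12345 & 63 = 5.  Both vmmap_add() and vmmap_remove() recompute
 * the same bucket from the address alone, which is why iounmap() and
 * vunmap() below take only the address; the stored vm_size supplies
 * the length of the region to tear down.
 */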
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	linux_set_current(curthread);

	timer = context;
	timer->function(timer->data);
}

void
mod_timer(struct timer_list *timer, int expires)
{

	timer->expires = expires;
	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

static void
linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	c->done++;
	if (all)
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	else
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	DROP_GIANT();

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			if (sleepq_wait_sig(c, 0) != 0) {
				error = -ERESTARTSYS;
				goto intr;
			}
		} else
			sleepq_wait(c, 0);
	}
	c->done--;
	sleepq_release(c);

intr:
	PICKUP_GIANT();

	return (error);
}
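/*
 * Example (illustrative only): the completion implementation above
 * maps Linux semantics onto FreeBSD sleepqueues.  A typical consumer
 * looks like the following, where "foo_done" is a placeholder:
 *
 *	init_completion(&foo_done);
 *	... start asynchronous work that finishes with ...
 *	complete(&foo_done);
 *	... while the originating thread blocks in ...
 *	error = wait_for_completion_interruptible(&foo_done);
 *
 * Each increment of "done" releases exactly one waiter unless the
 * "all" argument of linux_complete_common() requests a broadcast.
 */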
1702 */ 1703 int 1704 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 1705 { 1706 int end = jiffies + timeout; 1707 int error; 1708 int ret; 1709 1710 if (SCHEDULER_STOPPED()) 1711 return (0); 1712 1713 DROP_GIANT(); 1714 1715 if (flags != 0) 1716 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1717 else 1718 flags = SLEEPQ_SLEEP; 1719 1720 error = 0; 1721 ret = 0; 1722 for (;;) { 1723 sleepq_lock(c); 1724 if (c->done) 1725 break; 1726 sleepq_add(c, NULL, "completion", flags, 0); 1727 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 1728 if (flags & SLEEPQ_INTERRUPTIBLE) 1729 ret = sleepq_timedwait_sig(c, 0); 1730 else 1731 ret = sleepq_timedwait(c, 0); 1732 if (ret != 0) { 1733 /* check for timeout or signal */ 1734 if (ret == EWOULDBLOCK) 1735 error = 0; 1736 else 1737 error = -ERESTARTSYS; 1738 goto intr; 1739 } 1740 } 1741 c->done--; 1742 sleepq_release(c); 1743 1744 intr: 1745 PICKUP_GIANT(); 1746 1747 /* return how many jiffies are left */ 1748 return (ret != 0 ? error : linux_timer_jiffies_until(end)); 1749 } 1750 1751 int 1752 linux_try_wait_for_completion(struct completion *c) 1753 { 1754 int isdone; 1755 1756 isdone = 1; 1757 sleepq_lock(c); 1758 if (c->done) 1759 c->done--; 1760 else 1761 isdone = 0; 1762 sleepq_release(c); 1763 return (isdone); 1764 } 1765 1766 int 1767 linux_completion_done(struct completion *c) 1768 { 1769 int isdone; 1770 1771 isdone = 1; 1772 sleepq_lock(c); 1773 if (c->done == 0) 1774 isdone = 0; 1775 sleepq_release(c); 1776 return (isdone); 1777 } 1778 1779 static void 1780 linux_cdev_release(struct kobject *kobj) 1781 { 1782 struct linux_cdev *cdev; 1783 struct kobject *parent; 1784 1785 cdev = container_of(kobj, struct linux_cdev, kobj); 1786 parent = kobj->parent; 1787 if (cdev->cdev) 1788 destroy_dev(cdev->cdev); 1789 kfree(cdev); 1790 kobject_put(parent); 1791 } 1792 1793 static void 1794 linux_cdev_static_release(struct kobject *kobj) 1795 { 1796 struct linux_cdev *cdev; 1797 struct kobject *parent; 1798 1799 cdev = container_of(kobj, struct linux_cdev, kobj); 1800 parent = kobj->parent; 1801 if (cdev->cdev) 1802 destroy_dev(cdev->cdev); 1803 kobject_put(parent); 1804 } 1805 1806 const struct kobj_type linux_cdev_ktype = { 1807 .release = linux_cdev_release, 1808 }; 1809 1810 const struct kobj_type linux_cdev_static_ktype = { 1811 .release = linux_cdev_static_release, 1812 }; 1813 1814 static void 1815 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 1816 { 1817 struct notifier_block *nb; 1818 1819 nb = arg; 1820 if (linkstate == LINK_STATE_UP) 1821 nb->notifier_call(nb, NETDEV_UP, ifp); 1822 else 1823 nb->notifier_call(nb, NETDEV_DOWN, ifp); 1824 } 1825 1826 static void 1827 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 1828 { 1829 struct notifier_block *nb; 1830 1831 nb = arg; 1832 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 1833 } 1834 1835 static void 1836 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 1837 { 1838 struct notifier_block *nb; 1839 1840 nb = arg; 1841 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 1842 } 1843 1844 static void 1845 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 1846 { 1847 struct notifier_block *nb; 1848 1849 nb = arg; 1850 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 1851 } 1852 1853 static void 1854 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 1855 { 1856 struct notifier_block *nb; 1857 1858 nb = arg; 1859 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 1860 } 1861 1862 int 1863 
int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}
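/*
 * Example (illustrative only): registering a range of minor numbers
 * the Linux way.  The "FOO_MAJOR" value, "foo" name and "foo_fops"
 * operations below are placeholders:
 *
 *	error = __register_chrdev(FOO_MAJOR, 0, 4, "foo", &foo_fops);
 *	...
 *	__unregister_chrdev(FOO_MAJOR, 0, 4, "foo");
 *
 * Each minor gets its own struct linux_cdev; __unregister_chrdev()
 * locates them again by (name, major, minor) via linux_find_cdev()
 * above.
 */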
int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
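/*
 * linux_compat_init() above creates a "sys" sysctl root with "class"
 * and "device" children rooted at the corresponding kobjects; the
 * sysfs_create_dir() and sysfs_create_file() calls used elsewhere in
 * this file attach kobject directories and attributes underneath it,
 * giving a sysctl-based emulation of Linux sysfs.
 */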
/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));