1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/sysctl.h> 38 #include <sys/proc.h> 39 #include <sys/sglist.h> 40 #include <sys/sleepqueue.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/bus.h> 44 #include <sys/fcntl.h> 45 #include <sys/file.h> 46 #include <sys/filio.h> 47 #include <sys/rwlock.h> 48 49 #include <vm/vm.h> 50 #include <vm/pmap.h> 51 #include <vm/vm_object.h> 52 #include <vm/vm_page.h> 53 #include <vm/vm_pager.h> 54 55 #include <machine/stdarg.h> 56 57 #if defined(__i386__) || defined(__amd64__) 58 #include <machine/md_var.h> 59 #endif 60 61 #include <linux/kobject.h> 62 #include <linux/device.h> 63 #include <linux/slab.h> 64 #include <linux/module.h> 65 #include <linux/moduleparam.h> 66 #include <linux/cdev.h> 67 #include <linux/file.h> 68 #include <linux/sysfs.h> 69 #include <linux/mm.h> 70 #include <linux/io.h> 71 #include <linux/vmalloc.h> 72 #include <linux/netdevice.h> 73 #include <linux/timer.h> 74 #include <linux/interrupt.h> 75 #include <linux/uaccess.h> 76 #include <linux/list.h> 77 #include <linux/kthread.h> 78 #include <linux/kernel.h> 79 #include <linux/compat.h> 80 #include <linux/poll.h> 81 #include <linux/smp.h> 82 83 #if defined(__i386__) || defined(__amd64__) 84 #include <asm/smp.h> 85 #endif 86 87 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); 88 89 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 90 91 #include <linux/rbtree.h> 92 /* Undo Linux compat changes. */ 93 #undef RB_ROOT 94 #undef file 95 #undef cdev 96 #define RB_ROOT(head) (head)->rbh_root 97 98 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 99 100 struct kobject linux_class_root; 101 struct device linux_root_device; 102 struct class linux_class_misc; 103 struct list_head pci_drivers; 104 struct list_head pci_devices; 105 spinlock_t pci_lock; 106 107 unsigned long linux_timer_hz_mask; 108 109 int 110 panic_cmp(struct rb_node *one, struct rb_node *two) 111 { 112 panic("no cmp"); 113 } 114 115 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 116 117 int 118 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 119 { 120 va_list tmp_va; 121 int len; 122 char *old; 123 char *name; 124 char dummy; 125 126 old = kobj->name; 127 128 if (old && fmt == NULL) 129 return (0); 130 131 /* compute length of string */ 132 va_copy(tmp_va, args); 133 len = vsnprintf(&dummy, 0, fmt, tmp_va); 134 va_end(tmp_va); 135 136 /* account for zero termination */ 137 len++; 138 139 /* check for error */ 140 if (len < 1) 141 return (-EINVAL); 142 143 /* allocate memory for string */ 144 name = kzalloc(len, GFP_KERNEL); 145 if (name == NULL) 146 return (-ENOMEM); 147 vsnprintf(name, len, fmt, args); 148 kobj->name = name; 149 150 /* free old string */ 151 kfree(old); 152 153 /* filter new string */ 154 for (; *name != '\0'; name++) 155 if (*name == '/') 156 *name = '!'; 157 return (0); 158 } 159 160 int 161 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 162 { 163 va_list args; 164 int error; 165 166 va_start(args, fmt); 167 error = kobject_set_name_vargs(kobj, fmt, args); 168 va_end(args); 169 170 return (error); 171 } 172 173 static int 174 kobject_add_complete(struct kobject *kobj, struct kobject *parent) 175 { 176 const struct kobj_type *t; 177 int error; 178 179 kobj->parent = parent; 180 error = sysfs_create_dir(kobj); 181 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 182 struct attribute **attr; 183 t = kobj->ktype; 184 185 for (attr = t->default_attrs; *attr != NULL; attr++) { 186 error = sysfs_create_file(kobj, *attr); 187 if (error) 188 break; 189 } 190 if (error) 191 sysfs_remove_dir(kobj); 192 193 } 194 return (error); 195 } 196 197 int 198 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 199 { 200 va_list args; 201 int error; 202 203 va_start(args, fmt); 204 error = kobject_set_name_vargs(kobj, fmt, args); 205 va_end(args); 206 if (error) 207 return (error); 208 209 return kobject_add_complete(kobj, parent); 210 } 211 212 void 213 linux_kobject_release(struct kref *kref) 214 { 215 struct kobject *kobj; 216 char *name; 217 218 kobj = container_of(kref, struct kobject, kref); 219 sysfs_remove_dir(kobj); 220 name = kobj->name; 221 if (kobj->ktype && kobj->ktype->release) 222 kobj->ktype->release(kobj); 223 kfree(name); 224 } 225 226 static void 227 linux_kobject_kfree(struct kobject *kobj) 228 { 229 kfree(kobj); 230 } 231 232 static void 233 linux_kobject_kfree_name(struct kobject *kobj) 234 { 235 if (kobj) { 236 kfree(kobj->name); 237 } 238 } 239 240 const struct kobj_type linux_kfree_type = { 241 .release = linux_kobject_kfree 242 }; 243 244 static void 245 linux_device_release(struct device *dev) 246 { 247 pr_debug("linux_device_release: %s\n", dev_name(dev)); 248 kfree(dev); 249 } 250 251 static ssize_t 252 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) 253 { 254 struct class_attribute *dattr; 255 ssize_t error; 256 257 dattr = container_of(attr, struct class_attribute, attr); 258 error = -EIO; 259 if (dattr->show) 260 error = dattr->show(container_of(kobj, struct class, kobj), 261 dattr, buf); 262 return (error); 263 } 264 265 static ssize_t 266 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, 267 size_t count) 268 { 269 struct class_attribute *dattr; 270 ssize_t error; 271 272 dattr = container_of(attr, struct class_attribute, attr); 273 error = -EIO; 274 if (dattr->store) 275 error = dattr->store(container_of(kobj, struct class, kobj), 276 dattr, buf, count); 277 return (error); 278 } 279 280 static void 281 linux_class_release(struct kobject *kobj) 282 { 283 struct class *class; 284 285 class = container_of(kobj, struct class, kobj); 286 if (class->class_release) 287 class->class_release(class); 288 } 289 290 static const struct sysfs_ops linux_class_sysfs = { 291 .show = linux_class_show, 292 .store = linux_class_store, 293 }; 294 295 const struct kobj_type linux_class_ktype = { 296 .release = linux_class_release, 297 .sysfs_ops = &linux_class_sysfs 298 }; 299 300 static void 301 linux_dev_release(struct kobject *kobj) 302 { 303 struct device *dev; 304 305 dev = container_of(kobj, struct device, kobj); 306 /* This is the precedence defined by linux. */ 307 if (dev->release) 308 dev->release(dev); 309 else if (dev->class && dev->class->dev_release) 310 dev->class->dev_release(dev); 311 } 312 313 static ssize_t 314 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 315 { 316 struct device_attribute *dattr; 317 ssize_t error; 318 319 dattr = container_of(attr, struct device_attribute, attr); 320 error = -EIO; 321 if (dattr->show) 322 error = dattr->show(container_of(kobj, struct device, kobj), 323 dattr, buf); 324 return (error); 325 } 326 327 static ssize_t 328 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 329 size_t count) 330 { 331 struct device_attribute *dattr; 332 ssize_t error; 333 334 dattr = container_of(attr, struct device_attribute, attr); 335 error = -EIO; 336 if (dattr->store) 337 error = dattr->store(container_of(kobj, struct device, kobj), 338 dattr, buf, count); 339 return (error); 340 } 341 342 static const struct sysfs_ops linux_dev_sysfs = { 343 .show = linux_dev_show, 344 .store = linux_dev_store, 345 }; 346 347 const struct kobj_type linux_dev_ktype = { 348 .release = linux_dev_release, 349 .sysfs_ops = &linux_dev_sysfs 350 }; 351 352 struct device * 353 device_create(struct class *class, struct device *parent, dev_t devt, 354 void *drvdata, const char *fmt, ...) 355 { 356 struct device *dev; 357 va_list args; 358 359 dev = kzalloc(sizeof(*dev), M_WAITOK); 360 dev->parent = parent; 361 dev->class = class; 362 dev->devt = devt; 363 dev->driver_data = drvdata; 364 dev->release = linux_device_release; 365 va_start(args, fmt); 366 kobject_set_name_vargs(&dev->kobj, fmt, args); 367 va_end(args); 368 device_register(dev); 369 370 return (dev); 371 } 372 373 int 374 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 375 struct kobject *parent, const char *fmt, ...) 376 { 377 va_list args; 378 int error; 379 380 kobject_init(kobj, ktype); 381 kobj->ktype = ktype; 382 kobj->parent = parent; 383 kobj->name = NULL; 384 385 va_start(args, fmt); 386 error = kobject_set_name_vargs(kobj, fmt, args); 387 va_end(args); 388 if (error) 389 return (error); 390 return kobject_add_complete(kobj, parent); 391 } 392 393 static void 394 linux_file_dtor(void *cdp) 395 { 396 struct linux_file *filp; 397 398 linux_set_current(curthread); 399 filp = cdp; 400 filp->f_op->release(filp->f_vnode, filp); 401 vdrop(filp->f_vnode); 402 kfree(filp); 403 } 404 405 static void 406 linux_kq_lock(void *arg) 407 { 408 spinlock_t *s = arg; 409 410 spin_lock(s); 411 } 412 static void 413 linux_kq_unlock(void *arg) 414 { 415 spinlock_t *s = arg; 416 417 spin_unlock(s); 418 } 419 420 static void 421 linux_kq_lock_owned(void *arg) 422 { 423 #ifdef INVARIANTS 424 spinlock_t *s = arg; 425 426 mtx_assert(&s->m, MA_OWNED); 427 #endif 428 } 429 430 static void 431 linux_kq_lock_unowned(void *arg) 432 { 433 #ifdef INVARIANTS 434 spinlock_t *s = arg; 435 436 mtx_assert(&s->m, MA_NOTOWNED); 437 #endif 438 } 439 440 static void 441 linux_dev_kqfilter_poll(struct linux_file *, int); 442 443 struct linux_file * 444 linux_file_alloc(void) 445 { 446 struct linux_file *filp; 447 448 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 449 450 /* set initial refcount */ 451 filp->f_count = 1; 452 453 /* setup fields needed by kqueue support */ 454 spin_lock_init(&filp->f_kqlock); 455 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, 456 linux_kq_lock, linux_kq_unlock, 457 linux_kq_lock_owned, linux_kq_lock_unowned); 458 459 return (filp); 460 } 461 462 void 463 linux_file_free(struct linux_file *filp) 464 { 465 if (filp->_file == NULL) { 466 kfree(filp); 467 } else { 468 /* 469 * The close method of the character device or file 470 * will free the linux_file structure: 471 */ 472 _fdrop(filp->_file, curthread); 473 } 474 } 475 476 static int 477 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 478 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 479 { 480 struct vm_area_struct *vmap; 481 struct vm_fault vmf; 482 int err; 483 484 linux_set_current(curthread); 485 486 /* get VM area structure */ 487 vmap = linux_cdev_handle_find(vm_obj->handle); 488 MPASS(vmap != NULL); 489 MPASS(vmap->vm_private_data == vm_obj->handle); 490 491 /* fill out VM fault structure */ 492 vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); 493 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 494 vmf.pgoff = 0; 495 vmf.page = NULL; 496 497 VM_OBJECT_WUNLOCK(vm_obj); 498 499 down_write(&vmap->vm_mm->mmap_sem); 500 if (unlikely(vmap->vm_ops == NULL || vmap->vm_ops->fault == NULL)) { 501 err = VM_FAULT_SIGBUS; 502 } else { 503 vmap->vm_pfn_count = 0; 504 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 505 vmap->vm_obj = vm_obj; 506 507 err = vmap->vm_ops->fault(vmap, &vmf); 508 509 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 510 kern_yield(PRI_USER); 511 err = vmap->vm_ops->fault(vmap, &vmf); 512 } 513 } 514 515 /* translate return code */ 516 switch (err) { 517 case VM_FAULT_OOM: 518 err = VM_PAGER_AGAIN; 519 break; 520 case VM_FAULT_SIGBUS: 521 err = VM_PAGER_BAD; 522 break; 523 case VM_FAULT_NOPAGE: 524 /* 525 * By contract the fault handler will return having 526 * busied all the pages itself. If pidx is already 527 * found in the object, it will simply xbusy the first 528 * page and return with vm_pfn_count set to 1. 529 */ 530 *first = vmap->vm_pfn_first; 531 *last = *first + vmap->vm_pfn_count - 1; 532 err = VM_PAGER_OK; 533 break; 534 default: 535 err = VM_PAGER_ERROR; 536 break; 537 } 538 up_write(&vmap->vm_mm->mmap_sem); 539 VM_OBJECT_WLOCK(vm_obj); 540 return (err); 541 } 542 543 static struct rwlock linux_vma_lock; 544 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 545 TAILQ_HEAD_INITIALIZER(linux_vma_head); 546 547 static void 548 linux_cdev_handle_free(struct vm_area_struct *vmap) 549 { 550 /* Drop reference on vm_file */ 551 if (vmap->vm_file != NULL) 552 fput(vmap->vm_file); 553 554 /* Drop reference on mm_struct */ 555 mmput(vmap->vm_mm); 556 557 kfree(vmap); 558 } 559 560 static struct vm_area_struct * 561 linux_cdev_handle_insert(void *handle, struct vm_area_struct *vmap) 562 { 563 struct vm_area_struct *ptr; 564 565 rw_wlock(&linux_vma_lock); 566 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 567 if (ptr->vm_private_data == handle) { 568 rw_wunlock(&linux_vma_lock); 569 linux_cdev_handle_free(vmap); 570 return (NULL); 571 } 572 } 573 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 574 rw_wunlock(&linux_vma_lock); 575 return (vmap); 576 } 577 578 static void 579 linux_cdev_handle_remove(struct vm_area_struct *vmap) 580 { 581 rw_wlock(&linux_vma_lock); 582 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 583 rw_wunlock(&linux_vma_lock); 584 } 585 586 static struct vm_area_struct * 587 linux_cdev_handle_find(void *handle) 588 { 589 struct vm_area_struct *vmap; 590 591 rw_rlock(&linux_vma_lock); 592 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 593 if (vmap->vm_private_data == handle) 594 break; 595 } 596 rw_runlock(&linux_vma_lock); 597 return (vmap); 598 } 599 600 static int 601 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 602 vm_ooffset_t foff, struct ucred *cred, u_short *color) 603 { 604 605 MPASS(linux_cdev_handle_find(handle) != NULL); 606 *color = 0; 607 return (0); 608 } 609 610 static void 611 linux_cdev_pager_dtor(void *handle) 612 { 613 const struct vm_operations_struct *vm_ops; 614 struct vm_area_struct *vmap; 615 616 vmap = linux_cdev_handle_find(handle); 617 MPASS(vmap != NULL); 618 619 /* 620 * Remove handle before calling close operation to prevent 621 * other threads from reusing the handle pointer. 622 */ 623 linux_cdev_handle_remove(vmap); 624 625 down_write(&vmap->vm_mm->mmap_sem); 626 vm_ops = vmap->vm_ops; 627 if (likely(vm_ops != NULL)) 628 vm_ops->close(vmap); 629 up_write(&vmap->vm_mm->mmap_sem); 630 631 linux_cdev_handle_free(vmap); 632 } 633 634 static struct cdev_pager_ops linux_cdev_pager_ops = { 635 .cdev_pg_populate = linux_cdev_pager_populate, 636 .cdev_pg_ctor = linux_cdev_pager_ctor, 637 .cdev_pg_dtor = linux_cdev_pager_dtor 638 }; 639 640 static int 641 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 642 { 643 struct linux_cdev *ldev; 644 struct linux_file *filp; 645 struct file *file; 646 int error; 647 648 file = td->td_fpop; 649 ldev = dev->si_drv1; 650 if (ldev == NULL) 651 return (ENODEV); 652 653 filp = linux_file_alloc(); 654 filp->f_dentry = &filp->f_dentry_store; 655 filp->f_op = ldev->ops; 656 filp->f_flags = file->f_flag; 657 vhold(file->f_vnode); 658 filp->f_vnode = file->f_vnode; 659 filp->_file = file; 660 661 linux_set_current(td); 662 663 if (filp->f_op->open) { 664 error = -filp->f_op->open(file->f_vnode, filp); 665 if (error) { 666 vdrop(filp->f_vnode); 667 kfree(filp); 668 goto done; 669 } 670 } 671 error = devfs_set_cdevpriv(filp, linux_file_dtor); 672 if (error) { 673 filp->f_op->release(file->f_vnode, filp); 674 vdrop(filp->f_vnode); 675 kfree(filp); 676 } 677 done: 678 return (error); 679 } 680 681 static int 682 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 683 { 684 struct linux_file *filp; 685 struct file *file; 686 int error; 687 688 file = td->td_fpop; 689 if (dev->si_drv1 == NULL) 690 return (0); 691 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 692 return (error); 693 filp->f_flags = file->f_flag; 694 devfs_clear_cdevpriv(); 695 696 return (0); 697 } 698 699 #define LINUX_IOCTL_MIN_PTR 0x10000UL 700 #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 701 702 static inline int 703 linux_remap_address(void **uaddr, size_t len) 704 { 705 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 706 707 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 708 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 709 struct task_struct *pts = current; 710 if (pts == NULL) { 711 *uaddr = NULL; 712 return (1); 713 } 714 715 /* compute data offset */ 716 uaddr_val -= LINUX_IOCTL_MIN_PTR; 717 718 /* check that length is within bounds */ 719 if ((len > IOCPARM_MAX) || 720 (uaddr_val + len) > pts->bsd_ioctl_len) { 721 *uaddr = NULL; 722 return (1); 723 } 724 725 /* re-add kernel buffer address */ 726 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 727 728 /* update address location */ 729 *uaddr = (void *)uaddr_val; 730 return (1); 731 } 732 return (0); 733 } 734 735 int 736 linux_copyin(const void *uaddr, void *kaddr, size_t len) 737 { 738 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 739 if (uaddr == NULL) 740 return (-EFAULT); 741 memcpy(kaddr, uaddr, len); 742 return (0); 743 } 744 return (-copyin(uaddr, kaddr, len)); 745 } 746 747 int 748 linux_copyout(const void *kaddr, void *uaddr, size_t len) 749 { 750 if (linux_remap_address(&uaddr, len)) { 751 if (uaddr == NULL) 752 return (-EFAULT); 753 memcpy(uaddr, kaddr, len); 754 return (0); 755 } 756 return (-copyout(kaddr, uaddr, len)); 757 } 758 759 size_t 760 linux_clear_user(void *_uaddr, size_t _len) 761 { 762 uint8_t *uaddr = _uaddr; 763 size_t len = _len; 764 765 /* make sure uaddr is aligned before going into the fast loop */ 766 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 767 if (subyte(uaddr, 0)) 768 return (_len); 769 uaddr++; 770 len--; 771 } 772 773 /* zero 8 bytes at a time */ 774 while (len > 7) { 775 #ifdef __LP64__ 776 if (suword64(uaddr, 0)) 777 return (_len); 778 #else 779 if (suword32(uaddr, 0)) 780 return (_len); 781 if (suword32(uaddr + 4, 0)) 782 return (_len); 783 #endif 784 uaddr += 8; 785 len -= 8; 786 } 787 788 /* zero fill end, if any */ 789 while (len > 0) { 790 if (subyte(uaddr, 0)) 791 return (_len); 792 uaddr++; 793 len--; 794 } 795 return (0); 796 } 797 798 int 799 linux_access_ok(int rw, const void *uaddr, size_t len) 800 { 801 uintptr_t saddr; 802 uintptr_t eaddr; 803 804 /* get start and end address */ 805 saddr = (uintptr_t)uaddr; 806 eaddr = (uintptr_t)uaddr + len; 807 808 /* verify addresses are valid for userspace */ 809 return ((saddr == eaddr) || 810 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); 811 } 812 813 static int 814 linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 815 struct thread *td) 816 { 817 struct linux_file *filp; 818 struct file *file; 819 unsigned size; 820 int error; 821 822 file = td->td_fpop; 823 if (dev->si_drv1 == NULL) 824 return (ENXIO); 825 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 826 return (error); 827 filp->f_flags = file->f_flag; 828 829 /* the LinuxKPI supports blocking and non-blocking I/O */ 830 if (cmd == FIONBIO || cmd == FIOASYNC) 831 return (0); 832 833 linux_set_current(td); 834 size = IOCPARM_LEN(cmd); 835 /* refer to logic in sys_ioctl() */ 836 if (size > 0) { 837 /* 838 * Setup hint for linux_copyin() and linux_copyout(). 839 * 840 * Background: Linux code expects a user-space address 841 * while FreeBSD supplies a kernel-space address. 842 */ 843 current->bsd_ioctl_data = data; 844 current->bsd_ioctl_len = size; 845 data = (void *)LINUX_IOCTL_MIN_PTR; 846 } else { 847 /* fetch user-space pointer */ 848 data = *(void **)data; 849 } 850 if (filp->f_op->unlocked_ioctl) 851 error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data); 852 else 853 error = ENOTTY; 854 if (size > 0) { 855 current->bsd_ioctl_data = NULL; 856 current->bsd_ioctl_len = 0; 857 } 858 859 if (error == EWOULDBLOCK) { 860 /* update kqfilter status, if any */ 861 linux_dev_kqfilter_poll(filp, 862 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 863 } else if (error == ERESTARTSYS) 864 error = ERESTART; 865 return (error); 866 } 867 868 static int 869 linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag) 870 { 871 struct linux_file *filp; 872 struct thread *td; 873 struct file *file; 874 ssize_t bytes; 875 int error; 876 877 td = curthread; 878 file = td->td_fpop; 879 if (dev->si_drv1 == NULL) 880 return (ENXIO); 881 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 882 return (error); 883 filp->f_flags = file->f_flag; 884 /* XXX no support for I/O vectors currently */ 885 if (uio->uio_iovcnt != 1) 886 return (EOPNOTSUPP); 887 linux_set_current(td); 888 if (filp->f_op->read) { 889 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 890 uio->uio_iov->iov_len, &uio->uio_offset); 891 if (bytes >= 0) { 892 uio->uio_iov->iov_base = 893 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 894 uio->uio_iov->iov_len -= bytes; 895 uio->uio_resid -= bytes; 896 } else { 897 error = -bytes; 898 if (error == ERESTARTSYS) 899 error = ERESTART; 900 } 901 } else 902 error = ENXIO; 903 904 /* update kqfilter status, if any */ 905 linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 906 907 return (error); 908 } 909 910 static int 911 linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag) 912 { 913 struct linux_file *filp; 914 struct thread *td; 915 struct file *file; 916 ssize_t bytes; 917 int error; 918 919 td = curthread; 920 file = td->td_fpop; 921 if (dev->si_drv1 == NULL) 922 return (ENXIO); 923 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 924 return (error); 925 filp->f_flags = file->f_flag; 926 /* XXX no support for I/O vectors currently */ 927 if (uio->uio_iovcnt != 1) 928 return (EOPNOTSUPP); 929 linux_set_current(td); 930 if (filp->f_op->write) { 931 bytes = filp->f_op->write(filp, uio->uio_iov->iov_base, 932 uio->uio_iov->iov_len, &uio->uio_offset); 933 if (bytes >= 0) { 934 uio->uio_iov->iov_base = 935 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 936 uio->uio_iov->iov_len -= bytes; 937 uio->uio_resid -= bytes; 938 } else { 939 error = -bytes; 940 if (error == ERESTARTSYS) 941 error = ERESTART; 942 } 943 } else 944 error = ENXIO; 945 946 /* update kqfilter status, if any */ 947 linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 948 949 return (error); 950 } 951 952 static int 953 linux_dev_poll(struct cdev *dev, int events, struct thread *td) 954 { 955 struct linux_file *filp; 956 struct file *file; 957 int revents; 958 959 if (dev->si_drv1 == NULL) 960 goto error; 961 if (devfs_get_cdevpriv((void **)&filp) != 0) 962 goto error; 963 964 file = td->td_fpop; 965 filp->f_flags = file->f_flag; 966 linux_set_current(td); 967 if (filp->f_op->poll != NULL) { 968 selrecord(td, &filp->f_selinfo); 969 revents = filp->f_op->poll(filp, NULL) & events; 970 } else 971 revents = 0; 972 973 return (revents); 974 error: 975 return (events & (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); 976 } 977 978 void 979 linux_poll_wakeup(struct linux_file *filp) 980 { 981 /* this function should be NULL-safe */ 982 if (filp == NULL) 983 return; 984 985 selwakeup(&filp->f_selinfo); 986 987 spin_lock(&filp->f_kqlock); 988 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 989 LINUX_KQ_FLAG_NEED_WRITE; 990 991 /* make sure the "knote" gets woken up */ 992 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 993 spin_unlock(&filp->f_kqlock); 994 } 995 996 static void 997 linux_dev_kqfilter_detach(struct knote *kn) 998 { 999 struct linux_file *filp = kn->kn_hook; 1000 1001 spin_lock(&filp->f_kqlock); 1002 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1003 spin_unlock(&filp->f_kqlock); 1004 } 1005 1006 static int 1007 linux_dev_kqfilter_read_event(struct knote *kn, long hint) 1008 { 1009 struct linux_file *filp = kn->kn_hook; 1010 1011 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1012 1013 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); 1014 } 1015 1016 static int 1017 linux_dev_kqfilter_write_event(struct knote *kn, long hint) 1018 { 1019 struct linux_file *filp = kn->kn_hook; 1020 1021 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1022 1023 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); 1024 } 1025 1026 static struct filterops linux_dev_kqfiltops_read = { 1027 .f_isfd = 1, 1028 .f_detach = linux_dev_kqfilter_detach, 1029 .f_event = linux_dev_kqfilter_read_event, 1030 }; 1031 1032 static struct filterops linux_dev_kqfiltops_write = { 1033 .f_isfd = 1, 1034 .f_detach = linux_dev_kqfilter_detach, 1035 .f_event = linux_dev_kqfilter_write_event, 1036 }; 1037 1038 static void 1039 linux_dev_kqfilter_poll(struct linux_file *filp, int kqflags) 1040 { 1041 int temp; 1042 1043 if (filp->f_kqflags & kqflags) { 1044 /* get the latest polling state */ 1045 temp = filp->f_op->poll(filp, NULL); 1046 1047 spin_lock(&filp->f_kqlock); 1048 /* clear kqflags */ 1049 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | 1050 LINUX_KQ_FLAG_NEED_WRITE); 1051 /* update kqflags */ 1052 if (temp & (POLLIN | POLLOUT)) { 1053 if (temp & POLLIN) 1054 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; 1055 if (temp & POLLOUT) 1056 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; 1057 1058 /* make sure the "knote" gets woken up */ 1059 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); 1060 } 1061 spin_unlock(&filp->f_kqlock); 1062 } 1063 } 1064 1065 static int 1066 linux_dev_kqfilter(struct cdev *dev, struct knote *kn) 1067 { 1068 struct linux_file *filp; 1069 struct file *file; 1070 struct thread *td; 1071 int error; 1072 1073 td = curthread; 1074 file = td->td_fpop; 1075 if (dev->si_drv1 == NULL) 1076 return (ENXIO); 1077 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 1078 return (error); 1079 filp->f_flags = file->f_flag; 1080 if (filp->f_op->poll == NULL) 1081 return (EINVAL); 1082 1083 spin_lock(&filp->f_kqlock); 1084 switch (kn->kn_filter) { 1085 case EVFILT_READ: 1086 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; 1087 kn->kn_fop = &linux_dev_kqfiltops_read; 1088 kn->kn_hook = filp; 1089 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1090 break; 1091 case EVFILT_WRITE: 1092 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; 1093 kn->kn_fop = &linux_dev_kqfiltops_write; 1094 kn->kn_hook = filp; 1095 knlist_add(&filp->f_selinfo.si_note, kn, 1); 1096 break; 1097 default: 1098 error = EINVAL; 1099 break; 1100 } 1101 spin_unlock(&filp->f_kqlock); 1102 1103 if (error == 0) { 1104 linux_set_current(td); 1105 1106 /* update kqfilter status, if any */ 1107 linux_dev_kqfilter_poll(filp, 1108 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); 1109 } 1110 return (error); 1111 } 1112 1113 static int 1114 linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset, 1115 vm_size_t size, struct vm_object **object, int nprot) 1116 { 1117 struct vm_area_struct *vmap; 1118 struct mm_struct *mm; 1119 struct linux_file *filp; 1120 struct thread *td; 1121 struct file *file; 1122 vm_memattr_t attr; 1123 int error; 1124 1125 td = curthread; 1126 file = td->td_fpop; 1127 if (dev->si_drv1 == NULL) 1128 return (ENODEV); 1129 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 1130 return (error); 1131 filp->f_flags = file->f_flag; 1132 1133 if (filp->f_op->mmap == NULL) 1134 return (ENODEV); 1135 1136 linux_set_current(td); 1137 1138 /* 1139 * The same VM object might be shared by multiple processes 1140 * and the mm_struct is usually freed when a process exits. 1141 * 1142 * The atomic reference below makes sure the mm_struct is 1143 * available as long as the vmap is in the linux_vma_head. 1144 */ 1145 mm = current->mm; 1146 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1147 return (EINVAL); 1148 1149 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1150 vmap->vm_start = 0; 1151 vmap->vm_end = size; 1152 vmap->vm_pgoff = *offset / PAGE_SIZE; 1153 vmap->vm_pfn = 0; 1154 vmap->vm_flags = vmap->vm_page_prot = nprot; 1155 vmap->vm_ops = NULL; 1156 vmap->vm_file = get_file(filp); 1157 vmap->vm_mm = mm; 1158 1159 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1160 error = EINTR; 1161 } else { 1162 error = -filp->f_op->mmap(filp, vmap); 1163 up_write(&vmap->vm_mm->mmap_sem); 1164 } 1165 1166 if (error != 0) { 1167 linux_cdev_handle_free(vmap); 1168 return (error); 1169 } 1170 1171 attr = pgprot2cachemode(vmap->vm_page_prot); 1172 1173 if (vmap->vm_ops != NULL) { 1174 void *vm_private_data; 1175 1176 if (vmap->vm_ops->open == NULL || 1177 vmap->vm_ops->close == NULL || 1178 vmap->vm_private_data == NULL) { 1179 linux_cdev_handle_free(vmap); 1180 return (EINVAL); 1181 } 1182 1183 vm_private_data = vmap->vm_private_data; 1184 1185 vmap = linux_cdev_handle_insert(vm_private_data, vmap); 1186 1187 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1188 &linux_cdev_pager_ops, size, nprot, *offset, curthread->td_ucred); 1189 1190 if (*object == NULL) { 1191 linux_cdev_handle_remove(vmap); 1192 linux_cdev_handle_free(vmap); 1193 return (EINVAL); 1194 } 1195 } else { 1196 struct sglist *sg; 1197 1198 sg = sglist_alloc(1, M_WAITOK); 1199 sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1200 1201 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1202 nprot, 0, curthread->td_ucred); 1203 1204 linux_cdev_handle_free(vmap); 1205 1206 if (*object == NULL) { 1207 sglist_free(sg); 1208 return (EINVAL); 1209 } 1210 } 1211 1212 if (attr != VM_MEMATTR_DEFAULT) { 1213 VM_OBJECT_WLOCK(*object); 1214 vm_object_set_memattr(*object, attr); 1215 VM_OBJECT_WUNLOCK(*object); 1216 } 1217 *offset = 0; 1218 return (0); 1219 } 1220 1221 struct cdevsw linuxcdevsw = { 1222 .d_version = D_VERSION, 1223 .d_flags = D_TRACKCLOSE, 1224 .d_open = linux_dev_open, 1225 .d_close = linux_dev_close, 1226 .d_read = linux_dev_read, 1227 .d_write = linux_dev_write, 1228 .d_ioctl = linux_dev_ioctl, 1229 .d_mmap_single = linux_dev_mmap_single, 1230 .d_poll = linux_dev_poll, 1231 .d_kqfilter = linux_dev_kqfilter, 1232 .d_name = "lkpidev", 1233 }; 1234 1235 static int 1236 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1237 int flags, struct thread *td) 1238 { 1239 struct linux_file *filp; 1240 ssize_t bytes; 1241 int error; 1242 1243 error = 0; 1244 filp = (struct linux_file *)file->f_data; 1245 filp->f_flags = file->f_flag; 1246 /* XXX no support for I/O vectors currently */ 1247 if (uio->uio_iovcnt != 1) 1248 return (EOPNOTSUPP); 1249 linux_set_current(td); 1250 if (filp->f_op->read) { 1251 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 1252 uio->uio_iov->iov_len, &uio->uio_offset); 1253 if (bytes >= 0) { 1254 uio->uio_iov->iov_base = 1255 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1256 uio->uio_iov->iov_len -= bytes; 1257 uio->uio_resid -= bytes; 1258 } else 1259 error = -bytes; 1260 } else 1261 error = ENXIO; 1262 1263 return (error); 1264 } 1265 1266 static int 1267 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1268 struct thread *td) 1269 { 1270 struct linux_file *filp; 1271 int revents; 1272 1273 filp = (struct linux_file *)file->f_data; 1274 filp->f_flags = file->f_flag; 1275 linux_set_current(td); 1276 if (filp->f_op->poll != NULL) { 1277 selrecord(td, &filp->f_selinfo); 1278 revents = filp->f_op->poll(filp, NULL) & events; 1279 } else 1280 revents = 0; 1281 1282 return (revents); 1283 } 1284 1285 static int 1286 linux_file_close(struct file *file, struct thread *td) 1287 { 1288 struct linux_file *filp; 1289 int error; 1290 1291 filp = (struct linux_file *)file->f_data; 1292 filp->f_flags = file->f_flag; 1293 linux_set_current(td); 1294 error = -filp->f_op->release(NULL, filp); 1295 funsetown(&filp->f_sigio); 1296 kfree(filp); 1297 1298 return (error); 1299 } 1300 1301 static int 1302 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1303 struct thread *td) 1304 { 1305 struct linux_file *filp; 1306 int error; 1307 1308 filp = (struct linux_file *)fp->f_data; 1309 filp->f_flags = fp->f_flag; 1310 error = 0; 1311 1312 linux_set_current(td); 1313 switch (cmd) { 1314 case FIONBIO: 1315 break; 1316 case FIOASYNC: 1317 if (filp->f_op->fasync == NULL) 1318 break; 1319 error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); 1320 break; 1321 case FIOSETOWN: 1322 error = fsetown(*(int *)data, &filp->f_sigio); 1323 if (error == 0) 1324 error = filp->f_op->fasync(0, filp, 1325 fp->f_flag & FASYNC); 1326 break; 1327 case FIOGETOWN: 1328 *(int *)data = fgetown(&filp->f_sigio); 1329 break; 1330 default: 1331 error = ENOTTY; 1332 break; 1333 } 1334 return (error); 1335 } 1336 1337 static int 1338 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1339 struct thread *td) 1340 { 1341 1342 return (EOPNOTSUPP); 1343 } 1344 1345 static int 1346 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1347 struct filedesc *fdp) 1348 { 1349 1350 return (0); 1351 } 1352 1353 struct fileops linuxfileops = { 1354 .fo_read = linux_file_read, 1355 .fo_write = invfo_rdwr, 1356 .fo_truncate = invfo_truncate, 1357 .fo_kqfilter = invfo_kqfilter, 1358 .fo_stat = linux_file_stat, 1359 .fo_fill_kinfo = linux_file_fill_kinfo, 1360 .fo_poll = linux_file_poll, 1361 .fo_close = linux_file_close, 1362 .fo_ioctl = linux_file_ioctl, 1363 .fo_chmod = invfo_chmod, 1364 .fo_chown = invfo_chown, 1365 .fo_sendfile = invfo_sendfile, 1366 }; 1367 1368 /* 1369 * Hash of vmmap addresses. This is infrequently accessed and does not 1370 * need to be particularly large. This is done because we must store the 1371 * caller's idea of the map size to properly unmap. 1372 */ 1373 struct vmmap { 1374 LIST_ENTRY(vmmap) vm_next; 1375 void *vm_addr; 1376 unsigned long vm_size; 1377 }; 1378 1379 struct vmmaphd { 1380 struct vmmap *lh_first; 1381 }; 1382 #define VMMAP_HASH_SIZE 64 1383 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1384 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1385 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1386 static struct mtx vmmaplock; 1387 1388 static void 1389 vmmap_add(void *addr, unsigned long size) 1390 { 1391 struct vmmap *vmmap; 1392 1393 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1394 mtx_lock(&vmmaplock); 1395 vmmap->vm_size = size; 1396 vmmap->vm_addr = addr; 1397 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1398 mtx_unlock(&vmmaplock); 1399 } 1400 1401 static struct vmmap * 1402 vmmap_remove(void *addr) 1403 { 1404 struct vmmap *vmmap; 1405 1406 mtx_lock(&vmmaplock); 1407 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1408 if (vmmap->vm_addr == addr) 1409 break; 1410 if (vmmap) 1411 LIST_REMOVE(vmmap, vm_next); 1412 mtx_unlock(&vmmaplock); 1413 1414 return (vmmap); 1415 } 1416 1417 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1418 void * 1419 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1420 { 1421 void *addr; 1422 1423 addr = pmap_mapdev_attr(phys_addr, size, attr); 1424 if (addr == NULL) 1425 return (NULL); 1426 vmmap_add(addr, size); 1427 1428 return (addr); 1429 } 1430 #endif 1431 1432 void 1433 iounmap(void *addr) 1434 { 1435 struct vmmap *vmmap; 1436 1437 vmmap = vmmap_remove(addr); 1438 if (vmmap == NULL) 1439 return; 1440 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1441 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1442 #endif 1443 kfree(vmmap); 1444 } 1445 1446 1447 void * 1448 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1449 { 1450 vm_offset_t off; 1451 size_t size; 1452 1453 size = count * PAGE_SIZE; 1454 off = kva_alloc(size); 1455 if (off == 0) 1456 return (NULL); 1457 vmmap_add((void *)off, size); 1458 pmap_qenter(off, pages, count); 1459 1460 return ((void *)off); 1461 } 1462 1463 void 1464 vunmap(void *addr) 1465 { 1466 struct vmmap *vmmap; 1467 1468 vmmap = vmmap_remove(addr); 1469 if (vmmap == NULL) 1470 return; 1471 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1472 kva_free((vm_offset_t)addr, vmmap->vm_size); 1473 kfree(vmmap); 1474 } 1475 1476 char * 1477 kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1478 { 1479 unsigned int len; 1480 char *p; 1481 va_list aq; 1482 1483 va_copy(aq, ap); 1484 len = vsnprintf(NULL, 0, fmt, aq); 1485 va_end(aq); 1486 1487 p = kmalloc(len + 1, gfp); 1488 if (p != NULL) 1489 vsnprintf(p, len + 1, fmt, ap); 1490 1491 return (p); 1492 } 1493 1494 char * 1495 kasprintf(gfp_t gfp, const char *fmt, ...) 1496 { 1497 va_list ap; 1498 char *p; 1499 1500 va_start(ap, fmt); 1501 p = kvasprintf(gfp, fmt, ap); 1502 va_end(ap); 1503 1504 return (p); 1505 } 1506 1507 static void 1508 linux_timer_callback_wrapper(void *context) 1509 { 1510 struct timer_list *timer; 1511 1512 linux_set_current(curthread); 1513 1514 timer = context; 1515 timer->function(timer->data); 1516 } 1517 1518 void 1519 mod_timer(struct timer_list *timer, unsigned long expires) 1520 { 1521 1522 timer->expires = expires; 1523 callout_reset(&timer->timer_callout, 1524 linux_timer_jiffies_until(expires), 1525 &linux_timer_callback_wrapper, timer); 1526 } 1527 1528 void 1529 add_timer(struct timer_list *timer) 1530 { 1531 1532 callout_reset(&timer->timer_callout, 1533 linux_timer_jiffies_until(timer->expires), 1534 &linux_timer_callback_wrapper, timer); 1535 } 1536 1537 void 1538 add_timer_on(struct timer_list *timer, int cpu) 1539 { 1540 1541 callout_reset_on(&timer->timer_callout, 1542 linux_timer_jiffies_until(timer->expires), 1543 &linux_timer_callback_wrapper, timer, cpu); 1544 } 1545 1546 static void 1547 linux_timer_init(void *arg) 1548 { 1549 1550 /* 1551 * Compute an internal HZ value which can divide 2**32 to 1552 * avoid timer rounding problems when the tick value wraps 1553 * around 2**32: 1554 */ 1555 linux_timer_hz_mask = 1; 1556 while (linux_timer_hz_mask < (unsigned long)hz) 1557 linux_timer_hz_mask *= 2; 1558 linux_timer_hz_mask--; 1559 } 1560 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 1561 1562 void 1563 linux_complete_common(struct completion *c, int all) 1564 { 1565 int wakeup_swapper; 1566 1567 sleepq_lock(c); 1568 c->done++; 1569 if (all) 1570 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 1571 else 1572 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 1573 sleepq_release(c); 1574 if (wakeup_swapper) 1575 kick_proc0(); 1576 } 1577 1578 /* 1579 * Indefinite wait for done != 0 with or without signals. 1580 */ 1581 long 1582 linux_wait_for_common(struct completion *c, int flags) 1583 { 1584 long error; 1585 1586 if (SCHEDULER_STOPPED()) 1587 return (0); 1588 1589 DROP_GIANT(); 1590 1591 if (flags != 0) 1592 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1593 else 1594 flags = SLEEPQ_SLEEP; 1595 error = 0; 1596 for (;;) { 1597 sleepq_lock(c); 1598 if (c->done) 1599 break; 1600 sleepq_add(c, NULL, "completion", flags, 0); 1601 if (flags & SLEEPQ_INTERRUPTIBLE) { 1602 if (sleepq_wait_sig(c, 0) != 0) { 1603 error = -ERESTARTSYS; 1604 goto intr; 1605 } 1606 } else 1607 sleepq_wait(c, 0); 1608 } 1609 c->done--; 1610 sleepq_release(c); 1611 1612 intr: 1613 PICKUP_GIANT(); 1614 1615 return (error); 1616 } 1617 1618 /* 1619 * Time limited wait for done != 0 with or without signals. 1620 */ 1621 long 1622 linux_wait_for_timeout_common(struct completion *c, long timeout, int flags) 1623 { 1624 long end = jiffies + timeout, error; 1625 int ret; 1626 1627 if (SCHEDULER_STOPPED()) 1628 return (0); 1629 1630 DROP_GIANT(); 1631 1632 if (flags != 0) 1633 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1634 else 1635 flags = SLEEPQ_SLEEP; 1636 1637 error = 0; 1638 ret = 0; 1639 for (;;) { 1640 sleepq_lock(c); 1641 if (c->done) 1642 break; 1643 sleepq_add(c, NULL, "completion", flags, 0); 1644 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 1645 if (flags & SLEEPQ_INTERRUPTIBLE) 1646 ret = sleepq_timedwait_sig(c, 0); 1647 else 1648 ret = sleepq_timedwait(c, 0); 1649 if (ret != 0) { 1650 /* check for timeout or signal */ 1651 if (ret == EWOULDBLOCK) 1652 error = 0; 1653 else 1654 error = -ERESTARTSYS; 1655 goto intr; 1656 } 1657 } 1658 c->done--; 1659 sleepq_release(c); 1660 1661 intr: 1662 PICKUP_GIANT(); 1663 1664 /* return how many jiffies are left */ 1665 return (ret != 0 ? error : linux_timer_jiffies_until(end)); 1666 } 1667 1668 int 1669 linux_try_wait_for_completion(struct completion *c) 1670 { 1671 int isdone; 1672 1673 isdone = 1; 1674 sleepq_lock(c); 1675 if (c->done) 1676 c->done--; 1677 else 1678 isdone = 0; 1679 sleepq_release(c); 1680 return (isdone); 1681 } 1682 1683 int 1684 linux_completion_done(struct completion *c) 1685 { 1686 int isdone; 1687 1688 isdone = 1; 1689 sleepq_lock(c); 1690 if (c->done == 0) 1691 isdone = 0; 1692 sleepq_release(c); 1693 return (isdone); 1694 } 1695 1696 static void 1697 linux_cdev_release(struct kobject *kobj) 1698 { 1699 struct linux_cdev *cdev; 1700 struct kobject *parent; 1701 1702 cdev = container_of(kobj, struct linux_cdev, kobj); 1703 parent = kobj->parent; 1704 if (cdev->cdev) 1705 destroy_dev(cdev->cdev); 1706 kfree(cdev); 1707 kobject_put(parent); 1708 } 1709 1710 static void 1711 linux_cdev_static_release(struct kobject *kobj) 1712 { 1713 struct linux_cdev *cdev; 1714 struct kobject *parent; 1715 1716 cdev = container_of(kobj, struct linux_cdev, kobj); 1717 parent = kobj->parent; 1718 if (cdev->cdev) 1719 destroy_dev(cdev->cdev); 1720 kobject_put(parent); 1721 } 1722 1723 const struct kobj_type linux_cdev_ktype = { 1724 .release = linux_cdev_release, 1725 }; 1726 1727 const struct kobj_type linux_cdev_static_ktype = { 1728 .release = linux_cdev_static_release, 1729 }; 1730 1731 static void 1732 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 1733 { 1734 struct notifier_block *nb; 1735 1736 nb = arg; 1737 if (linkstate == LINK_STATE_UP) 1738 nb->notifier_call(nb, NETDEV_UP, ifp); 1739 else 1740 nb->notifier_call(nb, NETDEV_DOWN, ifp); 1741 } 1742 1743 static void 1744 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 1745 { 1746 struct notifier_block *nb; 1747 1748 nb = arg; 1749 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 1750 } 1751 1752 static void 1753 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 1754 { 1755 struct notifier_block *nb; 1756 1757 nb = arg; 1758 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 1759 } 1760 1761 static void 1762 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 1763 { 1764 struct notifier_block *nb; 1765 1766 nb = arg; 1767 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 1768 } 1769 1770 static void 1771 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 1772 { 1773 struct notifier_block *nb; 1774 1775 nb = arg; 1776 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 1777 } 1778 1779 int 1780 register_netdevice_notifier(struct notifier_block *nb) 1781 { 1782 1783 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 1784 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 1785 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 1786 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 1787 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 1788 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 1789 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 1790 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 1791 1792 return (0); 1793 } 1794 1795 int 1796 register_inetaddr_notifier(struct notifier_block *nb) 1797 { 1798 1799 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 1800 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 1801 return (0); 1802 } 1803 1804 int 1805 unregister_netdevice_notifier(struct notifier_block *nb) 1806 { 1807 1808 EVENTHANDLER_DEREGISTER(ifnet_link_event, 1809 nb->tags[NETDEV_UP]); 1810 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 1811 nb->tags[NETDEV_REGISTER]); 1812 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 1813 nb->tags[NETDEV_UNREGISTER]); 1814 EVENTHANDLER_DEREGISTER(iflladdr_event, 1815 nb->tags[NETDEV_CHANGEADDR]); 1816 1817 return (0); 1818 } 1819 1820 int 1821 unregister_inetaddr_notifier(struct notifier_block *nb) 1822 { 1823 1824 EVENTHANDLER_DEREGISTER(ifaddr_event, 1825 nb->tags[NETDEV_CHANGEIFADDR]); 1826 1827 return (0); 1828 } 1829 1830 struct list_sort_thunk { 1831 int (*cmp)(void *, struct list_head *, struct list_head *); 1832 void *priv; 1833 }; 1834 1835 static inline int 1836 linux_le_cmp(void *priv, const void *d1, const void *d2) 1837 { 1838 struct list_head *le1, *le2; 1839 struct list_sort_thunk *thunk; 1840 1841 thunk = priv; 1842 le1 = *(__DECONST(struct list_head **, d1)); 1843 le2 = *(__DECONST(struct list_head **, d2)); 1844 return ((thunk->cmp)(thunk->priv, le1, le2)); 1845 } 1846 1847 void 1848 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 1849 struct list_head *a, struct list_head *b)) 1850 { 1851 struct list_sort_thunk thunk; 1852 struct list_head **ar, *le; 1853 size_t count, i; 1854 1855 count = 0; 1856 list_for_each(le, head) 1857 count++; 1858 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 1859 i = 0; 1860 list_for_each(le, head) 1861 ar[i++] = le; 1862 thunk.cmp = cmp; 1863 thunk.priv = priv; 1864 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 1865 INIT_LIST_HEAD(head); 1866 for (i = 0; i < count; i++) 1867 list_add_tail(ar[i], head); 1868 free(ar, M_KMALLOC); 1869 } 1870 1871 void 1872 linux_irq_handler(void *ent) 1873 { 1874 struct irq_ent *irqe; 1875 1876 linux_set_current(curthread); 1877 1878 irqe = ent; 1879 irqe->handler(irqe->irq, irqe->arg); 1880 } 1881 1882 #if defined(__i386__) || defined(__amd64__) 1883 int 1884 linux_wbinvd_on_all_cpus(void) 1885 { 1886 1887 pmap_invalidate_cache(); 1888 return (0); 1889 } 1890 #endif 1891 1892 int 1893 linux_on_each_cpu(void callback(void *), void *data) 1894 { 1895 1896 smp_rendezvous(smp_no_rendezvous_barrier, callback, 1897 smp_no_rendezvous_barrier, data); 1898 return (0); 1899 } 1900 1901 int 1902 linux_in_atomic(void) 1903 { 1904 1905 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 1906 } 1907 1908 struct linux_cdev * 1909 linux_find_cdev(const char *name, unsigned major, unsigned minor) 1910 { 1911 int unit = MKDEV(major, minor); 1912 struct cdev *cdev; 1913 1914 dev_lock(); 1915 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 1916 struct linux_cdev *ldev = cdev->si_drv1; 1917 if (dev2unit(cdev) == unit && 1918 strcmp(kobject_name(&ldev->kobj), name) == 0) { 1919 break; 1920 } 1921 } 1922 dev_unlock(); 1923 1924 return (cdev != NULL ? cdev->si_drv1 : NULL); 1925 } 1926 1927 int 1928 __register_chrdev(unsigned int major, unsigned int baseminor, 1929 unsigned int count, const char *name, 1930 const struct file_operations *fops) 1931 { 1932 struct linux_cdev *cdev; 1933 int ret = 0; 1934 int i; 1935 1936 for (i = baseminor; i < baseminor + count; i++) { 1937 cdev = cdev_alloc(); 1938 cdev_init(cdev, fops); 1939 kobject_set_name(&cdev->kobj, name); 1940 1941 ret = cdev_add(cdev, makedev(major, i), 1); 1942 if (ret != 0) 1943 break; 1944 } 1945 return (ret); 1946 } 1947 1948 int 1949 __register_chrdev_p(unsigned int major, unsigned int baseminor, 1950 unsigned int count, const char *name, 1951 const struct file_operations *fops, uid_t uid, 1952 gid_t gid, int mode) 1953 { 1954 struct linux_cdev *cdev; 1955 int ret = 0; 1956 int i; 1957 1958 for (i = baseminor; i < baseminor + count; i++) { 1959 cdev = cdev_alloc(); 1960 cdev_init(cdev, fops); 1961 kobject_set_name(&cdev->kobj, name); 1962 1963 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 1964 if (ret != 0) 1965 break; 1966 } 1967 return (ret); 1968 } 1969 1970 void 1971 __unregister_chrdev(unsigned int major, unsigned int baseminor, 1972 unsigned int count, const char *name) 1973 { 1974 struct linux_cdev *cdevp; 1975 int i; 1976 1977 for (i = baseminor; i < baseminor + count; i++) { 1978 cdevp = linux_find_cdev(name, major, i); 1979 if (cdevp != NULL) 1980 cdev_del(cdevp); 1981 } 1982 } 1983 1984 #if defined(__i386__) || defined(__amd64__) 1985 bool linux_cpu_has_clflush; 1986 #endif 1987 1988 static void 1989 linux_compat_init(void *arg) 1990 { 1991 struct sysctl_oid *rootoid; 1992 int i; 1993 1994 #if defined(__i386__) || defined(__amd64__) 1995 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 1996 #endif 1997 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 1998 1999 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2000 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2001 kobject_init(&linux_class_root, &linux_class_ktype); 2002 kobject_set_name(&linux_class_root, "class"); 2003 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2004 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2005 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2006 kobject_set_name(&linux_root_device.kobj, "device"); 2007 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2008 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, 2009 "device"); 2010 linux_root_device.bsddev = root_bus; 2011 linux_class_misc.name = "misc"; 2012 class_register(&linux_class_misc); 2013 INIT_LIST_HEAD(&pci_drivers); 2014 INIT_LIST_HEAD(&pci_devices); 2015 spin_lock_init(&pci_lock); 2016 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2017 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2018 LIST_INIT(&vmmaphead[i]); 2019 } 2020 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2021 2022 static void 2023 linux_compat_uninit(void *arg) 2024 { 2025 linux_kobject_kfree_name(&linux_class_root); 2026 linux_kobject_kfree_name(&linux_root_device.kobj); 2027 linux_kobject_kfree_name(&linux_class_misc.kobj); 2028 2029 mtx_destroy(&vmmaplock); 2030 spin_lock_destroy(&pci_lock); 2031 rw_destroy(&linux_vma_lock); 2032 } 2033 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2034 2035 /* 2036 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2037 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2038 * used. Assert these types have the same size, else some parts of the 2039 * LinuxKPI may not work like expected: 2040 */ 2041 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); 2042