1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/sysctl.h> 38 #include <sys/proc.h> 39 #include <sys/sglist.h> 40 #include <sys/sleepqueue.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/bus.h> 44 #include <sys/fcntl.h> 45 #include <sys/file.h> 46 #include <sys/filio.h> 47 #include <sys/rwlock.h> 48 49 #include <vm/vm.h> 50 #include <vm/pmap.h> 51 #include <vm/vm_object.h> 52 #include <vm/vm_page.h> 53 #include <vm/vm_pager.h> 54 55 #include <machine/stdarg.h> 56 57 #if defined(__i386__) || defined(__amd64__) 58 #include <machine/md_var.h> 59 #endif 60 61 #include <linux/kobject.h> 62 #include <linux/device.h> 63 #include <linux/slab.h> 64 #include <linux/module.h> 65 #include <linux/moduleparam.h> 66 #include <linux/cdev.h> 67 #include <linux/file.h> 68 #include <linux/sysfs.h> 69 #include <linux/mm.h> 70 #include <linux/io.h> 71 #include <linux/vmalloc.h> 72 #include <linux/netdevice.h> 73 #include <linux/timer.h> 74 #include <linux/interrupt.h> 75 #include <linux/uaccess.h> 76 #include <linux/kernel.h> 77 #include <linux/list.h> 78 #include <linux/compat.h> 79 #include <linux/poll.h> 80 #include <linux/smp.h> 81 82 #if defined(__i386__) || defined(__amd64__) 83 #include <asm/smp.h> 84 #endif 85 86 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); 87 88 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 89 90 #include <linux/rbtree.h> 91 /* Undo Linux compat changes. 
*/ 92 #undef RB_ROOT 93 #undef file 94 #undef cdev 95 #define RB_ROOT(head) (head)->rbh_root 96 97 static struct vm_area_struct *linux_cdev_handle_find(void *handle); 98 99 struct kobject linux_class_root; 100 struct device linux_root_device; 101 struct class linux_class_misc; 102 struct list_head pci_drivers; 103 struct list_head pci_devices; 104 spinlock_t pci_lock; 105 106 unsigned long linux_timer_hz_mask; 107 108 int 109 panic_cmp(struct rb_node *one, struct rb_node *two) 110 { 111 panic("no cmp"); 112 } 113 114 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 115 116 int 117 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 118 { 119 va_list tmp_va; 120 int len; 121 char *old; 122 char *name; 123 char dummy; 124 125 old = kobj->name; 126 127 if (old && fmt == NULL) 128 return (0); 129 130 /* compute length of string */ 131 va_copy(tmp_va, args); 132 len = vsnprintf(&dummy, 0, fmt, tmp_va); 133 va_end(tmp_va); 134 135 /* account for zero termination */ 136 len++; 137 138 /* check for error */ 139 if (len < 1) 140 return (-EINVAL); 141 142 /* allocate memory for string */ 143 name = kzalloc(len, GFP_KERNEL); 144 if (name == NULL) 145 return (-ENOMEM); 146 vsnprintf(name, len, fmt, args); 147 kobj->name = name; 148 149 /* free old string */ 150 kfree(old); 151 152 /* filter new string */ 153 for (; *name != '\0'; name++) 154 if (*name == '/') 155 *name = '!'; 156 return (0); 157 } 158 159 int 160 kobject_set_name(struct kobject *kobj, const char *fmt, ...) 
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

/*
 * Create the sysfs directory for a kobject and populate it with the
 * ktype's default attribute files.  On attribute-creation failure the
 * directory is torn down again and the error is returned.
 */
static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);

	}
	return (error);
}

/* Name a kobject (printf-style) and register it under "parent". */
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

/*
 * Final-reference destructor for a kobject.  The name is captured
 * before the ktype release callback runs because that callback may
 * free the kobject structure itself.
 */
void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

/* Release callback for kobjects allocated with kmalloc()/kzalloc(). */
static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

/* Free only the kobject's name string (kobject itself untouched). */
static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

/* Default release for devices created by device_create(). */
static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

/* sysfs "show" dispatch: forward to the class_attribute's show hook. */
static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr =
	    container_of(attr, struct class_attribute, attr);
	/* -EIO when the attribute provides no show hook, as Linux does */
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

/* sysfs "store" dispatch: forward to the class_attribute's store hook. */
static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

/* kobject release for class objects: chain to the class_release hook. */
static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

/* kobject release for devices. */
static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux.
*/ 306 if (dev->release) 307 dev->release(dev); 308 else if (dev->class && dev->class->dev_release) 309 dev->class->dev_release(dev); 310 } 311 312 static ssize_t 313 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) 314 { 315 struct device_attribute *dattr; 316 ssize_t error; 317 318 dattr = container_of(attr, struct device_attribute, attr); 319 error = -EIO; 320 if (dattr->show) 321 error = dattr->show(container_of(kobj, struct device, kobj), 322 dattr, buf); 323 return (error); 324 } 325 326 static ssize_t 327 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, 328 size_t count) 329 { 330 struct device_attribute *dattr; 331 ssize_t error; 332 333 dattr = container_of(attr, struct device_attribute, attr); 334 error = -EIO; 335 if (dattr->store) 336 error = dattr->store(container_of(kobj, struct device, kobj), 337 dattr, buf, count); 338 return (error); 339 } 340 341 static const struct sysfs_ops linux_dev_sysfs = { 342 .show = linux_dev_show, 343 .store = linux_dev_store, 344 }; 345 346 const struct kobj_type linux_dev_ktype = { 347 .release = linux_dev_release, 348 .sysfs_ops = &linux_dev_sysfs 349 }; 350 351 struct device * 352 device_create(struct class *class, struct device *parent, dev_t devt, 353 void *drvdata, const char *fmt, ...) 354 { 355 struct device *dev; 356 va_list args; 357 358 dev = kzalloc(sizeof(*dev), M_WAITOK); 359 dev->parent = parent; 360 dev->class = class; 361 dev->devt = devt; 362 dev->driver_data = drvdata; 363 dev->release = linux_device_release; 364 va_start(args, fmt); 365 kobject_set_name_vargs(&dev->kobj, fmt, args); 366 va_end(args); 367 device_register(dev); 368 369 return (dev); 370 } 371 372 int 373 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, 374 struct kobject *parent, const char *fmt, ...) 
375 { 376 va_list args; 377 int error; 378 379 kobject_init(kobj, ktype); 380 kobj->ktype = ktype; 381 kobj->parent = parent; 382 kobj->name = NULL; 383 384 va_start(args, fmt); 385 error = kobject_set_name_vargs(kobj, fmt, args); 386 va_end(args); 387 if (error) 388 return (error); 389 return kobject_add_complete(kobj, parent); 390 } 391 392 static void 393 linux_file_dtor(void *cdp) 394 { 395 struct linux_file *filp; 396 397 linux_set_current(curthread); 398 filp = cdp; 399 filp->f_op->release(filp->f_vnode, filp); 400 vdrop(filp->f_vnode); 401 kfree(filp); 402 } 403 404 static int 405 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 406 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 407 { 408 struct vm_area_struct *vmap; 409 struct vm_fault vmf; 410 int err; 411 412 linux_set_current(curthread); 413 414 /* get VM area structure */ 415 vmap = linux_cdev_handle_find(vm_obj->handle); 416 MPASS(vmap != NULL); 417 MPASS(vmap->vm_private_data == vm_obj->handle); 418 419 /* fill out VM fault structure */ 420 vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); 421 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 422 vmf.pgoff = 0; 423 vmf.page = NULL; 424 425 VM_OBJECT_WUNLOCK(vm_obj); 426 427 down_write(&vmap->vm_mm->mmap_sem); 428 if (unlikely(vmap->vm_ops == NULL)) { 429 err = VM_FAULT_SIGBUS; 430 } else { 431 vmap->vm_pfn_count = 0; 432 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 433 vmap->vm_obj = vm_obj; 434 435 err = vmap->vm_ops->fault(vmap, &vmf); 436 437 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 438 kern_yield(0); 439 err = vmap->vm_ops->fault(vmap, &vmf); 440 } 441 } 442 443 /* translate return code */ 444 switch (err) { 445 case VM_FAULT_OOM: 446 err = VM_PAGER_AGAIN; 447 break; 448 case VM_FAULT_SIGBUS: 449 err = VM_PAGER_BAD; 450 break; 451 case VM_FAULT_NOPAGE: 452 /* 453 * By contract the fault handler will return having 454 * busied all the pages itself. 
		 * If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

/*
 * Global registry mapping driver "handle" pointers (vm_private_data)
 * to their vm_area_struct, protected by an rwlock.
 */
static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

/*
 * Register "vmap" under "handle".  If the handle is already present
 * the new vmap is freed and NULL is returned (the caller must not use
 * it afterwards).
 */
static struct vm_area_struct *
linux_cdev_handle_insert(void *handle, struct vm_area_struct *vmap)
{
	struct vm_area_struct *ptr;

	rw_wlock(&linux_vma_lock);
	TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
		if (ptr->vm_private_data == handle) {
			rw_wunlock(&linux_vma_lock);
			kfree(vmap);
			return (NULL);
		}
	}
	TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
	return (vmap);
}

/* Unregister and free a vmap; NULL is a no-op. */
static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	if (vmap == NULL)
		return;

	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
	kfree(vmap);
}

/* Look up the vmap registered under "handle"; NULL if not found. */
static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

/*
 * Pager constructor: invoke the driver's vm_ops->open() under the
 * mmap semaphore.  The vmap was registered by linux_dev_mmap_single().
 */
static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	*color = 0;

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
533 if (likely(vm_ops != NULL)) 534 vm_ops->open(vmap); 535 up_write(&vmap->vm_mm->mmap_sem); 536 537 return (0); 538 } 539 540 static void 541 linux_cdev_pager_dtor(void *handle) 542 { 543 const struct vm_operations_struct *vm_ops; 544 struct vm_area_struct *vmap; 545 546 vmap = linux_cdev_handle_find(handle); 547 MPASS(vmap != NULL); 548 549 down_write(&vmap->vm_mm->mmap_sem); 550 vm_ops = vmap->vm_ops; 551 if (likely(vm_ops != NULL)) 552 vm_ops->close(vmap); 553 up_write(&vmap->vm_mm->mmap_sem); 554 555 linux_cdev_handle_remove(vmap); 556 } 557 558 static struct cdev_pager_ops linux_cdev_pager_ops = { 559 .cdev_pg_populate = linux_cdev_pager_populate, 560 .cdev_pg_ctor = linux_cdev_pager_ctor, 561 .cdev_pg_dtor = linux_cdev_pager_dtor 562 }; 563 564 static int 565 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 566 { 567 struct linux_cdev *ldev; 568 struct linux_file *filp; 569 struct file *file; 570 int error; 571 572 file = td->td_fpop; 573 ldev = dev->si_drv1; 574 if (ldev == NULL) 575 return (ENODEV); 576 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 577 filp->f_dentry = &filp->f_dentry_store; 578 filp->f_op = ldev->ops; 579 filp->f_flags = file->f_flag; 580 vhold(file->f_vnode); 581 filp->f_vnode = file->f_vnode; 582 linux_set_current(td); 583 if (filp->f_op->open) { 584 error = -filp->f_op->open(file->f_vnode, filp); 585 if (error) { 586 kfree(filp); 587 goto done; 588 } 589 } 590 error = devfs_set_cdevpriv(filp, linux_file_dtor); 591 if (error) { 592 filp->f_op->release(file->f_vnode, filp); 593 kfree(filp); 594 } 595 done: 596 return (error); 597 } 598 599 static int 600 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 601 { 602 struct linux_file *filp; 603 struct file *file; 604 int error; 605 606 file = td->td_fpop; 607 if (dev->si_drv1 == NULL) 608 return (0); 609 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 610 return (error); 611 filp->f_flags = file->f_flag; 612 devfs_clear_cdevpriv(); 

	return (0);
}

/*
 * ioctl argument window: user pointers handed to Linux drivers during
 * an ioctl are remapped into [MIN_PTR, MAX_PTR) so that linux_copyin/
 * linux_copyout can recognize them and redirect to the kernel buffer
 * that the FreeBSD ioctl path already copied in.
 */
#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

/*
 * If *uaddr lies in the ioctl window, rewrite it to point into the
 * current task's saved ioctl buffer and return 1 (with *uaddr set to
 * NULL on a bounds violation).  Returns 0 for genuine user addresses.
 */
static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

/* copyin with ioctl-window redirection; returns 0 or -errno. */
int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

/* copyout with ioctl-window redirection; returns 0 or -errno. */
int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

/*
 * Zero a user-space range.  Returns 0 on success or the original
 * length on any fault (Linux clear_user() semantics).
 */
size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if
		    (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

/*
 * Linux access_ok(): non-zero when [uaddr, uaddr+len) is a valid
 * user-space range (an empty range is always OK).  "rw" is ignored.
 */
int
linux_access_ok(int rw, const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * Character-device ioctl: set up the copyin/copyout redirection hint
 * (see linux_remap_address()) and forward to the driver's
 * unlocked_ioctl hook.
 */
static int
linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct linux_file *filp;
	struct file *file;
	unsigned size;
	int error;

	file = td->td_fpop;
	if (dev->si_drv1 == NULL)
		return (ENXIO);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;

	linux_set_current(td);
	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		current->bsd_ioctl_data = data;
		current->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
	if (filp->f_op->unlocked_ioctl)
		error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
	else
		error = ENOTTY;
	if (size > 0) {
		/* clear the redirection hint again */
		current->bsd_ioctl_data = NULL;
		current->bsd_ioctl_len = 0;
	}

	return (error);
}

/*
 * Character-device read: forward a single iovec to the driver's read
 * hook and advance the uio by the number of bytes consumed.
 */
static int
linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	ssize_t bytes;
	int error;

	td = curthread;
	file = td->td_fpop;
	if (dev->si_drv1 == NULL)
		return (ENXIO);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	linux_set_current(td);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

/*
 * Character-device write: mirror image of linux_dev_read() using the
 * driver's write hook.
 */
static int
linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	ssize_t bytes;
	int error;

	td = curthread;
	file = td->td_fpop;
	if (dev->si_drv1 == NULL)
		return (ENXIO);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	linux_set_current(td);
	if (filp->f_op->write) {
		bytes = filp->f_op->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

/*
 * Character-device poll: forward to the driver's poll hook.  On any
 * setup failure report all events as ready so the caller does not
 * block forever.
 */
static int
linux_dev_poll(struct cdev *dev, int events, struct thread *td)
{
	struct linux_file *filp;
	struct file *file;
	int revents;

	if (dev->si_drv1 == NULL)
		goto error;
	if (devfs_get_cdevpriv((void **)&filp) != 0)
		goto error;

	file = td->td_fpop;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	if (filp->f_op->poll)
		revents = filp->f_op->poll(filp, NULL) & events;
	else
		revents = 0;

	return (revents);
error:
	return (events & (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
}

/*
 * mmap_single: build a synthetic vm_area_struct, hand it to the
 * driver's mmap hook under the mmap semaphore, then create either a
 * managed-device pager object (driver installed vm_ops) or a plain
 * scatter/gather object (driver just set vm_pfn).
 */
static int
linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct vm_area_struct *vmap;
	struct linux_file *filp;
	struct thread *td;
	struct file *file;
	vm_memattr_t attr;
	int error;

	td = curthread;
	file = td->td_fpop;
	if (dev->si_drv1 == NULL)
		return (ENODEV);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;

	if (filp->f_op->mmap == NULL)
		return (ENODEV);

	linux_set_current(td);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = nprot;
	vmap->vm_ops = NULL;
	vmap->vm_file = filp;
	vmap->vm_mm = current->mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = EINTR;
	} else {
		error = -filp->f_op->mmap(filp, vmap);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		kfree(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		void *vm_private_data;

		/* a fault-capable mapping needs the full ops contract */
		if (vmap->vm_ops->fault == NULL ||
		    vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			kfree(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		/*
		 * May return NULL (and free vmap) if the handle is
		 * already registered; handle_remove(NULL) below is a
		 * no-op in that case.
		 */
		vmap = linux_cdev_handle_insert(vm_private_data, vmap);

		*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
		    &linux_cdev_pager_ops, size, nprot, *offset,
		    curthread->td_ucred);

		if (*object == NULL) {
			linux_cdev_handle_remove(vmap);
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		/*
		 * NOTE(review): vm_pfn/vm_len are presumably filled in
		 * by the driver's mmap hook (not visible here) —
		 * confirm against the linux_page/vm_area definitions.
		 */
		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, curthread->td_ucred);

		kfree(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = linux_dev_open,
	.d_close = linux_dev_close,
	.d_read = linux_dev_read,
	.d_write = linux_dev_write,
	.d_ioctl = linux_dev_ioctl,
	.d_mmap_single = linux_dev_mmap_single,
	.d_poll = linux_dev_poll,
};

/*
 * fileops read for Linux files that are not devfs-backed: forward a
 * single iovec to the driver's read hook.
 */
static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
return (EOPNOTSUPP); 996 linux_set_current(td); 997 if (filp->f_op->read) { 998 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 999 uio->uio_iov->iov_len, &uio->uio_offset); 1000 if (bytes >= 0) { 1001 uio->uio_iov->iov_base = 1002 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1003 uio->uio_iov->iov_len -= bytes; 1004 uio->uio_resid -= bytes; 1005 } else 1006 error = -bytes; 1007 } else 1008 error = ENXIO; 1009 1010 return (error); 1011 } 1012 1013 static int 1014 linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1015 struct thread *td) 1016 { 1017 struct linux_file *filp; 1018 int revents; 1019 1020 filp = (struct linux_file *)file->f_data; 1021 filp->f_flags = file->f_flag; 1022 linux_set_current(td); 1023 if (filp->f_op->poll) 1024 revents = filp->f_op->poll(filp, NULL) & events; 1025 else 1026 revents = 0; 1027 1028 return (revents); 1029 } 1030 1031 static int 1032 linux_file_close(struct file *file, struct thread *td) 1033 { 1034 struct linux_file *filp; 1035 int error; 1036 1037 filp = (struct linux_file *)file->f_data; 1038 filp->f_flags = file->f_flag; 1039 linux_set_current(td); 1040 error = -filp->f_op->release(NULL, filp); 1041 funsetown(&filp->f_sigio); 1042 kfree(filp); 1043 1044 return (error); 1045 } 1046 1047 static int 1048 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1049 struct thread *td) 1050 { 1051 struct linux_file *filp; 1052 int error; 1053 1054 filp = (struct linux_file *)fp->f_data; 1055 filp->f_flags = fp->f_flag; 1056 error = 0; 1057 1058 linux_set_current(td); 1059 switch (cmd) { 1060 case FIONBIO: 1061 break; 1062 case FIOASYNC: 1063 if (filp->f_op->fasync == NULL) 1064 break; 1065 error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); 1066 break; 1067 case FIOSETOWN: 1068 error = fsetown(*(int *)data, &filp->f_sigio); 1069 if (error == 0) 1070 error = filp->f_op->fasync(0, filp, 1071 fp->f_flag & FASYNC); 1072 break; 1073 case FIOGETOWN: 1074 *(int *)data = 
		    fgetown(&filp->f_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/* fileops stat: not implemented for Linux compat files. */
static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

/* fileops fill_kinfo: nothing to report; succeed with empty info. */
static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

	return (0);
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
1118 */ 1119 struct vmmap { 1120 LIST_ENTRY(vmmap) vm_next; 1121 void *vm_addr; 1122 unsigned long vm_size; 1123 }; 1124 1125 struct vmmaphd { 1126 struct vmmap *lh_first; 1127 }; 1128 #define VMMAP_HASH_SIZE 64 1129 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1130 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1131 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1132 static struct mtx vmmaplock; 1133 1134 static void 1135 vmmap_add(void *addr, unsigned long size) 1136 { 1137 struct vmmap *vmmap; 1138 1139 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1140 mtx_lock(&vmmaplock); 1141 vmmap->vm_size = size; 1142 vmmap->vm_addr = addr; 1143 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1144 mtx_unlock(&vmmaplock); 1145 } 1146 1147 static struct vmmap * 1148 vmmap_remove(void *addr) 1149 { 1150 struct vmmap *vmmap; 1151 1152 mtx_lock(&vmmaplock); 1153 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1154 if (vmmap->vm_addr == addr) 1155 break; 1156 if (vmmap) 1157 LIST_REMOVE(vmmap, vm_next); 1158 mtx_unlock(&vmmaplock); 1159 1160 return (vmmap); 1161 } 1162 1163 #if defined(__i386__) || defined(__amd64__) 1164 void * 1165 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1166 { 1167 void *addr; 1168 1169 addr = pmap_mapdev_attr(phys_addr, size, attr); 1170 if (addr == NULL) 1171 return (NULL); 1172 vmmap_add(addr, size); 1173 1174 return (addr); 1175 } 1176 #endif 1177 1178 void 1179 iounmap(void *addr) 1180 { 1181 struct vmmap *vmmap; 1182 1183 vmmap = vmmap_remove(addr); 1184 if (vmmap == NULL) 1185 return; 1186 #if defined(__i386__) || defined(__amd64__) 1187 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1188 #endif 1189 kfree(vmmap); 1190 } 1191 1192 1193 void * 1194 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1195 { 1196 vm_offset_t off; 1197 size_t size; 1198 1199 size = count * PAGE_SIZE; 1200 off = kva_alloc(size); 1201 if (off == 0) 1202 return (NULL); 
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

/* Undo vmap(); silently ignores unknown addresses. */
void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

/*
 * Allocate and format a string (vsnprintf semantics); NULL on
 * allocation failure.  Caller frees with kfree().
 */
char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	/* first pass computes the required length */
	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

/* Varargs wrapper around kvasprintf(). */
char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

/* Callout trampoline: invoke the Linux timer function. */
static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	linux_set_current(curthread);

	timer = context;
	timer->function(timer->data);
}

/* (Re)arm a timer to fire at the absolute jiffies value "expires". */
void
mod_timer(struct timer_list *timer, unsigned long expires)
{

	timer->expires = expires;
	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}

/* Arm a timer using its already-set expires field. */
void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

/* Like add_timer() but bind the callout to a specific CPU. */
void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

static void
linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32
to 1298 * avoid timer rounding problems when the tick value wraps 1299 * around 2**32: 1300 */ 1301 linux_timer_hz_mask = 1; 1302 while (linux_timer_hz_mask < (unsigned long)hz) 1303 linux_timer_hz_mask *= 2; 1304 linux_timer_hz_mask--; 1305 } 1306 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 1307 1308 void 1309 linux_complete_common(struct completion *c, int all) 1310 { 1311 int wakeup_swapper; 1312 1313 sleepq_lock(c); 1314 c->done++; 1315 if (all) 1316 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 1317 else 1318 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 1319 sleepq_release(c); 1320 if (wakeup_swapper) 1321 kick_proc0(); 1322 } 1323 1324 /* 1325 * Indefinite wait for done != 0 with or without signals. 1326 */ 1327 long 1328 linux_wait_for_common(struct completion *c, int flags) 1329 { 1330 long error; 1331 1332 if (SCHEDULER_STOPPED()) 1333 return (0); 1334 1335 DROP_GIANT(); 1336 1337 if (flags != 0) 1338 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1339 else 1340 flags = SLEEPQ_SLEEP; 1341 error = 0; 1342 for (;;) { 1343 sleepq_lock(c); 1344 if (c->done) 1345 break; 1346 sleepq_add(c, NULL, "completion", flags, 0); 1347 if (flags & SLEEPQ_INTERRUPTIBLE) { 1348 if (sleepq_wait_sig(c, 0) != 0) { 1349 error = -ERESTARTSYS; 1350 goto intr; 1351 } 1352 } else 1353 sleepq_wait(c, 0); 1354 } 1355 c->done--; 1356 sleepq_release(c); 1357 1358 intr: 1359 PICKUP_GIANT(); 1360 1361 return (error); 1362 } 1363 1364 /* 1365 * Time limited wait for done != 0 with or without signals. 
 */
long
linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
{
	long end = jiffies + timeout, error;
	int ret;

	if (SCHEDULER_STOPPED())
		return (0);

	DROP_GIANT();

	/* Non-zero "flags" requests an interruptible (signal-aware) sleep. */
	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	error = 0;
	ret = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;		/* exits with the sleepqueue locked */
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
		if (flags & SLEEPQ_INTERRUPTIBLE)
			ret = sleepq_timedwait_sig(c, 0);
		else
			ret = sleepq_timedwait(c, 0);
		if (ret != 0) {
			/* check for timeout or signal */
			if (ret == EWOULDBLOCK)
				error = 0;	/* timed out: Linux returns 0 */
			else
				error = -ERESTARTSYS;
			goto intr;
		}
	}
	/* consume one posted completion; lock held from the break above */
	c->done--;
	sleepq_release(c);

intr:
	PICKUP_GIANT();

	/* return how many jiffies are left */
	return (ret != 0 ? error : linux_timer_jiffies_until(end));
}

/*
 * Non-blocking attempt to consume a completion.  Returns 1 (and
 * decrements "done") when one was posted, 0 otherwise.
 */
int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done)
		c->done--;
	else
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

/*
 * Return non-zero when the completion has been posted.  Unlike
 * linux_try_wait_for_completion() this does not consume it.
 */
int
linux_completion_done(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done == 0)
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

/*
 * Kobject release method for dynamically allocated character
 * devices: destroy the underlying FreeBSD cdev (if any), free the
 * linux_cdev structure and drop the reference on the parent kobject.
 */
static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	if (cdev->cdev)
		destroy_dev(cdev->cdev);
	kfree(cdev);
	kobject_put(parent);
}

/*
 * Same as linux_cdev_release(), but for statically allocated devices:
 * the linux_cdev structure itself is not freed.
 */
static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	if (cdev->cdev)
		destroy_dev(cdev->cdev);
	kobject_put(parent);
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};

/*
 * Translate FreeBSD ifnet link-state events into Linux netdevice
 * notifier callbacks (NETDEV_UP / NETDEV_DOWN).
 */
static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;

	nb = arg;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, ifp);
	else
		nb->notifier_call(nb, NETDEV_DOWN, ifp);
}

/* Interface arrival -> NETDEV_REGISTER. */
static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_REGISTER, ifp);
}

/* Interface departure -> NETDEV_UNREGISTER. */
static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
}

/* Link-level address change -> NETDEV_CHANGEADDR. */
static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
}

/* Protocol address change -> NETDEV_CHANGEIFADDR. */
static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;

	nb = arg;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
}

/*
 * Hook a Linux netdevice notifier block into the corresponding
 * FreeBSD event handlers.  The eventhandler tags are remembered in
 * nb->tags[] so unregister_netdevice_notifier() can remove them.
 */
int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

/* Register for interface address change notifications only. */
int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

/* Undo register_netdevice_notifier(). */
int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

/* Undo register_inetaddr_notifier(). */
int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

/* Carries the user comparison function and its argument into qsort_r(). */
struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

/*
 * qsort_r() comparison trampoline: unwrap the two "struct list_head *"
 * array elements and call the user-supplied comparator.
 */
static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

/*
 * Sort a Linux linked list: snapshot the entries into a temporary
 * array, qsort_r() the array, then rebuild the list in sorted order.
 * The M_WAITOK allocation may sleep, so this must be called from a
 * sleepable context.
 */
void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}

/*
 * Interrupt glue: establish the Linux "current" pointer and invoke
 * the Linux-style interrupt handler stored in the irq_ent.
 */
void
linux_irq_handler(void *ent)
{
	struct irq_ent *irqe;

	linux_set_current(curthread);

	irqe = ent;
	irqe->handler(irqe->irq, irqe->arg);
}

#if defined(__i386__) || defined(__amd64__)
/* Per-CPU helper for linux_wbinvd_on_all_cpus(). */
static void
wbinvd_cb(void *arg __unused)
{

	wbinvd();
}

/* Execute WBINVD (write back and invalidate caches) on every CPU. */
int
linux_wbinvd_on_all_cpus(void)
{

	return (linux_on_each_cpu(wbinvd_cb, NULL));
}
#endif

/* Run "callback(data)" on every CPU via smp_rendezvous(). */
int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

/*
 * Look up a LinuxKPI character device by name and major/minor pair.
 * Walks the devices registered on linuxcdevsw under the dev lock and
 * returns the associated struct linux_cdev, or NULL when not found.
 *
 * NOTE(review): assumes si_drv1 is non-NULL for every device on this
 * list before dereferencing ldev->kobj -- verify against the cdev
 * creation path.
 */
struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	int unit = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (dev2unit(cdev) == unit &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

/*
 * Create and register one character device per minor number in
 * [baseminor, baseminor + count).  Stops at the first cdev_add()
 * failure and returns its error; devices added before the failure
 * are not rolled back.
 */
int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

/*
 * Like __register_chrdev(), but additionally sets owner, group and
 * mode on the created device nodes via cdev_add_ext().
 */
int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

/* Delete the devices created by __register_chrdev() for this range. */
void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}

#if defined(__i386__) || defined(__amd64__)
/* Set at SYSINIT time from the CPUID feature bits. */
bool linux_cpu_has_clflush;
#endif

/*
 * One-time LinuxKPI bootstrap: create the sysctl-backed sysfs root
 * nodes ("sys", "sys/class", "sys/device"), register the misc class,
 * and initialize the PCI and I/O-map bookkeeping structures.
 */
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

/* Tear down the names and locks set up by linux_compat_init(). */
static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));