/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_global.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/cputypes.h>
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/io-mapping.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>
#include <linux/rcupdate.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
#include <linux/printk.h>
#include <linux/seq_file.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#include <asm/processor.h>
#endif

#include <xen/xen.h>
#ifdef XENHVM
#undef xen_pv_domain
#undef xen_initial_domain
/* xen/xen-os.h redefines __must_check */
#undef __must_check
#include <xen/xen-os.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

int linuxkpi_rcu_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, rcu_debug, CTLFLAG_RWTUN,
    &linuxkpi_rcu_debug, 0, "Set to enable RCU warning. Clear to disable.");

int linuxkpi_warn_dump_stack = 0;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
    &linuxkpi_warn_dump_stack, 0,
    "Set to enable stack traces from WARN_ON(). Clear to disable.");

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
static cpumask_t **static_single_cpu_mask;
static cpumask_t *static_single_cpu_mask_lcs;
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;
struct uts_namespace init_uts_ns;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

#define	START(node)	((node)->start)
#define	LAST(node)	((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
    LAST,, lkpi_interval_tree)

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
    dev_t devt, void *drvdata, const struct attribute_group **groups,
    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	/* device_initialize() needs the class and parent to be set */
	device_initialize(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct class *
lkpi_class_create(const char *name)
{
	struct class *class;
	int error;

	class = kzalloc(sizeof(*class), M_WAITOK);
	class->name = name;
	class->class_release = linux_class_kfree;
	error = class_register(class);
	if (error) {
		kfree(class);
		return (NULL);
	}

	return (class);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(s, MA_OWNED);
	else
		mtx_assert(s, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_op != NULL && filp->f_op->release != NULL)
			filp->f_op->release(filp->f_vnode, filp);
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree_rcu(filp, rcu);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

struct linux_cdev *
cdev_alloc(void)
{
	struct linux_cdev *cdev;

	cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
	kobject_init(&cdev->kobj, &linux_cdev_ktype);
	cdev->refs = 1;
	return (cdev);
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}

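/*
 * Managed-device pager "populate" hook.  Entered with the VM object
 * write-locked; the lock is dropped around the call into the Linux
 * vm_ops->fault() handler (under mmap_sem), and the VM_FAULT_* result
 * is translated into the VM_PAGER_* codes the FreeBSD pager expects.
 */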
static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(&vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(&vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}

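/*
 * Two pager personalities: index 0 (OBJT_MGTDEVICE) is used when the
 * Linux driver supplies a vm_ops->fault() handler and goes through
 * cdev_pg_populate above, while index 1 (OBJT_DEVICE) covers fault-less
 * pfn mappings via cdev_pg_fault.  The index is chosen in
 * linux_file_mmap_single().
 */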
static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  {
	/* OBJT_MGTDEVICE */
	.cdev_pg_populate = linux_cdev_pager_populate,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
  {
	/* OBJT_DEVICE */
	.cdev_pg_fault = linux_cdev_pager_fault,
	.cdev_pg_ctor = linux_cdev_pager_ctor,
	.cdev_pg_dtor = linux_cdev_pager_dtor
  },
};

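/*
 * Remove all physical mappings of the managed pages backing the given
 * range.  Note that this operates on whole pages via pmap_remove_all()
 * rather than zapping individual PTE ranges the way Linux does.
 */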
int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	struct pctrie_iter pages;
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	vm_page_iter_limit_init(&pages, obj, OFF_TO_IDX(address + size));
	VM_RADIX_FOREACH_FROM(m, &pages, OFF_TO_IDX(address))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

void
vma_set_file(struct vm_area_struct *vma, struct linux_file *file)
{
	struct linux_file *tmp;

	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	tmp = vma->vm_file;
	vma->vm_file = file;
	fput(tmp);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

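/*
 * The low bit of ldev->siref marks a character device undergoing
 * destruction; the remaining bits count outstanding f_op references
 * in units of LDEV_SI_REF.  linux_get_fop() takes a reference
 * atomically, falling back to the dummy device above once the destroy
 * bit is observed.
 */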
#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

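/*
 * OPW ("operation wrapper") temporarily points td_fpop at the FreeBSD
 * file backing a Linux f_op callback, so devfs code consulting
 * curthread->td_fpop sees the correct struct file while the callback
 * runs.  Typical use:
 *
 *	error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, arg));
 */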
#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vref(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

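/*
 * Linux ioctl handlers expect a user-space pointer, but FreeBSD's
 * sys_ioctl() has usually already copied the argument into a kernel
 * buffer.  linux_file_ioctl_sub() therefore hands the handler the fake
 * address LINUX_IOCTL_MIN_PTR and stashes the kernel buffer in the
 * task structure; linux_remap_address() recognizes addresses in the
 * fake window and rewrites them back to the kernel buffer so that the
 * copyin()/copyout() wrappers below can use plain memcpy().
 */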
static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

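/*
 * Zero a user-space buffer.  Returns 0 on success or, following the
 * Linux clear_user() convention, the number of bytes left unzeroed
 * (here: the full length) when a fault occurs.
 */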
size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

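/*
 * Poll wait-queue state machine.  Each transition table below maps the
 * current LINUX_FWQ_STATE_* value to its successor: linux_poll_wait()
 * arms an entry (INIT -> NOT_READY, READY -> QUEUED), the wait-queue
 * callback fires it (QUEUED -> READY), and linux_poll_wait_dequeue()
 * resets every state back to INIT.
 */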
/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}

static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

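/*
 * Lock-free file lookup: read the pointer, take a reference only if
 * the count is not already zero, then re-read the pointer to verify
 * the file was not swapped out underneath us.  -EAGAIN tells the
 * caller to retry (or, in get_file_active(), to give up).
 */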
static struct linux_file *
__get_file_rcu(struct linux_file **f)
{
	struct linux_file *file1, *file2;

	file1 = READ_ONCE(*f);
	if (file1 == NULL)
		return (NULL);

	if (!refcount_acquire_if_not_zero(
	    file1->_file == NULL ? &file1->f_count : &file1->_file->f_count))
		return (ERR_PTR(-EAGAIN));

	file2 = READ_ONCE(*f);
	if (file2 == file1)
		return (file2);

	fput(file1);
	return (ERR_PTR(-EAGAIN));
}

struct linux_file *
linux_get_file_rcu(struct linux_file **f)
{
	struct linux_file *file1;

	for (;;) {
		file1 = __get_file_rcu(f);
		if (file1 == NULL)
			return (NULL);

		if (IS_ERR(file1))
			continue;

		return (file1);
	}
}

struct linux_file *
get_file_active(struct linux_file **f)
{
	struct linux_file *file1;

	rcu_read_lock();
	file1 = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file1))
		file1 = NULL;

	return (file1);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static const struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static const struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, bool is_shared, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	if (is_shared)
		vmap->vm_flags |= VM_SHARED;
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vrele(filp->f_vnode);
	linux_drop_fop(ldev);
	ldev = filp->f_cdev;
	if (ldev != NULL)
		linux_cdev_deref(ldev);
	linux_synchronize_rcu(RCU_TYPE_REGULAR);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct fiodgname_arg *fgn;
	const char *p;
	int error, i;

	error = 0;
	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	linux_get_fop(filp, &fop, &ldev);

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (fop->fasync == NULL)
			break;
		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (fop->fasync == NULL)
				break;
			error = -OPW(fp, td, fop->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	case FIODGNAME:
#ifdef COMPAT_FREEBSD32
	case FIODGNAME_32:
#endif
		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
			error = ENXIO;
			break;
		}
		fgn = data;
		p = devtoname(filp->f_cdev->cdev);
		i = strlen(p) + 1;
		if (i > fgn->len) {
			error = EINVAL;
			break;
		}
		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
		break;
	}
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t maxprot, int flags, struct file *fp,
    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
	    (int)prot, (flags & MAP_SHARED) ? true : false, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

static int
linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td)
{
	struct linux_file *filp1, *filp2;

	if (fp2->f_type != DTYPE_DEV)
		return (3);

	filp1 = fp1->f_data;
	filp2 = fp2->f_data;
	return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev));
}

const struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_cmp = linux_file_kcmp,
	.fo_flags = DFLAG_PASSABLE,
};

/*
 * Hash of vmmap addresses. This is infrequently accessed and does not
 * need to be particularly large. This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap) vm_next;
	void *vm_addr;
	unsigned long vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((((uintptr_t)(addr)) >> PAGE_SHIFT) & VMMAP_HASH_MASK)
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

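/*
 * Map an array of pages into contiguous kernel virtual address space.
 * The size is remembered in the vmmap hash above so that vunmap() can
 * tear the mapping down without being told the length.
 */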
1892 void *
vmap(struct page ** pages,unsigned int count,unsigned long flags,int prot)1893 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
1894 {
1895 vm_offset_t off;
1896 size_t size;
1897
1898 size = count * PAGE_SIZE;
1899 off = kva_alloc(size);
1900 if (off == 0)
1901 return (NULL);
1902 vmmap_add((void *)off, size);
1903 pmap_qenter(off, pages, count);
1904
1905 return ((void *)off);
1906 }
1907
1908 void
vunmap(void * addr)1909 vunmap(void *addr)
1910 {
1911 struct vmmap *vmmap;
1912
1913 vmmap = vmmap_remove(addr);
1914 if (vmmap == NULL)
1915 return;
1916 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
1917 kva_free((vm_offset_t)addr, vmmap->vm_size);
1918 kfree(vmmap);
1919 }

static char *
devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
{
        unsigned int len;
        char *p;
        va_list aq;

        va_copy(aq, ap);
        len = vsnprintf(NULL, 0, fmt, aq);
        va_end(aq);

        if (dev != NULL)
                p = devm_kmalloc(dev, len + 1, gfp);
        else
                p = kmalloc(len + 1, gfp);
        if (p != NULL)
                vsnprintf(p, len + 1, fmt, ap);

        return (p);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{

        return (devm_kvasprintf(NULL, gfp, fmt, ap));
}

char *
lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
        va_list ap;
        char *p;

        va_start(ap, fmt);
        p = devm_kvasprintf(dev, gfp, fmt, ap);
        va_end(ap);

        return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
        va_list ap;
        char *p;

        va_start(ap, fmt);
        p = kvasprintf(gfp, fmt, ap);
        va_end(ap);

        return (p);
}
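
/*
 * Illustrative usage (a sketch with hypothetical arguments): the first
 * vsnprintf() pass above sizes the string, the second formats into the
 * allocation.  The caller frees the result, except for the devm_
 * variant, which is released with the device:
 *
 *      char *name = kasprintf(GFP_KERNEL, "%s-%d", base, unit);
 *      if (name != NULL) {
 *              ... use name ...
 *              kfree(name);
 *      }
 */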

int
__lkpi_hexdump_printf(void *arg1 __unused, const char *fmt, ...)
{
        va_list ap;
        int result;

        va_start(ap, fmt);
        result = vprintf(fmt, ap);
        va_end(ap);
        return (result);
}

int
__lkpi_hexdump_sbuf_printf(void *arg1, const char *fmt, ...)
{
        va_list ap;
        int result;

        va_start(ap, fmt);
        result = sbuf_vprintf(arg1, fmt, ap);
        va_end(ap);
        return (result);
}

void
lkpi_hex_dump(int(*_fpf)(void *, const char *, ...), void *arg1,
    const char *level, const char *prefix_str,
    const int prefix_type, const int rowsize, const int groupsize,
    const void *buf, size_t len, const bool ascii)
{
        typedef const struct { long long value; } __packed *print_64p_t;
        typedef const struct { uint32_t value; } __packed *print_32p_t;
        typedef const struct { uint16_t value; } __packed *print_16p_t;
        const void *buf_old = buf;
        int row;

        while (len > 0) {
                if (level != NULL)
                        _fpf(arg1, "%s", level);
                if (prefix_str != NULL)
                        _fpf(arg1, "%s ", prefix_str);

                switch (prefix_type) {
                case DUMP_PREFIX_ADDRESS:
                        _fpf(arg1, "[%p] ", buf);
                        break;
                case DUMP_PREFIX_OFFSET:
                        _fpf(arg1, "[%#tx] ", ((const char *)buf -
                            (const char *)buf_old));
                        break;
                default:
                        break;
                }
                for (row = 0; row != rowsize; row++) {
                        if (groupsize == 8 && len > 7) {
                                _fpf(arg1, "%016llx ", ((print_64p_t)buf)->value);
                                buf = (const uint8_t *)buf + 8;
                                len -= 8;
                        } else if (groupsize == 4 && len > 3) {
                                _fpf(arg1, "%08x ", ((print_32p_t)buf)->value);
                                buf = (const uint8_t *)buf + 4;
                                len -= 4;
                        } else if (groupsize == 2 && len > 1) {
                                _fpf(arg1, "%04x ", ((print_16p_t)buf)->value);
                                buf = (const uint8_t *)buf + 2;
                                len -= 2;
                        } else if (len > 0) {
                                _fpf(arg1, "%02x ", *(const uint8_t *)buf);
                                buf = (const uint8_t *)buf + 1;
                                len--;
                        } else {
                                break;
                        }
                }
                _fpf(arg1, "\n");
        }
}
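
/*
 * Illustrative call (a sketch, assuming the usual print_hex_dump()
 * wrapper arguments: 16 single-byte groups per row, offset prefix):
 *
 *      print_hex_dump(KERN_DEBUG, "raw: ", DUMP_PREFIX_OFFSET,
 *          16, 1, buf, len, false);
 *
 * Note that rowsize here counts groups per row, not bytes.
 */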

static void
linux_timer_callback_wrapper(void *context)
{
        struct timer_list *timer;

        timer = context;

        /* the timer is about to be shut down permanently */
        if (timer->function == NULL)
                return;

        if (linux_set_current_flags(curthread, M_NOWAIT)) {
                /* try again later */
                callout_reset(&timer->callout, 1,
                    &linux_timer_callback_wrapper, timer);
                return;
        }

        timer->function(timer->data);
}

int
mod_timer(struct timer_list *timer, int expires)
{
        int ret;

        timer->expires = expires;
        ret = callout_reset(&timer->callout,
            linux_timer_jiffies_until(expires),
            &linux_timer_callback_wrapper, timer);

        MPASS(ret == 0 || ret == 1);

        return (ret == 1);
}

void
add_timer(struct timer_list *timer)
{

        callout_reset(&timer->callout,
            linux_timer_jiffies_until(timer->expires),
            &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

        callout_reset_on(&timer->callout,
            linux_timer_jiffies_until(timer->expires),
            &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

        if (callout_stop(&(timer)->callout) == -1)
                return (0);
        return (1);
}

int
del_timer_sync(struct timer_list *timer)
{

        if (callout_drain(&(timer)->callout) == -1)
                return (0);
        return (1);
}

int
timer_delete_sync(struct timer_list *timer)
{

        return (del_timer_sync(timer));
}

int
timer_shutdown_sync(struct timer_list *timer)
{

        timer->function = NULL;
        return (del_timer_sync(timer));
}
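
/*
 * Illustrative life cycle (a sketch; timer_setup() and
 * msecs_to_jiffies() are assumed to be the usual linux/timer.h and
 * linux/jiffies.h helpers):
 *
 *      struct timer_list t;
 *
 *      timer_setup(&t, my_callback, 0);
 *      mod_timer(&t, jiffies + msecs_to_jiffies(100));
 *      ...
 *      timer_shutdown_sync(&t);
 *
 * timer_shutdown_sync() clears t.function so that a concurrent callout
 * (see linux_timer_callback_wrapper() above) becomes a no-op.
 */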

/* greatest common divisor, by Euclid's algorithm */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
        uint64_t an;
        uint64_t bn;

        while (b != 0) {
                an = b;
                bn = a % b;
                a = an;
                b = bn;
        }
        return (a);
}
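
/*
 * Illustrative trace: lkpi_gcd_64(1000, 1000000) steps through
 * (1000, 1000000) -> (1000000, 1000) -> (1000, 0) and returns 1000; it
 * is used below to reduce the hz conversion fractions.
 */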

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;

static void
linux_timer_init(void *arg)
{
        uint64_t gcd;

        /*
         * Compute an internal HZ value which can divide 2**32 to
         * avoid timer rounding problems when the tick value wraps
         * around 2**32:
         */
        linux_timer_hz_mask = 1;
        while (linux_timer_hz_mask < (unsigned long)hz)
                linux_timer_hz_mask *= 2;
        linux_timer_hz_mask--;

        /* compute some internal constants */

        lkpi_nsec2hz_rem = hz;
        lkpi_usec2hz_rem = hz;
        lkpi_msec2hz_rem = hz;

        gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
        lkpi_nsec2hz_rem /= gcd;
        lkpi_nsec2hz_div /= gcd;
        lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

        gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
        lkpi_usec2hz_rem /= gcd;
        lkpi_usec2hz_div /= gcd;
        lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

        gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
        lkpi_msec2hz_rem /= gcd;
        lkpi_msec2hz_div /= gcd;
        lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
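
/*
 * Worked example (illustrative, for hz = 1000): linux_timer_hz_mask
 * doubles 1 up to 1024 and ends at 1023 (0x3ff); the reduced fractions
 * come out as nsec2hz = 1/1000000, usec2hz = 1/1000 and msec2hz = 1/1,
 * with each *_max bounding the input so that multiplying by *_rem
 * cannot overflow 64 bits.
 */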

void
linux_complete_common(struct completion *c, int all)
{
        sleepq_lock(c);
        if (all) {
                c->done = UINT_MAX;
                sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
        } else {
                if (c->done != UINT_MAX)
                        c->done++;
                sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
        }
        sleepq_release(c);
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
        struct task_struct *task;
        int error;

        if (SCHEDULER_STOPPED())
                return (0);

        task = current;

        if (flags != 0)
                flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
        else
                flags = SLEEPQ_SLEEP;
        error = 0;
        for (;;) {
                sleepq_lock(c);
                if (c->done)
                        break;
                sleepq_add(c, NULL, "completion", flags, 0);
                if (flags & SLEEPQ_INTERRUPTIBLE) {
                        DROP_GIANT();
                        error = -sleepq_wait_sig(c, 0);
                        PICKUP_GIANT();
                        if (error != 0) {
                                linux_schedule_save_interrupt_value(task, error);
                                error = -ERESTARTSYS;
                                goto intr;
                        }
                } else {
                        DROP_GIANT();
                        sleepq_wait(c, 0);
                        PICKUP_GIANT();
                }
        }
        if (c->done != UINT_MAX)
                c->done--;
        sleepq_release(c);

intr:
        return (error);
}
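
/*
 * Illustrative usage through the linux/completion.h wrappers backed by
 * the functions above (a sketch, not part of this file):
 *
 *      struct completion c;
 *
 *      init_completion(&c);
 *      ... another thread eventually calls complete(&c); ...
 *      wait_for_completion(&c);
 */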

/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
        struct task_struct *task;
        int end = jiffies + timeout;
        int error;

        if (SCHEDULER_STOPPED())
                return (0);

        task = current;

        if (flags != 0)
                flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
        else
                flags = SLEEPQ_SLEEP;

        for (;;) {
                sleepq_lock(c);
                if (c->done)
                        break;
                sleepq_add(c, NULL, "completion", flags, 0);
                sleepq_set_timeout(c, linux_timer_jiffies_until(end));

                DROP_GIANT();
                if (flags & SLEEPQ_INTERRUPTIBLE)
                        error = -sleepq_timedwait_sig(c, 0);
                else
                        error = -sleepq_timedwait(c, 0);
                PICKUP_GIANT();

                if (error != 0) {
                        /* check for timeout */
                        if (error == -EWOULDBLOCK) {
                                error = 0;      /* timeout */
                        } else {
                                /* signal happened */
                                linux_schedule_save_interrupt_value(task, error);
                                error = -ERESTARTSYS;
                        }
                        goto done;
                }
        }
        if (c->done != UINT_MAX)
                c->done--;
        sleepq_release(c);

        /* return how many jiffies are left */
        error = linux_timer_jiffies_until(end);
done:
        return (error);
}

int
linux_try_wait_for_completion(struct completion *c)
{
        int isdone;

        sleepq_lock(c);
        isdone = (c->done != 0);
        if (c->done != 0 && c->done != UINT_MAX)
                c->done--;
        sleepq_release(c);
        return (isdone);
}

int
linux_completion_done(struct completion *c)
{
        int isdone;

        sleepq_lock(c);
        isdone = (c->done != 0);
        sleepq_release(c);
        return (isdone);
}

static void
linux_cdev_deref(struct linux_cdev *ldev)
{
        if (refcount_release(&ldev->refs) &&
            ldev->kobj.ktype == &linux_cdev_ktype)
                kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
        struct linux_cdev *cdev;
        struct kobject *parent;

        cdev = container_of(kobj, struct linux_cdev, kobj);
        parent = kobj->parent;
        linux_destroy_dev(cdev);
        linux_cdev_deref(cdev);
        kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
        struct cdev *cdev;
        struct linux_cdev *ldev;

        ldev = container_of(kobj, struct linux_cdev, kobj);
        cdev = ldev->cdev;
        if (cdev != NULL) {
                destroy_dev(cdev);
                ldev->cdev = NULL;
        }
        kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
        int ret;

        if (dev->devt != 0) {
                /* Set parent kernel object. */
                ldev->kobj.parent = &dev->kobj;

                /*
                 * Unlike Linux we require the kobject of the
                 * character device structure to have a valid name
                 * before calling this function:
                 */
                if (ldev->kobj.name == NULL)
                        return (-EINVAL);

                ret = cdev_add(ldev, dev->devt, 1);
                if (ret)
                        return (ret);
        }
        ret = device_add(dev);
        if (ret != 0 && dev->devt != 0)
                cdev_del(ldev);
        return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
        device_del(dev);

        if (dev->devt != 0)
                cdev_del(ldev);
}
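
/*
 * Illustrative usage via the cdev_device_add()/cdev_device_del()
 * wrappers (a sketch with a hypothetical name); note the naming
 * requirement documented above, which differs from Linux:
 *
 *      kobject_set_name(&ldev->kobj, "mydev%d", unit);
 *      error = cdev_device_add(ldev, dev);
 *      ...
 *      cdev_device_del(ldev, dev);
 */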

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

        if (ldev->cdev == NULL)
                return;

        MPASS((ldev->siref & LDEV_SI_DTR) == 0);
        MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

        atomic_set_int(&ldev->siref, LDEV_SI_DTR);
        while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
                pause("ldevdtr", hz / 4);

        destroy_dev(ldev->cdev);
        ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
        .release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
        .release = linux_cdev_static_release,
};

static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        if (linkstate == LINK_STATE_UP)
                nb->notifier_call(nb, NETDEV_UP, &ni);
        else
                nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
        struct notifier_block *nb;
        struct netdev_notifier_info ni;

        nb = arg;
        ni.ifp = ifp;
        ni.dev = (struct net_device *)ifp;
        nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

        nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
            ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
        nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
            ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
        nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
            ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
        nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
            iflladdr_event, linux_handle_iflladdr_event, nb, 0);

        return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

        nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
            ifaddr_event, linux_handle_ifaddr_event, nb, 0);
        return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

        EVENTHANDLER_DEREGISTER(ifnet_link_event,
            nb->tags[NETDEV_UP]);
        EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
            nb->tags[NETDEV_REGISTER]);
        EVENTHANDLER_DEREGISTER(ifnet_departure_event,
            nb->tags[NETDEV_UNREGISTER]);
        EVENTHANDLER_DEREGISTER(iflladdr_event,
            nb->tags[NETDEV_CHANGEADDR]);

        return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

        EVENTHANDLER_DEREGISTER(ifaddr_event,
            nb->tags[NETDEV_CHANGEIFADDR]);

        return (0);
}
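
/*
 * Illustrative registration (a sketch with a hypothetical callback;
 * the NETDEV_* events are dispatched from the eventhandlers above):
 *
 *      static int
 *      my_notify(struct notifier_block *nb, unsigned long ev, void *data)
 *      {
 *              return (NOTIFY_DONE);
 *      }
 *
 *      static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 *      register_netdevice_notifier(&my_nb);
 *      ...
 *      unregister_netdevice_notifier(&my_nb);
 */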

struct list_sort_thunk {
        int (*cmp)(void *, struct list_head *, struct list_head *);
        void *priv;
};

static inline int
linux_le_cmp(const void *d1, const void *d2, void *priv)
{
        struct list_head *le1, *le2;
        struct list_sort_thunk *thunk;

        thunk = priv;
        le1 = *(__DECONST(struct list_head **, d1));
        le2 = *(__DECONST(struct list_head **, d2));
        return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
        struct list_sort_thunk thunk;
        struct list_head **ar, *le;
        size_t count, i;

        count = 0;
        list_for_each(le, head)
                count++;
        ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
        i = 0;
        list_for_each(le, head)
                ar[i++] = le;
        thunk.cmp = cmp;
        thunk.priv = priv;
        qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
        INIT_LIST_HEAD(head);
        for (i = 0; i < count; i++)
                list_add_tail(ar[i], head);
        free(ar, M_KMALLOC);
}
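
/*
 * Illustrative comparator (a sketch with a hypothetical element type):
 * list_sort() snapshots the list into an array, qsort_r()s it through
 * linux_le_cmp() above and relinks the entries in order.  The
 * comparator returns <0, 0 or >0:
 *
 *      struct my_elem {
 *              int key;
 *              struct list_head link;
 *      };
 *
 *      static int
 *      my_cmp(void *priv, struct list_head *a, struct list_head *b)
 *      {
 *              return (list_entry(a, struct my_elem, link)->key -
 *                  list_entry(b, struct my_elem, link)->key);
 *      }
 *
 *      list_sort(NULL, &head, my_cmp);
 */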

#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

        pmap_invalidate_cache();
        return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

        smp_rendezvous(smp_no_rendezvous_barrier, callback,
            smp_no_rendezvous_barrier, data);
        return (0);
}

int
linux_in_atomic(void)
{

        return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
        dev_t dev = MKDEV(major, minor);
        struct cdev *cdev;

        dev_lock();
        LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
                struct linux_cdev *ldev = cdev->si_drv1;
                if (ldev->dev == dev &&
                    strcmp(kobject_name(&ldev->kobj), name) == 0) {
                        break;
                }
        }
        dev_unlock();

        return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
        struct linux_cdev *cdev;
        int ret = 0;
        int i;

        for (i = baseminor; i < baseminor + count; i++) {
                cdev = cdev_alloc();
                cdev->ops = fops;
                kobject_set_name(&cdev->kobj, name);

                ret = cdev_add(cdev, makedev(major, i), 1);
                if (ret != 0)
                        break;
        }
        return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
        struct linux_cdev *cdev;
        int ret = 0;
        int i;

        for (i = baseminor; i < baseminor + count; i++) {
                cdev = cdev_alloc();
                cdev->ops = fops;
                kobject_set_name(&cdev->kobj, name);

                ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
                if (ret != 0)
                        break;
        }
        return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
        struct linux_cdev *cdevp;
        int i;

        for (i = baseminor; i < baseminor + count; i++) {
                cdevp = linux_find_cdev(name, major, i);
                if (cdevp != NULL)
                        cdev_del(cdevp);
        }
}
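
/*
 * Illustrative pairing (a sketch; the major number and fops are
 * hypothetical): every minor in [baseminor, baseminor + count) gets a
 * linux_cdev named after the driver, and unregistering finds the nodes
 * again by name via linux_find_cdev():
 *
 *      error = __register_chrdev(MY_MAJOR, 0, 4, "mydrv", &my_fops);
 *      ...
 *      __unregister_chrdev(MY_MAJOR, 0, 4, "mydrv");
 */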

void
linux_dump_stack(void)
{
#ifdef STACK
        struct stack st;

        stack_save(&st);
        stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

        return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
            lkpi_net_maxpps));
}
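
/*
 * Illustrative use through the net_ratelimit() macro backed by the
 * function above (a sketch):
 *
 *      if (net_ratelimit())
 *              pr_warn("dropping packet\n");
 */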

struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
        struct io_mapping *mapping;

        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
        if (mapping == NULL)
                return (NULL);
        return (io_mapping_init_wc(mapping, base, size));
}

/* We likely want a linuxkpi_device.c at some point. */
bool
device_can_wakeup(struct device *dev)
{

        if (dev == NULL)
                return (false);
        /*
         * XXX-BZ iwlwifi queries it as part of enabling WoWLAN.
         * Normally this would be based on a bool in dev->power.XXX.
         * Checks such as the PCI PCIM_PCAP_*PME capability would apply;
         * we have no way to enable this yet.  We may get away with
         * calling directly into bsddev for as long as we can assume PCI
         * only, avoiding changes to struct device that would break the
         * KBI.
         */
        pr_debug("%s:%d: not enabled; see comment.\n", __func__, __LINE__);
        return (false);
}

static void
devm_device_group_remove(struct device *dev, void *p)
{
        const struct attribute_group **dr = p;
        const struct attribute_group *group = *dr;

        sysfs_remove_group(&dev->kobj, group);
}

int
lkpi_devm_device_add_group(struct device *dev,
    const struct attribute_group *group)
{
        const struct attribute_group **dr;
        int ret;

        dr = devres_alloc(devm_device_group_remove, sizeof(*dr), GFP_KERNEL);
        if (dr == NULL)
                return (-ENOMEM);

        ret = sysfs_create_group(&dev->kobj, group);
        if (ret == 0) {
                *dr = group;
                devres_add(dev, dr);
        } else
                devres_free(dr);

        return (ret);
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
struct cpuinfo_x86 boot_cpu_data;
struct cpuinfo_x86 *__cpu_data;
#endif

cpumask_t *
lkpi_get_static_single_cpu_mask(int cpuid)
{

        KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n",
            __func__, cpuid));
        KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n",
            __func__, cpuid));

        return (static_single_cpu_mask[cpuid]);
}
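
/*
 * Illustrative consumer (a sketch; cpumask_of() resolves to the
 * function above, and the result may be passed on as an affinity hint):
 *
 *      const cpumask_t *mask = cpumask_of(2);
 *      irq_set_affinity_hint(irq, mask);
 */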

bool
lkpi_xen_initial_domain(void)
{
#ifdef XENHVM
        return (xen_initial_domain());
#else
        return (false);
#endif
}

bool
lkpi_xen_pv_domain(void)
{
#ifdef XENHVM
        return (xen_pv_domain());
#else
        return (false);
#endif
}

static void
linux_compat_init(void *arg)
{
        struct sysctl_oid *rootoid;
        int i;

#if defined(__i386__) || defined(__amd64__)
        static const uint32_t x86_vendors[X86_VENDOR_NUM] = {
                [X86_VENDOR_INTEL] = CPU_VENDOR_INTEL,
                [X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX,
                [X86_VENDOR_AMD] = CPU_VENDOR_AMD,
                [X86_VENDOR_UMC] = CPU_VENDOR_UMC,
                [X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR,
                [X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA,
                [X86_VENDOR_NSC] = CPU_VENDOR_NSC,
                [X86_VENDOR_HYGON] = CPU_VENDOR_HYGON,
        };
        uint8_t x86_vendor = X86_VENDOR_UNKNOWN;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) {
                        x86_vendor = i;
                        break;
                }
        }
        linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
        boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
        boot_cpu_data.x86_max_cores = mp_ncpus;
        boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
        boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);
        boot_cpu_data.x86_vendor = x86_vendor;

        __cpu_data = kmalloc_array(mp_maxid + 1,
            sizeof(*__cpu_data), M_WAITOK | M_ZERO);
        CPU_FOREACH(i) {
                __cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
                __cpu_data[i].x86_max_cores = mp_ncpus;
                __cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
                __cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
                __cpu_data[i].x86_vendor = x86_vendor;
        }
#endif
        rw_init(&linux_vma_lock, "lkpi-vma-lock");

        rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
            OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
        kobject_init(&linux_class_root, &linux_class_ktype);
        kobject_set_name(&linux_class_root, "class");
        linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
            OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
        kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
        kobject_set_name(&linux_root_device.kobj, "device");
        linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
            SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
        linux_root_device.bsddev = root_bus;
        linux_class_misc.name = "misc";
        class_register(&linux_class_misc);
        INIT_LIST_HEAD(&pci_drivers);
        INIT_LIST_HEAD(&pci_devices);
        spin_lock_init(&pci_lock);
        mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
        for (i = 0; i < VMMAP_HASH_SIZE; i++)
                LIST_INIT(&vmmaphead[i]);
        init_waitqueue_head(&linux_bit_waitq);
        init_waitqueue_head(&linux_var_waitq);

        CPU_COPY(&all_cpus, &cpu_online_mask);
        /*
         * Generate a single-CPU cpumask_t for each CPU (possibly) in the system.
         * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only
         * have itself in the cpumask, cpuid 1 only itself on entry 1, and so on.
         * This is used by cpumask_of() (and possibly others in the future) for,
         * e.g., drivers to pass hints to irq_set_affinity_hint().
         */
        static_single_cpu_mask = kmalloc_array(mp_maxid + 1,
            sizeof(static_single_cpu_mask), M_WAITOK | M_ZERO);

        /*
         * When the number of CPUs reaches a threshold, we start to save memory
         * given the sets are static by overlapping those having their single
         * bit set at same position in a bitset word.  Asymptotically, this
         * regular scheme is in O(n²) whereas the overlapping one is in O(n)
         * only with n being the maximum number of CPUs, so the gain will become
         * huge quite quickly.  The threshold for 64-bit architectures is 128
         * CPUs.
         */
        if (mp_ncpus < (2 * _BITSET_BITS)) {
                cpumask_t *sscm_ptr;

                /*
                 * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) *
                 * (_BITSET_BITS / 8)' bytes (for comparison with the
                 * overlapping scheme).
                 */
                static_single_cpu_mask_lcs = kmalloc_array(mp_ncpus,
                    sizeof(*static_single_cpu_mask_lcs),
                    M_WAITOK | M_ZERO);

                sscm_ptr = static_single_cpu_mask_lcs;
                CPU_FOREACH(i) {
                        static_single_cpu_mask[i] = sscm_ptr++;
                        CPU_SET(i, static_single_cpu_mask[i]);
                }
        } else {
                /* Pointer to a bitset word. */
                __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp;

                /*
                 * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t'
                 * really) with a single bit set that can be reused for all
                 * single CPU masks by making them start at different offsets.
                 * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before
                 * the word having its single bit set, and the same amount
                 * after.
                 */
                static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS,
                    (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8),
                    M_KMALLOC, M_WAITOK | M_ZERO);

                /*
                 * We rely below on cpuset_t and the bitset generic
                 * implementation assigning words in the '__bits' array in the
                 * same order of bits (i.e., little-endian ordering, not to be
                 * confused with machine endianness, which concerns bits in
                 * words and other integers).  This is an imperfect test, but it
                 * will detect a change to big-endian ordering.
                 */
                _Static_assert(
                    __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1,
                    "Assumes a bitset implementation that is little-endian "
                    "on its words");

                /* Initialize the single bit of each static span. */
                bwp = (__typeof(bwp))static_single_cpu_mask_lcs +
                    (__bitset_words(CPU_SETSIZE) - 1);
                for (i = 0; i < _BITSET_BITS; i++) {
                        CPU_SET(i, (cpuset_t *)bwp);
                        bwp += (2 * __bitset_words(CPU_SETSIZE) - 1);
                }

                /*
                 * Finally set all CPU masks to the proper word in their
                 * relevant span.
                 */
                CPU_FOREACH(i) {
                        bwp = (__typeof(bwp))static_single_cpu_mask_lcs;
                        /* Find the non-zero word of the relevant span. */
                        bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) *
                            (i % _BITSET_BITS) +
                            __bitset_words(CPU_SETSIZE) - 1;
                        /* Shift to find the CPU mask start. */
                        bwp -= (i / _BITSET_BITS);
                        static_single_cpu_mask[i] = (cpuset_t *)bwp;
                }
        }
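
        /*
         * Worked example of the overlapping layout above (illustrative
         * figures, assuming _BITSET_BITS = 64 and CPU_SETSIZE = 256,
         * i.e. __bitset_words(CPU_SETSIZE) = 4): each span is
         * 2 * 4 - 1 = 7 words, the backing array holds 64 * 7 words
         * (3584 bytes), and the mask of CPU 70 starts at word
         * 7 * (70 % 64) + 3 - 70 / 64 = 44, whose word 1 (index 45) is
         * the span's set word with bit 6 set, i.e. CPU 64 * 1 + 6 = 70.
         */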

        strlcpy(init_uts_ns.name.release, osrelease,
            sizeof(init_uts_ns.name.release));
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
        linux_kobject_kfree_name(&linux_class_root);
        linux_kobject_kfree_name(&linux_root_device.kobj);
        linux_kobject_kfree_name(&linux_class_misc.kobj);

        free(static_single_cpu_mask_lcs, M_KMALLOC);
        free(static_single_cpu_mask, M_KMALLOC);
#if defined(__i386__) || defined(__amd64__)
        free(__cpu_data, M_KMALLOC);
#endif

        mtx_destroy(&vmmaplock);
        spin_lock_destroy(&pci_lock);
        rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));