xref: /freebsd/sys/compat/linuxkpi/common/src/linux_compat.c (revision 357378bbdedf24ce2b90e9bd831af4a9db3ec70a)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 #include "opt_global.h"
32 #include "opt_stack.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/sysctl.h>
39 #include <sys/proc.h>
40 #include <sys/sglist.h>
41 #include <sys/sleepqueue.h>
42 #include <sys/refcount.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/bus.h>
46 #include <sys/eventhandler.h>
47 #include <sys/fcntl.h>
48 #include <sys/file.h>
49 #include <sys/filio.h>
50 #include <sys/rwlock.h>
51 #include <sys/mman.h>
52 #include <sys/stack.h>
53 #include <sys/sysent.h>
54 #include <sys/time.h>
55 #include <sys/user.h>
56 
57 #include <vm/vm.h>
58 #include <vm/pmap.h>
59 #include <vm/vm_object.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_pager.h>
62 
63 #include <machine/stdarg.h>
64 
65 #if defined(__i386__) || defined(__amd64__)
66 #include <machine/cputypes.h>
67 #include <machine/md_var.h>
68 #endif
69 
70 #include <linux/kobject.h>
71 #include <linux/cpu.h>
72 #include <linux/device.h>
73 #include <linux/slab.h>
74 #include <linux/module.h>
75 #include <linux/moduleparam.h>
76 #include <linux/cdev.h>
77 #include <linux/file.h>
78 #include <linux/sysfs.h>
79 #include <linux/mm.h>
80 #include <linux/io.h>
81 #include <linux/vmalloc.h>
82 #include <linux/netdevice.h>
83 #include <linux/timer.h>
84 #include <linux/interrupt.h>
85 #include <linux/uaccess.h>
86 #include <linux/utsname.h>
87 #include <linux/list.h>
88 #include <linux/kthread.h>
89 #include <linux/kernel.h>
90 #include <linux/compat.h>
91 #include <linux/io-mapping.h>
92 #include <linux/poll.h>
93 #include <linux/smp.h>
94 #include <linux/wait_bit.h>
95 #include <linux/rcupdate.h>
96 #include <linux/interval_tree.h>
97 #include <linux/interval_tree_generic.h>
98 
99 #if defined(__i386__) || defined(__amd64__)
100 #include <asm/smp.h>
101 #include <asm/processor.h>
102 #endif
103 
104 #include <xen/xen.h>
105 #ifdef XENHVM
106 #undef xen_pv_domain
107 #undef xen_initial_domain
108 /* xen/xen-os.h redefines __must_check */
109 #undef __must_check
110 #include <xen/xen-os.h>
111 #endif
112 
113 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
114     "LinuxKPI parameters");
115 
116 int linuxkpi_debug;
117 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
118     &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");
119 
120 int linuxkpi_warn_dump_stack = 0;
121 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
122     &linuxkpi_warn_dump_stack, 0,
123     "Set to enable stack traces from WARN_ON(). Clear to disable.");
124 
125 static struct timeval lkpi_net_lastlog;
126 static int lkpi_net_curpps;
127 static int lkpi_net_maxpps = 99;
128 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
129     &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");
130 
131 MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");
132 
133 #include <linux/rbtree.h>
134 /* Undo Linux compat changes. */
135 #undef RB_ROOT
136 #undef file
137 #undef cdev
138 #define	RB_ROOT(head)	(head)->rbh_root
139 
140 static void linux_destroy_dev(struct linux_cdev *);
141 static void linux_cdev_deref(struct linux_cdev *ldev);
142 static struct vm_area_struct *linux_cdev_handle_find(void *handle);
143 
144 cpumask_t cpu_online_mask;
145 static cpumask_t **static_single_cpu_mask;
146 static cpumask_t *static_single_cpu_mask_lcs;
147 struct kobject linux_class_root;
148 struct device linux_root_device;
149 struct class linux_class_misc;
150 struct list_head pci_drivers;
151 struct list_head pci_devices;
152 spinlock_t pci_lock;
153 struct uts_namespace init_uts_ns;
154 
155 unsigned long linux_timer_hz_mask;
156 
157 wait_queue_head_t linux_bit_waitq;
158 wait_queue_head_t linux_var_waitq;
159 
160 int
161 panic_cmp(struct rb_node *one, struct rb_node *two)
162 {
163 	panic("no cmp");
164 }
165 
166 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
167 
168 #define	START(node)	((node)->start)
169 #define	LAST(node)	((node)->last)
170 
171 INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
172     LAST,, lkpi_interval_tree)
173 
174 static void
175 linux_device_release(struct device *dev)
176 {
177 	pr_debug("linux_device_release: %s\n", dev_name(dev));
178 	kfree(dev);
179 }
180 
181 static ssize_t
182 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
183 {
184 	struct class_attribute *dattr;
185 	ssize_t error;
186 
187 	dattr = container_of(attr, struct class_attribute, attr);
188 	error = -EIO;
189 	if (dattr->show)
190 		error = dattr->show(container_of(kobj, struct class, kobj),
191 		    dattr, buf);
192 	return (error);
193 }
194 
195 static ssize_t
196 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
197     size_t count)
198 {
199 	struct class_attribute *dattr;
200 	ssize_t error;
201 
202 	dattr = container_of(attr, struct class_attribute, attr);
203 	error = -EIO;
204 	if (dattr->store)
205 		error = dattr->store(container_of(kobj, struct class, kobj),
206 		    dattr, buf, count);
207 	return (error);
208 }
209 
210 static void
211 linux_class_release(struct kobject *kobj)
212 {
213 	struct class *class;
214 
215 	class = container_of(kobj, struct class, kobj);
216 	if (class->class_release)
217 		class->class_release(class);
218 }
219 
220 static const struct sysfs_ops linux_class_sysfs = {
221 	.show  = linux_class_show,
222 	.store = linux_class_store,
223 };
224 
225 const struct kobj_type linux_class_ktype = {
226 	.release = linux_class_release,
227 	.sysfs_ops = &linux_class_sysfs
228 };
229 
230 static void
231 linux_dev_release(struct kobject *kobj)
232 {
233 	struct device *dev;
234 
235 	dev = container_of(kobj, struct device, kobj);
236 	/* This is the precedence defined by Linux. */
237 	if (dev->release)
238 		dev->release(dev);
239 	else if (dev->class && dev->class->dev_release)
240 		dev->class->dev_release(dev);
241 }
242 
243 static ssize_t
244 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
245 {
246 	struct device_attribute *dattr;
247 	ssize_t error;
248 
249 	dattr = container_of(attr, struct device_attribute, attr);
250 	error = -EIO;
251 	if (dattr->show)
252 		error = dattr->show(container_of(kobj, struct device, kobj),
253 		    dattr, buf);
254 	return (error);
255 }
256 
257 static ssize_t
258 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
259     size_t count)
260 {
261 	struct device_attribute *dattr;
262 	ssize_t error;
263 
264 	dattr = container_of(attr, struct device_attribute, attr);
265 	error = -EIO;
266 	if (dattr->store)
267 		error = dattr->store(container_of(kobj, struct device, kobj),
268 		    dattr, buf, count);
269 	return (error);
270 }
271 
272 static const struct sysfs_ops linux_dev_sysfs = {
273 	.show  = linux_dev_show,
274 	.store = linux_dev_store,
275 };
276 
277 const struct kobj_type linux_dev_ktype = {
278 	.release = linux_dev_release,
279 	.sysfs_ops = &linux_dev_sysfs
280 };
281 
282 struct device *
283 device_create(struct class *class, struct device *parent, dev_t devt,
284     void *drvdata, const char *fmt, ...)
285 {
286 	struct device *dev;
287 	va_list args;
288 
289 	dev = kzalloc(sizeof(*dev), M_WAITOK);
290 	dev->parent = parent;
291 	dev->class = class;
292 	dev->devt = devt;
293 	dev->driver_data = drvdata;
294 	dev->release = linux_device_release;
295 	va_start(args, fmt);
296 	kobject_set_name_vargs(&dev->kobj, fmt, args);
297 	va_end(args);
298 	device_register(dev);
299 
300 	return (dev);
301 }
302 
303 struct device *
304 device_create_groups_vargs(struct class *class, struct device *parent,
305     dev_t devt, void *drvdata, const struct attribute_group **groups,
306     const char *fmt, va_list args)
307 {
308 	struct device *dev = NULL;
309 	int retval = -ENODEV;
310 
311 	if (class == NULL || IS_ERR(class))
312 		goto error;
313 
314 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
315 	if (!dev) {
316 		retval = -ENOMEM;
317 		goto error;
318 	}
319 
320 	dev->devt = devt;
321 	dev->class = class;
322 	dev->parent = parent;
323 	dev->groups = groups;
324 	dev->release = device_create_release;
325 	/* device_initialize() needs the class and parent to be set */
326 	device_initialize(dev);
327 	dev_set_drvdata(dev, drvdata);
328 
329 	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
330 	if (retval)
331 		goto error;
332 
333 	retval = device_add(dev);
334 	if (retval)
335 		goto error;
336 
337 	return (dev);
338 
339 error:
340 	put_device(dev);
341 	return (ERR_PTR(retval));
342 }
343 
344 struct class *
345 lkpi_class_create(const char *name)
346 {
347 	struct class *class;
348 	int error;
349 
350 	class = kzalloc(sizeof(*class), M_WAITOK);
351 	class->name = name;
352 	class->class_release = linux_class_kfree;
353 	error = class_register(class);
354 	if (error) {
355 		kfree(class);
356 		return (NULL);
357 	}
358 
359 	return (class);
360 }
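/*
 * Usage sketch (hypothetical consumer; the names are illustrative).  A
 * driver typically pairs the two constructors above with the matching
 * destructors from <linux/device.h>:
 *
 *	struct class *cls;
 *	struct device *dev;
 *
 *	cls = lkpi_class_create("mydrv");
 *	if (cls == NULL)
 *		return (-ENOMEM);
 *	dev = device_create(cls, NULL, MKDEV(0, 0), sc, "mydrv%d", 0);
 *	...
 *	device_destroy(cls, MKDEV(0, 0));
 *	class_destroy(cls);
 */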
361 
362 static void
363 linux_kq_lock(void *arg)
364 {
365 	spinlock_t *s = arg;
366 
367 	spin_lock(s);
368 }
369 static void
370 linux_kq_unlock(void *arg)
371 {
372 	spinlock_t *s = arg;
373 
374 	spin_unlock(s);
375 }
376 
377 static void
378 linux_kq_assert_lock(void *arg, int what)
379 {
380 #ifdef INVARIANTS
381 	spinlock_t *s = arg;
382 
383 	if (what == LA_LOCKED)
384 		mtx_assert(s, MA_OWNED);
385 	else
386 		mtx_assert(s, MA_NOTOWNED);
387 #endif
388 }
389 
390 static void
391 linux_file_kqfilter_poll(struct linux_file *, int);
392 
393 struct linux_file *
394 linux_file_alloc(void)
395 {
396 	struct linux_file *filp;
397 
398 	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
399 
400 	/* set initial refcount */
401 	filp->f_count = 1;
402 
403 	/* setup fields needed by kqueue support */
404 	spin_lock_init(&filp->f_kqlock);
405 	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
406 	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);
407 
408 	return (filp);
409 }
410 
411 void
412 linux_file_free(struct linux_file *filp)
413 {
414 	if (filp->_file == NULL) {
415 		if (filp->f_op != NULL && filp->f_op->release != NULL)
416 			filp->f_op->release(filp->f_vnode, filp);
417 		if (filp->f_shmem != NULL)
418 			vm_object_deallocate(filp->f_shmem);
419 		kfree_rcu(filp, rcu);
420 	} else {
421 		/*
422 		 * The close method of the character device or file
423 		 * will free the linux_file structure:
424 		 */
425 		_fdrop(filp->_file, curthread);
426 	}
427 }
428 
429 struct linux_cdev *
430 cdev_alloc(void)
431 {
432 	struct linux_cdev *cdev;
433 
434 	cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
435 	kobject_init(&cdev->kobj, &linux_cdev_ktype);
436 	cdev->refs = 1;
437 	return (cdev);
438 }
439 
440 static int
441 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
442     vm_page_t *mres)
443 {
444 	struct vm_area_struct *vmap;
445 
446 	vmap = linux_cdev_handle_find(vm_obj->handle);
447 
448 	MPASS(vmap != NULL);
449 	MPASS(vmap->vm_private_data == vm_obj->handle);
450 
451 	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
452 		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
453 		vm_page_t page;
454 
455 		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
456 			/*
457 			 * If the passed in result page is a fake
458 			 * page, update it with the new physical
459 			 * address.
460 			 */
461 			page = *mres;
462 			vm_page_updatefake(page, paddr, vm_obj->memattr);
463 		} else {
464 			/*
465 			 * Replace the passed in "mres" page with our
466 			 * own fake page and free up all of the
467 			 * original pages.
468 			 */
469 			VM_OBJECT_WUNLOCK(vm_obj);
470 			page = vm_page_getfake(paddr, vm_obj->memattr);
471 			VM_OBJECT_WLOCK(vm_obj);
472 
473 			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
474 			*mres = page;
475 		}
476 		vm_page_valid(page);
477 		return (VM_PAGER_OK);
478 	}
479 	return (VM_PAGER_FAIL);
480 }
481 
482 static int
483 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
484     vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
485 {
486 	struct vm_area_struct *vmap;
487 	int err;
488 
489 	/* get VM area structure */
490 	vmap = linux_cdev_handle_find(vm_obj->handle);
491 	MPASS(vmap != NULL);
492 	MPASS(vmap->vm_private_data == vm_obj->handle);
493 
494 	VM_OBJECT_WUNLOCK(vm_obj);
495 
496 	linux_set_current(curthread);
497 
498 	down_write(&vmap->vm_mm->mmap_sem);
499 	if (unlikely(vmap->vm_ops == NULL)) {
500 		err = VM_FAULT_SIGBUS;
501 	} else {
502 		struct vm_fault vmf;
503 
504 		/* fill out VM fault structure */
505 		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
506 		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
507 		vmf.pgoff = 0;
508 		vmf.page = NULL;
509 		vmf.vma = vmap;
510 
511 		vmap->vm_pfn_count = 0;
512 		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
513 		vmap->vm_obj = vm_obj;
514 
515 		err = vmap->vm_ops->fault(&vmf);
516 
517 		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
518 			kern_yield(PRI_USER);
519 			err = vmap->vm_ops->fault(&vmf);
520 		}
521 	}
522 
523 	/* translate return code */
524 	switch (err) {
525 	case VM_FAULT_OOM:
526 		err = VM_PAGER_AGAIN;
527 		break;
528 	case VM_FAULT_SIGBUS:
529 		err = VM_PAGER_BAD;
530 		break;
531 	case VM_FAULT_NOPAGE:
532 		/*
533 		 * By contract the fault handler will return having
534 		 * busied all the pages itself. If pidx is already
535 		 * found in the object, it will simply xbusy the first
536 		 * page and return with vm_pfn_count set to 1.
537 		 */
538 		*first = vmap->vm_pfn_first;
539 		*last = *first + vmap->vm_pfn_count - 1;
540 		err = VM_PAGER_OK;
541 		break;
542 	default:
543 		err = VM_PAGER_ERROR;
544 		break;
545 	}
546 	up_write(&vmap->vm_mm->mmap_sem);
547 	VM_OBJECT_WLOCK(vm_obj);
548 	return (err);
549 }
550 
551 static struct rwlock linux_vma_lock;
552 static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
553     TAILQ_HEAD_INITIALIZER(linux_vma_head);
554 
555 static void
556 linux_cdev_handle_free(struct vm_area_struct *vmap)
557 {
558 	/* Drop reference on vm_file */
559 	if (vmap->vm_file != NULL)
560 		fput(vmap->vm_file);
561 
562 	/* Drop reference on mm_struct */
563 	mmput(vmap->vm_mm);
564 
565 	kfree(vmap);
566 }
567 
568 static void
569 linux_cdev_handle_remove(struct vm_area_struct *vmap)
570 {
571 	rw_wlock(&linux_vma_lock);
572 	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
573 	rw_wunlock(&linux_vma_lock);
574 }
575 
576 static struct vm_area_struct *
577 linux_cdev_handle_find(void *handle)
578 {
579 	struct vm_area_struct *vmap;
580 
581 	rw_rlock(&linux_vma_lock);
582 	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
583 		if (vmap->vm_private_data == handle)
584 			break;
585 	}
586 	rw_runlock(&linux_vma_lock);
587 	return (vmap);
588 }
589 
590 static int
591 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
592 		      vm_ooffset_t foff, struct ucred *cred, u_short *color)
593 {
594 
595 	MPASS(linux_cdev_handle_find(handle) != NULL);
596 	*color = 0;
597 	return (0);
598 }
599 
600 static void
601 linux_cdev_pager_dtor(void *handle)
602 {
603 	const struct vm_operations_struct *vm_ops;
604 	struct vm_area_struct *vmap;
605 
606 	vmap = linux_cdev_handle_find(handle);
607 	MPASS(vmap != NULL);
608 
609 	/*
610 	 * Remove handle before calling close operation to prevent
611 	 * other threads from reusing the handle pointer.
612 	 */
613 	linux_cdev_handle_remove(vmap);
614 
615 	down_write(&vmap->vm_mm->mmap_sem);
616 	vm_ops = vmap->vm_ops;
617 	if (likely(vm_ops != NULL))
618 		vm_ops->close(vmap);
619 	up_write(&vmap->vm_mm->mmap_sem);
620 
621 	linux_cdev_handle_free(vmap);
622 }
623 
624 static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
625   {
626 	/* OBJT_MGTDEVICE */
627 	.cdev_pg_populate	= linux_cdev_pager_populate,
628 	.cdev_pg_ctor	= linux_cdev_pager_ctor,
629 	.cdev_pg_dtor	= linux_cdev_pager_dtor
630   },
631   {
632 	/* OBJT_DEVICE */
633 	.cdev_pg_fault	= linux_cdev_pager_fault,
634 	.cdev_pg_ctor	= linux_cdev_pager_ctor,
635 	.cdev_pg_dtor	= linux_cdev_pager_dtor
636   },
637 };
638 
639 int
640 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
641     unsigned long size)
642 {
643 	vm_object_t obj;
644 	vm_page_t m;
645 
646 	obj = vma->vm_obj;
647 	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
648 		return (-ENOTSUP);
649 	VM_OBJECT_RLOCK(obj);
650 	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
651 	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
652 	    m = TAILQ_NEXT(m, listq))
653 		pmap_remove_all(m);
654 	VM_OBJECT_RUNLOCK(obj);
655 	return (0);
656 }
657 
658 void
659 vma_set_file(struct vm_area_struct *vma, struct linux_file *file)
660 {
661 	struct linux_file *tmp;
662 
663 	/* Changing an anonymous vma with this is illegal */
664 	get_file(file);
665 	tmp = vma->vm_file;
666 	vma->vm_file = file;
667 	fput(tmp);
668 }
669 
670 static struct file_operations dummy_ldev_ops = {
671 	/* XXXKIB */
672 };
673 
674 static struct linux_cdev dummy_ldev = {
675 	.ops = &dummy_ldev_ops,
676 };
677 
678 #define	LDEV_SI_DTR	0x0001
679 #define	LDEV_SI_REF	0x0002
680 
681 static void
682 linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
683     struct linux_cdev **dev)
684 {
685 	struct linux_cdev *ldev;
686 	u_int siref;
687 
688 	ldev = filp->f_cdev;
689 	*fop = filp->f_op;
690 	if (ldev != NULL) {
691 		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
692 			refcount_acquire(&ldev->refs);
693 		} else {
694 			for (siref = ldev->siref;;) {
695 				if ((siref & LDEV_SI_DTR) != 0) {
696 					ldev = &dummy_ldev;
697 					*fop = ldev->ops;
698 					siref = ldev->siref;
699 					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
700 				} else if (atomic_fcmpset_int(&ldev->siref,
701 				    &siref, siref + LDEV_SI_REF)) {
702 					break;
703 				}
704 			}
705 		}
706 	}
707 	*dev = ldev;
708 }
709 
710 static void
711 linux_drop_fop(struct linux_cdev *ldev)
712 {
713 
714 	if (ldev == NULL)
715 		return;
716 	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
717 		linux_cdev_deref(ldev);
718 	} else {
719 		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
720 		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
721 		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
722 	}
723 }
724 
725 #define	OPW(fp,td,code) ({			\
726 	struct file *__fpop;			\
727 	__typeof(code) __retval;		\
728 						\
729 	__fpop = (td)->td_fpop;			\
730 	(td)->td_fpop = (fp);			\
731 	__retval = (code);			\
732 	(td)->td_fpop = __fpop;			\
733 	__retval;				\
734 })
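/*
 * OPW() ("operation wrapper") brackets a call into a Linux file
 * operation so that td_fpop points at the FreeBSD file for the
 * duration of the call; FreeBSD code reached from inside the Linux
 * method (for example devfs cdevpriv lookups) uses td_fpop to find
 * the current struct file.  A typical call site looks like:
 *
 *	error = -OPW(fp, td, fop->open(file->f_vnode, filp));
 */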
735 
736 static int
737 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
738     struct file *file)
739 {
740 	struct linux_cdev *ldev;
741 	struct linux_file *filp;
742 	const struct file_operations *fop;
743 	int error;
744 
745 	ldev = dev->si_drv1;
746 
747 	filp = linux_file_alloc();
748 	filp->f_dentry = &filp->f_dentry_store;
749 	filp->f_op = ldev->ops;
750 	filp->f_mode = file->f_flag;
751 	filp->f_flags = file->f_flag;
752 	filp->f_vnode = file->f_vnode;
753 	filp->_file = file;
754 	refcount_acquire(&ldev->refs);
755 	filp->f_cdev = ldev;
756 
757 	linux_set_current(td);
758 	linux_get_fop(filp, &fop, &ldev);
759 
760 	if (fop->open != NULL) {
761 		error = -fop->open(file->f_vnode, filp);
762 		if (error != 0) {
763 			linux_drop_fop(ldev);
764 			linux_cdev_deref(filp->f_cdev);
765 			kfree(filp);
766 			return (error);
767 		}
768 	}
769 
770 	/* hold on to the vnode - used for fstat() */
771 	vhold(filp->f_vnode);
772 
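	/*
	 * The ENXIO return below is deliberate: once finit() has
	 * installed linuxfileops on the file, returning ENXIO from the
	 * d_fdopen method tells the generic open path that the file
	 * descriptor has been taken over, so devfs must not install
	 * its own fileops on top of it.
	 */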
773 	/* release the file from devfs */
774 	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
775 	linux_drop_fop(ldev);
776 	return (ENXIO);
777 }
778 
779 #define	LINUX_IOCTL_MIN_PTR 0x10000UL
780 #define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)
781 
782 static inline int
783 linux_remap_address(void **uaddr, size_t len)
784 {
785 	uintptr_t uaddr_val = (uintptr_t)(*uaddr);
786 
787 	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
788 	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
789 		struct task_struct *pts = current;
790 		if (pts == NULL) {
791 			*uaddr = NULL;
792 			return (1);
793 		}
794 
795 		/* compute data offset */
796 		uaddr_val -= LINUX_IOCTL_MIN_PTR;
797 
798 		/* check that length is within bounds */
799 		if ((len > IOCPARM_MAX) ||
800 		    (uaddr_val + len) > pts->bsd_ioctl_len) {
801 			*uaddr = NULL;
802 			return (1);
803 		}
804 
805 		/* re-add kernel buffer address */
806 		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;
807 
808 		/* update address location */
809 		*uaddr = (void *)uaddr_val;
810 		return (1);
811 	}
812 	return (0);
813 }
814 
815 int
816 linux_copyin(const void *uaddr, void *kaddr, size_t len)
817 {
818 	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
819 		if (uaddr == NULL)
820 			return (-EFAULT);
821 		memcpy(kaddr, uaddr, len);
822 		return (0);
823 	}
824 	return (-copyin(uaddr, kaddr, len));
825 }
826 
827 int
828 linux_copyout(const void *kaddr, void *uaddr, size_t len)
829 {
830 	if (linux_remap_address(&uaddr, len)) {
831 		if (uaddr == NULL)
832 			return (-EFAULT);
833 		memcpy(uaddr, kaddr, len);
834 		return (0);
835 	}
836 	return (-copyout(kaddr, uaddr, len));
837 }
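/*
 * Sketch of how the remapping above ties together (the ioctl handler
 * is hypothetical): linux_file_ioctl_sub() stores the kernel ioctl
 * buffer in task->bsd_ioctl_data and hands the Linux handler the fake
 * user pointer LINUX_IOCTL_MIN_PTR.  When that handler then does
 *
 *	struct mydrv_args args;
 *
 *	if (copy_from_user(&args, (void *)arg, sizeof(args)))
 *		return (-EFAULT);
 *
 * copy_from_user() lands in linux_copyin(), which recognizes the fake
 * pointer range and turns the access into a plain memcpy() from the
 * kernel buffer instead of a real copyin().
 */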
838 
839 size_t
840 linux_clear_user(void *_uaddr, size_t _len)
841 {
842 	uint8_t *uaddr = _uaddr;
843 	size_t len = _len;
844 
845 	/* make sure uaddr is aligned before going into the fast loop */
846 	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
847 		if (subyte(uaddr, 0))
848 			return (_len);
849 		uaddr++;
850 		len--;
851 	}
852 
853 	/* zero 8 bytes at a time */
854 	while (len > 7) {
855 #ifdef __LP64__
856 		if (suword64(uaddr, 0))
857 			return (_len);
858 #else
859 		if (suword32(uaddr, 0))
860 			return (_len);
861 		if (suword32(uaddr + 4, 0))
862 			return (_len);
863 #endif
864 		uaddr += 8;
865 		len -= 8;
866 	}
867 
868 	/* zero fill end, if any */
869 	while (len > 0) {
870 		if (subyte(uaddr, 0))
871 			return (_len);
872 		uaddr++;
873 		len--;
874 	}
875 	return (0);
876 }
877 
878 int
879 linux_access_ok(const void *uaddr, size_t len)
880 {
881 	uintptr_t saddr;
882 	uintptr_t eaddr;
883 
884 	/* get start and end address */
885 	saddr = (uintptr_t)uaddr;
886 	eaddr = (uintptr_t)uaddr + len;
887 
888 	/* verify addresses are valid for userspace */
889 	return ((saddr == eaddr) ||
890 	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
891 }
892 
893 /*
894  * This function should return either EINTR or ERESTART depending on
895  * the signal type sent to this thread:
896  */
897 static int
898 linux_get_error(struct task_struct *task, int error)
899 {
900 	/* check for signal type interrupt code */
901 	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
902 		error = -linux_schedule_get_interrupt_value(task);
903 		if (error == 0)
904 			error = EINTR;
905 	}
906 	return (error);
907 }
908 
909 static int
910 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
911     const struct file_operations *fop, u_long cmd, caddr_t data,
912     struct thread *td)
913 {
914 	struct task_struct *task = current;
915 	unsigned size;
916 	int error;
917 
918 	size = IOCPARM_LEN(cmd);
919 	/* refer to logic in sys_ioctl() */
920 	if (size > 0) {
921 		/*
922 		 * Setup hint for linux_copyin() and linux_copyout().
923 		 *
924 		 * Background: Linux code expects a user-space address
925 		 * while FreeBSD supplies a kernel-space address.
926 		 */
927 		task->bsd_ioctl_data = data;
928 		task->bsd_ioctl_len = size;
929 		data = (void *)LINUX_IOCTL_MIN_PTR;
930 	} else {
931 		/* fetch user-space pointer */
932 		data = *(void **)data;
933 	}
934 #ifdef COMPAT_FREEBSD32
935 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
936 		/* try the compat IOCTL handler first */
937 		if (fop->compat_ioctl != NULL) {
938 			error = -OPW(fp, td, fop->compat_ioctl(filp,
939 			    cmd, (u_long)data));
940 		} else {
941 			error = ENOTTY;
942 		}
943 
944 		/* fallback to the regular IOCTL handler, if any */
945 		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
946 			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
947 			    cmd, (u_long)data));
948 		}
949 	} else
950 #endif
951 	{
952 		if (fop->unlocked_ioctl != NULL) {
953 			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
954 			    cmd, (u_long)data));
955 		} else {
956 			error = ENOTTY;
957 		}
958 	}
959 	if (size > 0) {
960 		task->bsd_ioctl_data = NULL;
961 		task->bsd_ioctl_len = 0;
962 	}
963 
964 	if (error == EWOULDBLOCK) {
965 		/* update kqfilter status, if any */
966 		linux_file_kqfilter_poll(filp,
967 		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
968 	} else {
969 		error = linux_get_error(task, error);
970 	}
971 	return (error);
972 }
973 
974 #define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)
975 
976 /*
977  * This function atomically updates the poll wakeup state and returns
978  * the previous state at the time of update.
979  */
980 static uint8_t
981 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
982 {
983 	int c, old;
984 
985 	c = v->counter;
986 
987 	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
988 		c = old;
989 
990 	return (c);
991 }
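/*
 * Example with the table used by linux_poll_wakeup_callback() below:
 * a file in LINUX_FWQ_STATE_QUEUED is atomically moved to
 * LINUX_FWQ_STATE_READY and the old state, QUEUED, is returned, which
 * is what triggers the actual wakeup.  Every other state maps to
 * itself, so spurious callbacks are harmless no-ops.
 */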
992 
993 static int
994 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
995 {
996 	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
997 		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */
998 		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
999 		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
1000 		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */
1001 	};
1002 	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);
1003 
1004 	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1005 	case LINUX_FWQ_STATE_QUEUED:
1006 		linux_poll_wakeup(filp);
1007 		return (1);
1008 	default:
1009 		return (0);
1010 	}
1011 }
1012 
1013 void
1014 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
1015 {
1016 	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1017 		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
1018 		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
1019 		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */
1020 		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
1021 	};
1022 
1023 	/* check if we are called inside the select system call */
1024 	if (p == LINUX_POLL_TABLE_NORMAL)
1025 		selrecord(curthread, &filp->f_selinfo);
1026 
1027 	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1028 	case LINUX_FWQ_STATE_INIT:
1029 		/* NOTE: file handles can only belong to one wait-queue */
1030 		filp->f_wait_queue.wqh = wqh;
1031 		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
1032 		add_wait_queue(wqh, &filp->f_wait_queue.wq);
1033 		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
1034 		break;
1035 	default:
1036 		break;
1037 	}
1038 }
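/*
 * A Linux-style poll method consuming the above might look like this
 * (hypothetical driver; "sc" and its members are illustrative):
 *
 *	static unsigned int
 *	mydrv_poll(struct file *file, poll_table *pt)
 *	{
 *		struct mydrv_softc *sc = file->private_data;
 *
 *		poll_wait(file, &sc->rx_wq, pt);
 *		return (sc->rx_ready ? (POLLIN | POLLRDNORM) : 0);
 *	}
 *
 * poll_wait() resolves to linux_poll_wait(); the first call through
 * select(2) transitions the file out of LINUX_FWQ_STATE_INIT, adds it
 * to the wait queue exactly once, and marks it QUEUED.
 */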
1039 
1040 static void
1041 linux_poll_wait_dequeue(struct linux_file *filp)
1042 {
1043 	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1044 		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
1045 		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
1046 		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
1047 		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
1048 	};
1049 
1050 	seldrain(&filp->f_selinfo);
1051 
1052 	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1053 	case LINUX_FWQ_STATE_NOT_READY:
1054 	case LINUX_FWQ_STATE_QUEUED:
1055 	case LINUX_FWQ_STATE_READY:
1056 		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
1057 		break;
1058 	default:
1059 		break;
1060 	}
1061 }
1062 
1063 void
1064 linux_poll_wakeup(struct linux_file *filp)
1065 {
1066 	/* this function should be NULL-safe */
1067 	if (filp == NULL)
1068 		return;
1069 
1070 	selwakeup(&filp->f_selinfo);
1071 
1072 	spin_lock(&filp->f_kqlock);
1073 	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
1074 	    LINUX_KQ_FLAG_NEED_WRITE;
1075 
1076 	/* make sure the "knote" gets woken up */
1077 	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
1078 	spin_unlock(&filp->f_kqlock);
1079 }
1080 
1081 static void
1082 linux_file_kqfilter_detach(struct knote *kn)
1083 {
1084 	struct linux_file *filp = kn->kn_hook;
1085 
1086 	spin_lock(&filp->f_kqlock);
1087 	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
1088 	spin_unlock(&filp->f_kqlock);
1089 }
1090 
1091 static int
1092 linux_file_kqfilter_read_event(struct knote *kn, long hint)
1093 {
1094 	struct linux_file *filp = kn->kn_hook;
1095 
1096 	mtx_assert(&filp->f_kqlock, MA_OWNED);
1097 
1098 	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
1099 }
1100 
1101 static int
1102 linux_file_kqfilter_write_event(struct knote *kn, long hint)
1103 {
1104 	struct linux_file *filp = kn->kn_hook;
1105 
1106 	mtx_assert(&filp->f_kqlock, MA_OWNED);
1107 
1108 	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
1109 }
1110 
1111 static struct filterops linux_dev_kqfiltops_read = {
1112 	.f_isfd = 1,
1113 	.f_detach = linux_file_kqfilter_detach,
1114 	.f_event = linux_file_kqfilter_read_event,
1115 };
1116 
1117 static struct filterops linux_dev_kqfiltops_write = {
1118 	.f_isfd = 1,
1119 	.f_detach = linux_file_kqfilter_detach,
1120 	.f_event = linux_file_kqfilter_write_event,
1121 };
1122 
1123 static void
1124 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
1125 {
1126 	struct thread *td;
1127 	const struct file_operations *fop;
1128 	struct linux_cdev *ldev;
1129 	int temp;
1130 
1131 	if ((filp->f_kqflags & kqflags) == 0)
1132 		return;
1133 
1134 	td = curthread;
1135 
1136 	linux_get_fop(filp, &fop, &ldev);
1137 	/* get the latest polling state */
1138 	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
1139 	linux_drop_fop(ldev);
1140 
1141 	spin_lock(&filp->f_kqlock);
1142 	/* clear kqflags */
1143 	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
1144 	    LINUX_KQ_FLAG_NEED_WRITE);
1145 	/* update kqflags */
1146 	if ((temp & (POLLIN | POLLOUT)) != 0) {
1147 		if ((temp & POLLIN) != 0)
1148 			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
1149 		if ((temp & POLLOUT) != 0)
1150 			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;
1151 
1152 		/* make sure the "knote" gets woken up */
1153 		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
1154 	}
1155 	spin_unlock(&filp->f_kqlock);
1156 }
1157 
1158 static int
1159 linux_file_kqfilter(struct file *file, struct knote *kn)
1160 {
1161 	struct linux_file *filp;
1162 	struct thread *td;
1163 	int error;
1164 
1165 	td = curthread;
1166 	filp = (struct linux_file *)file->f_data;
1167 	filp->f_flags = file->f_flag;
1168 	if (filp->f_op->poll == NULL)
1169 		return (EINVAL);
1170 
1171 	spin_lock(&filp->f_kqlock);
1172 	switch (kn->kn_filter) {
1173 	case EVFILT_READ:
1174 		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
1175 		kn->kn_fop = &linux_dev_kqfiltops_read;
1176 		kn->kn_hook = filp;
1177 		knlist_add(&filp->f_selinfo.si_note, kn, 1);
1178 		error = 0;
1179 		break;
1180 	case EVFILT_WRITE:
1181 		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
1182 		kn->kn_fop = &linux_dev_kqfiltops_write;
1183 		kn->kn_hook = filp;
1184 		knlist_add(&filp->f_selinfo.si_note, kn, 1);
1185 		error = 0;
1186 		break;
1187 	default:
1188 		error = EINVAL;
1189 		break;
1190 	}
1191 	spin_unlock(&filp->f_kqlock);
1192 
1193 	if (error == 0) {
1194 		linux_set_current(td);
1195 
1196 		/* update kqfilter status, if any */
1197 		linux_file_kqfilter_poll(filp,
1198 		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
1199 	}
1200 	return (error);
1201 }
1202 
1203 static int
1204 linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
1205     vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
1206     int nprot, bool is_shared, struct thread *td)
1207 {
1208 	struct task_struct *task;
1209 	struct vm_area_struct *vmap;
1210 	struct mm_struct *mm;
1211 	struct linux_file *filp;
1212 	vm_memattr_t attr;
1213 	int error;
1214 
1215 	filp = (struct linux_file *)fp->f_data;
1216 	filp->f_flags = fp->f_flag;
1217 
1218 	if (fop->mmap == NULL)
1219 		return (EOPNOTSUPP);
1220 
1221 	linux_set_current(td);
1222 
1223 	/*
1224 	 * The same VM object might be shared by multiple processes
1225 	 * and the mm_struct is usually freed when a process exits.
1226 	 *
1227 	 * The atomic reference below makes sure the mm_struct is
1228 	 * available as long as the vmap is in the linux_vma_head.
1229 	 */
1230 	task = current;
1231 	mm = task->mm;
1232 	if (atomic_inc_not_zero(&mm->mm_users) == 0)
1233 		return (EINVAL);
1234 
1235 	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
1236 	vmap->vm_start = 0;
1237 	vmap->vm_end = size;
1238 	vmap->vm_pgoff = *offset / PAGE_SIZE;
1239 	vmap->vm_pfn = 0;
1240 	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
1241 	if (is_shared)
1242 		vmap->vm_flags |= VM_SHARED;
1243 	vmap->vm_ops = NULL;
1244 	vmap->vm_file = get_file(filp);
1245 	vmap->vm_mm = mm;
1246 
1247 	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
1248 		error = linux_get_error(task, EINTR);
1249 	} else {
1250 		error = -OPW(fp, td, fop->mmap(filp, vmap));
1251 		error = linux_get_error(task, error);
1252 		up_write(&vmap->vm_mm->mmap_sem);
1253 	}
1254 
1255 	if (error != 0) {
1256 		linux_cdev_handle_free(vmap);
1257 		return (error);
1258 	}
1259 
1260 	attr = pgprot2cachemode(vmap->vm_page_prot);
1261 
1262 	if (vmap->vm_ops != NULL) {
1263 		struct vm_area_struct *ptr;
1264 		void *vm_private_data;
1265 		bool vm_no_fault;
1266 
1267 		if (vmap->vm_ops->open == NULL ||
1268 		    vmap->vm_ops->close == NULL ||
1269 		    vmap->vm_private_data == NULL) {
1270 			/* free allocated VM area struct */
1271 			linux_cdev_handle_free(vmap);
1272 			return (EINVAL);
1273 		}
1274 
1275 		vm_private_data = vmap->vm_private_data;
1276 
1277 		rw_wlock(&linux_vma_lock);
1278 		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
1279 			if (ptr->vm_private_data == vm_private_data)
1280 				break;
1281 		}
1282 		/* check if there is an existing VM area struct */
1283 		if (ptr != NULL) {
1284 			/* check if the VM area structure is invalid */
1285 			if (ptr->vm_ops == NULL ||
1286 			    ptr->vm_ops->open == NULL ||
1287 			    ptr->vm_ops->close == NULL) {
1288 				error = ESTALE;
1289 				vm_no_fault = 1;
1290 			} else {
1291 				error = EEXIST;
1292 				vm_no_fault = (ptr->vm_ops->fault == NULL);
1293 			}
1294 		} else {
1295 			/* insert VM area structure into list */
1296 			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
1297 			error = 0;
1298 			vm_no_fault = (vmap->vm_ops->fault == NULL);
1299 		}
1300 		rw_wunlock(&linux_vma_lock);
1301 
1302 		if (error != 0) {
1303 			/* free allocated VM area struct */
1304 			linux_cdev_handle_free(vmap);
1305 			/* check for stale VM area struct */
1306 			if (error != EEXIST)
1307 				return (error);
1308 		}
1309 
1310 		/* check if there is no fault handler */
1311 		if (vm_no_fault) {
1312 			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
1313 			    &linux_cdev_pager_ops[1], size, nprot, *offset,
1314 			    td->td_ucred);
1315 		} else {
1316 			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
1317 			    &linux_cdev_pager_ops[0], size, nprot, *offset,
1318 			    td->td_ucred);
1319 		}
1320 
1321 		/* check if allocating the VM object failed */
1322 		if (*object == NULL) {
1323 			if (error == 0) {
1324 				/* remove VM area struct from list */
1325 				linux_cdev_handle_remove(vmap);
1326 				/* free allocated VM area struct */
1327 				linux_cdev_handle_free(vmap);
1328 			}
1329 			return (EINVAL);
1330 		}
1331 	} else {
1332 		struct sglist *sg;
1333 
1334 		sg = sglist_alloc(1, M_WAITOK);
1335 		sglist_append_phys(sg,
1336 		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);
1337 
1338 		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
1339 		    nprot, 0, td->td_ucred);
1340 
1341 		linux_cdev_handle_free(vmap);
1342 
1343 		if (*object == NULL) {
1344 			sglist_free(sg);
1345 			return (EINVAL);
1346 		}
1347 	}
1348 
1349 	if (attr != VM_MEMATTR_DEFAULT) {
1350 		VM_OBJECT_WLOCK(*object);
1351 		vm_object_set_memattr(*object, attr);
1352 		VM_OBJECT_WUNLOCK(*object);
1353 	}
1354 	*offset = 0;
1355 	return (0);
1356 }
1357 
1358 struct cdevsw linuxcdevsw = {
1359 	.d_version = D_VERSION,
1360 	.d_fdopen = linux_dev_fdopen,
1361 	.d_name = "lkpidev",
1362 };
1363 
1364 static int
1365 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
1366     int flags, struct thread *td)
1367 {
1368 	struct linux_file *filp;
1369 	const struct file_operations *fop;
1370 	struct linux_cdev *ldev;
1371 	ssize_t bytes;
1372 	int error;
1373 
1374 	error = 0;
1375 	filp = (struct linux_file *)file->f_data;
1376 	filp->f_flags = file->f_flag;
1377 	/* XXX no support for I/O vectors currently */
1378 	if (uio->uio_iovcnt != 1)
1379 		return (EOPNOTSUPP);
1380 	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1381 		return (EINVAL);
1382 	linux_set_current(td);
1383 	linux_get_fop(filp, &fop, &ldev);
1384 	if (fop->read != NULL) {
1385 		bytes = OPW(file, td, fop->read(filp,
1386 		    uio->uio_iov->iov_base,
1387 		    uio->uio_iov->iov_len, &uio->uio_offset));
1388 		if (bytes >= 0) {
1389 			uio->uio_iov->iov_base =
1390 			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
1391 			uio->uio_iov->iov_len -= bytes;
1392 			uio->uio_resid -= bytes;
1393 		} else {
1394 			error = linux_get_error(current, -bytes);
1395 		}
1396 	} else
1397 		error = ENXIO;
1398 
1399 	/* update kqfilter status, if any */
1400 	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
1401 	linux_drop_fop(ldev);
1402 
1403 	return (error);
1404 }
1405 
1406 static int
1407 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
1408     int flags, struct thread *td)
1409 {
1410 	struct linux_file *filp;
1411 	const struct file_operations *fop;
1412 	struct linux_cdev *ldev;
1413 	ssize_t bytes;
1414 	int error;
1415 
1416 	filp = (struct linux_file *)file->f_data;
1417 	filp->f_flags = file->f_flag;
1418 	/* XXX no support for I/O vectors currently */
1419 	if (uio->uio_iovcnt != 1)
1420 		return (EOPNOTSUPP);
1421 	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1422 		return (EINVAL);
1423 	linux_set_current(td);
1424 	linux_get_fop(filp, &fop, &ldev);
1425 	if (fop->write != NULL) {
1426 		bytes = OPW(file, td, fop->write(filp,
1427 		    uio->uio_iov->iov_base,
1428 		    uio->uio_iov->iov_len, &uio->uio_offset));
1429 		if (bytes >= 0) {
1430 			uio->uio_iov->iov_base =
1431 			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
1432 			uio->uio_iov->iov_len -= bytes;
1433 			uio->uio_resid -= bytes;
1434 			error = 0;
1435 		} else {
1436 			error = linux_get_error(current, -bytes);
1437 		}
1438 	} else
1439 		error = ENXIO;
1440 
1441 	/* update kqfilter status, if any */
1442 	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);
1443 
1444 	linux_drop_fop(ldev);
1445 
1446 	return (error);
1447 }
1448 
1449 static int
1450 linux_file_poll(struct file *file, int events, struct ucred *active_cred,
1451     struct thread *td)
1452 {
1453 	struct linux_file *filp;
1454 	const struct file_operations *fop;
1455 	struct linux_cdev *ldev;
1456 	int revents;
1457 
1458 	filp = (struct linux_file *)file->f_data;
1459 	filp->f_flags = file->f_flag;
1460 	linux_set_current(td);
1461 	linux_get_fop(filp, &fop, &ldev);
1462 	if (fop->poll != NULL) {
1463 		revents = OPW(file, td, fop->poll(filp,
1464 		    LINUX_POLL_TABLE_NORMAL)) & events;
1465 	} else {
1466 		revents = 0;
1467 	}
1468 	linux_drop_fop(ldev);
1469 	return (revents);
1470 }
1471 
1472 static int
1473 linux_file_close(struct file *file, struct thread *td)
1474 {
1475 	struct linux_file *filp;
1476 	int (*release)(struct inode *, struct linux_file *);
1477 	const struct file_operations *fop;
1478 	struct linux_cdev *ldev;
1479 	int error;
1480 
1481 	filp = (struct linux_file *)file->f_data;
1482 
1483 	KASSERT(file_count(filp) == 0,
1484 	    ("File refcount(%d) is not zero", file_count(filp)));
1485 
1486 	if (td == NULL)
1487 		td = curthread;
1488 
1489 	error = 0;
1490 	filp->f_flags = file->f_flag;
1491 	linux_set_current(td);
1492 	linux_poll_wait_dequeue(filp);
1493 	linux_get_fop(filp, &fop, &ldev);
1494 	/*
1495 	 * Always use the real release function, if any, to avoid
1496 	 * leaking device resources:
1497 	 */
1498 	release = filp->f_op->release;
1499 	if (release != NULL)
1500 		error = -OPW(file, td, release(filp->f_vnode, filp));
1501 	funsetown(&filp->f_sigio);
1502 	if (filp->f_vnode != NULL)
1503 		vdrop(filp->f_vnode);
1504 	linux_drop_fop(ldev);
1505 	ldev = filp->f_cdev;
1506 	if (ldev != NULL)
1507 		linux_cdev_deref(ldev);
1508 	linux_synchronize_rcu(RCU_TYPE_REGULAR);
1509 	kfree(filp);
1510 
1511 	return (error);
1512 }
1513 
1514 static int
1515 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
1516     struct thread *td)
1517 {
1518 	struct linux_file *filp;
1519 	const struct file_operations *fop;
1520 	struct linux_cdev *ldev;
1521 	struct fiodgname_arg *fgn;
1522 	const char *p;
1523 	int error, i;
1524 
1525 	error = 0;
1526 	filp = (struct linux_file *)fp->f_data;
1527 	filp->f_flags = fp->f_flag;
1528 	linux_get_fop(filp, &fop, &ldev);
1529 
1530 	linux_set_current(td);
1531 	switch (cmd) {
1532 	case FIONBIO:
1533 		break;
1534 	case FIOASYNC:
1535 		if (fop->fasync == NULL)
1536 			break;
1537 		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
1538 		break;
1539 	case FIOSETOWN:
1540 		error = fsetown(*(int *)data, &filp->f_sigio);
1541 		if (error == 0) {
1542 			if (fop->fasync == NULL)
1543 				break;
1544 			error = -OPW(fp, td, fop->fasync(0, filp,
1545 			    fp->f_flag & FASYNC));
1546 		}
1547 		break;
1548 	case FIOGETOWN:
1549 		*(int *)data = fgetown(&filp->f_sigio);
1550 		break;
1551 	case FIODGNAME:
1552 #ifdef	COMPAT_FREEBSD32
1553 	case FIODGNAME_32:
1554 #endif
1555 		if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
1556 			error = ENXIO;
1557 			break;
1558 		}
1559 		fgn = data;
1560 		p = devtoname(filp->f_cdev->cdev);
1561 		i = strlen(p) + 1;
1562 		if (i > fgn->len) {
1563 			error = EINVAL;
1564 			break;
1565 		}
1566 		error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
1567 		break;
1568 	default:
1569 		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
1570 		break;
1571 	}
1572 	linux_drop_fop(ldev);
1573 	return (error);
1574 }
1575 
1576 static int
1577 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1578     vm_prot_t maxprot, int flags, struct file *fp,
1579     vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
1580 {
1581 	/*
1582 	 * Character devices do not provide private mappings
1583 	 * of any kind:
1584 	 */
1585 	if ((maxprot & VM_PROT_WRITE) == 0 &&
1586 	    (prot & VM_PROT_WRITE) != 0)
1587 		return (EACCES);
1588 	if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
1589 		return (EINVAL);
1590 
1591 	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
1592 	    (int)prot, (flags & MAP_SHARED) ? true : false, td));
1593 }
1594 
1595 static int
1596 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
1597     vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
1598     struct thread *td)
1599 {
1600 	struct linux_file *filp;
1601 	const struct file_operations *fop;
1602 	struct linux_cdev *ldev;
1603 	struct mount *mp;
1604 	struct vnode *vp;
1605 	vm_object_t object;
1606 	vm_prot_t maxprot;
1607 	int error;
1608 
1609 	filp = (struct linux_file *)fp->f_data;
1610 
1611 	vp = filp->f_vnode;
1612 	if (vp == NULL)
1613 		return (EOPNOTSUPP);
1614 
1615 	/*
1616 	 * Ensure that file and memory protections are
1617 	 * compatible.
1618 	 */
1619 	mp = vp->v_mount;
1620 	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
1621 		maxprot = VM_PROT_NONE;
1622 		if ((prot & VM_PROT_EXECUTE) != 0)
1623 			return (EACCES);
1624 	} else
1625 		maxprot = VM_PROT_EXECUTE;
1626 	if ((fp->f_flag & FREAD) != 0)
1627 		maxprot |= VM_PROT_READ;
1628 	else if ((prot & VM_PROT_READ) != 0)
1629 		return (EACCES);
1630 
1631 	/*
1632 	 * If we are sharing potential changes via MAP_SHARED and we
1633 	 * are trying to get write permission although we opened it
1634 	 * without asking for it, bail out.
1635 	 *
1636 	 * Note that most character devices always share mappings.
1637 	 *
1638 	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
1639 	 * requests rather than doing it here.
1640 	 */
1641 	if ((flags & MAP_SHARED) != 0) {
1642 		if ((fp->f_flag & FWRITE) != 0)
1643 			maxprot |= VM_PROT_WRITE;
1644 		else if ((prot & VM_PROT_WRITE) != 0)
1645 			return (EACCES);
1646 	}
1647 	maxprot &= cap_maxprot;
1648 
1649 	linux_get_fop(filp, &fop, &ldev);
1650 	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
1651 	    &foff, fop, &object);
1652 	if (error != 0)
1653 		goto out;
1654 
1655 	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1656 	    foff, FALSE, td);
1657 	if (error != 0)
1658 		vm_object_deallocate(object);
1659 out:
1660 	linux_drop_fop(ldev);
1661 	return (error);
1662 }
1663 
1664 static int
1665 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
1666 {
1667 	struct linux_file *filp;
1668 	struct vnode *vp;
1669 	int error;
1670 
1671 	filp = (struct linux_file *)fp->f_data;
1672 	if (filp->f_vnode == NULL)
1673 		return (EOPNOTSUPP);
1674 
1675 	vp = filp->f_vnode;
1676 
1677 	vn_lock(vp, LK_SHARED | LK_RETRY);
1678 	error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
1679 	VOP_UNLOCK(vp);
1680 
1681 	return (error);
1682 }
1683 
1684 static int
1685 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1686     struct filedesc *fdp)
1687 {
1688 	struct linux_file *filp;
1689 	struct vnode *vp;
1690 	int error;
1691 
1692 	filp = fp->f_data;
1693 	vp = filp->f_vnode;
1694 	if (vp == NULL) {
1695 		error = 0;
1696 		kif->kf_type = KF_TYPE_DEV;
1697 	} else {
1698 		vref(vp);
1699 		FILEDESC_SUNLOCK(fdp);
1700 		error = vn_fill_kinfo_vnode(vp, kif);
1701 		vrele(vp);
1702 		kif->kf_type = KF_TYPE_VNODE;
1703 		FILEDESC_SLOCK(fdp);
1704 	}
1705 	return (error);
1706 }
1707 
1708 unsigned int
1709 linux_iminor(struct inode *inode)
1710 {
1711 	struct linux_cdev *ldev;
1712 
1713 	if (inode == NULL || inode->v_rdev == NULL ||
1714 	    inode->v_rdev->si_devsw != &linuxcdevsw)
1715 		return (-1U);
1716 	ldev = inode->v_rdev->si_drv1;
1717 	if (ldev == NULL)
1718 		return (-1U);
1719 
1720 	return (minor(ldev->dev));
1721 }
1722 
1723 static int
1724 linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td)
1725 {
1726 	struct linux_file *filp1, *filp2;
1727 
1728 	if (fp2->f_type != DTYPE_DEV)
1729 		return (3);
1730 
1731 	filp1 = fp1->f_data;
1732 	filp2 = fp2->f_data;
1733 	return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev));
1734 }
1735 
1736 struct fileops linuxfileops = {
1737 	.fo_read = linux_file_read,
1738 	.fo_write = linux_file_write,
1739 	.fo_truncate = invfo_truncate,
1740 	.fo_kqfilter = linux_file_kqfilter,
1741 	.fo_stat = linux_file_stat,
1742 	.fo_fill_kinfo = linux_file_fill_kinfo,
1743 	.fo_poll = linux_file_poll,
1744 	.fo_close = linux_file_close,
1745 	.fo_ioctl = linux_file_ioctl,
1746 	.fo_mmap = linux_file_mmap,
1747 	.fo_chmod = invfo_chmod,
1748 	.fo_chown = invfo_chown,
1749 	.fo_sendfile = invfo_sendfile,
1750 	.fo_cmp = linux_file_kcmp,
1751 	.fo_flags = DFLAG_PASSABLE,
1752 };
1753 
1754 /*
1755  * Hash of vmmap addresses.  This is infrequently accessed and does not
1756  * need to be particularly large.  The hash exists because unmap callers
1757  * pass only the address, so we must remember each mapping's size.
1758  */
1759 struct vmmap {
1760 	LIST_ENTRY(vmmap)	vm_next;
1761 	void 			*vm_addr;
1762 	unsigned long		vm_size;
1763 };
1764 
1765 struct vmmaphd {
1766 	struct vmmap *lh_first;
1767 };
1768 #define	VMMAP_HASH_SIZE	64
1769 #define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
1770 #define	VM_HASH(addr)	(((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK)
1771 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
1772 static struct mtx vmmaplock;
1773 
1774 static void
1775 vmmap_add(void *addr, unsigned long size)
1776 {
1777 	struct vmmap *vmmap;
1778 
1779 	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
1780 	mtx_lock(&vmmaplock);
1781 	vmmap->vm_size = size;
1782 	vmmap->vm_addr = addr;
1783 	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
1784 	mtx_unlock(&vmmaplock);
1785 }
1786 
1787 static struct vmmap *
1788 vmmap_remove(void *addr)
1789 {
1790 	struct vmmap *vmmap;
1791 
1792 	mtx_lock(&vmmaplock);
1793 	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
1794 		if (vmmap->vm_addr == addr)
1795 			break;
1796 	if (vmmap)
1797 		LIST_REMOVE(vmmap, vm_next);
1798 	mtx_unlock(&vmmaplock);
1799 
1800 	return (vmmap);
1801 }
1802 
1803 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
1804 void *
1805 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
1806 {
1807 	void *addr;
1808 
1809 	addr = pmap_mapdev_attr(phys_addr, size, attr);
1810 	if (addr == NULL)
1811 		return (NULL);
1812 	vmmap_add(addr, size);
1813 
1814 	return (addr);
1815 }
1816 #endif
1817 
1818 void
1819 iounmap(void *addr)
1820 {
1821 	struct vmmap *vmmap;
1822 
1823 	vmmap = vmmap_remove(addr);
1824 	if (vmmap == NULL)
1825 		return;
1826 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
1827 	pmap_unmapdev(addr, vmmap->vm_size);
1828 #endif
1829 	kfree(vmmap);
1830 }
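/*
 * Typical use of the vmmap hash by the I/O mapping functions above
 * (hypothetical driver; the BAR address and register offset are
 * illustrative).  ioremap() records the mapping size so that
 * iounmap() can undo the mapping without the caller passing it back:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(bar_paddr, 0x1000);
 *	if (regs == NULL)
 *		return (-ENOMEM);
 *	writel(1, regs + MYDRV_CTRL);
 *	...
 *	iounmap(regs);
 */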
1831 
1832 void *
1833 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
1834 {
1835 	vm_offset_t off;
1836 	size_t size;
1837 
1838 	size = count * PAGE_SIZE;
1839 	off = kva_alloc(size);
1840 	if (off == 0)
1841 		return (NULL);
1842 	vmmap_add((void *)off, size);
1843 	pmap_qenter(off, pages, count);
1844 
1845 	return ((void *)off);
1846 }
1847 
1848 void
1849 vunmap(void *addr)
1850 {
1851 	struct vmmap *vmmap;
1852 
1853 	vmmap = vmmap_remove(addr);
1854 	if (vmmap == NULL)
1855 		return;
1856 	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
1857 	kva_free((vm_offset_t)addr, vmmap->vm_size);
1858 	kfree(vmmap);
1859 }
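/*
 * vmap()/vunmap() follow the same pattern (a sketch; "pages" and
 * "npages" are illustrative).  Note that this implementation ignores
 * the trailing flags and prot arguments:
 *
 *	void *va;
 *
 *	va = vmap(pages, npages, 0, 0);
 *	if (va != NULL) {
 *		memset(va, 0, (size_t)npages * PAGE_SIZE);
 *		vunmap(va);
 *	}
 */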
1860 
1861 static char *
1862 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
1863 {
1864 	unsigned int len;
1865 	char *p;
1866 	va_list aq;
1867 
1868 	va_copy(aq, ap);
1869 	len = vsnprintf(NULL, 0, fmt, aq);
1870 	va_end(aq);
1871 
1872 	if (dev != NULL)
1873 		p = devm_kmalloc(dev, len + 1, gfp);
1874 	else
1875 		p = kmalloc(len + 1, gfp);
1876 	if (p != NULL)
1877 		vsnprintf(p, len + 1, fmt, ap);
1878 
1879 	return (p);
1880 }
1881 
1882 char *
1883 kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
1884 {
1885 
1886 	return (devm_kvasprintf(NULL, gfp, fmt, ap));
1887 }
1888 
1889 char *
1890 lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
1891 {
1892 	va_list ap;
1893 	char *p;
1894 
1895 	va_start(ap, fmt);
1896 	p = devm_kvasprintf(dev, gfp, fmt, ap);
1897 	va_end(ap);
1898 
1899 	return (p);
1900 }
1901 
1902 char *
1903 kasprintf(gfp_t gfp, const char *fmt, ...)
1904 {
1905 	va_list ap;
1906 	char *p;
1907 
1908 	va_start(ap, fmt);
1909 	p = kvasprintf(gfp, fmt, ap);
1910 	va_end(ap);
1911 
1912 	return (p);
1913 }
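/*
 * kasprintf() allocates exactly the buffer the formatted string needs
 * (a sketch; the format and "unit" are illustrative):
 *
 *	char *name;
 *
 *	name = kasprintf(GFP_KERNEL, "mydrv%d", unit);
 *	if (name == NULL)
 *		return (-ENOMEM);
 *	...
 *	kfree(name);
 */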
1914 
1915 static void
1916 linux_timer_callback_wrapper(void *context)
1917 {
1918 	struct timer_list *timer;
1919 
1920 	timer = context;
1921 
1922 	/* the timer is about to be shut down permanently */
1923 	if (timer->function == NULL)
1924 		return;
1925 
1926 	if (linux_set_current_flags(curthread, M_NOWAIT)) {
1927 		/* try again later */
1928 		callout_reset(&timer->callout, 1,
1929 		    &linux_timer_callback_wrapper, timer);
1930 		return;
1931 	}
1932 
1933 	timer->function(timer->data);
1934 }
1935 
1936 int
1937 mod_timer(struct timer_list *timer, int expires)
1938 {
1939 	int ret;
1940 
1941 	timer->expires = expires;
1942 	ret = callout_reset(&timer->callout,
1943 	    linux_timer_jiffies_until(expires),
1944 	    &linux_timer_callback_wrapper, timer);
1945 
1946 	MPASS(ret == 0 || ret == 1);
1947 
1948 	return (ret == 1);
1949 }
1950 
1951 void
1952 add_timer(struct timer_list *timer)
1953 {
1954 
1955 	callout_reset(&timer->callout,
1956 	    linux_timer_jiffies_until(timer->expires),
1957 	    &linux_timer_callback_wrapper, timer);
1958 }
1959 
1960 void
1961 add_timer_on(struct timer_list *timer, int cpu)
1962 {
1963 
1964 	callout_reset_on(&timer->callout,
1965 	    linux_timer_jiffies_until(timer->expires),
1966 	    &linux_timer_callback_wrapper, timer, cpu);
1967 }
1968 
1969 int
1970 del_timer(struct timer_list *timer)
1971 {
1972 
1973 	if (callout_stop(&timer->callout) == -1)
1974 		return (0);
1975 	return (1);
1976 }
1977 
1978 int
1979 del_timer_sync(struct timer_list *timer)
1980 {
1981 
1982 	if (callout_drain(&timer->callout) == -1)
1983 		return (0);
1984 	return (1);
1985 }
1986 
1987 int
1988 timer_delete_sync(struct timer_list *timer)
1989 {
1990 
1991 	return (del_timer_sync(timer));
1992 }
1993 
1994 int
1995 timer_shutdown_sync(struct timer_list *timer)
1996 {
1997 
1998 	timer->function = NULL;
1999 	return (del_timer_sync(timer));
2000 }
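/*
 * Timer API sketch (hypothetical consumer, assuming the
 * timer_setup()-style interface from <linux/timer.h>).  The callback
 * runs from callout context via linux_timer_callback_wrapper():
 *
 *	static void
 *	mydrv_tick(struct timer_list *t)
 *	{
 *		struct mydrv_softc *sc = from_timer(sc, t, timer);
 *		...
 *	}
 *
 *	timer_setup(&sc->timer, mydrv_tick, 0);
 *	mod_timer(&sc->timer, jiffies + msecs_to_jiffies(100));
 *	...
 *	timer_shutdown_sync(&sc->timer);
 */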
2001 
2002 /* greatest common divisor, by Euclid's algorithm */
2003 static uint64_t
2004 lkpi_gcd_64(uint64_t a, uint64_t b)
2005 {
2006 	uint64_t an;
2007 	uint64_t bn;
2008 
2009 	while (b != 0) {
2010 		an = b;
2011 		bn = a % b;
2012 		a = an;
2013 		b = bn;
2014 	}
2015 	return (a);
2016 }
2017 
2018 uint64_t lkpi_nsec2hz_rem;
2019 uint64_t lkpi_nsec2hz_div = 1000000000ULL;
2020 uint64_t lkpi_nsec2hz_max;
2021 
2022 uint64_t lkpi_usec2hz_rem;
2023 uint64_t lkpi_usec2hz_div = 1000000ULL;
2024 uint64_t lkpi_usec2hz_max;
2025 
2026 uint64_t lkpi_msec2hz_rem;
2027 uint64_t lkpi_msec2hz_div = 1000ULL;
2028 uint64_t lkpi_msec2hz_max;
2029 
2030 static void
2031 linux_timer_init(void *arg)
2032 {
2033 	uint64_t gcd;
2034 
2035 	/*
2036 	 * Compute an internal HZ value (a power of two, so that it
2037 	 * divides 2**32 evenly) to avoid timer rounding problems when
2038 	 * the tick value wraps around 2**32:
2039 	 */
2040 	linux_timer_hz_mask = 1;
2041 	while (linux_timer_hz_mask < (unsigned long)hz)
2042 		linux_timer_hz_mask *= 2;
2043 	linux_timer_hz_mask--;
2044 
2045 	/* compute some internal constants */
2046 
2047 	lkpi_nsec2hz_rem = hz;
2048 	lkpi_usec2hz_rem = hz;
2049 	lkpi_msec2hz_rem = hz;
2050 
2051 	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
2052 	lkpi_nsec2hz_rem /= gcd;
2053 	lkpi_nsec2hz_div /= gcd;
2054 	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;
2055 
2056 	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
2057 	lkpi_usec2hz_rem /= gcd;
2058 	lkpi_usec2hz_div /= gcd;
2059 	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;
2060 
2061 	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
2062 	lkpi_msec2hz_rem /= gcd;
2063 	lkpi_msec2hz_div /= gcd;
2064 	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
2065 }
2066 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
2067 
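/*
 * Signal a completion.  With "all" set (complete_all()), saturate the
 * counter at UINT_MAX so the completion stays done forever and wake
 * every sleeper; otherwise (complete()) bump the counter and wake a
 * single sleeper.  A minimal usage sketch, assuming the usual wrappers
 * from <linux/completion.h>:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	... hand &done to some asynchronous context ...
 *	wait_for_completion(&done);	(sleeps until complete(&done))
 */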
2068 void
2069 linux_complete_common(struct completion *c, int all)
2070 {
2071 	sleepq_lock(c);
2072 	if (all) {
2073 		c->done = UINT_MAX;
2074 		sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
2075 	} else {
2076 		if (c->done != UINT_MAX)
2077 			c->done++;
2078 		sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
2079 	}
2080 	sleepq_release(c);
2081 }
2082 
2083 /*
2084  * Indefinite wait for done != 0 with or without signals.
2085  */
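/* Returns 0, or -ERESTARTSYS when interrupted by a signal. */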
2086 int
2087 linux_wait_for_common(struct completion *c, int flags)
2088 {
2089 	struct task_struct *task;
2090 	int error;
2091 
2092 	if (SCHEDULER_STOPPED())
2093 		return (0);
2094 
2095 	task = current;
2096 
2097 	if (flags != 0)
2098 		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
2099 	else
2100 		flags = SLEEPQ_SLEEP;
2101 	error = 0;
2102 	for (;;) {
2103 		sleepq_lock(c);
2104 		if (c->done)
2105 			break;
2106 		sleepq_add(c, NULL, "completion", flags, 0);
2107 		if (flags & SLEEPQ_INTERRUPTIBLE) {
2108 			DROP_GIANT();
2109 			error = -sleepq_wait_sig(c, 0);
2110 			PICKUP_GIANT();
2111 			if (error != 0) {
2112 				linux_schedule_save_interrupt_value(task, error);
2113 				error = -ERESTARTSYS;
2114 				goto intr;
2115 			}
2116 		} else {
2117 			DROP_GIANT();
2118 			sleepq_wait(c, 0);
2119 			PICKUP_GIANT();
2120 		}
2121 	}
2122 	if (c->done != UINT_MAX)
2123 		c->done--;
2124 	sleepq_release(c);
2125 
2126 intr:
2127 	return (error);
2128 }
2129 
2130 /*
2131  * Time limited wait for done != 0 with or without signals.
2132  */
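/* Returns the jiffies left, 0 on timeout, or -ERESTARTSYS on a signal. */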
2133 int
2134 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
2135 {
2136 	struct task_struct *task;
2137 	int end = jiffies + timeout;
2138 	int error;
2139 
2140 	if (SCHEDULER_STOPPED())
2141 		return (0);
2142 
2143 	task = current;
2144 
2145 	if (flags != 0)
2146 		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
2147 	else
2148 		flags = SLEEPQ_SLEEP;
2149 
2150 	for (;;) {
2151 		sleepq_lock(c);
2152 		if (c->done)
2153 			break;
2154 		sleepq_add(c, NULL, "completion", flags, 0);
2155 		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
2156 
2157 		DROP_GIANT();
2158 		if (flags & SLEEPQ_INTERRUPTIBLE)
2159 			error = -sleepq_timedwait_sig(c, 0);
2160 		else
2161 			error = -sleepq_timedwait(c, 0);
2162 		PICKUP_GIANT();
2163 
2164 		if (error != 0) {
2165 			/* check for timeout */
2166 			if (error == -EWOULDBLOCK) {
2167 				error = 0;	/* timeout */
2168 			} else {
2169 				/* signal happened */
2170 				linux_schedule_save_interrupt_value(task, error);
2171 				error = -ERESTARTSYS;
2172 			}
2173 			goto done;
2174 		}
2175 	}
2176 	if (c->done != UINT_MAX)
2177 		c->done--;
2178 	sleepq_release(c);
2179 
2180 	/* return how many jiffies are left */
2181 	error = linux_timer_jiffies_until(end);
2182 done:
2183 	return (error);
2184 }
2185 
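/*
 * Non-blocking variant: consume one unit of "done" if available and
 * report whether the completion had been signalled.  A counter of
 * UINT_MAX (set by complete_all()) is deliberately left untouched.
 */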
2186 int
2187 linux_try_wait_for_completion(struct completion *c)
2188 {
2189 	int isdone;
2190 
2191 	sleepq_lock(c);
2192 	isdone = (c->done != 0);
2193 	if (c->done != 0 && c->done != UINT_MAX)
2194 		c->done--;
2195 	sleepq_release(c);
2196 	return (isdone);
2197 }
2198 
2199 int
2200 linux_completion_done(struct completion *c)
2201 {
2202 	int isdone;
2203 
2204 	sleepq_lock(c);
2205 	isdone = (c->done != 0);
2206 	sleepq_release(c);
2207 	return (isdone);
2208 }
2209 
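/*
 * Drop a reference on a Linux character device.  Only dynamically
 * allocated devices (those with linux_cdev_ktype) are kfree()d here;
 * statically allocated ones carry linux_cdev_static_ktype and must not
 * be freed.
 */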
2210 static void
2211 linux_cdev_deref(struct linux_cdev *ldev)
2212 {
2213 	if (refcount_release(&ldev->refs) &&
2214 	    ldev->kobj.ktype == &linux_cdev_ktype)
2215 		kfree(ldev);
2216 }
2217 
2218 static void
2219 linux_cdev_release(struct kobject *kobj)
2220 {
2221 	struct linux_cdev *cdev;
2222 	struct kobject *parent;
2223 
2224 	cdev = container_of(kobj, struct linux_cdev, kobj);
2225 	parent = kobj->parent;
2226 	linux_destroy_dev(cdev);
2227 	linux_cdev_deref(cdev);
2228 	kobject_put(parent);
2229 }
2230 
2231 static void
2232 linux_cdev_static_release(struct kobject *kobj)
2233 {
2234 	struct cdev *cdev;
2235 	struct linux_cdev *ldev;
2236 
2237 	ldev = container_of(kobj, struct linux_cdev, kobj);
2238 	cdev = ldev->cdev;
2239 	if (cdev != NULL) {
2240 		destroy_dev(cdev);
2241 		ldev->cdev = NULL;
2242 	}
2243 	kobject_put(kobj->parent);
2244 }
2245 
2246 int
2247 linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
2248 {
2249 	int ret;
2250 
2251 	if (dev->devt != 0) {
2252 		/* Set parent kernel object. */
2253 		ldev->kobj.parent = &dev->kobj;
2254 
2255 		/*
2256 		 * Unlike Linux we require the kobject of the
2257 		 * character device structure to have a valid name
2258 		 * before calling this function:
2259 		 */
2260 		if (ldev->kobj.name == NULL)
2261 			return (-EINVAL);
2262 
2263 		ret = cdev_add(ldev, dev->devt, 1);
2264 		if (ret)
2265 			return (ret);
2266 	}
2267 	ret = device_add(dev);
2268 	if (ret != 0 && dev->devt != 0)
2269 		cdev_del(ldev);
2270 	return (ret);
2271 }
2272 
2273 void
2274 linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
2275 {
2276 	device_del(dev);
2277 
2278 	if (dev->devt != 0)
2279 		cdev_del(ldev);
2280 }
2281 
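/*
 * Tear down the FreeBSD cdev backing a Linux character device: mark it
 * as being destroyed in the si-reference word, wait for all other si
 * references to drain, then destroy the cdev itself.
 */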
2282 static void
2283 linux_destroy_dev(struct linux_cdev *ldev)
2284 {
2285 
2286 	if (ldev->cdev == NULL)
2287 		return;
2288 
2289 	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
2290 	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
2291 
2292 	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
2293 	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
2294 		pause("ldevdtr", hz / 4);
2295 
2296 	destroy_dev(ldev->cdev);
2297 	ldev->cdev = NULL;
2298 }
2299 
2300 const struct kobj_type linux_cdev_ktype = {
2301 	.release = linux_cdev_release,
2302 };
2303 
2304 const struct kobj_type linux_cdev_static_ktype = {
2305 	.release = linux_cdev_static_release,
2306 };
2307 
2308 static void
2309 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
2310 {
2311 	struct notifier_block *nb;
2312 	struct netdev_notifier_info ni;
2313 
2314 	nb = arg;
2315 	ni.ifp = ifp;
2316 	ni.dev = (struct net_device *)ifp;
2317 	if (linkstate == LINK_STATE_UP)
2318 		nb->notifier_call(nb, NETDEV_UP, &ni);
2319 	else
2320 		nb->notifier_call(nb, NETDEV_DOWN, &ni);
2321 }
2322 
2323 static void
2324 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
2325 {
2326 	struct notifier_block *nb;
2327 	struct netdev_notifier_info ni;
2328 
2329 	nb = arg;
2330 	ni.ifp = ifp;
2331 	ni.dev = (struct net_device *)ifp;
2332 	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
2333 }
2334 
2335 static void
2336 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
2337 {
2338 	struct notifier_block *nb;
2339 	struct netdev_notifier_info ni;
2340 
2341 	nb = arg;
2342 	ni.ifp = ifp;
2343 	ni.dev = (struct net_device *)ifp;
2344 	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
2345 }
2346 
2347 static void
2348 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
2349 {
2350 	struct notifier_block *nb;
2351 	struct netdev_notifier_info ni;
2352 
2353 	nb = arg;
2354 	ni.ifp = ifp;
2355 	ni.dev = (struct net_device *)ifp;
2356 	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
2357 }
2358 
2359 static void
2360 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
2361 {
2362 	struct notifier_block *nb;
2363 	struct netdev_notifier_info ni;
2364 
2365 	nb = arg;
2366 	ni.ifp = ifp;
2367 	ni.dev = (struct net_device *)ifp;
2368 	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
2369 }
2370 
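/*
 * Emulate Linux netdevice notifiers by hooking the corresponding
 * FreeBSD eventhandler(9) events; the returned tags are stored
 * per-event in the notifier_block so that they can be removed again in
 * unregister_netdevice_notifier().
 */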
2371 int
2372 register_netdevice_notifier(struct notifier_block *nb)
2373 {
2374 
2375 	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
2376 	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
2377 	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
2378 	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
2379 	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
2380 	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
2381 	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
2382 	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);
2383 
2384 	return (0);
2385 }
2386 
2387 int
2388 register_inetaddr_notifier(struct notifier_block *nb)
2389 {
2390 
2391 	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
2392 	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
2393 	return (0);
2394 }
2395 
2396 int
2397 unregister_netdevice_notifier(struct notifier_block *nb)
2398 {
2399 
2400 	EVENTHANDLER_DEREGISTER(ifnet_link_event,
2401 	    nb->tags[NETDEV_UP]);
2402 	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
2403 	    nb->tags[NETDEV_REGISTER]);
2404 	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
2405 	    nb->tags[NETDEV_UNREGISTER]);
2406 	EVENTHANDLER_DEREGISTER(iflladdr_event,
2407 	    nb->tags[NETDEV_CHANGEADDR]);
2408 
2409 	return (0);
2410 }
2411 
2412 int
2413 unregister_inetaddr_notifier(struct notifier_block *nb)
2414 {
2415 
2416 	EVENTHANDLER_DEREGISTER(ifaddr_event,
2417 	    nb->tags[NETDEV_CHANGEIFADDR]);
2418 
2419 	return (0);
2420 }
2421 
2422 struct list_sort_thunk {
2423 	int (*cmp)(void *, struct list_head *, struct list_head *);
2424 	void *priv;
2425 };
2426 
2427 static inline int
2428 linux_le_cmp(const void *d1, const void *d2, void *priv)
2429 {
2430 	struct list_head *le1, *le2;
2431 	struct list_sort_thunk *thunk;
2432 
2433 	thunk = priv;
2434 	le1 = *(__DECONST(struct list_head **, d1));
2435 	le2 = *(__DECONST(struct list_head **, d2));
2436 	return ((thunk->cmp)(thunk->priv, le1, le2));
2437 }
2438 
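/*
 * Sort a Linux list by copying the element pointers into a temporary
 * array, sorting that array with qsort_r() (using the thunk above to
 * adapt the Linux comparator), and relinking the list in sorted order.
 */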
2439 void
2440 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
2441     struct list_head *a, struct list_head *b))
2442 {
2443 	struct list_sort_thunk thunk;
2444 	struct list_head **ar, *le;
2445 	size_t count, i;
2446 
2447 	count = 0;
2448 	list_for_each(le, head)
2449 		count++;
2450 	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
2451 	i = 0;
2452 	list_for_each(le, head)
2453 		ar[i++] = le;
2454 	thunk.cmp = cmp;
2455 	thunk.priv = priv;
2456 	qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
2457 	INIT_LIST_HEAD(head);
2458 	for (i = 0; i < count; i++)
2459 		list_add_tail(ar[i], head);
2460 	free(ar, M_KMALLOC);
2461 }
2462 
2463 #if defined(__i386__) || defined(__amd64__)
2464 int
2465 linux_wbinvd_on_all_cpus(void)
2466 {
2467 
2468 	pmap_invalidate_cache();
2469 	return (0);
2470 }
2471 #endif
2472 
2473 int
2474 linux_on_each_cpu(void callback(void *), void *data)
2475 {
2476 
2477 	smp_rendezvous(smp_no_rendezvous_barrier, callback,
2478 	    smp_no_rendezvous_barrier, data);
2479 	return (0);
2480 }
2481 
2482 int
2483 linux_in_atomic(void)
2484 {
2485 
2486 	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
2487 }
2488 
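/*
 * Look up a Linux character device by name and device number by
 * scanning all cdevs created through linuxcdevsw; returns the
 * associated struct linux_cdev, or NULL if there is no match.
 */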
2489 struct linux_cdev *
2490 linux_find_cdev(const char *name, unsigned major, unsigned minor)
2491 {
2492 	dev_t dev = MKDEV(major, minor);
2493 	struct cdev *cdev;
2494 
2495 	dev_lock();
2496 	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
2497 		struct linux_cdev *ldev = cdev->si_drv1;
2498 		if (ldev->dev == dev &&
2499 		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
2500 			break;
2501 		}
2502 	}
2503 	dev_unlock();
2504 
2505 	return (cdev != NULL ? cdev->si_drv1 : NULL);
2506 }
2507 
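/*
 * Register a range of minors, allocating one linux_cdev per minor.  On
 * the first cdev_add() failure the error is returned and minors added
 * earlier in the loop are left registered.
 */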
2508 int
2509 __register_chrdev(unsigned int major, unsigned int baseminor,
2510     unsigned int count, const char *name,
2511     const struct file_operations *fops)
2512 {
2513 	struct linux_cdev *cdev;
2514 	int ret = 0;
2515 	int i;
2516 
2517 	for (i = baseminor; i < baseminor + count; i++) {
2518 		cdev = cdev_alloc();
2519 		cdev->ops = fops;
2520 		kobject_set_name(&cdev->kobj, name);
2521 
2522 		ret = cdev_add(cdev, makedev(major, i), 1);
2523 		if (ret != 0)
2524 			break;
2525 	}
2526 	return (ret);
2527 }
2528 
2529 int
2530 __register_chrdev_p(unsigned int major, unsigned int baseminor,
2531     unsigned int count, const char *name,
2532     const struct file_operations *fops, uid_t uid,
2533     gid_t gid, int mode)
2534 {
2535 	struct linux_cdev *cdev;
2536 	int ret = 0;
2537 	int i;
2538 
2539 	for (i = baseminor; i < baseminor + count; i++) {
2540 		cdev = cdev_alloc();
2541 		cdev->ops = fops;
2542 		kobject_set_name(&cdev->kobj, name);
2543 
2544 		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
2545 		if (ret != 0)
2546 			break;
2547 	}
2548 	return (ret);
2549 }
2550 
2551 void
2552 __unregister_chrdev(unsigned int major, unsigned int baseminor,
2553     unsigned int count, const char *name)
2554 {
2555 	struct linux_cdev *cdevp;
2556 	int i;
2557 
2558 	for (i = baseminor; i < baseminor + count; i++) {
2559 		cdevp = linux_find_cdev(name, major, i);
2560 		if (cdevp != NULL)
2561 			cdev_del(cdevp);
2562 	}
2563 }
2564 
2565 void
2566 linux_dump_stack(void)
2567 {
2568 #ifdef STACK
2569 	struct stack st;
2570 
2571 	stack_save(&st);
2572 	stack_print(&st);
2573 #endif
2574 }
2575 
2576 int
2577 linuxkpi_net_ratelimit(void)
2578 {
2579 
2580 	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
2581 	    lkpi_net_maxpps));
2582 }
2583 
2584 struct io_mapping *
2585 io_mapping_create_wc(resource_size_t base, unsigned long size)
2586 {
2587 	struct io_mapping *mapping;
2588 
2589 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2590 	if (mapping == NULL)
2591 		return (NULL);
2592 	return (io_mapping_init_wc(mapping, base, size));
2593 }
2594 
2595 /* We likely want a linuxkpi_device.c at some point. */
2596 bool
2597 device_can_wakeup(struct device *dev)
2598 {
2599 
2600 	if (dev == NULL)
2601 		return (false);
2602 	/*
2603 	 * XXX-BZ iwlwifi queries it as part of enabling WoWLAN.
2604 	 * Normally this would be based on a bool in dev->power.XXX or on a
2605 	 * check such as the PCI PCIM_PCAP_*PME bits; we cannot enable it yet.
2606 	 * We may get away with calling directly into bsddev for as long as we
2607 	 * can assume PCI only, avoiding struct device changes that break KBI.
2608 	 */
2609 	pr_debug("%s:%d: not enabled; see comment.\n", __func__, __LINE__);
2610 	return (false);
2611 }
2612 
2613 static void
2614 devm_device_group_remove(struct device *dev, void *p)
2615 {
2616 	const struct attribute_group **dr = p;
2617 	const struct attribute_group *group = *dr;
2618 
2619 	sysfs_remove_group(&dev->kobj, group);
2620 }
2621 
2622 int
2623 lkpi_devm_device_add_group(struct device *dev,
2624     const struct attribute_group *group)
2625 {
2626 	const struct attribute_group **dr;
2627 	int ret;
2628 
2629 	dr = devres_alloc(devm_device_group_remove, sizeof(*dr), GFP_KERNEL);
2630 	if (dr == NULL)
2631 		return (-ENOMEM);
2632 
2633 	ret = sysfs_create_group(&dev->kobj, group);
2634 	if (ret == 0) {
2635 		*dr = group;
2636 		devres_add(dev, dr);
2637 	} else
2638 		devres_free(dr);
2639 
2640 	return (ret);
2641 }
2642 
2643 #if defined(__i386__) || defined(__amd64__)
2644 bool linux_cpu_has_clflush;
2645 struct cpuinfo_x86 boot_cpu_data;
2646 struct cpuinfo_x86 *__cpu_data;
2647 #endif
2648 
2649 cpumask_t *
2650 lkpi_get_static_single_cpu_mask(int cpuid)
2651 {
2652 
2653 	KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n",
2654 	    __func__, cpuid));
2655 	KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n",
2656 	    __func__, cpuid));
2657 
2658 	return (static_single_cpu_mask[cpuid]);
2659 }
2660 
2661 bool
2662 lkpi_xen_initial_domain(void)
2663 {
2664 #ifdef XENHVM
2665 	return (xen_initial_domain());
2666 #else
2667 	return (false);
2668 #endif
2669 }
2670 
2671 bool
2672 lkpi_xen_pv_domain(void)
2673 {
2674 #ifdef XENHVM
2675 	return (xen_pv_domain());
2676 #else
2677 	return (false);
2678 #endif
2679 }
2680 
2681 static void
2682 linux_compat_init(void *arg)
2683 {
2684 	struct sysctl_oid *rootoid;
2685 	int i;
2686 
2687 #if defined(__i386__) || defined(__amd64__)
2688 	static const uint32_t x86_vendors[X86_VENDOR_NUM] = {
2689 		[X86_VENDOR_INTEL] = CPU_VENDOR_INTEL,
2690 		[X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX,
2691 		[X86_VENDOR_AMD] = CPU_VENDOR_AMD,
2692 		[X86_VENDOR_UMC] = CPU_VENDOR_UMC,
2693 		[X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR,
2694 		[X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA,
2695 		[X86_VENDOR_NSC] = CPU_VENDOR_NSC,
2696 		[X86_VENDOR_HYGON] = CPU_VENDOR_HYGON,
2697 	};
2698 	uint8_t x86_vendor = X86_VENDOR_UNKNOWN;
2699 
2700 	for (i = 0; i < X86_VENDOR_NUM; i++) {
2701 		if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) {
2702 			x86_vendor = i;
2703 			break;
2704 		}
2705 	}
2706 	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
2707 	boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
2708 	boot_cpu_data.x86_max_cores = mp_ncpus;
2709 	boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
2710 	boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);
2711 	boot_cpu_data.x86_vendor = x86_vendor;
2712 
2713 	__cpu_data = mallocarray(mp_maxid + 1,
2714 	    sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO);
2715 	CPU_FOREACH(i) {
2716 		__cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
2717 		__cpu_data[i].x86_max_cores = mp_ncpus;
2718 		__cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
2719 		__cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
2720 		__cpu_data[i].x86_vendor = x86_vendor;
2721 	}
2722 #endif
2723 	rw_init(&linux_vma_lock, "lkpi-vma-lock");
2724 
2725 	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
2726 	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
2727 	kobject_init(&linux_class_root, &linux_class_ktype);
2728 	kobject_set_name(&linux_class_root, "class");
2729 	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
2730 	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
2731 	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
2732 	kobject_set_name(&linux_root_device.kobj, "device");
2733 	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
2734 	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
2735 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
2736 	linux_root_device.bsddev = root_bus;
2737 	linux_class_misc.name = "misc";
2738 	class_register(&linux_class_misc);
2739 	INIT_LIST_HEAD(&pci_drivers);
2740 	INIT_LIST_HEAD(&pci_devices);
2741 	spin_lock_init(&pci_lock);
2742 	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
2743 	for (i = 0; i < VMMAP_HASH_SIZE; i++)
2744 		LIST_INIT(&vmmaphead[i]);
2745 	init_waitqueue_head(&linux_bit_waitq);
2746 	init_waitqueue_head(&linux_var_waitq);
2747 
2748 	CPU_COPY(&all_cpus, &cpu_online_mask);
2749 	/*
2750 	 * Generate a single-CPU cpumask_t for each CPU (possibly) in the system.
2751 	 * CPUs are indexed from 0..(mp_maxid).  The entry for cpuid 0 will only
2752 	 * have itself in the cpumask, cpuid 1 only itself on entry 1, and so on.
2753 	 * This is used by cpumask_of() (and possibly others in the future) for,
2754 	 * e.g., drivers to pass hints to irq_set_affinity_hint().
2755 	 */
2756 	static_single_cpu_mask = mallocarray(mp_maxid + 1,
2757 	    sizeof(*static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO);
2758 
2759 	/*
2760 	 * When the number of CPUs reaches a threshold, we start to save
2761 	 * memory: since the sets are static, masks whose single bit sits at
2762 	 * the same position within a bitset word can overlap and share
2763 	 * storage.  Asymptotically, the regular scheme needs O(n²) memory
2764 	 * whereas the overlapping one needs only O(n), with n being the
2765 	 * maximum number of CPUs, so the gain quickly becomes huge.  The
2766 	 * threshold for 64-bit architectures is 128 CPUs.
2767 	 */
2768 	if (mp_ncpus < (2 * _BITSET_BITS)) {
2769 		cpumask_t *sscm_ptr;
2770 
2771 		/*
2772 		 * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) *
2773 		 * (_BITSET_BITS / 8)' bytes (for comparison with the
2774 		 * overlapping scheme).
2775 		 */
2776 		static_single_cpu_mask_lcs = mallocarray(mp_ncpus,
2777 		    sizeof(*static_single_cpu_mask_lcs),
2778 		    M_KMALLOC, M_WAITOK | M_ZERO);
2779 
2780 		sscm_ptr = static_single_cpu_mask_lcs;
2781 		CPU_FOREACH(i) {
2782 			static_single_cpu_mask[i] = sscm_ptr++;
2783 			CPU_SET(i, static_single_cpu_mask[i]);
2784 		}
2785 	} else {
2786 		/* Pointer to a bitset word. */
2787 		__typeof(((cpuset_t *)NULL)->__bits[0]) *bwp;
2788 
2789 		/*
2790 		 * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t'
2791 		 * really) with a single bit set that can be reused for all
2792 		 * single CPU masks by making them start at different offsets.
2793 		 * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before
2794 		 * the word having its single bit set, and the same amount
2795 		 * after.
2796 		 */
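		/*
		 * For example, with _BITSET_BITS == 64 and CPU_SETSIZE == 256,
		 * a span is 2 * 4 - 1 = 7 words with its single bit in the
		 * middle word (index 3).  The mask for CPU i then starts
		 * (i / 64) words before the middle word of span (i % 64), so
		 * that bit i of that mask is the one set.
		 */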
2797 		static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS,
2798 		    (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8),
2799 		    M_KMALLOC, M_WAITOK | M_ZERO);
2800 
2801 		/*
2802 		 * We rely below on cpuset_t and the bitset generic
2803 		 * implementation assigning words in the '__bits' array in the
2804 		 * same order of bits (i.e., little-endian ordering, not to be
2805 		 * confused with machine endianness, which concerns bits in
2806 		 * words and other integers).  This is an imperfect test, but it
2807 		 * will detect a change to big-endian ordering.
2808 		 */
2809 		_Static_assert(
2810 		    __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1,
2811 		    "Assumes a bitset implementation that is little-endian "
2812 		    "on its words");
2813 
2814 		/* Initialize the single bit of each static span. */
2815 		bwp = (__typeof(bwp))static_single_cpu_mask_lcs +
2816 		    (__bitset_words(CPU_SETSIZE) - 1);
2817 		for (i = 0; i < _BITSET_BITS; i++) {
2818 			CPU_SET(i, (cpuset_t *)bwp);
2819 			bwp += (2 * __bitset_words(CPU_SETSIZE) - 1);
2820 		}
2821 
2822 		/*
2823 		 * Finally set all CPU masks to the proper word in their
2824 		 * relevant span.
2825 		 */
2826 		CPU_FOREACH(i) {
2827 			bwp = (__typeof(bwp))static_single_cpu_mask_lcs;
2828 			/* Find the non-zero word of the relevant span. */
2829 			bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) *
2830 			    (i % _BITSET_BITS) +
2831 			    __bitset_words(CPU_SETSIZE) - 1;
2832 			/* Shift to find the CPU mask start. */
2833 			bwp -= (i / _BITSET_BITS);
2834 			static_single_cpu_mask[i] = (cpuset_t *)bwp;
2835 		}
2836 	}
2837 
2838 	strlcpy(init_uts_ns.name.release, osrelease, sizeof(init_uts_ns.name.release));
2839 }
2840 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
2841 
2842 static void
2843 linux_compat_uninit(void *arg)
2844 {
2845 	linux_kobject_kfree_name(&linux_class_root);
2846 	linux_kobject_kfree_name(&linux_root_device.kobj);
2847 	linux_kobject_kfree_name(&linux_class_misc.kobj);
2848 
2849 	free(static_single_cpu_mask_lcs, M_KMALLOC);
2850 	free(static_single_cpu_mask, M_KMALLOC);
2851 #if defined(__i386__) || defined(__amd64__)
2852 	free(__cpu_data, M_KMALLOC);
2853 #endif
2854 
2855 	mtx_destroy(&vmmaplock);
2856 	spin_lock_destroy(&pci_lock);
2857 	rw_destroy(&linux_vma_lock);
2858 }
2859 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
2860 
2861 /*
2862  * NOTE: Linux frequently uses "unsigned long" for pointer to integer
2863  * conversion and vice versa, where in FreeBSD "uintptr_t" would be
2864  * used. Assert these types have the same size, else some parts of the
2865  * LinuxKPI may not work like expected:
2866  */
2867 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
2868