xref: /freebsd/sys/compat/linuxkpi/common/src/linux_compat.c (revision b08fc26cbdd00df6852e71e1be58fa9cc92019f0)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/sysctl.h>
38 #include <sys/proc.h>
39 #include <sys/sglist.h>
40 #include <sys/sleepqueue.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/bus.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/filio.h>
47 #include <sys/rwlock.h>
48 
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 
52 #include <machine/stdarg.h>
53 
54 #if defined(__i386__) || defined(__amd64__)
55 #include <machine/md_var.h>
56 #endif
57 
58 #include <linux/kobject.h>
59 #include <linux/device.h>
60 #include <linux/slab.h>
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/cdev.h>
64 #include <linux/file.h>
65 #include <linux/sysfs.h>
66 #include <linux/mm.h>
67 #include <linux/io.h>
68 #include <linux/vmalloc.h>
69 #include <linux/netdevice.h>
70 #include <linux/timer.h>
71 #include <linux/interrupt.h>
72 #include <linux/uaccess.h>
73 #include <linux/kernel.h>
74 #include <linux/list.h>
75 #include <linux/compat.h>
76 
77 #include <vm/vm_pager.h>
78 
79 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");
80 
81 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
82 
83 #include <linux/rbtree.h>
84 /* Undo Linux compat changes. */
85 #undef RB_ROOT
86 #undef file
87 #undef cdev
88 #define	RB_ROOT(head)	(head)->rbh_root
89 
90 struct kobject linux_class_root;
91 struct device linux_root_device;
92 struct class linux_class_misc;
93 struct list_head pci_drivers;
94 struct list_head pci_devices;
95 struct net init_net;
96 spinlock_t pci_lock;
97 
98 unsigned long linux_timer_hz_mask;
99 
/*
 * Placeholder comparator for the Linux root RB-tree; the compat code
 * never compares nodes through it, so reaching this is a bug.
 */
int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}
105 
106 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
107 
108 int
109 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
110 {
111 	va_list tmp_va;
112 	int len;
113 	char *old;
114 	char *name;
115 	char dummy;
116 
117 	old = kobj->name;
118 
119 	if (old && fmt == NULL)
120 		return (0);
121 
122 	/* compute length of string */
123 	va_copy(tmp_va, args);
124 	len = vsnprintf(&dummy, 0, fmt, tmp_va);
125 	va_end(tmp_va);
126 
127 	/* account for zero termination */
128 	len++;
129 
130 	/* check for error */
131 	if (len < 1)
132 		return (-EINVAL);
133 
134 	/* allocate memory for string */
135 	name = kzalloc(len, GFP_KERNEL);
136 	if (name == NULL)
137 		return (-ENOMEM);
138 	vsnprintf(name, len, fmt, args);
139 	kobj->name = name;
140 
141 	/* free old string */
142 	kfree(old);
143 
144 	/* filter new string */
145 	for (; *name != '\0'; name++)
146 		if (*name == '/')
147 			*name = '!';
148 	return (0);
149 }
150 
151 int
152 kobject_set_name(struct kobject *kobj, const char *fmt, ...)
153 {
154 	va_list args;
155 	int error;
156 
157 	va_start(args, fmt);
158 	error = kobject_set_name_vargs(kobj, fmt, args);
159 	va_end(args);
160 
161 	return (error);
162 }
163 
164 static int
165 kobject_add_complete(struct kobject *kobj, struct kobject *parent)
166 {
167 	const struct kobj_type *t;
168 	int error;
169 
170 	kobj->parent = parent;
171 	error = sysfs_create_dir(kobj);
172 	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
173 		struct attribute **attr;
174 		t = kobj->ktype;
175 
176 		for (attr = t->default_attrs; *attr != NULL; attr++) {
177 			error = sysfs_create_file(kobj, *attr);
178 			if (error)
179 				break;
180 		}
181 		if (error)
182 			sysfs_remove_dir(kobj);
183 
184 	}
185 	return (error);
186 }
187 
188 int
189 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
190 {
191 	va_list args;
192 	int error;
193 
194 	va_start(args, fmt);
195 	error = kobject_set_name_vargs(kobj, fmt, args);
196 	va_end(args);
197 	if (error)
198 		return (error);
199 
200 	return kobject_add_complete(kobj, parent);
201 }
202 
/*
 * Final-reference handler invoked by kobject_put(): tear down the
 * sysfs directory, run the type's release method, free the name.
 */
void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	/*
	 * Save the name pointer first: the release method may free the
	 * kobject itself, after which kobj->name must not be touched.
	 */
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}
216 
/* Release method that frees the kobject allocation itself. */
static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}
222 
223 static void
224 linux_kobject_kfree_name(struct kobject *kobj)
225 {
226 	if (kobj) {
227 		kfree(kobj->name);
228 	}
229 }
230 
/* kobj_type for heap-allocated kobjects: freed on last reference. */
const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};
234 
/* Default release for devices made by device_create(); frees the device. */
static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}
241 
242 static ssize_t
243 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
244 {
245 	struct class_attribute *dattr;
246 	ssize_t error;
247 
248 	dattr = container_of(attr, struct class_attribute, attr);
249 	error = -EIO;
250 	if (dattr->show)
251 		error = dattr->show(container_of(kobj, struct class, kobj),
252 		    dattr, buf);
253 	return (error);
254 }
255 
256 static ssize_t
257 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
258     size_t count)
259 {
260 	struct class_attribute *dattr;
261 	ssize_t error;
262 
263 	dattr = container_of(attr, struct class_attribute, attr);
264 	error = -EIO;
265 	if (dattr->store)
266 		error = dattr->store(container_of(kobj, struct class, kobj),
267 		    dattr, buf, count);
268 	return (error);
269 }
270 
271 static void
272 linux_class_release(struct kobject *kobj)
273 {
274 	struct class *class;
275 
276 	class = container_of(kobj, struct class, kobj);
277 	if (class->class_release)
278 		class->class_release(class);
279 }
280 
/* sysfs show/store dispatch table for class attributes. */
static const struct sysfs_ops linux_class_sysfs = {
	.show  = linux_class_show,
	.store = linux_class_store,
};

/* kobj_type backing struct class kobjects. */
const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};
290 
291 static void
292 linux_dev_release(struct kobject *kobj)
293 {
294 	struct device *dev;
295 
296 	dev = container_of(kobj, struct device, kobj);
297 	/* This is the precedence defined by linux. */
298 	if (dev->release)
299 		dev->release(dev);
300 	else if (dev->class && dev->class->dev_release)
301 		dev->class->dev_release(dev);
302 }
303 
304 static ssize_t
305 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
306 {
307 	struct device_attribute *dattr;
308 	ssize_t error;
309 
310 	dattr = container_of(attr, struct device_attribute, attr);
311 	error = -EIO;
312 	if (dattr->show)
313 		error = dattr->show(container_of(kobj, struct device, kobj),
314 		    dattr, buf);
315 	return (error);
316 }
317 
318 static ssize_t
319 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
320     size_t count)
321 {
322 	struct device_attribute *dattr;
323 	ssize_t error;
324 
325 	dattr = container_of(attr, struct device_attribute, attr);
326 	error = -EIO;
327 	if (dattr->store)
328 		error = dattr->store(container_of(kobj, struct device, kobj),
329 		    dattr, buf, count);
330 	return (error);
331 }
332 
/* sysfs show/store dispatch table for device attributes. */
static const struct sysfs_ops linux_dev_sysfs = {
	.show  = linux_dev_show,
	.store = linux_dev_store,
};

/* kobj_type backing struct device kobjects. */
const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};
342 
/*
 * Allocate, name and register a new device under the given class.
 * The device is freed by linux_device_release() on its final put.
 *
 * NOTE(review): kzalloc() is passed the FreeBSD M_WAITOK flag rather
 * than a GFP_* constant here — confirm the flag mapping is intended
 * before changing it.
 */
struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
363 
364 int
365 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
366     struct kobject *parent, const char *fmt, ...)
367 {
368 	va_list args;
369 	int error;
370 
371 	kobject_init(kobj, ktype);
372 	kobj->ktype = ktype;
373 	kobj->parent = parent;
374 	kobj->name = NULL;
375 
376 	va_start(args, fmt);
377 	error = kobject_set_name_vargs(kobj, fmt, args);
378 	va_end(args);
379 	if (error)
380 		return (error);
381 	return kobject_add_complete(kobj, parent);
382 }
383 
/*
 * devfs cdevpriv destructor: run the Linux release method, drop the
 * vnode hold taken in linux_dev_open(), and free the file.
 */
static void
linux_file_dtor(void *cdp)
{
	struct linux_file *filp;

	linux_set_current(curthread);
	filp = cdp;
	filp->f_op->release(filp->f_vnode, filp);
	vdrop(filp->f_vnode);
	kfree(filp);
}
395 
396 static int
397 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
398 {
399 	struct linux_cdev *ldev;
400 	struct linux_file *filp;
401 	struct file *file;
402 	int error;
403 
404 	file = td->td_fpop;
405 	ldev = dev->si_drv1;
406 	if (ldev == NULL)
407 		return (ENODEV);
408 	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
409 	filp->f_dentry = &filp->f_dentry_store;
410 	filp->f_op = ldev->ops;
411 	filp->f_flags = file->f_flag;
412 	vhold(file->f_vnode);
413 	filp->f_vnode = file->f_vnode;
414 	linux_set_current(td);
415 	if (filp->f_op->open) {
416 		error = -filp->f_op->open(file->f_vnode, filp);
417 		if (error) {
418 			kfree(filp);
419 			goto done;
420 		}
421 	}
422 	error = devfs_set_cdevpriv(filp, linux_file_dtor);
423 	if (error) {
424 		filp->f_op->release(file->f_vnode, filp);
425 		kfree(filp);
426 	}
427 done:
428 	return (error);
429 }
430 
431 static int
432 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
433 {
434 	struct linux_cdev *ldev;
435 	struct linux_file *filp;
436 	struct file *file;
437 	int error;
438 
439 	file = td->td_fpop;
440 	ldev = dev->si_drv1;
441 	if (ldev == NULL)
442 		return (0);
443 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
444 		return (error);
445 	filp->f_flags = file->f_flag;
446         devfs_clear_cdevpriv();
447 
448 
449 	return (0);
450 }
451 
/*
 * Fake "user-space" window used to smuggle the kernel ioctl buffer
 * through Linux driver code that expects user addresses; see
 * linux_dev_ioctl() for the setup side.
 */
#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

/*
 * If *uaddr lies inside the fake window, rewrite it to point at the
 * real kernel ioctl buffer recorded on the current task and return
 * non-zero (with *uaddr set to NULL on any bounds violation).
 * Returns zero for genuine user-space addresses, which the callers
 * then pass to copyin()/copyout().
 */
static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}
487 
/*
 * copy_from_user() backend: a memcpy() when the address is the fake
 * ioctl window, a real copyin() otherwise.  Returns 0 or -EFAULT
 * style negative errno.
 */
int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}
499 
/*
 * copy_to_user() backend: a memcpy() when the address is the fake
 * ioctl window, a real copyout() otherwise.  Returns 0 or a negative
 * Linux errno.
 */
int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}
511 
/*
 * ioctl entry point: translate the FreeBSD kernel-buffer ioctl model
 * into the Linux unlocked_ioctl() user-pointer model by publishing
 * the kernel buffer on the current task and handing the driver a
 * fake pointer inside [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR).
 *
 * NOTE(review): a NULL si_drv1 returns 0 here while linux_dev_open()
 * returns ENODEV — confirm this asymmetry is intentional.
 */
static int
linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	unsigned size;
	int error;

	file = td->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;

	linux_set_current(td);
	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		current->bsd_ioctl_data = data;
		current->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
	if (filp->f_op->unlocked_ioctl)
		error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
	else
		error = ENOTTY;
	/* Tear the hint down again so stale pointers cannot be reused. */
	if (size > 0) {
		current->bsd_ioctl_data = NULL;
		current->bsd_ioctl_len = 0;
	}

	return (error);
}
558 
559 static int
560 linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
561 {
562 	struct linux_cdev *ldev;
563 	struct linux_file *filp;
564 	struct thread *td;
565 	struct file *file;
566 	ssize_t bytes;
567 	int error;
568 
569 	td = curthread;
570 	file = td->td_fpop;
571 	ldev = dev->si_drv1;
572 	if (ldev == NULL)
573 		return (0);
574 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
575 		return (error);
576 	filp->f_flags = file->f_flag;
577 	/* XXX no support for I/O vectors currently */
578 	if (uio->uio_iovcnt != 1)
579 		return (EOPNOTSUPP);
580 	linux_set_current(td);
581 	if (filp->f_op->read) {
582 		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
583 		    uio->uio_iov->iov_len, &uio->uio_offset);
584 		if (bytes >= 0) {
585 			uio->uio_iov->iov_base =
586 			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
587 			uio->uio_iov->iov_len -= bytes;
588 			uio->uio_resid -= bytes;
589 		} else
590 			error = -bytes;
591 	} else
592 		error = ENXIO;
593 
594 	return (error);
595 }
596 
597 static int
598 linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
599 {
600 	struct linux_cdev *ldev;
601 	struct linux_file *filp;
602 	struct thread *td;
603 	struct file *file;
604 	ssize_t bytes;
605 	int error;
606 
607 	td = curthread;
608 	file = td->td_fpop;
609 	ldev = dev->si_drv1;
610 	if (ldev == NULL)
611 		return (0);
612 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
613 		return (error);
614 	filp->f_flags = file->f_flag;
615 	/* XXX no support for I/O vectors currently */
616 	if (uio->uio_iovcnt != 1)
617 		return (EOPNOTSUPP);
618 	linux_set_current(td);
619 	if (filp->f_op->write) {
620 		bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
621 		    uio->uio_iov->iov_len, &uio->uio_offset);
622 		if (bytes >= 0) {
623 			uio->uio_iov->iov_base =
624 			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
625 			uio->uio_iov->iov_len -= bytes;
626 			uio->uio_resid -= bytes;
627 		} else
628 			error = -bytes;
629 	} else
630 		error = ENXIO;
631 
632 	return (error);
633 }
634 
635 static int
636 linux_dev_poll(struct cdev *dev, int events, struct thread *td)
637 {
638 	struct linux_cdev *ldev;
639 	struct linux_file *filp;
640 	struct file *file;
641 	int revents;
642 	int error;
643 
644 	file = td->td_fpop;
645 	ldev = dev->si_drv1;
646 	if (ldev == NULL)
647 		return (0);
648 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
649 		return (error);
650 	filp->f_flags = file->f_flag;
651 	linux_set_current(td);
652 	if (filp->f_op->poll)
653 		revents = filp->f_op->poll(filp, NULL) & events;
654 	else
655 		revents = 0;
656 
657 	return (revents);
658 }
659 
660 static int
661 linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
662     vm_size_t size, struct vm_object **object, int nprot)
663 {
664 	struct linux_cdev *ldev;
665 	struct linux_file *filp;
666 	struct thread *td;
667 	struct file *file;
668 	struct vm_area_struct vma;
669 	int error;
670 
671 	td = curthread;
672 	file = td->td_fpop;
673 	ldev = dev->si_drv1;
674 	if (ldev == NULL)
675 		return (ENODEV);
676 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
677 		return (error);
678 	filp->f_flags = file->f_flag;
679 	linux_set_current(td);
680 	vma.vm_start = 0;
681 	vma.vm_end = size;
682 	vma.vm_pgoff = *offset / PAGE_SIZE;
683 	vma.vm_pfn = 0;
684 	vma.vm_page_prot = VM_MEMATTR_DEFAULT;
685 	if (filp->f_op->mmap) {
686 		error = -filp->f_op->mmap(filp, &vma);
687 		if (error == 0) {
688 			struct sglist *sg;
689 
690 			sg = sglist_alloc(1, M_WAITOK);
691 			sglist_append_phys(sg,
692 			    (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
693 			*object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
694 			    nprot, 0, td->td_ucred);
695 		        if (*object == NULL) {
696 				sglist_free(sg);
697 				error = EINVAL;
698 				goto done;
699 			}
700 			*offset = 0;
701 			if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
702 				VM_OBJECT_WLOCK(*object);
703 				vm_object_set_memattr(*object,
704 				    vma.vm_page_prot);
705 				VM_OBJECT_WUNLOCK(*object);
706 			}
707 		}
708 	} else
709 		error = ENODEV;
710 done:
711 	return (error);
712 }
713 
/* Character device switch routing devfs operations to Linux fops. */
struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = linux_dev_open,
	.d_close = linux_dev_close,
	.d_read = linux_dev_read,
	.d_write = linux_dev_write,
	.d_ioctl = linux_dev_ioctl,
	.d_mmap_single = linux_dev_mmap_single,
	.d_poll = linux_dev_poll,
};
725 
726 static int
727 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
728     int flags, struct thread *td)
729 {
730 	struct linux_file *filp;
731 	ssize_t bytes;
732 	int error;
733 
734 	error = 0;
735 	filp = (struct linux_file *)file->f_data;
736 	filp->f_flags = file->f_flag;
737 	/* XXX no support for I/O vectors currently */
738 	if (uio->uio_iovcnt != 1)
739 		return (EOPNOTSUPP);
740 	linux_set_current(td);
741 	if (filp->f_op->read) {
742 		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
743 		    uio->uio_iov->iov_len, &uio->uio_offset);
744 		if (bytes >= 0) {
745 			uio->uio_iov->iov_base =
746 			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
747 			uio->uio_iov->iov_len -= bytes;
748 			uio->uio_resid -= bytes;
749 		} else
750 			error = -bytes;
751 	} else
752 		error = ENXIO;
753 
754 	return (error);
755 }
756 
757 static int
758 linux_file_poll(struct file *file, int events, struct ucred *active_cred,
759     struct thread *td)
760 {
761 	struct linux_file *filp;
762 	int revents;
763 
764 	filp = (struct linux_file *)file->f_data;
765 	filp->f_flags = file->f_flag;
766 	linux_set_current(td);
767 	if (filp->f_op->poll)
768 		revents = filp->f_op->poll(filp, NULL) & events;
769 	else
770 		revents = 0;
771 
772 	return (revents);
773 }
774 
775 static int
776 linux_file_close(struct file *file, struct thread *td)
777 {
778 	struct linux_file *filp;
779 	int error;
780 
781 	filp = (struct linux_file *)file->f_data;
782 	filp->f_flags = file->f_flag;
783 	linux_set_current(td);
784 	error = -filp->f_op->release(NULL, filp);
785 	funsetown(&filp->f_sigio);
786 	kfree(filp);
787 
788 	return (error);
789 }
790 
791 static int
792 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
793     struct thread *td)
794 {
795 	struct linux_file *filp;
796 	int error;
797 
798 	filp = (struct linux_file *)fp->f_data;
799 	filp->f_flags = fp->f_flag;
800 	error = 0;
801 
802 	linux_set_current(td);
803 	switch (cmd) {
804 	case FIONBIO:
805 		break;
806 	case FIOASYNC:
807 		if (filp->f_op->fasync == NULL)
808 			break;
809 		error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
810 		break;
811 	case FIOSETOWN:
812 		error = fsetown(*(int *)data, &filp->f_sigio);
813 		if (error == 0)
814 			error = filp->f_op->fasync(0, filp,
815 			    fp->f_flag & FASYNC);
816 		break;
817 	case FIOGETOWN:
818 		*(int *)data = fgetown(&filp->f_sigio);
819 		break;
820 	default:
821 		error = ENOTTY;
822 		break;
823 	}
824 	return (error);
825 }
826 
/* fstat(2) is not supported on Linux compat files. */
static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}
834 
/* No extra kinfo_file information is filled in; report success. */
static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

	return (0);
}
842 
/*
 * fileops for Linux-style files; unimplemented operations are routed
 * to the invfo_* stubs.  Note fo_write is not wired up here.
 */
struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};
857 
858 /*
859  * Hash of vmmap addresses.  This is infrequently accessed and does not
860  * need to be particularly large.  This is done because we must store the
861  * caller's idea of the map size to properly unmap.
862  */
/*
 * One tracked mapping: the KVA handed out and the size the caller
 * asked for (needed to unmap correctly later).
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 			*vm_addr;
	unsigned long		vm_size;
};
868 
869 struct vmmaphd {
870 	struct vmmap *lh_first;
871 };
872 #define	VMMAP_HASH_SIZE	64
873 #define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
874 #define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
875 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
876 static struct mtx vmmaplock;
877 
878 static void
879 vmmap_add(void *addr, unsigned long size)
880 {
881 	struct vmmap *vmmap;
882 
883 	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
884 	mtx_lock(&vmmaplock);
885 	vmmap->vm_size = size;
886 	vmmap->vm_addr = addr;
887 	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
888 	mtx_unlock(&vmmaplock);
889 }
890 
891 static struct vmmap *
892 vmmap_remove(void *addr)
893 {
894 	struct vmmap *vmmap;
895 
896 	mtx_lock(&vmmaplock);
897 	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
898 		if (vmmap->vm_addr == addr)
899 			break;
900 	if (vmmap)
901 		LIST_REMOVE(vmmap, vm_next);
902 	mtx_unlock(&vmmaplock);
903 
904 	return (vmmap);
905 }
906 
907 #if defined(__i386__) || defined(__amd64__)
908 void *
909 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
910 {
911 	void *addr;
912 
913 	addr = pmap_mapdev_attr(phys_addr, size, attr);
914 	if (addr == NULL)
915 		return (NULL);
916 	vmmap_add(addr, size);
917 
918 	return (addr);
919 }
920 #endif
921 
922 void
923 iounmap(void *addr)
924 {
925 	struct vmmap *vmmap;
926 
927 	vmmap = vmmap_remove(addr);
928 	if (vmmap == NULL)
929 		return;
930 #if defined(__i386__) || defined(__amd64__)
931 	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
932 #endif
933 	kfree(vmmap);
934 }
935 
936 
937 void *
938 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
939 {
940 	vm_offset_t off;
941 	size_t size;
942 
943 	size = count * PAGE_SIZE;
944 	off = kva_alloc(size);
945 	if (off == 0)
946 		return (NULL);
947 	vmmap_add((void *)off, size);
948 	pmap_qenter(off, pages, count);
949 
950 	return ((void *)off);
951 }
952 
953 void
954 vunmap(void *addr)
955 {
956 	struct vmmap *vmmap;
957 
958 	vmmap = vmmap_remove(addr);
959 	if (vmmap == NULL)
960 		return;
961 	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
962 	kva_free((vm_offset_t)addr, vmmap->vm_size);
963 	kfree(vmmap);
964 }
965 
966 char *
967 kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
968 {
969 	unsigned int len;
970 	char *p;
971 	va_list aq;
972 
973 	va_copy(aq, ap);
974 	len = vsnprintf(NULL, 0, fmt, aq);
975 	va_end(aq);
976 
977 	p = kmalloc(len + 1, gfp);
978 	if (p != NULL)
979 		vsnprintf(p, len + 1, fmt, ap);
980 
981 	return (p);
982 }
983 
984 char *
985 kasprintf(gfp_t gfp, const char *fmt, ...)
986 {
987 	va_list ap;
988 	char *p;
989 
990 	va_start(ap, fmt);
991 	p = kvasprintf(gfp, fmt, ap);
992 	va_end(ap);
993 
994 	return (p);
995 }
996 
/* Callout trampoline: invoke the Linux timer function with its data. */
static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;
	timer->function(timer->data);
}
1005 
/* (Re)arm the timer to fire at the given absolute jiffies value. */
void
mod_timer(struct timer_list *timer, unsigned long expires)
{

	timer->expires = expires;
	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}
1015 
/* Arm the timer using its previously stored expires value. */
void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}
1024 
/* Like add_timer(), but pin the callout to the given CPU. */
void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}
1033 
1034 static void
1035 linux_timer_init(void *arg)
1036 {
1037 
1038 	/*
1039 	 * Compute an internal HZ value which can divide 2**32 to
1040 	 * avoid timer rounding problems when the tick value wraps
1041 	 * around 2**32:
1042 	 */
1043 	linux_timer_hz_mask = 1;
1044 	while (linux_timer_hz_mask < (unsigned long)hz)
1045 		linux_timer_hz_mask *= 2;
1046 	linux_timer_hz_mask--;
1047 }
1048 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
1049 
/*
 * Mark the completion done and wake either one waiter or all of them;
 * the sleepqueue chain lock serializes against the wait functions.
 */
void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	c->done++;
	if (all)
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	else
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(c);
	/* sleepq_* report whether proc0 (the swapper) must be kicked. */
	if (wakeup_swapper)
		kick_proc0();
}
1065 
1066 /*
1067  * Indefinite wait for done != 0 with or without signals.
1068  */
long
linux_wait_for_common(struct completion *c, int flags)
{
	if (SCHEDULER_STOPPED())
		return (0);

	/* Non-zero flags request an interruptible (signal-aware) sleep. */
	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	for (;;) {
		sleepq_lock(c);
		/*
		 * The break leaves the loop with the sleepqueue chain
		 * lock still held, pairing with the sleepq_release()
		 * after the loop.
		 */
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			/* sleepq_wait_sig() drops the lock in all cases. */
			if (sleepq_wait_sig(c, 0) != 0)
				return (-ERESTARTSYS);
		} else
			sleepq_wait(c, 0);
	}
	/* Consume one completion while still holding the chain lock. */
	c->done--;
	sleepq_release(c);

	return (0);
}
1095 
1096 /*
1097  * Time limited wait for done != 0 with or without signals.
1098  */
long
linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
{
	/* Absolute deadline in jiffies. */
	long end = jiffies + timeout;

	if (SCHEDULER_STOPPED())
		return (0);

	/* Non-zero flags request an interruptible (signal-aware) sleep. */
	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	for (;;) {
		int ret;

		sleepq_lock(c);
		/*
		 * The break leaves the loop with the sleepqueue chain
		 * lock still held, pairing with the sleepq_release()
		 * after the loop.
		 */
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
		if (flags & SLEEPQ_INTERRUPTIBLE)
			ret = sleepq_timedwait_sig(c, 0);
		else
			ret = sleepq_timedwait(c, 0);
		if (ret != 0) {
			/* check for timeout or signal */
			if (ret == EWOULDBLOCK)
				return (0);
			else
				return (-ERESTARTSYS);
		}
	}
	/* Consume one completion while still holding the chain lock. */
	c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	return (linux_timer_jiffies_until(end));
}
1137 
1138 int
1139 linux_try_wait_for_completion(struct completion *c)
1140 {
1141 	int isdone;
1142 
1143 	isdone = 1;
1144 	sleepq_lock(c);
1145 	if (c->done)
1146 		c->done--;
1147 	else
1148 		isdone = 0;
1149 	sleepq_release(c);
1150 	return (isdone);
1151 }
1152 
1153 int
1154 linux_completion_done(struct completion *c)
1155 {
1156 	int isdone;
1157 
1158 	isdone = 1;
1159 	sleepq_lock(c);
1160 	if (c->done == 0)
1161 		isdone = 0;
1162 	sleepq_release(c);
1163 	return (isdone);
1164 }
1165 
/*
 * Final-put handler for dynamically allocated linux_cdevs: destroy
 * the devfs node, free the cdev, and drop the parent reference.
 */
static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	if (cdev->cdev)
		destroy_dev(cdev->cdev);
	kfree(cdev);
	kobject_put(parent);
}
1179 
/*
 * Final-put handler for caller-owned (static) linux_cdevs: identical
 * to linux_cdev_release() except the cdev storage is not freed.
 */
static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	if (cdev->cdev)
		destroy_dev(cdev->cdev);
	kobject_put(parent);
}
1192 
/* kobj_type for heap-allocated linux_cdevs (storage freed on put). */
const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

/* kobj_type for caller-owned linux_cdevs (storage not freed). */
const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};
1200 
1201 static void
1202 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
1203 {
1204 	struct notifier_block *nb;
1205 
1206 	nb = arg;
1207 	if (linkstate == LINK_STATE_UP)
1208 		nb->notifier_call(nb, NETDEV_UP, ifp);
1209 	else
1210 		nb->notifier_call(nb, NETDEV_DOWN, ifp);
1211 }
1212 
1213 static void
1214 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
1215 {
1216 	struct notifier_block *nb;
1217 
1218 	nb = arg;
1219 	nb->notifier_call(nb, NETDEV_REGISTER, ifp);
1220 }
1221 
1222 static void
1223 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
1224 {
1225 	struct notifier_block *nb;
1226 
1227 	nb = arg;
1228 	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
1229 }
1230 
1231 static void
1232 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
1233 {
1234 	struct notifier_block *nb;
1235 
1236 	nb = arg;
1237 	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
1238 }
1239 
1240 static void
1241 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
1242 {
1243 	struct notifier_block *nb;
1244 
1245 	nb = arg;
1246 	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
1247 }
1248 
/*
 * Hook the notifier block into the FreeBSD ifnet event handlers.
 * Note: NETDEV_DOWN shares the link-event registration stored under
 * the NETDEV_UP tag; linux_handle_ifnet_link_event() dispatches both.
 */
int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}
1264 
1265 int
1266 register_inetaddr_notifier(struct notifier_block *nb)
1267 {
1268 
1269         nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
1270             ifaddr_event, linux_handle_ifaddr_event, nb, 0);
1271         return (0);
1272 }
1273 
1274 int
1275 unregister_netdevice_notifier(struct notifier_block *nb)
1276 {
1277 
1278         EVENTHANDLER_DEREGISTER(ifnet_link_event,
1279 	    nb->tags[NETDEV_UP]);
1280         EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
1281 	    nb->tags[NETDEV_REGISTER]);
1282         EVENTHANDLER_DEREGISTER(ifnet_departure_event,
1283 	    nb->tags[NETDEV_UNREGISTER]);
1284         EVENTHANDLER_DEREGISTER(iflladdr_event,
1285 	    nb->tags[NETDEV_CHANGEADDR]);
1286 
1287 	return (0);
1288 }
1289 
1290 int
1291 unregister_inetaddr_notifier(struct notifier_block *nb)
1292 {
1293 
1294         EVENTHANDLER_DEREGISTER(ifaddr_event,
1295             nb->tags[NETDEV_CHANGEIFADDR]);
1296 
1297         return (0);
1298 }
1299 
/*
 * Adapter passed as the qsort_r() context argument by list_sort():
 * carries the caller's comparison function and private pointer through
 * to linux_le_cmp().
 */
struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};
1304 
1305 static inline int
1306 linux_le_cmp(void *priv, const void *d1, const void *d2)
1307 {
1308 	struct list_head *le1, *le2;
1309 	struct list_sort_thunk *thunk;
1310 
1311 	thunk = priv;
1312 	le1 = *(__DECONST(struct list_head **, d1));
1313 	le2 = *(__DECONST(struct list_head **, d2));
1314 	return ((thunk->cmp)(thunk->priv, le1, le2));
1315 }
1316 
1317 void
1318 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
1319     struct list_head *a, struct list_head *b))
1320 {
1321 	struct list_sort_thunk thunk;
1322 	struct list_head **ar, *le;
1323 	size_t count, i;
1324 
1325 	count = 0;
1326 	list_for_each(le, head)
1327 		count++;
1328 	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
1329 	i = 0;
1330 	list_for_each(le, head)
1331 		ar[i++] = le;
1332 	thunk.cmp = cmp;
1333 	thunk.priv = priv;
1334 	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
1335 	INIT_LIST_HEAD(head);
1336 	for (i = 0; i < count; i++)
1337 		list_add_tail(ar[i], head);
1338 	free(ar, M_KMALLOC);
1339 }
1340 
1341 void
1342 linux_irq_handler(void *ent)
1343 {
1344 	struct irq_ent *irqe;
1345 
1346 	irqe = ent;
1347 	irqe->handler(irqe->irq, irqe->arg);
1348 }
1349 
/*
 * Look up a Linux character device by name and (major, minor) pair.
 * Returns the matching linux_cdev, or NULL if none is found.
 */
struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	int unit = MKDEV(major, minor);
	struct cdev *cdev;

	/* Walk linuxcdevsw's device list under the global dev lock. */
	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		/*
		 * NOTE(review): si_drv1 is dereferenced without a NULL
		 * check; presumably every cdev on this list has it set to
		 * its linux_cdev — verify against the creation path.
		 */
		struct linux_cdev *ldev = cdev->si_drv1;
		if (dev2unit(cdev) == unit &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	/* LIST_FOREACH leaves cdev NULL when no entry matched. */
	return (cdev != NULL ? cdev->si_drv1 : NULL);
}
1368 
/*
 * Register 'count' character devices named 'name' with minors starting at
 * 'baseminor' under the given major, all sharing 'fops'.  Returns 0 on
 * success or the first cdev_add() error.
 *
 * NOTE(review): on cdev_add() failure the freshly allocated cdev is not
 * released, and devices added in earlier iterations are left registered —
 * looks like a leak / partial-registration issue; confirm against
 * cdev_alloc()/cdev_add() ownership semantics before changing.
 */
int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}
1389 
/*
 * Like __register_chrdev(), but additionally sets owner uid/gid and the
 * permission mode on each created device node via cdev_add_ext().
 * Returns 0 on success or the first cdev_add_ext() error.
 *
 * NOTE(review): shares the same error-path behavior as
 * __register_chrdev(): the failing cdev is not released and earlier
 * iterations are not rolled back — verify intended ownership semantics.
 */
int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev_init(cdev, fops);
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}
1411 
1412 void
1413 __unregister_chrdev(unsigned int major, unsigned int baseminor,
1414     unsigned int count, const char *name)
1415 {
1416 	struct linux_cdev *cdevp;
1417 	int i;
1418 
1419 	for (i = baseminor; i < baseminor + count; i++) {
1420 		cdevp = linux_find_cdev(name, major, i);
1421 		if (cdevp != NULL)
1422 			cdev_del(cdevp);
1423 	}
1424 }
1425 
#if defined(__i386__) || defined(__amd64__)
/* Initialized from CPUID in linux_compat_init(); true when CLFLUSH exists. */
bool linux_cpu_has_clflush;
#endif
1429 
/*
 * One-time bring-up of the LinuxKPI compatibility layer, run at
 * SI_SUB_DRIVERS/SI_ORDER_SECOND: creates the sysctl mirror of the Linux
 * "sys/class" and "sys/device" hierarchies, registers the misc class, and
 * initializes the PCI driver/device lists and the I/O map hash.
 *
 * NOTE: the kobject/sysctl setup below is order-dependent (nodes must
 * exist before children are attached), so statements must not be reordered.
 */
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	/* Cache CLFLUSH availability for later cache-flush emulation. */
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif

	/* Root "sys" sysctl node mirroring Linux's /sys. */
	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	/* Hash of vm map entries used by the io-mapping emulation. */
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
1462 
/*
 * Module teardown counterpart of linux_compat_init(): releases the names
 * allocated for the root class, root device, and misc class kobjects.
 */
static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
1471 
1472 /*
1473  * NOTE: Linux frequently uses "unsigned long" for pointer to integer
1474  * conversion and vice versa, where in FreeBSD "uintptr_t" would be
1475  * used. Assert these types have the same size, else some parts of the
1476  * LinuxKPI may not work like expected:
1477  */
1478 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
1479