/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>
#include <machine/pmap.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>

#include <vm/vm_pager.h>

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

struct kobject class_root;
struct device linux_rootdev;
struct class miscclass;
struct list_head pci_drivers;
struct list_head pci_devices;
struct net init_net;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

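/*
 * Linux rbtree consumers do their comparisons themselves while walking
 * the tree, so the compare callback demanded by RB_GENERATE() should
 * never actually be invoked; panic if it ever is.
 */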
int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

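/* Format and set a kobject's name from a printf-style format string. */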
int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

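/*
 * Attach a kobject to its parent and publish it: create the backing
 * sysfs directory and any default attributes supplied by the ktype,
 * removing the directory again if attribute creation fails.
 */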
static inline int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	struct kobj_type *t;
	int error;

	kobj->parent = kobject_get(parent);
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

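/*
 * Final kref release callback: remove the sysfs directory, drop the
 * parent reference and call the ktype release method.  The name is
 * saved beforehand because the release method may free the kobject
 * that contains it.
 */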
void
kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	if (kobj->parent)
		kobject_put(kobj->parent);
	kobj->parent = NULL;
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

struct kobj_type kfree_type = { .release = kobject_kfree };

static void
dev_release(struct device *dev)
{
	pr_debug("dev_release: %s\n", dev_name(dev));
	kfree(dev);
}

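/*
 * Allocate, name and register a new struct device.  The allocation
 * sleeps until it succeeds, so a valid pointer is always returned.
 */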
struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = dev_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

int
kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

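/*
 * cdevpriv destructor: called when the per-open file data is torn
 * down.  Invokes the driver's release method, drops the vnode hold
 * taken in linux_dev_open() and frees the shadow linux_file.
 */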
static void
linux_file_dtor(void *cdp)
{
	struct linux_file *filp;

	filp = cdp;
	filp->f_op->release(filp->f_vnode, filp);
	vdrop(filp->f_vnode);
	kfree(filp);
}

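/*
 * Character device switch methods.  Except for open, each one recovers
 * the per-open struct linux_file from cdevpriv and forwards the request
 * to the corresponding Linux file_operations handler, converting
 * between FreeBSD and Linux error conventions as needed.
 */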
static int
linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (ENODEV);
	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_flags = file->f_flag;
	vhold(file->f_vnode);
	filp->f_vnode = file->f_vnode;
	if (filp->f_op->open) {
		error = -filp->f_op->open(file->f_vnode, filp);
		if (error) {
			kfree(filp);
			return (error);
		}
	}
	error = devfs_set_cdevpriv(filp, linux_file_dtor);
	if (error) {
		filp->f_op->release(file->f_vnode, filp);
		kfree(filp);
		return (error);
	}

	return 0;
}

static int
linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	devfs_clear_cdevpriv();

	return (0);
}

static int
linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/*
	 * Linux does not have a generic ioctl copyin/copyout layer.  All
	 * linux ioctls must be converted to void ioctls which pass a
	 * pointer to the address of the data.  We want the actual user
	 * address so we dereference here.
	 */
	data = *(void **)data;
	if (filp->f_op->unlocked_ioctl)
		error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
	else
		error = ENOTTY;

	return (error);
}

static int
linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	ssize_t bytes;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	if (uio->uio_iovcnt != 1)
		panic("linux_dev_read: uio %p iovcnt %d",
		    uio, uio->uio_iovcnt);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

static int
linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	ssize_t bytes;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	if (uio->uio_iovcnt != 1)
		panic("linux_dev_write: uio %p iovcnt %d",
		    uio, uio->uio_iovcnt);
	if (filp->f_op->write) {
		bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

static int
linux_dev_poll(struct cdev *dev, int events, struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	int revents;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll)
		revents = filp->f_op->poll(filp, NULL) & events;
	else
		revents = 0;

	return (revents);
}

static int
linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	struct vm_area_struct vma;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (ENODEV);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	vma.vm_start = 0;
	vma.vm_end = size;
	vma.vm_pgoff = *offset / PAGE_SIZE;
	vma.vm_pfn = 0;
	vma.vm_page_prot = 0;
	if (filp->f_op->mmap) {
		error = -filp->f_op->mmap(filp, &vma);
		if (error == 0) {
			struct sglist *sg;

			sg = sglist_alloc(1, M_WAITOK);
			sglist_append_phys(sg,
			    (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
			*object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
			    nprot, 0, curthread->td_ucred);
			if (*object == NULL) {
				sglist_free(sg);
				return (EINVAL);
			}
			*offset = 0;
			if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
				VM_OBJECT_WLOCK(*object);
				vm_object_set_memattr(*object,
				    vma.vm_page_prot);
				VM_OBJECT_WUNLOCK(*object);
			}
		}
	} else
		error = ENODEV;

	return (error);
}

struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = linux_dev_open,
	.d_close = linux_dev_close,
	.d_read = linux_dev_read,
	.d_write = linux_dev_write,
	.d_ioctl = linux_dev_ioctl,
	.d_mmap_single = linux_dev_mmap_single,
	.d_poll = linux_dev_poll,
};

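/*
 * fileops counterparts of the cdevsw methods above: these operate on a
 * struct file whose f_data points at the LinuxKPI file and forward each
 * request to the driver's file_operations.
 */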
static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (uio->uio_iovcnt != 1)
		panic("linux_file_read: uio %p iovcnt %d",
		    uio, uio->uio_iovcnt);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll)
		revents = filp->f_op->poll(filp, NULL) & events;
	else
		revents = 0;

	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	error = -filp->f_op->release(NULL, filp);
	funsetown(&filp->f_sigio);
	kfree(filp);

	return (error);
}

static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
	struct linux_file *filp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;
	error = 0;

	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (filp->f_op->fasync == NULL)
			break;
		error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0)
			error = filp->f_op->fasync(0, filp,
			    fp->f_flag & FASYNC);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

	return (0);
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

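/*
 * ioremap()/iounmap() are backed by pmap_mapdev_attr() on x86.  The
 * vmmap hash above remembers the size of each mapping, so iounmap()
 * only needs the address.
 */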
#if defined(__i386__) || defined(__amd64__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

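/*
 * vmap()/vunmap() map an array of pages into a contiguous range of
 * kernel virtual address space; the mapping size is again tracked in
 * the vmmap hash so that vunmap() can undo it.
 */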
void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}

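/*
 * Linux-style [k]vasprintf(): size the buffer with a first vsnprintf()
 * pass, then format into freshly kmalloc()ed storage.
 */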
char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

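/*
 * Timer support.  Linux timers are implemented on top of callout(9);
 * the helper below converts an absolute expiry time in jiffies into a
 * relative tick count, clamped to a minimum of one tick.
 */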
static int
linux_timer_jiffies_until(unsigned long expires)
{
	int delta = expires - jiffies;
	/* guard against already expired values */
	if (delta < 1)
		delta = 1;
	return (delta);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;
	timer->function(timer->data);
}

void
mod_timer(struct timer_list *timer, unsigned long expires)
{

	timer->expires = expires;
	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->timer_callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

static void
linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

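/*
 * Completions are implemented directly on top of sleepqueue(9), using
 * the completion structure itself as the wait channel.
 */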
void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	c->done++;
	if (all)
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	else
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
long
linux_wait_for_common(struct completion *c, int flags)
{

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			if (sleepq_wait_sig(c, 0) != 0)
				return (-ERESTARTSYS);
		} else
			sleepq_wait(c, 0);
	}
	c->done--;
	sleepq_release(c);

	return (0);
}

/*
 * Time limited wait for done != 0 with or without signals.
 */
long
linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
{
	long end = jiffies + timeout;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	for (;;) {
		int ret;

		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
		if (flags & SLEEPQ_INTERRUPTIBLE)
			ret = sleepq_timedwait_sig(c, 0);
		else
			ret = sleepq_timedwait(c, 0);
		if (ret != 0) {
			/* check for timeout or signal */
			if (ret == EWOULDBLOCK)
				return (0);
			else
				return (-ERESTARTSYS);
		}
	}
	c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	return (linux_timer_jiffies_until(end));
}

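/*
 * Non-blocking completion primitives: consume or test the done count
 * without sleeping.
 */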
int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done)
		c->done--;
	else
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	isdone = 1;
	sleepq_lock(c);
	if (c->done == 0)
		isdone = 0;
	sleepq_release(c);
	return (isdone);
}

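/*
 * Module initialization: create the sysctl-backed "sys/class" and
 * "sys/device" sysfs roots, register the misc class and set up the
 * PCI lists and the vmmap hash.
 */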
static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&class_root, &class_ktype);
	kobject_set_name(&class_root, "class");
	class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_rootdev.kobj, &dev_ktype);
	kobject_set_name(&linux_rootdev.kobj, "device");
	linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
	    "device");
	linux_rootdev.bsddev = root_bus;
	miscclass.name = "misc";
	class_register(&miscclass);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	kobject_kfree_name(&class_root);
	kobject_kfree_name(&linux_rootdev.kobj);
	kobject_kfree_name(&miscclass.kobj);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));