xref: /freebsd/sys/kern/vfs_aio.c (revision 145f01a3dfcba97c20ad14f2054d4b0600b23350)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. John S. Dyson's name may not be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  *
14  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
15  * bad that happens because of using this software isn't the responsibility
16  * of the author.  This software is distributed AS-IS.
17  */
18 
19 /*
20  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
21  */
22 
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/bio.h>
30 #include <sys/buf.h>
31 #include <sys/capsicum.h>
32 #include <sys/eventhandler.h>
33 #include <sys/sysproto.h>
34 #include <sys/filedesc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/kthread.h>
38 #include <sys/fcntl.h>
39 #include <sys/file.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/unistd.h>
44 #include <sys/posix4.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/protosw.h>
50 #include <sys/rwlock.h>
51 #include <sys/sema.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/syscall.h>
55 #include <sys/sysent.h>
56 #include <sys/sysctl.h>
57 #include <sys/syslog.h>
58 #include <sys/sx.h>
59 #include <sys/taskqueue.h>
60 #include <sys/vnode.h>
61 #include <sys/conf.h>
62 #include <sys/event.h>
63 #include <sys/mount.h>
64 #include <geom/geom.h>
65 
66 #include <machine/atomic.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_extern.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_object.h>
74 #include <vm/uma.h>
75 #include <sys/aio.h>
76 
77 /*
78  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
79  * overflow. (XXX will be removed soon.)
80  */
81 static u_long jobrefid;
82 
83 /*
84  * Counter for aio_fsync.
85  */
86 static uint64_t jobseqno;
87 
88 #ifndef MAX_AIO_PER_PROC
89 #define MAX_AIO_PER_PROC	32
90 #endif
91 
92 #ifndef MAX_AIO_QUEUE_PER_PROC
93 #define MAX_AIO_QUEUE_PER_PROC	256
94 #endif
95 
96 #ifndef MAX_AIO_QUEUE
97 #define MAX_AIO_QUEUE		1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */
98 #endif
99 
100 #ifndef MAX_BUF_AIO
101 #define MAX_BUF_AIO		16
102 #endif
103 
104 FEATURE(aio, "Asynchronous I/O");
105 SYSCTL_DECL(_p1003_1b);
106 
107 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
108 static MALLOC_DEFINE(M_AIOS, "aios", "aio_suspend aio control block list");
109 
110 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
111     "Async IO management");
112 
113 static int enable_aio_unsafe = 0;
114 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
115     "Permit asynchronous IO on all file types, not just known-safe types");
116 
117 static unsigned int unsafe_warningcnt = 1;
118 SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
119     &unsafe_warningcnt, 0,
120     "Warnings that will be triggered upon failed IO requests on unsafe files");
121 
122 static int max_aio_procs = MAX_AIO_PROCS;
123 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
124     "Maximum number of kernel processes to use for handling async IO ");
125 
126 static int num_aio_procs = 0;
127 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
128     "Number of presently active kernel processes for async IO");
129 
130 /*
131  * The code will adjust the actual number of AIO processes towards this
132  * number when it gets a chance.
133  */
134 static int target_aio_procs = TARGET_AIO_PROCS;
135 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
136     0,
137     "Preferred number of ready kernel processes for async IO");
138 
139 static int max_queue_count = MAX_AIO_QUEUE;
140 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
141     "Maximum number of aio requests to queue, globally");
142 
143 static int num_queue_count = 0;
144 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
145     "Number of queued aio requests");
146 
147 static int num_buf_aio = 0;
148 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
149     "Number of aio requests presently handled by the buf subsystem");
150 
151 static int num_unmapped_aio = 0;
152 SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio,
153     0,
154     "Number of aio requests presently handled by unmapped I/O buffers");
155 
156 /* Number of async I/O processes in the process of being started */
157 /* XXX This should be local to aio_aqueue() */
158 static int num_aio_resv_start = 0;
159 
160 static int aiod_lifetime;
161 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
162     "Maximum lifetime for idle aiod");
163 
164 static int max_aio_per_proc = MAX_AIO_PER_PROC;
165 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
166     0,
167     "Maximum active aio requests per process");
168 
169 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
170 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
171     &max_aio_queue_per_proc, 0,
172     "Maximum queued aio requests per process");
173 
174 static int max_buf_aio = MAX_BUF_AIO;
175 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
176     "Maximum buf aio requests per process");
177 
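/*
 * Illustrative only (not part of this file): the knobs above can be
 * inspected and tuned at runtime with sysctl(8).  The values below are
 * arbitrary examples:
 *
 *	# sysctl vfs.aio.max_aio_procs=64
 *	# sysctl vfs.aio.max_aio_queue_per_proc=512
 *	# sysctl vfs.aio.num_queue_count
 */
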
178 /*
179  * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires
180  * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with
181  * vfs.aio.aio_listio_max.
182  */
183 SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max,
184     CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc,
185     0, "Maximum aio requests for a single lio_listio call");
186 
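/*
 * A minimal userland sketch (assumed standard POSIX usage) of querying
 * this limit through sysconf(3):
 *
 *	#include <unistd.h>
 *
 *	long lio_max = sysconf(_SC_AIO_LISTIO_MAX);
 */
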
187 #ifdef COMPAT_FREEBSD6
188 typedef struct oaiocb {
189 	int	aio_fildes;		/* File descriptor */
190 	off_t	aio_offset;		/* File offset for I/O */
191 	volatile void *aio_buf;         /* I/O buffer in process space */
192 	size_t	aio_nbytes;		/* Number of bytes for I/O */
193 	struct	osigevent aio_sigevent;	/* Signal to deliver */
194 	int	aio_lio_opcode;		/* LIO opcode */
195 	int	aio_reqprio;		/* Request priority -- ignored */
196 	struct	__aiocb_private	_aiocb_private;
197 } oaiocb_t;
198 #endif
199 
200 /*
201  * Below is a key of locks used to protect each member of struct kaiocb,
202  * aioliojob, and kaioinfo, and any backends.
203  *
204  * * - need not be protected
205  * a - locked by kaioinfo lock
206  * b - locked by the backend lock; the backend lock can be null in some
207  *     cases, e.g. for BIO requests, in which case the proc lock is
208  *     reused
209  * c - locked by aio_job_mtx, the lock for the generic file I/O backend
210  */
211 
212 /*
213  * If the routine that services an AIO request blocks while running in an
214  * AIO kernel process it can starve other I/O requests.  BIO requests
215  * queued via aio_qbio() complete asynchronously and do not use AIO kernel
216  * processes at all.  Socket I/O requests use a separate pool of
217  * kprocs and also force non-blocking I/O.  Other file I/O requests
218  * use the generic fo_read/fo_write operations which can block.  The
219  * fsync and mlock operations can also block while executing.  Ideally
220  * none of these requests would block while executing.
221  *
222  * Note that the service routines cannot toggle O_NONBLOCK in the file
223  * structure directly while handling a request due to races with
224  * userland threads.
225  */
226 
227 /* jobflags */
228 #define	KAIOCB_QUEUEING		0x01
229 #define	KAIOCB_CANCELLED	0x02
230 #define	KAIOCB_CANCELLING	0x04
231 #define	KAIOCB_CHECKSYNC	0x08
232 #define	KAIOCB_CLEARED		0x10
233 #define	KAIOCB_FINISHED		0x20
234 
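/*
 * Typical job lifecycle: aio_aqueue() creates a job with KAIOCB_QUEUEING
 * set, and the backend completes it through aio_complete(), which sets
 * KAIOCB_FINISHED.  The CANCELLED/CANCELLING/CLEARED flags implement the
 * cancellation handshake used by aio_cancel_job() and
 * aio_clear_cancel_function_locked().
 */
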
235 /*
236  * AIO process info
237  */
238 #define AIOP_FREE	0x1			/* proc on free queue */
239 
240 struct aioproc {
241 	int	aioprocflags;			/* (c) AIO proc flags */
242 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
243 	struct	proc *aioproc;			/* (*) the AIO proc */
244 };
245 
246 /*
247  * data-structure for lio signal management
248  */
249 struct aioliojob {
250 	int	lioj_flags;			/* (a) listio flags */
251 	int	lioj_count;			/* (a) count of jobs */
252 	int	lioj_finished_count;		/* (a) count of finished jobs */
253 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
254 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
255 	struct	knlist klist;			/* (a) list of knotes */
256 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
257 };
258 
259 #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
260 #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
261 #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
262 
263 /*
264  * per process aio data structure
265  */
266 struct kaioinfo {
267 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
268 	int	kaio_flags;		/* (a) per process kaio flags */
269 	int	kaio_active_count;	/* (c) number of currently used AIOs */
270 	int	kaio_count;		/* (a) size of AIO queue */
271 	int	kaio_buffer_count;	/* (a) number of bio buffers */
272 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
273 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
274 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
275 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
276 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
277 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
278 	struct	task kaio_task;		/* (*) task to kick aio processes */
279 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
280 };
281 
282 #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
283 #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
284 #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
285 #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
286 
287 #define KAIO_RUNDOWN	0x1	/* process is being run down */
288 #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
289 
290 /*
291  * Operations used to interact with userland aio control blocks.
292  * Different ABIs provide their own operations.
293  */
294 struct aiocb_ops {
295 	int	(*aio_copyin)(struct aiocb *ujob, struct kaiocb *kjob, int ty);
296 	long	(*fetch_status)(struct aiocb *ujob);
297 	long	(*fetch_error)(struct aiocb *ujob);
298 	int	(*store_status)(struct aiocb *ujob, long status);
299 	int	(*store_error)(struct aiocb *ujob, long error);
300 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
301 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
302 };
303 
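/*
 * The native ABI's implementation of these operations (aiocb_ops) is
 * defined below; COMPAT_FREEBSD6 additionally provides
 * aiocb_ops_osigevent for the old osigevent layout.
 */
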
304 static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
305 static struct sema aio_newproc_sem;
306 static struct mtx aio_job_mtx;
307 static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
308 static struct unrhdr *aiod_unr;
309 
310 static void	aio_biocleanup(struct bio *bp);
311 void		aio_init_aioinfo(struct proc *p);
312 static int	aio_onceonly(void);
313 static int	aio_free_entry(struct kaiocb *job);
314 static void	aio_process_rw(struct kaiocb *job);
315 static void	aio_process_sync(struct kaiocb *job);
316 static void	aio_process_mlock(struct kaiocb *job);
317 static void	aio_schedule_fsync(void *context, int pending);
318 static int	aio_newproc(int *);
319 int		aio_aqueue(struct thread *td, struct aiocb *ujob,
320 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
321 static int	aio_queue_file(struct file *fp, struct kaiocb *job);
322 static void	aio_biowakeup(struct bio *bp);
323 static void	aio_proc_rundown(void *arg, struct proc *p);
324 static void	aio_proc_rundown_exec(void *arg, struct proc *p,
325 		    struct image_params *imgp);
326 static int	aio_qbio(struct proc *p, struct kaiocb *job);
327 static void	aio_daemon(void *param);
328 static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
329 static bool	aio_clear_cancel_function_locked(struct kaiocb *job);
330 static int	aio_kick(struct proc *userp);
331 static void	aio_kick_nowait(struct proc *userp);
332 static void	aio_kick_helper(void *context, int pending);
333 static int	filt_aioattach(struct knote *kn);
334 static void	filt_aiodetach(struct knote *kn);
335 static int	filt_aio(struct knote *kn, long hint);
336 static int	filt_lioattach(struct knote *kn);
337 static void	filt_liodetach(struct knote *kn);
338 static int	filt_lio(struct knote *kn, long hint);
339 
340 /*
341  * Zones for:
342  * 	kaio	Per process async io info
343  *	aiop	async io process data
344  *	aiocb	async io jobs
345  *	aiolio	list io jobs
346  */
347 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiolio_zone;
348 
349 /* kqueue filters for aio */
350 static struct filterops aio_filtops = {
351 	.f_isfd = 0,
352 	.f_attach = filt_aioattach,
353 	.f_detach = filt_aiodetach,
354 	.f_event = filt_aio,
355 };
356 static struct filterops lio_filtops = {
357 	.f_isfd = 0,
358 	.f_attach = filt_lioattach,
359 	.f_detach = filt_liodetach,
360 	.f_event = filt_lio
361 };
362 
363 static eventhandler_tag exit_tag, exec_tag;
364 
365 TASKQUEUE_DEFINE_THREAD(aiod_kick);
366 
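/*
 * TASKQUEUE_DEFINE_THREAD(aiod_kick) creates taskqueue_aiod_kick, which
 * is used below both to spawn additional AIO daemons (aio_kick_helper())
 * and to schedule deferred fsync jobs (aio_schedule_fsync()).
 */
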
367 /*
368  * Main operations function for use as a kernel module.
369  */
370 static int
371 aio_modload(struct module *module, int cmd, void *arg)
372 {
373 	int error = 0;
374 
375 	switch (cmd) {
376 	case MOD_LOAD:
377 		aio_onceonly();
378 		break;
379 	case MOD_SHUTDOWN:
380 		break;
381 	default:
382 		error = EOPNOTSUPP;
383 		break;
384 	}
385 	return (error);
386 }
387 
388 static moduledata_t aio_mod = {
389 	"aio",
390 	&aio_modload,
391 	NULL
392 };
393 
394 DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
395 MODULE_VERSION(aio, 1);
396 
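/*
 * If the aio facility is not already compiled into the kernel, the
 * module can be loaded at boot or at runtime (illustrative commands):
 *
 *	# echo 'aio_load="YES"' >> /boot/loader.conf
 *	# kldload aio
 */
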
397 /*
398  * Startup initialization
399  */
400 static int
401 aio_onceonly(void)
402 {
403 
404 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
405 	    EVENTHANDLER_PRI_ANY);
406 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
407 	    NULL, EVENTHANDLER_PRI_ANY);
408 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
409 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
410 	TAILQ_INIT(&aio_freeproc);
411 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
412 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
413 	TAILQ_INIT(&aio_jobs);
414 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
415 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
416 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
417 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
418 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
419 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
420 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
421 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
422 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
423 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
424 	jobrefid = 1;
425 	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
426 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
427 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
428 
429 	return (0);
430 }
431 
432 /*
433  * Init the per-process aioinfo structure.  The aioinfo limits are set
434  * per-process for user limit (resource) management.
435  */
436 void
437 aio_init_aioinfo(struct proc *p)
438 {
439 	struct kaioinfo *ki;
440 
441 	ki = uma_zalloc(kaio_zone, M_WAITOK);
442 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
443 	ki->kaio_flags = 0;
444 	ki->kaio_active_count = 0;
445 	ki->kaio_count = 0;
446 	ki->kaio_buffer_count = 0;
447 	TAILQ_INIT(&ki->kaio_all);
448 	TAILQ_INIT(&ki->kaio_done);
449 	TAILQ_INIT(&ki->kaio_jobqueue);
450 	TAILQ_INIT(&ki->kaio_liojoblist);
451 	TAILQ_INIT(&ki->kaio_syncqueue);
452 	TAILQ_INIT(&ki->kaio_syncready);
453 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
454 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
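	/*
	 * Another thread may have raced us and installed a kaioinfo for
	 * this process already; if so, discard the one just built.
	 */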
455 	PROC_LOCK(p);
456 	if (p->p_aioinfo == NULL) {
457 		p->p_aioinfo = ki;
458 		PROC_UNLOCK(p);
459 	} else {
460 		PROC_UNLOCK(p);
461 		mtx_destroy(&ki->kaio_mtx);
462 		uma_zfree(kaio_zone, ki);
463 	}
464 
465 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
466 		aio_newproc(NULL);
467 }
468 
469 static int
470 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi, bool ext)
471 {
472 	struct thread *td;
473 	int error;
474 
475 	error = sigev_findtd(p, sigev, &td);
476 	if (error)
477 		return (error);
478 	if (!KSI_ONQ(ksi)) {
479 		ksiginfo_set_sigev(ksi, sigev);
480 		ksi->ksi_code = SI_ASYNCIO;
481 		ksi->ksi_flags |= ext ? (KSI_EXT | KSI_INS) : 0;
482 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
483 	}
484 	PROC_UNLOCK(p);
485 	return (error);
486 }
487 
488 /*
489  * Free a job entry.  The job must already have completed.  Note that
490  * the kaioinfo lock is dropped and reacquired while the file and
491  * credential references are released, so queue scans must be restarted.
492  */
493 static int
494 aio_free_entry(struct kaiocb *job)
495 {
496 	struct kaioinfo *ki;
497 	struct aioliojob *lj;
498 	struct proc *p;
499 
500 	p = job->userproc;
501 	MPASS(curproc == p);
502 	ki = p->p_aioinfo;
503 	MPASS(ki != NULL);
504 
505 	AIO_LOCK_ASSERT(ki, MA_OWNED);
506 	MPASS(job->jobflags & KAIOCB_FINISHED);
507 
508 	atomic_subtract_int(&num_queue_count, 1);
509 
510 	ki->kaio_count--;
511 	MPASS(ki->kaio_count >= 0);
512 
513 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
514 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
515 
516 	lj = job->lio;
517 	if (lj) {
518 		lj->lioj_count--;
519 		lj->lioj_finished_count--;
520 
521 		if (lj->lioj_count == 0) {
522 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
523 			/* lio is going away, we need to destroy any knotes */
524 			knlist_delete(&lj->klist, curthread, 1);
525 			PROC_LOCK(p);
526 			sigqueue_take(&lj->lioj_ksi);
527 			PROC_UNLOCK(p);
528 			uma_zfree(aiolio_zone, lj);
529 		}
530 	}
531 
532 	/* job is going away, we need to destroy any knotes */
533 	knlist_delete(&job->klist, curthread, 1);
534 	PROC_LOCK(p);
535 	sigqueue_take(&job->ksi);
536 	PROC_UNLOCK(p);
537 
538 	AIO_UNLOCK(ki);
539 
540 	/*
541 	 * The thread argument here is used to find the owning process
542 	 * and is also passed to fo_close() which may pass it to various
543 	 * places such as devsw close() routines.  Because of that, we
544 	 * need a thread pointer from the process owning the job that is
545 	 * persistent and won't disappear out from under us or move to
546 	 * another process.
547 	 *
548 	 * Currently, all the callers of this function call it to remove
549 	 * a kaiocb from the current process' job list either via a
550 	 * syscall or due to the current process calling exit() or
551 	 * execve().  Thus, we know that p == curproc.  We also know that
552 	 * curthread can't exit since we are curthread.
553 	 *
554 	 * Therefore, we use curthread as the thread to pass to
555 	 * knlist_delete().  This does mean that it is possible for the
556 	 * thread pointer at close time to differ from the thread pointer
557 	 * at open time, but this is already true of file descriptors in
558 	 * a multithreaded process.
559 	 */
560 	if (job->fd_file)
561 		fdrop(job->fd_file, curthread);
562 	crfree(job->cred);
563 	if (job->uiop != &job->uio)
564 		free(job->uiop, M_IOV);
565 	uma_zfree(aiocb_zone, job);
566 	AIO_LOCK(ki);
567 
568 	return (0);
569 }
570 
571 static void
572 aio_proc_rundown_exec(void *arg, struct proc *p,
573     struct image_params *imgp __unused)
574 {
575 	aio_proc_rundown(arg, p);
576 }
577 
578 static int
579 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
580 {
581 	aio_cancel_fn_t *func;
582 	int cancelled;
583 
584 	AIO_LOCK_ASSERT(ki, MA_OWNED);
585 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
586 		return (0);
587 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
588 	job->jobflags |= KAIOCB_CANCELLED;
589 
590 	func = job->cancel_fn;
591 
592 	/*
593 	 * If there is no cancel routine, just leave the job marked as
594 	 * cancelled.  The job should be in active use by a caller who
595 	 * should complete it normally or when it fails to install a
596 	 * cancel routine.
597 	 */
598 	if (func == NULL)
599 		return (0);
600 
601 	/*
602 	 * Set the CANCELLING flag so that aio_complete() will defer
603 	 * completions of this job.  This prevents the job from being
604 	 * freed out from under the cancel callback.  After the
605 	 * callback any deferred completion (whether from the callback
606 	 * or any other source) will be completed.
607 	 */
608 	job->jobflags |= KAIOCB_CANCELLING;
609 	AIO_UNLOCK(ki);
610 	func(job);
611 	AIO_LOCK(ki);
612 	job->jobflags &= ~KAIOCB_CANCELLING;
613 	if (job->jobflags & KAIOCB_FINISHED) {
614 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
615 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
616 		aio_bio_done_notify(p, job);
617 	} else {
618 		/*
619 		 * The cancel callback might have scheduled an
620 		 * operation to cancel this request, but it is
621 		 * only counted as cancelled if the request is
622 		 * cancelled when the callback returns.
623 		 */
624 		cancelled = 0;
625 	}
626 	return (cancelled);
627 }
628 
629 /*
630  * Rundown the jobs for a given process.
631  */
632 static void
633 aio_proc_rundown(void *arg, struct proc *p)
634 {
635 	struct kaioinfo *ki;
636 	struct aioliojob *lj;
637 	struct kaiocb *job, *jobn;
638 
639 	KASSERT(curthread->td_proc == p,
640 	    ("%s: called on non-curproc", __func__));
641 	ki = p->p_aioinfo;
642 	if (ki == NULL)
643 		return;
644 
645 	AIO_LOCK(ki);
646 	ki->kaio_flags |= KAIO_RUNDOWN;
647 
648 restart:
649 
650 	/*
651 	 * Try to cancel all pending requests. This code simulates
652 	 * aio_cancel on all pending I/O requests.
653 	 */
654 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
655 		aio_cancel_job(p, ki, job);
656 	}
657 
658 	/* Wait for all running I/O to be finished */
659 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
660 		ki->kaio_flags |= KAIO_WAKEUP;
661 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
662 		goto restart;
663 	}
664 
665 	/* Free all completed I/O requests. */
666 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
667 		aio_free_entry(job);
668 
669 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
670 		if (lj->lioj_count == 0) {
671 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
672 			knlist_delete(&lj->klist, curthread, 1);
673 			PROC_LOCK(p);
674 			sigqueue_take(&lj->lioj_ksi);
675 			PROC_UNLOCK(p);
676 			uma_zfree(aiolio_zone, lj);
677 		} else {
678 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
679 			    lj->lioj_count, lj->lioj_finished_count);
680 		}
681 	}
682 	AIO_UNLOCK(ki);
683 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
684 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
685 	mtx_destroy(&ki->kaio_mtx);
686 	uma_zfree(kaio_zone, ki);
687 	p->p_aioinfo = NULL;
688 }
689 
690 /*
691  * Select a job to run (called by an AIO daemon).
692  */
693 static struct kaiocb *
694 aio_selectjob(struct aioproc *aiop)
695 {
696 	struct kaiocb *job;
697 	struct kaioinfo *ki;
698 	struct proc *userp;
699 
700 	mtx_assert(&aio_job_mtx, MA_OWNED);
701 restart:
702 	TAILQ_FOREACH(job, &aio_jobs, list) {
703 		userp = job->userproc;
704 		ki = userp->p_aioinfo;
705 
706 		if (ki->kaio_active_count < max_aio_per_proc) {
707 			TAILQ_REMOVE(&aio_jobs, job, list);
708 			if (!aio_clear_cancel_function(job))
709 				goto restart;
710 
711 			/* Account for currently active jobs. */
712 			ki->kaio_active_count++;
713 			break;
714 		}
715 	}
716 	return (job);
717 }
718 
719 /*
720  * Move all data to a permanent storage device.  This code
721  * simulates the fsync and fdatasync syscalls.
722  */
723 static int
724 aio_fsync_vnode(struct thread *td, struct vnode *vp, int op)
725 {
726 	struct mount *mp;
727 	int error;
728 
729 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
730 		goto drop;
731 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
732 	if (vp->v_object != NULL) {
733 		VM_OBJECT_WLOCK(vp->v_object);
734 		vm_object_page_clean(vp->v_object, 0, 0, 0);
735 		VM_OBJECT_WUNLOCK(vp->v_object);
736 	}
737 	if (op == LIO_DSYNC)
738 		error = VOP_FDATASYNC(vp, td);
739 	else
740 		error = VOP_FSYNC(vp, MNT_WAIT, td);
741 
742 	VOP_UNLOCK(vp);
743 	vn_finished_write(mp);
744 drop:
745 	return (error);
746 }
747 
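/*
 * Userland reaches this path through aio_fsync(2).  A minimal sketch of
 * standard usage, given an open descriptor fd (busy-wait and omitted
 * error handling are for brevity only):
 *
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	if (aio_fsync(O_SYNC, &cb) == 0) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			;
 *		(void)aio_return(&cb);
 *	}
 */
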
748 /*
749  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
750  * does the I/O request for the non-bio version of the operations.  The generic
751  * fo_read()/fo_write() file operations are used, and this code should work for
752  * every type of file, including pipes, sockets, fifos, and regular files.
753  *
754  * XXX I don't think it works well for sockets, pipes, and fifos.
755  */
756 static void
757 aio_process_rw(struct kaiocb *job)
758 {
759 	struct ucred *td_savedcred;
760 	struct thread *td;
761 	struct aiocb *cb;
762 	struct file *fp;
763 	ssize_t cnt;
764 	long msgsnd_st, msgsnd_end;
765 	long msgrcv_st, msgrcv_end;
766 	long oublock_st, oublock_end;
767 	long inblock_st, inblock_end;
768 	int error, opcode;
769 
770 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
771 	    job->uaiocb.aio_lio_opcode == LIO_READV ||
772 	    job->uaiocb.aio_lio_opcode == LIO_WRITE ||
773 	    job->uaiocb.aio_lio_opcode == LIO_WRITEV,
774 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
775 
776 	aio_switch_vmspace(job);
777 	td = curthread;
778 	td_savedcred = td->td_ucred;
779 	td->td_ucred = job->cred;
780 	job->uiop->uio_td = td;
781 	cb = &job->uaiocb;
782 	fp = job->fd_file;
783 
784 	opcode = job->uaiocb.aio_lio_opcode;
785 	cnt = job->uiop->uio_resid;
786 
787 	msgrcv_st = td->td_ru.ru_msgrcv;
788 	msgsnd_st = td->td_ru.ru_msgsnd;
789 	inblock_st = td->td_ru.ru_inblock;
790 	oublock_st = td->td_ru.ru_oublock;
791 
792 	/*
793 	 * aio_aqueue() acquires a reference to the file that is
794 	 * released in aio_free_entry().
795 	 */
796 	if (opcode == LIO_READ || opcode == LIO_READV) {
797 		if (job->uiop->uio_resid == 0)
798 			error = 0;
799 		else
800 			error = fo_read(fp, job->uiop, fp->f_cred, FOF_OFFSET,
801 			    td);
802 	} else {
803 		if (fp->f_type == DTYPE_VNODE)
804 			bwillwrite();
805 		error = fo_write(fp, job->uiop, fp->f_cred, FOF_OFFSET, td);
806 	}
807 	msgrcv_end = td->td_ru.ru_msgrcv;
808 	msgsnd_end = td->td_ru.ru_msgsnd;
809 	inblock_end = td->td_ru.ru_inblock;
810 	oublock_end = td->td_ru.ru_oublock;
811 
812 	job->msgrcv = msgrcv_end - msgrcv_st;
813 	job->msgsnd = msgsnd_end - msgsnd_st;
814 	job->inblock = inblock_end - inblock_st;
815 	job->outblock = oublock_end - oublock_st;
816 
817 	if (error != 0 && job->uiop->uio_resid != cnt) {
818 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
819 			error = 0;
820 		if (error == EPIPE &&
821 		    (opcode == LIO_WRITE || opcode == LIO_WRITEV)) {
822 			PROC_LOCK(job->userproc);
823 			kern_psignal(job->userproc, SIGPIPE);
824 			PROC_UNLOCK(job->userproc);
825 		}
826 	}
827 
828 	cnt -= job->uiop->uio_resid;
829 	td->td_ucred = td_savedcred;
830 	if (error)
831 		aio_complete(job, -1, error);
832 	else
833 		aio_complete(job, cnt, 0);
834 }
835 
836 static void
837 aio_process_sync(struct kaiocb *job)
838 {
839 	struct thread *td = curthread;
840 	struct ucred *td_savedcred = td->td_ucred;
841 	struct file *fp = job->fd_file;
842 	int error = 0;
843 
844 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC ||
845 	    job->uaiocb.aio_lio_opcode == LIO_DSYNC,
846 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
847 
848 	td->td_ucred = job->cred;
849 	if (fp->f_vnode != NULL) {
850 		error = aio_fsync_vnode(td, fp->f_vnode,
851 		    job->uaiocb.aio_lio_opcode);
852 	}
853 	td->td_ucred = td_savedcred;
854 	if (error)
855 		aio_complete(job, -1, error);
856 	else
857 		aio_complete(job, 0, 0);
858 }
859 
860 static void
861 aio_process_mlock(struct kaiocb *job)
862 {
863 	struct aiocb *cb = &job->uaiocb;
864 	int error;
865 
866 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
867 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
868 
869 	aio_switch_vmspace(job);
870 	error = kern_mlock(job->userproc, job->cred,
871 	    __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes);
872 	aio_complete(job, error != 0 ? -1 : 0, error);
873 }
874 
875 static void
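/*
 * The routine above services aio_mlock(2), which wires the user region
 * described by aio_buf/aio_nbytes; completion is reported through the
 * usual aio_error(2)/aio_return(2) protocol.
 */
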
876 aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
877 {
878 	struct aioliojob *lj;
879 	struct kaioinfo *ki;
880 	struct kaiocb *sjob, *sjobn;
881 	int lj_done;
882 	bool schedule_fsync;
883 
884 	ki = userp->p_aioinfo;
885 	AIO_LOCK_ASSERT(ki, MA_OWNED);
886 	lj = job->lio;
887 	lj_done = 0;
888 	if (lj) {
889 		lj->lioj_finished_count++;
890 		if (lj->lioj_count == lj->lioj_finished_count)
891 			lj_done = 1;
892 	}
893 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
894 	MPASS(job->jobflags & KAIOCB_FINISHED);
895 
896 	if (ki->kaio_flags & KAIO_RUNDOWN)
897 		goto notification_done;
898 
899 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
900 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
901 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi, true);
902 
903 	KNOTE_LOCKED(&job->klist, 1);
904 
905 	if (lj_done) {
906 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
907 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
908 			KNOTE_LOCKED(&lj->klist, 1);
909 		}
910 		if ((lj->lioj_flags & (LIOJ_SIGNAL | LIOJ_SIGNAL_POSTED))
911 		    == LIOJ_SIGNAL &&
912 		    (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
913 		    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
914 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi,
915 			    true);
916 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
917 		}
918 	}
919 
920 notification_done:
921 	if (job->jobflags & KAIOCB_CHECKSYNC) {
922 		schedule_fsync = false;
923 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
924 			if (job->fd_file != sjob->fd_file ||
925 			    job->seqno >= sjob->seqno)
926 				continue;
927 			if (--sjob->pending > 0)
928 				continue;
929 			TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
930 			if (!aio_clear_cancel_function_locked(sjob))
931 				continue;
932 			TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
933 			schedule_fsync = true;
934 		}
935 		if (schedule_fsync)
936 			taskqueue_enqueue(taskqueue_aiod_kick,
937 			    &ki->kaio_sync_task);
938 	}
939 	if (ki->kaio_flags & KAIO_WAKEUP) {
940 		ki->kaio_flags &= ~KAIO_WAKEUP;
941 		wakeup(&userp->p_aioinfo);
942 	}
943 }
944 
945 static void
946 aio_schedule_fsync(void *context, int pending)
947 {
948 	struct kaioinfo *ki;
949 	struct kaiocb *job;
950 
951 	ki = context;
952 	AIO_LOCK(ki);
953 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
954 		job = TAILQ_FIRST(&ki->kaio_syncready);
955 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
956 		AIO_UNLOCK(ki);
957 		aio_schedule(job, aio_process_sync);
958 		AIO_LOCK(ki);
959 	}
960 	AIO_UNLOCK(ki);
961 }
962 
963 bool
964 aio_cancel_cleared(struct kaiocb *job)
965 {
966 
967 	/*
968 	 * The caller should hold the same queue lock held when
969 	 * aio_clear_cancel_function() was called and set this flag
970 	 * ensuring this check sees an up-to-date value.  However,
971 	 * there is no way to assert that.
972 	 */
973 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
974 }
975 
976 static bool
977 aio_clear_cancel_function_locked(struct kaiocb *job)
978 {
979 
980 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
981 	MPASS(job->cancel_fn != NULL);
982 	if (job->jobflags & KAIOCB_CANCELLING) {
983 		job->jobflags |= KAIOCB_CLEARED;
984 		return (false);
985 	}
986 	job->cancel_fn = NULL;
987 	return (true);
988 }
989 
990 bool
991 aio_clear_cancel_function(struct kaiocb *job)
992 {
993 	struct kaioinfo *ki;
994 	bool ret;
995 
996 	ki = job->userproc->p_aioinfo;
997 	AIO_LOCK(ki);
998 	ret = aio_clear_cancel_function_locked(job);
999 	AIO_UNLOCK(ki);
1000 	return (ret);
1001 }
1002 
1003 static bool
1004 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
1005 {
1006 
1007 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
1008 	if (job->jobflags & KAIOCB_CANCELLED)
1009 		return (false);
1010 	job->cancel_fn = func;
1011 	return (true);
1012 }
1013 
1014 bool
1015 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1016 {
1017 	struct kaioinfo *ki;
1018 	bool ret;
1019 
1020 	ki = job->userproc->p_aioinfo;
1021 	AIO_LOCK(ki);
1022 	ret = aio_set_cancel_function_locked(job, func);
1023 	AIO_UNLOCK(ki);
1024 	return (ret);
1025 }
1026 
1027 void
1028 aio_complete(struct kaiocb *job, long status, int error)
1029 {
1030 	struct kaioinfo *ki;
1031 	struct proc *userp;
1032 
1033 	job->uaiocb._aiocb_private.error = error;
1034 	job->uaiocb._aiocb_private.status = status;
1035 
1036 	userp = job->userproc;
1037 	ki = userp->p_aioinfo;
1038 
1039 	AIO_LOCK(ki);
1040 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1041 	    ("duplicate aio_complete"));
1042 	job->jobflags |= KAIOCB_FINISHED;
1043 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1044 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1045 		aio_bio_done_notify(userp, job);
1046 	}
1047 	AIO_UNLOCK(ki);
1048 }
1049 
1050 void
1051 aio_cancel(struct kaiocb *job)
1052 {
1053 
1054 	aio_complete(job, -1, ECANCELED);
1055 }
1056 
1057 void
1058 aio_switch_vmspace(struct kaiocb *job)
1059 {
1060 
1061 	vmspace_switch_aio(job->userproc->p_vmspace);
1062 }
1063 
1064 /*
1065  * The AIO daemon.  Most of the actual work is done in aio_process_*,
1066  * but the setup (and address space management) is done in this routine.
1067  */
1068 static void
1069 aio_daemon(void *_id)
1070 {
1071 	struct kaiocb *job;
1072 	struct aioproc *aiop;
1073 	struct kaioinfo *ki;
1074 	struct proc *p;
1075 	struct vmspace *myvm;
1076 	struct thread *td = curthread;
1077 	int id = (intptr_t)_id;
1078 
1079 	/*
1080 	 * Grab an extra reference on the daemon's vmspace so that it
1081 	 * doesn't get freed by jobs that switch to a different
1082 	 * vmspace.
1083 	 */
1084 	p = td->td_proc;
1085 	myvm = vmspace_acquire_ref(p);
1086 
1087 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1088 
1089 	/*
1090 	 * Allocate and ready the aio control info.  There is one aiop structure
1091 	 * per daemon.
1092 	 */
1093 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
1094 	aiop->aioproc = p;
1095 	aiop->aioprocflags = 0;
1096 
1097 	/*
1098 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
1099 	 * and creating too many daemons.)
1100 	 */
1101 	sema_post(&aio_newproc_sem);
1102 
1103 	mtx_lock(&aio_job_mtx);
1104 	for (;;) {
1105 		/*
1106 		 * Take daemon off of free queue
1107 		 */
1108 		if (aiop->aioprocflags & AIOP_FREE) {
1109 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
1110 			aiop->aioprocflags &= ~AIOP_FREE;
1111 		}
1112 
1113 		/*
1114 		 * Check for jobs.
1115 		 */
1116 		while ((job = aio_selectjob(aiop)) != NULL) {
1117 			mtx_unlock(&aio_job_mtx);
1118 
1119 			ki = job->userproc->p_aioinfo;
1120 			job->handle_fn(job);
1121 
1122 			mtx_lock(&aio_job_mtx);
1123 			/* Decrement the active job count. */
1124 			ki->kaio_active_count--;
1125 		}
1126 
1127 		/*
1128 		 * Disconnect from user address space.
1129 		 */
1130 		if (p->p_vmspace != myvm) {
1131 			mtx_unlock(&aio_job_mtx);
1132 			vmspace_switch_aio(myvm);
1133 			mtx_lock(&aio_job_mtx);
1134 			/*
1135 			 * We have to restart to avoid races; we only
1136 			 * sleep if no job can be selected.
1137 			 */
1138 			continue;
1139 		}
1140 
1141 		mtx_assert(&aio_job_mtx, MA_OWNED);
1142 
1143 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1144 		aiop->aioprocflags |= AIOP_FREE;
1145 
1146 		/*
1147 		 * If daemon is inactive for a long time, allow it to exit,
1148 		 * thereby freeing resources.
1149 		 */
1150 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
1151 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
1152 		    (aiop->aioprocflags & AIOP_FREE) &&
1153 		    num_aio_procs > target_aio_procs)
1154 			break;
1155 	}
1156 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
1157 	num_aio_procs--;
1158 	mtx_unlock(&aio_job_mtx);
1159 	uma_zfree(aiop_zone, aiop);
1160 	free_unr(aiod_unr, id);
1161 	vmspace_free(myvm);
1162 
1163 	KASSERT(p->p_vmspace == myvm,
1164 	    ("AIOD: bad vmspace for exiting daemon"));
1165 	KASSERT(refcount_load(&myvm->vm_refcnt) > 1,
1166 	    ("AIOD: bad vm refcnt for exiting daemon: %d",
1167 	    refcount_load(&myvm->vm_refcnt)));
1168 	kproc_exit(0);
1169 }
1170 
1171 /*
1172  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1173  * AIO daemon modifies its environment itself.
1174  */
1175 static int
1176 aio_newproc(int *start)
1177 {
1178 	int error;
1179 	struct proc *p;
1180 	int id;
1181 
1182 	id = alloc_unr(aiod_unr);
1183 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
1184 		RFNOWAIT, 0, "aiod%d", id);
1185 	if (error == 0) {
1186 		/*
1187 		 * Wait until daemon is started.
1188 		 */
1189 		sema_wait(&aio_newproc_sem);
1190 		mtx_lock(&aio_job_mtx);
1191 		num_aio_procs++;
1192 		if (start != NULL)
1193 			(*start)--;
1194 		mtx_unlock(&aio_job_mtx);
1195 	} else {
1196 		free_unr(aiod_unr, id);
1197 	}
1198 	return (error);
1199 }
1200 
1201 /*
1202  * Try the high-performance, low-overhead bio method for eligible
1203  * VCHR devices.  This method doesn't use an aio helper thread, and
1204  * thus has very low overhead.
1205  *
1206  * Assumes that the caller, aio_aqueue(), has incremented the file
1207  * structure's reference count, preventing its deallocation for the
1208  * duration of this call.
1209  */
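/*
 * Returns 0 on success, an errno value on failure, or -1 if the request
 * is not eligible for the bio path, in which case aio_queue_file()
 * falls back to the generic kproc-based path.
 */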
1210 static int
1211 aio_qbio(struct proc *p, struct kaiocb *job)
1212 {
1213 	struct aiocb *cb;
1214 	struct file *fp;
1215 	struct buf *pbuf;
1216 	struct vnode *vp;
1217 	struct cdevsw *csw;
1218 	struct cdev *dev;
1219 	struct kaioinfo *ki;
1220 	struct bio **bios = NULL;
1221 	off_t offset;
1222 	int bio_cmd, error, i, iovcnt, opcode, poff, ref;
1223 	vm_prot_t prot;
1224 	bool use_unmapped;
1225 
1226 	cb = &job->uaiocb;
1227 	fp = job->fd_file;
1228 	opcode = cb->aio_lio_opcode;
1229 
1230 	if (!(opcode == LIO_WRITE || opcode == LIO_WRITEV ||
1231 	    opcode == LIO_READ || opcode == LIO_READV))
1232 		return (-1);
1233 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1234 		return (-1);
1235 
1236 	vp = fp->f_vnode;
1237 	if (vp->v_type != VCHR)
1238 		return (-1);
1239 	if (vp->v_bufobj.bo_bsize == 0)
1240 		return (-1);
1241 
1242 	bio_cmd = opcode == LIO_WRITE || opcode == LIO_WRITEV ? BIO_WRITE :
1243 	    BIO_READ;
1244 	iovcnt = job->uiop->uio_iovcnt;
1245 	if (iovcnt > max_buf_aio)
1246 		return (-1);
1247 	for (i = 0; i < iovcnt; i++) {
1248 		if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0)
1249 			return (-1);
1250 		if (job->uiop->uio_iov[i].iov_len > maxphys)
1251 			return (-1);
1254 	}
1255 	offset = cb->aio_offset;
1256 
1257 	ref = 0;
1258 	csw = devvn_refthread(vp, &dev, &ref);
1259 	if (csw == NULL)
1260 		return (ENXIO);
1261 
1262 	if ((csw->d_flags & D_DISK) == 0) {
1263 		error = -1;
1264 		goto unref;
1265 	}
1266 	if (job->uiop->uio_resid > dev->si_iosize_max) {
1267 		error = -1;
1268 		goto unref;
1269 	}
1270 
1271 	ki = p->p_aioinfo;
1272 	job->error = 0;
1273 
1274 	use_unmapped = (dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed;
1275 	if (!use_unmapped) {
1276 		AIO_LOCK(ki);
1277 		if (ki->kaio_buffer_count + iovcnt > max_buf_aio) {
1278 			AIO_UNLOCK(ki);
1279 			error = EAGAIN;
1280 			goto unref;
1281 		}
1282 		ki->kaio_buffer_count += iovcnt;
1283 		AIO_UNLOCK(ki);
1284 	}
1285 
1286 	bios = malloc(sizeof(struct bio *) * iovcnt, M_TEMP, M_WAITOK);
1287 	atomic_store_int(&job->nbio, iovcnt);
1288 	for (i = 0; i < iovcnt; i++) {
1289 		struct vm_page **pages;
1290 		struct bio *bp;
1291 		void *buf;
1292 		size_t nbytes;
1293 		int npages;
1294 
1295 		buf = job->uiop->uio_iov[i].iov_base;
1296 		nbytes = job->uiop->uio_iov[i].iov_len;
1297 
1298 		bios[i] = g_alloc_bio();
1299 		bp = bios[i];
1300 
1301 		poff = (vm_offset_t)buf & PAGE_MASK;
1302 		if (use_unmapped) {
1303 			pbuf = NULL;
1304 			pages = malloc(sizeof(vm_page_t) * (atop(round_page(
1305 			    nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO);
1306 		} else {
1307 			pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
1308 			BUF_KERNPROC(pbuf);
1309 			pages = pbuf->b_pages;
1310 		}
1311 
1312 		bp->bio_length = nbytes;
1313 		bp->bio_bcount = nbytes;
1314 		bp->bio_done = aio_biowakeup;
1315 		bp->bio_offset = offset;
1316 		bp->bio_cmd = bio_cmd;
1317 		bp->bio_dev = dev;
1318 		bp->bio_caller1 = job;
1319 		bp->bio_caller2 = pbuf;
1320 
1321 		prot = VM_PROT_READ;
1322 		if (opcode == LIO_READ || opcode == LIO_READV)
1323 			prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
1324 		npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1325 		    (vm_offset_t)buf, bp->bio_length, prot, pages,
1326 		    atop(maxphys) + 1);
1327 		if (npages < 0) {
1328 			if (pbuf != NULL)
1329 				uma_zfree(pbuf_zone, pbuf);
1330 			else
1331 				free(pages, M_TEMP);
1332 			error = EFAULT;
1333 			g_destroy_bio(bp);
1334 			i--;
1335 			goto destroy_bios;
1336 		}
1337 		if (pbuf != NULL) {
1338 			pmap_qenter((vm_offset_t)pbuf->b_data, pages, npages);
1339 			bp->bio_data = pbuf->b_data + poff;
1340 			pbuf->b_npages = npages;
1341 			atomic_add_int(&num_buf_aio, 1);
1342 		} else {
1343 			bp->bio_ma = pages;
1344 			bp->bio_ma_n = npages;
1345 			bp->bio_ma_offset = poff;
1346 			bp->bio_data = unmapped_buf;
1347 			bp->bio_flags |= BIO_UNMAPPED;
1348 			atomic_add_int(&num_unmapped_aio, 1);
1349 		}
1350 
1351 		offset += nbytes;
1352 	}
1353 
1354 	/* Perform transfer. */
1355 	for (i = 0; i < iovcnt; i++)
1356 		csw->d_strategy(bios[i]);
1357 	free(bios, M_TEMP);
1358 
1359 	dev_relthread(dev, ref);
1360 	return (0);
1361 
1362 destroy_bios:
1363 	for (; i >= 0; i--)
1364 		aio_biocleanup(bios[i]);
1365 	free(bios, M_TEMP);
1366 unref:
1367 	dev_relthread(dev, ref);
1368 	return (error);
1369 }
1370 
1371 #ifdef COMPAT_FREEBSD6
1372 static int
1373 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
1374 {
1375 
1376 	/*
1377 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
1378 	 * supported by AIO with the old sigevent structure.
1379 	 */
1380 	nsig->sigev_notify = osig->sigev_notify;
1381 	switch (nsig->sigev_notify) {
1382 	case SIGEV_NONE:
1383 		break;
1384 	case SIGEV_SIGNAL:
1385 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
1386 		break;
1387 	case SIGEV_KEVENT:
1388 		nsig->sigev_notify_kqueue =
1389 		    osig->__sigev_u.__sigev_notify_kqueue;
1390 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
1391 		break;
1392 	default:
1393 		return (EINVAL);
1394 	}
1395 	return (0);
1396 }
1397 
1398 static int
1399 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob,
1400     int type __unused)
1401 {
1402 	struct oaiocb *ojob;
1403 	struct aiocb *kcb = &kjob->uaiocb;
1404 	int error;
1405 
1406 	bzero(kcb, sizeof(struct aiocb));
1407 	error = copyin(ujob, kcb, sizeof(struct oaiocb));
1408 	if (error)
1409 		return (error);
1410 	/* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */
1411 	ojob = (struct oaiocb *)kcb;
1412 	return (convert_old_sigevent(&ojob->aio_sigevent, &kcb->aio_sigevent));
1413 }
1414 #endif
1415 
1416 static int
1417 aiocb_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type)
1418 {
1419 	struct aiocb *kcb = &kjob->uaiocb;
1420 	int error;
1421 
1422 	error = copyin(ujob, kcb, sizeof(struct aiocb));
1423 	if (error)
1424 		return (error);
1425 	if (type == LIO_READV || type == LIO_WRITEV) {
1426 		/* malloc a uio and copy in the iovec */
1427 		error = copyinuio(__DEVOLATILE(struct iovec*, kcb->aio_iov),
1428 		    kcb->aio_iovcnt, &kjob->uiop);
1429 	}
1430 
1431 	return (error);
1432 }
1433 
1434 static long
1435 aiocb_fetch_status(struct aiocb *ujob)
1436 {
1437 
1438 	return (fuword(&ujob->_aiocb_private.status));
1439 }
1440 
1441 static long
1442 aiocb_fetch_error(struct aiocb *ujob)
1443 {
1444 
1445 	return (fuword(&ujob->_aiocb_private.error));
1446 }
1447 
1448 static int
1449 aiocb_store_status(struct aiocb *ujob, long status)
1450 {
1451 
1452 	return (suword(&ujob->_aiocb_private.status, status));
1453 }
1454 
1455 static int
1456 aiocb_store_error(struct aiocb *ujob, long error)
1457 {
1458 
1459 	return (suword(&ujob->_aiocb_private.error, error));
1460 }
1461 
1462 static int
1463 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
1464 {
1465 
1466 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
1467 }
1468 
1469 static int
1470 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
1471 {
1472 
1473 	return (suword(ujobp, (long)ujob));
1474 }
1475 
1476 static struct aiocb_ops aiocb_ops = {
1477 	.aio_copyin = aiocb_copyin,
1478 	.fetch_status = aiocb_fetch_status,
1479 	.fetch_error = aiocb_fetch_error,
1480 	.store_status = aiocb_store_status,
1481 	.store_error = aiocb_store_error,
1482 	.store_kernelinfo = aiocb_store_kernelinfo,
1483 	.store_aiocb = aiocb_store_aiocb,
1484 };
1485 
1486 #ifdef COMPAT_FREEBSD6
1487 static struct aiocb_ops aiocb_ops_osigevent = {
1488 	.aio_copyin = aiocb_copyin_old_sigevent,
1489 	.fetch_status = aiocb_fetch_status,
1490 	.fetch_error = aiocb_fetch_error,
1491 	.store_status = aiocb_store_status,
1492 	.store_error = aiocb_store_error,
1493 	.store_kernelinfo = aiocb_store_kernelinfo,
1494 	.store_aiocb = aiocb_store_aiocb,
1495 };
1496 #endif
1497 
1498 /*
1499  * Queue a new AIO request.  This code chooses between the threaded
1500  * technique and the direct bio technique for eligible VCHR devices.
1501  */
1502 int
1503 aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
1504     int type, struct aiocb_ops *ops)
1505 {
1506 	struct proc *p = td->td_proc;
1507 	struct file *fp = NULL;
1508 	struct kaiocb *job;
1509 	struct kaioinfo *ki;
1510 	struct kevent kev;
1511 	int opcode;
1512 	int error;
1513 	int fd, kqfd;
1514 	int jid;
1515 	u_short evflags;
1516 
1517 	if (p->p_aioinfo == NULL)
1518 		aio_init_aioinfo(p);
1519 
1520 	ki = p->p_aioinfo;
1521 
1522 	ops->store_status(ujob, -1);
1523 	ops->store_error(ujob, 0);
1524 	ops->store_kernelinfo(ujob, -1);
1525 
1526 	if (num_queue_count >= max_queue_count ||
1527 	    ki->kaio_count >= max_aio_queue_per_proc) {
1528 		error = EAGAIN;
1529 		goto err1;
1530 	}
1531 
1532 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1533 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1534 
1535 	error = ops->aio_copyin(ujob, job, type);
1536 	if (error)
1537 		goto err2;
1538 
1539 	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
1540 		error = EINVAL;
1541 		goto err2;
1542 	}
1543 
1544 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1545 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1546 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1547 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1548 		error = EINVAL;
1549 		goto err2;
1550 	}
1551 
1552 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1553 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1554 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
1555 		error = EINVAL;
1556 		goto err2;
1557 	}
1558 
1559 	/* Get the opcode. */
1560 	if (type == LIO_NOP) {
1561 		switch (job->uaiocb.aio_lio_opcode) {
1562 		case LIO_WRITE:
1563 		case LIO_NOP:
1564 		case LIO_READ:
1565 			opcode = job->uaiocb.aio_lio_opcode;
1566 			break;
1567 		default:
1568 			error = EINVAL;
1569 			goto err2;
1570 		}
1571 	} else
1572 		opcode = job->uaiocb.aio_lio_opcode = type;
1573 
1574 	ksiginfo_init(&job->ksi);
1575 
1576 	/* Save userspace address of the job info. */
1577 	job->ujob = ujob;
1578 
1579 	/*
1580 	 * Validate the opcode and fetch the file object for the specified
1581 	 * file descriptor.
1582 	 *
1583 	 * XXXRW: Moved the opcode validation up here so that we don't
1584  * retrieve a file descriptor without knowing what the capability
1585 	 * should be.
1586 	 */
1587 	fd = job->uaiocb.aio_fildes;
1588 	switch (opcode) {
1589 	case LIO_WRITE:
1590 	case LIO_WRITEV:
1591 		error = fget_write(td, fd, &cap_pwrite_rights, &fp);
1592 		break;
1593 	case LIO_READ:
1594 	case LIO_READV:
1595 		error = fget_read(td, fd, &cap_pread_rights, &fp);
1596 		break;
1597 	case LIO_SYNC:
1598 	case LIO_DSYNC:
1599 		error = fget(td, fd, &cap_fsync_rights, &fp);
1600 		break;
1601 	case LIO_MLOCK:
1602 		break;
1603 	case LIO_NOP:
1604 		error = fget(td, fd, &cap_no_rights, &fp);
1605 		break;
1606 	default:
1607 		error = EINVAL;
1608 	}
1609 	if (error)
1610 		goto err3;
1611 
1612 	if ((opcode == LIO_SYNC || opcode == LIO_DSYNC) && fp->f_vnode == NULL) {
1613 		error = EINVAL;
1614 		goto err3;
1615 	}
1616 
1617 	if ((opcode == LIO_READ || opcode == LIO_READV ||
1618 	    opcode == LIO_WRITE || opcode == LIO_WRITEV) &&
1619 	    job->uaiocb.aio_offset < 0 &&
1620 	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) {
1621 		error = EINVAL;
1622 		goto err3;
1623 	}
1624 
1625 	job->fd_file = fp;
1626 
1627 	mtx_lock(&aio_job_mtx);
1628 	jid = jobrefid++;
1629 	job->seqno = jobseqno++;
1630 	mtx_unlock(&aio_job_mtx);
1631 	error = ops->store_kernelinfo(ujob, jid);
1632 	if (error) {
1633 		error = EINVAL;
1634 		goto err3;
1635 	}
1636 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
1637 
1638 	if (opcode == LIO_NOP) {
1639 		fdrop(fp, td);
1640 		MPASS(job->uiop == &job->uio || job->uiop == NULL);
1641 		uma_zfree(aiocb_zone, job);
1642 		return (0);
1643 	}
1644 
1645 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1646 		goto no_kqueue;
1647 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1648 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1649 		error = EINVAL;
1650 		goto err3;
1651 	}
1652 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
1653 	memset(&kev, 0, sizeof(kev));
1654 	kev.ident = (uintptr_t)job->ujob;
1655 	kev.filter = EVFILT_AIO;
1656 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
1657 	kev.data = (intptr_t)job;
1658 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1659 	error = kqfd_register(kqfd, &kev, td, M_WAITOK);
1660 	if (error)
1661 		goto err3;
1662 
1663 no_kqueue:
1664 
1665 	ops->store_error(ujob, EINPROGRESS);
1666 	job->uaiocb._aiocb_private.error = EINPROGRESS;
1667 	job->userproc = p;
1668 	job->cred = crhold(td->td_ucred);
1669 	job->jobflags = KAIOCB_QUEUEING;
1670 	job->lio = lj;
1671 
1672 	switch (opcode) {
1673 	case LIO_READV:
1674 	case LIO_WRITEV:
1675 		/* Use the uio copied in by aio_copyin */
1676 		MPASS(job->uiop != &job->uio && job->uiop != NULL);
1677 		break;
1678 	case LIO_READ:
1679 	case LIO_WRITE:
1680 		/* Setup the inline uio */
1681 		job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
1682 		job->iov[0].iov_len = job->uaiocb.aio_nbytes;
1683 		job->uio.uio_iov = job->iov;
1684 		job->uio.uio_iovcnt = 1;
1685 		job->uio.uio_resid = job->uaiocb.aio_nbytes;
1686 		job->uio.uio_segflg = UIO_USERSPACE;
1687 		/* FALLTHROUGH */
1688 	default:
1689 		job->uiop = &job->uio;
1690 		break;
1691 	}
1692 	switch (opcode) {
1693 	case LIO_READ:
1694 	case LIO_READV:
1695 		job->uiop->uio_rw = UIO_READ;
1696 		break;
1697 	case LIO_WRITE:
1698 	case LIO_WRITEV:
1699 		job->uiop->uio_rw = UIO_WRITE;
1700 		break;
1701 	}
1702 	job->uiop->uio_offset = job->uaiocb.aio_offset;
1703 	job->uiop->uio_td = td;
1704 
1705 	if (opcode == LIO_MLOCK) {
1706 		aio_schedule(job, aio_process_mlock);
1707 		error = 0;
1708 	} else if (fp->f_ops->fo_aio_queue == NULL)
1709 		error = aio_queue_file(fp, job);
1710 	else
1711 		error = fo_aio_queue(fp, job);
1712 	if (error)
1713 		goto err3;
1714 
1715 	AIO_LOCK(ki);
1716 	job->jobflags &= ~KAIOCB_QUEUEING;
1717 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1718 	ki->kaio_count++;
1719 	if (lj)
1720 		lj->lioj_count++;
1721 	atomic_add_int(&num_queue_count, 1);
1722 	if (job->jobflags & KAIOCB_FINISHED) {
1723 		/*
1724 		 * The queue callback completed the request synchronously.
1725 		 * The bulk of the completion is deferred in that case
1726 		 * until this point.
1727 		 */
1728 		aio_bio_done_notify(p, job);
1729 	} else
1730 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1731 	AIO_UNLOCK(ki);
1732 	return (0);
1733 
1734 err3:
1735 	if (fp)
1736 		fdrop(fp, td);
1737 	knlist_delete(&job->klist, curthread, 0);
1738 err2:
1739 	if (job->uiop != &job->uio)
1740 		free(job->uiop, M_IOV);
1741 	uma_zfree(aiocb_zone, job);
1742 err1:
1743 	ops->store_error(ujob, error);
1744 	return (error);
1745 }
1746 
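/*
 * For reference, the simplest userland path into aio_aqueue() is
 * aio_read(2).  A minimal sketch of standard usage, given an open
 * descriptor fd (busy-wait and omitted error handling for brevity):
 *
 *	struct aiocb cb;
 *	static char buf[512];
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == 0) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			;
 *		(void)aio_return(&cb);
 *	}
 */
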
1747 static void
1748 aio_cancel_daemon_job(struct kaiocb *job)
1749 {
1750 
1751 	mtx_lock(&aio_job_mtx);
1752 	if (!aio_cancel_cleared(job))
1753 		TAILQ_REMOVE(&aio_jobs, job, list);
1754 	mtx_unlock(&aio_job_mtx);
1755 	aio_cancel(job);
1756 }
1757 
1758 void
1759 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1760 {
1761 
1762 	mtx_lock(&aio_job_mtx);
1763 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1764 		mtx_unlock(&aio_job_mtx);
1765 		aio_cancel(job);
1766 		return;
1767 	}
1768 	job->handle_fn = func;
1769 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1770 	aio_kick_nowait(job->userproc);
1771 	mtx_unlock(&aio_job_mtx);
1772 }
1773 
1774 static void
1775 aio_cancel_sync(struct kaiocb *job)
1776 {
1777 	struct kaioinfo *ki;
1778 
1779 	ki = job->userproc->p_aioinfo;
1780 	AIO_LOCK(ki);
1781 	if (!aio_cancel_cleared(job))
1782 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1783 	AIO_UNLOCK(ki);
1784 	aio_cancel(job);
1785 }
1786 
1787 int
1788 aio_queue_file(struct file *fp, struct kaiocb *job)
1789 {
1790 	struct kaioinfo *ki;
1791 	struct kaiocb *job2;
1792 	struct vnode *vp;
1793 	struct mount *mp;
1794 	int error;
1795 	bool safe;
1796 
1797 	ki = job->userproc->p_aioinfo;
1798 	error = aio_qbio(job->userproc, job);
1799 	if (error >= 0)
1800 		return (error);
1801 	safe = false;
1802 	if (fp->f_type == DTYPE_VNODE) {
1803 		vp = fp->f_vnode;
1804 		if (vp->v_type == VREG || vp->v_type == VDIR) {
1805 			mp = fp->f_vnode->v_mount;
1806 			if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
1807 				safe = true;
1808 		}
1809 	}
1810 	if (!(safe || enable_aio_unsafe)) {
1811 		counted_warning(&unsafe_warningcnt,
1812 		    "is attempting to use unsafe AIO requests");
1813 		return (EOPNOTSUPP);
1814 	}
1815 
1816 	switch (job->uaiocb.aio_lio_opcode) {
1817 	case LIO_READ:
1818 	case LIO_READV:
1819 	case LIO_WRITE:
1820 	case LIO_WRITEV:
1821 		aio_schedule(job, aio_process_rw);
1822 		error = 0;
1823 		break;
1824 	case LIO_SYNC:
1825 	case LIO_DSYNC:
1826 		AIO_LOCK(ki);
1827 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
1828 			if (job2->fd_file == job->fd_file &&
1829 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
1830 			    job2->uaiocb.aio_lio_opcode != LIO_DSYNC &&
1831 			    job2->seqno < job->seqno) {
1832 				job2->jobflags |= KAIOCB_CHECKSYNC;
1833 				job->pending++;
1834 			}
1835 		}
1836 		if (job->pending != 0) {
1837 			if (!aio_set_cancel_function_locked(job,
1838 				aio_cancel_sync)) {
1839 				AIO_UNLOCK(ki);
1840 				aio_cancel(job);
1841 				return (0);
1842 			}
1843 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1844 			AIO_UNLOCK(ki);
1845 			return (0);
1846 		}
1847 		AIO_UNLOCK(ki);
1848 		aio_schedule(job, aio_process_sync);
1849 		error = 0;
1850 		break;
1851 	default:
1852 		error = EINVAL;
1853 	}
1854 	return (error);
1855 }
1856 
1857 static void
1858 aio_kick_nowait(struct proc *userp)
1859 {
1860 	struct kaioinfo *ki = userp->p_aioinfo;
1861 	struct aioproc *aiop;
1862 
1863 	mtx_assert(&aio_job_mtx, MA_OWNED);
1864 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1865 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
1866 		aiop->aioprocflags &= ~AIOP_FREE;
1867 		wakeup(aiop->aioproc);
1868 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1869 	    ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1870 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
1871 	}
1872 }
1873 
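/*
 * Like aio_kick_nowait(), but creates any new daemon synchronously via
 * aio_newproc(), dropping the job mutex around that call.  Returns 0 if
 * a daemon was woken or created and -1 otherwise.
 */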
1874 static int
1875 aio_kick(struct proc *userp)
1876 {
1877 	struct kaioinfo *ki = userp->p_aioinfo;
1878 	struct aioproc *aiop;
1879 	int error, ret = 0;
1880 
1881 	mtx_assert(&aio_job_mtx, MA_OWNED);
1882 retryproc:
1883 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1884 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
1885 		aiop->aioprocflags &= ~AIOP_FREE;
1886 		wakeup(aiop->aioproc);
1887 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1888 	    ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1889 		num_aio_resv_start++;
1890 		mtx_unlock(&aio_job_mtx);
1891 		error = aio_newproc(&num_aio_resv_start);
1892 		mtx_lock(&aio_job_mtx);
1893 		if (error) {
1894 			num_aio_resv_start--;
1895 			goto retryproc;
1896 		}
1897 	} else {
1898 		ret = -1;
1899 	}
1900 	return (ret);
1901 }
1902 
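/*
 * Taskqueue handler enqueued by aio_kick_nowait(); calls aio_kick()
 * once per pending kick request, stopping early if no daemon can be
 * provided.
 */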
1903 static void
1904 aio_kick_helper(void *context, int pending)
1905 {
1906 	struct proc *userp = context;
1907 
1908 	mtx_lock(&aio_job_mtx);
1909 	while (--pending >= 0) {
1910 		if (aio_kick(userp))
1911 			break;
1912 	}
1913 	mtx_unlock(&aio_job_mtx);
1914 }
1915 
1916 /*
1917  * Support the aio_return system call; as a side effect, kernel
1918  * resources are released.
1919  */
1920 static int
1921 kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1922 {
1923 	struct proc *p = td->td_proc;
1924 	struct kaiocb *job;
1925 	struct kaioinfo *ki;
1926 	long status, error;
1927 
1928 	ki = p->p_aioinfo;
1929 	if (ki == NULL)
1930 		return (EINVAL);
1931 	AIO_LOCK(ki);
1932 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
1933 		if (job->ujob == ujob)
1934 			break;
1935 	}
1936 	if (job != NULL) {
1937 		MPASS(job->jobflags & KAIOCB_FINISHED);
1938 		status = job->uaiocb._aiocb_private.status;
1939 		error = job->uaiocb._aiocb_private.error;
1940 		td->td_retval[0] = status;
1941 		td->td_ru.ru_oublock += job->outblock;
1942 		td->td_ru.ru_inblock += job->inblock;
1943 		td->td_ru.ru_msgsnd += job->msgsnd;
1944 		td->td_ru.ru_msgrcv += job->msgrcv;
1945 		aio_free_entry(job);
1946 		AIO_UNLOCK(ki);
1947 		ops->store_error(ujob, error);
1948 		ops->store_status(ujob, status);
1949 	} else {
1950 		error = EINVAL;
1951 		AIO_UNLOCK(ki);
1952 	}
1953 	return (error);
1954 }
1955 
1956 int
1957 sys_aio_return(struct thread *td, struct aio_return_args *uap)
1958 {
1959 
1960 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
1961 }
1962 
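/*
 * Illustrative userland sketch (not part of this file): the classic
 * submit/poll/reap cycle.  Assumes <aio.h>, <errno.h>, <err.h> and
 * <string.h> are included and that fd and buf were set up by the
 * caller; the empty poll loop is only for brevity.
 *
 *	struct aiocb cb;
 *	ssize_t n;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;
 *	n = aio_return(&cb);
 *
 * aio_return() must be called exactly once per completed request; that
 * call is what releases the kernel resources held by the job.
 */
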
1963 /*
1964  * Allow a process to wake up when any of the listed I/O requests complete.
1965  */
1966 static int
1967 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
1968     struct timespec *ts)
1969 {
1970 	struct proc *p = td->td_proc;
1971 	struct timeval atv;
1972 	struct kaioinfo *ki;
1973 	struct kaiocb *firstjob, *job;
1974 	int error, i, timo;
1975 
1976 	timo = 0;
1977 	if (ts) {
1978 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
1979 			return (EINVAL);
1980 
1981 		TIMESPEC_TO_TIMEVAL(&atv, ts);
1982 		if (itimerfix(&atv))
1983 			return (EINVAL);
1984 		timo = tvtohz(&atv);
1985 	}
1986 
1987 	ki = p->p_aioinfo;
1988 	if (ki == NULL)
1989 		return (EAGAIN);
1990 
1991 	if (njoblist == 0)
1992 		return (0);
1993 
1994 	AIO_LOCK(ki);
1995 	for (;;) {
1996 		firstjob = NULL;
1997 		error = 0;
1998 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
1999 			for (i = 0; i < njoblist; i++) {
2000 				if (job->ujob == ujoblist[i]) {
2001 					if (firstjob == NULL)
2002 						firstjob = job;
2003 					if (job->jobflags & KAIOCB_FINISHED)
2004 						goto RETURN;
2005 				}
2006 			}
2007 		}
2008 		/* All requested jobs have finished. */
2009 		if (firstjob == NULL)
2010 			break;
2011 
2012 		ki->kaio_flags |= KAIO_WAKEUP;
2013 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2014 		    "aiospn", timo);
2015 		if (error == ERESTART)
2016 			error = EINTR;
2017 		if (error)
2018 			break;
2019 	}
2020 RETURN:
2021 	AIO_UNLOCK(ki);
2022 	return (error);
2023 }
2024 
2025 int
2026 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
2027 {
2028 	struct timespec ts, *tsp;
2029 	struct aiocb **ujoblist;
2030 	int error;
2031 
2032 	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
2033 		return (EINVAL);
2034 
2035 	if (uap->timeout) {
2036 		/* Get timespec struct. */
2037 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
2038 			return (error);
2039 		tsp = &ts;
2040 	} else
2041 		tsp = NULL;
2042 
2043 	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
2044 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
2045 	if (error == 0)
2046 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2047 	free(ujoblist, M_AIOS);
2048 	return (error);
2049 }
2050 
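/*
 * Illustrative userland sketch (not part of this file): sleep for up
 * to one second instead of spinning on aio_error().  Assumes cb is an
 * outstanding request and <errno.h> and <err.h> are included.
 *
 *	const struct aiocb *list[1] = { &cb };
 *	struct timespec ts = { .tv_sec = 1 };
 *
 *	if (aio_suspend(list, 1, &ts) == -1 && errno == EAGAIN)
 *		warnx("no completion within one second");
 */
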
2051 /*
2052  * aio_cancel cancels any non-bio aio operations not currently in progress.
2053  */
2054 int
2055 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
2056 {
2057 	struct proc *p = td->td_proc;
2058 	struct kaioinfo *ki;
2059 	struct kaiocb *job, *jobn;
2060 	struct file *fp;
2061 	int error;
2062 	int cancelled = 0;
2063 	int notcancelled = 0;
2064 	struct vnode *vp;
2065 
2066 	/* Lookup file object. */
2067 	error = fget(td, uap->fd, &cap_no_rights, &fp);
2068 	if (error)
2069 		return (error);
2070 
2071 	ki = p->p_aioinfo;
2072 	if (ki == NULL)
2073 		goto done;
2074 
2075 	if (fp->f_type == DTYPE_VNODE) {
2076 		vp = fp->f_vnode;
2077 		if (vn_isdisk(vp)) {
2078 			fdrop(fp, td);
2079 			td->td_retval[0] = AIO_NOTCANCELED;
2080 			return (0);
2081 		}
2082 	}
2083 
2084 	AIO_LOCK(ki);
2085 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
2086 		if ((uap->fd == job->uaiocb.aio_fildes) &&
2087 		    ((uap->aiocbp == NULL) ||
2088 		     (uap->aiocbp == job->ujob))) {
2089 			if (aio_cancel_job(p, ki, job)) {
2090 				cancelled++;
2091 			} else {
2092 				notcancelled++;
2093 			}
2094 			if (uap->aiocbp != NULL)
2095 				break;
2096 		}
2097 	}
2098 	AIO_UNLOCK(ki);
2099 
2100 done:
2101 	fdrop(fp, td);
2102 
2103 	if (uap->aiocbp != NULL) {
2104 		if (cancelled) {
2105 			td->td_retval[0] = AIO_CANCELED;
2106 			return (0);
2107 		}
2108 	}
2109 
2110 	if (notcancelled) {
2111 		td->td_retval[0] = AIO_NOTCANCELED;
2112 		return (0);
2113 	}
2114 
2115 	if (cancelled) {
2116 		td->td_retval[0] = AIO_CANCELED;
2117 		return (0);
2118 	}
2119 
2120 	td->td_retval[0] = AIO_ALLDONE;
2121 
2122 	return (0);
2123 }
2124 
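/*
 * Illustrative userland sketch (not part of this file): interpreting
 * the three aio_cancel() results for one outstanding request cb on
 * descriptor fd.
 *
 *	switch (aio_cancel(fd, &cb)) {
 *	case AIO_CANCELED:
 *		(void)aio_return(&cb);
 *		break;
 *	case AIO_NOTCANCELED:
 *		break;
 *	case AIO_ALLDONE:
 *		break;
 *	}
 *
 * AIO_CANCELED means the request was removed and aio_return() reclaims
 * it; AIO_NOTCANCELED means it is still in progress and must be reaped
 * later; AIO_ALLDONE means it had already completed.
 */
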
2125 /*
2126  * aio_error is implemented in the kernel for compatibility purposes
2127  * only.  For a user-mode async implementation, it would be best done
2128  * in a userland subroutine.
2129  */
2130 static int
2131 kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2132 {
2133 	struct proc *p = td->td_proc;
2134 	struct kaiocb *job;
2135 	struct kaioinfo *ki;
2136 	int status;
2137 
2138 	ki = p->p_aioinfo;
2139 	if (ki == NULL) {
2140 		td->td_retval[0] = EINVAL;
2141 		return (0);
2142 	}
2143 
2144 	AIO_LOCK(ki);
2145 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
2146 		if (job->ujob == ujob) {
2147 			if (job->jobflags & KAIOCB_FINISHED)
2148 				td->td_retval[0] =
2149 					job->uaiocb._aiocb_private.error;
2150 			else
2151 				td->td_retval[0] = EINPROGRESS;
2152 			AIO_UNLOCK(ki);
2153 			return (0);
2154 		}
2155 	}
2156 	AIO_UNLOCK(ki);
2157 
2158 	/*
2159 	 * Hack for aio_aqueue() failure: report the error it stored.
2160 	 */
2161 	status = ops->fetch_status(ujob);
2162 	if (status == -1) {
2163 		td->td_retval[0] = ops->fetch_error(ujob);
2164 		return (0);
2165 	}
2166 
2167 	td->td_retval[0] = EINVAL;
2168 	return (0);
2169 }
2170 
2171 int
2172 sys_aio_error(struct thread *td, struct aio_error_args *uap)
2173 {
2174 
2175 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
2176 }
2177 
2178 /* syscall - asynchronous read from a file (REALTIME) */
2179 #ifdef COMPAT_FREEBSD6
2180 int
2181 freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
2182 {
2183 
2184 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2185 	    &aiocb_ops_osigevent));
2186 }
2187 #endif
2188 
2189 int
2190 sys_aio_read(struct thread *td, struct aio_read_args *uap)
2191 {
2192 
2193 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2194 }
2195 
2196 int
2197 sys_aio_readv(struct thread *td, struct aio_readv_args *uap)
2198 {
2199 
2200 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READV, &aiocb_ops));
2201 }
2202 
2203 /* syscall - asynchronous write to a file (REALTIME) */
2204 #ifdef COMPAT_FREEBSD6
2205 int
2206 freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
2207 {
2208 
2209 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2210 	    &aiocb_ops_osigevent));
2211 }
2212 #endif
2213 
2214 int
2215 sys_aio_write(struct thread *td, struct aio_write_args *uap)
2216 {
2217 
2218 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
2219 }
2220 
2221 int
2222 sys_aio_writev(struct thread *td, struct aio_writev_args *uap)
2223 {
2224 
2225 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITEV, &aiocb_ops));
2226 }
2227 
2228 int
2229 sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
2230 {
2231 
2232 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
2233 }
2234 
2235 static int
2236 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
2237     struct aiocb **acb_list, int nent, struct sigevent *sig,
2238     struct aiocb_ops *ops)
2239 {
2240 	struct proc *p = td->td_proc;
2241 	struct aiocb *job;
2242 	struct kaioinfo *ki;
2243 	struct aioliojob *lj;
2244 	struct kevent kev;
2245 	int error;
2246 	int nagain, nerror;
2247 	int i;
2248 
2249 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2250 		return (EINVAL);
2251 
2252 	if (nent < 0 || nent > max_aio_queue_per_proc)
2253 		return (EINVAL);
2254 
2255 	if (p->p_aioinfo == NULL)
2256 		aio_init_aioinfo(p);
2257 
2258 	ki = p->p_aioinfo;
2259 
2260 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
2261 	lj->lioj_flags = 0;
2262 	lj->lioj_count = 0;
2263 	lj->lioj_finished_count = 0;
2264 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
2265 	ksiginfo_init(&lj->lioj_ksi);
2266 
2267 	/*
2268 	 * Set up the signal.
2269 	 */
2270 	if (sig && (mode == LIO_NOWAIT)) {
2271 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
2272 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2273 			/* Assume only new style KEVENT */
2274 			memset(&kev, 0, sizeof(kev));
2275 			kev.filter = EVFILT_LIO;
2276 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
2277 			kev.ident = (uintptr_t)uacb_list; /* something unique */
2278 			kev.data = (intptr_t)lj;
2279 			/* pass user defined sigval data */
2280 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
2281 			error = kqfd_register(
2282 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td,
2283 			    M_WAITOK);
2284 			if (error) {
2285 				uma_zfree(aiolio_zone, lj);
2286 				return (error);
2287 			}
2288 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
2289 			;
2290 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2291 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
2292 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
2293 					uma_zfree(aiolio_zone, lj);
2294 					return (EINVAL);
2295 				}
2296 				lj->lioj_flags |= LIOJ_SIGNAL;
2297 		} else {
2298 			uma_zfree(aiolio_zone, lj);
2299 			return (EINVAL);
2300 		}
2301 	}
2302 
2303 	AIO_LOCK(ki);
2304 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
2305 	/*
2306 	 * Add an extra aiocb reference to keep the lio from being freed
2307 	 * by other threads doing aio_waitcomplete() or aio_return(), and
2308 	 * to prevent the event from being sent until we have queued all
2309 	 * tasks.
2310 	 */
2311 	lj->lioj_count = 1;
2312 	AIO_UNLOCK(ki);
2313 
2314 	/*
2315 	 * Queue each of the listed I/O requests.
2316 	 */
2317 	nagain = 0;
2318 	nerror = 0;
2319 	for (i = 0; i < nent; i++) {
2320 		job = acb_list[i];
2321 		if (job != NULL) {
2322 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
2323 			if (error == EAGAIN)
2324 				nagain++;
2325 			else if (error != 0)
2326 				nerror++;
2327 		}
2328 	}
2329 
2330 	error = 0;
2331 	AIO_LOCK(ki);
2332 	if (mode == LIO_WAIT) {
2333 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2334 			ki->kaio_flags |= KAIO_WAKEUP;
2335 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
2336 			    PRIBIO | PCATCH, "aiospn", 0);
2337 			if (error == ERESTART)
2338 				error = EINTR;
2339 			if (error)
2340 				break;
2341 		}
2342 	} else {
2343 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
2344 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2345 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
2346 				KNOTE_LOCKED(&lj->klist, 1);
2347 			}
2348 			if ((lj->lioj_flags & (LIOJ_SIGNAL |
2349 			    LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL &&
2350 			    (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2351 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
2352 				aio_sendsig(p, &lj->lioj_signal, &lj->lioj_ksi,
2353 				    lj->lioj_count != 1);
2354 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2355 			}
2356 		}
2357 	}
2358 	lj->lioj_count--;
2359 	if (lj->lioj_count == 0) {
2360 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
2361 		knlist_delete(&lj->klist, curthread, 1);
2362 		PROC_LOCK(p);
2363 		sigqueue_take(&lj->lioj_ksi);
2364 		PROC_UNLOCK(p);
2365 		AIO_UNLOCK(ki);
2366 		uma_zfree(aiolio_zone, lj);
2367 	} else
2368 		AIO_UNLOCK(ki);
2369 
2370 	if (nerror)
2371 		return (EIO);
2372 	else if (nagain)
2373 		return (EAGAIN);
2374 	else
2375 		return (error);
2376 }
2377 
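/*
 * Illustrative userland sketch (not part of this file): submit a read
 * and a write as one batch and wait for both.  Assumes rd and wr are
 * aiocbs whose descriptor, buffer and length fields are already filled
 * in; with lio_listio() each aiocb carries its own opcode.
 *
 *	struct aiocb *list[2] = { &rd, &wr };
 *
 *	rd.aio_lio_opcode = LIO_READ;
 *	wr.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *		err(1, "lio_listio");
 */
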
2378 /* syscall - list directed I/O (REALTIME) */
2379 #ifdef COMPAT_FREEBSD6
2380 int
2381 freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
2382 {
2383 	struct aiocb **acb_list;
2384 	struct sigevent *sigp, sig;
2385 	struct osigevent osig;
2386 	int error, nent;
2387 
2388 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2389 		return (EINVAL);
2390 
2391 	nent = uap->nent;
2392 	if (nent < 0 || nent > max_aio_queue_per_proc)
2393 		return (EINVAL);
2394 
2395 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2396 		error = copyin(uap->sig, &osig, sizeof(osig));
2397 		if (error)
2398 			return (error);
2399 		error = convert_old_sigevent(&osig, &sig);
2400 		if (error)
2401 			return (error);
2402 		sigp = &sig;
2403 	} else
2404 		sigp = NULL;
2405 
2406 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2407 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2408 	if (error == 0)
2409 		error = kern_lio_listio(td, uap->mode,
2410 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2411 		    &aiocb_ops_osigevent);
2412 	free(acb_list, M_LIO);
2413 	return (error);
2414 }
2415 #endif
2416 
2417 /* syscall - list directed I/O (REALTIME) */
2418 int
2419 sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
2420 {
2421 	struct aiocb **acb_list;
2422 	struct sigevent *sigp, sig;
2423 	int error, nent;
2424 
2425 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2426 		return (EINVAL);
2427 
2428 	nent = uap->nent;
2429 	if (nent < 0 || nent > max_aio_queue_per_proc)
2430 		return (EINVAL);
2431 
2432 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2433 		error = copyin(uap->sig, &sig, sizeof(sig));
2434 		if (error)
2435 			return (error);
2436 		sigp = &sig;
2437 	} else
2438 		sigp = NULL;
2439 
2440 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2441 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2442 	if (error == 0)
2443 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
2444 		    nent, sigp, &aiocb_ops);
2445 	free(acb_list, M_LIO);
2446 	return (error);
2447 }
2448 
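/*
 * Release the resources tied to a completed bio: for mapped I/O, the
 * kernel mapping, the page holds and the pbuf; for unmapped I/O, the
 * page holds and the page array.  The bio itself is destroyed last.
 */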
2449 static void
2450 aio_biocleanup(struct bio *bp)
2451 {
2452 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2453 	struct kaioinfo *ki;
2454 	struct buf *pbuf = (struct buf *)bp->bio_caller2;
2455 
2456 	/* Release mapping into kernel space. */
2457 	if (pbuf != NULL) {
2458 		MPASS(pbuf->b_npages <= atop(maxphys) + 1);
2459 		pmap_qremove((vm_offset_t)pbuf->b_data, pbuf->b_npages);
2460 		vm_page_unhold_pages(pbuf->b_pages, pbuf->b_npages);
2461 		uma_zfree(pbuf_zone, pbuf);
2462 		atomic_subtract_int(&num_buf_aio, 1);
2463 		ki = job->userproc->p_aioinfo;
2464 		AIO_LOCK(ki);
2465 		ki->kaio_buffer_count--;
2466 		AIO_UNLOCK(ki);
2467 	} else {
2468 		MPASS(bp->bio_ma_n <= atop(maxphys) + 1);
2469 		vm_page_unhold_pages(bp->bio_ma, bp->bio_ma_n);
2470 		free(bp->bio_ma, M_TEMP);
2471 		atomic_subtract_int(&num_unmapped_aio, 1);
2472 	}
2473 	g_destroy_bio(bp);
2474 }
2475 
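/*
 * Bio completion callback: account for the bytes and blocks this bio
 * transferred, record any error, and finish the job once its last
 * outstanding bio has completed.
 */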
2476 static void
2477 aio_biowakeup(struct bio *bp)
2478 {
2479 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2480 	size_t nbytes;
2481 	long bcount = bp->bio_bcount;
2482 	long resid = bp->bio_resid;
2483 	int error, opcode, nblks;
2484 	int bio_error = bp->bio_error;
2485 	uint16_t flags = bp->bio_flags;
2486 
2487 	opcode = job->uaiocb.aio_lio_opcode;
2488 
2489 	aio_biocleanup(bp);
2490 
2491 	nbytes = bcount - resid;
2492 	atomic_add_acq_long(&job->nbytes, nbytes);
2493 	nblks = btodb(nbytes);
2494 	error = 0;
2495 	/*
2496 	 * If multiple bios experienced an error, the job will reflect the
2497 	 * error of whichever failed bio completed last.
2498 	 */
2499 	if (flags & BIO_ERROR)
2500 		atomic_store_int(&job->error, bio_error);
2501 	if (opcode == LIO_WRITE || opcode == LIO_WRITEV)
2502 		atomic_add_int(&job->outblock, nblks);
2503 	else
2504 		atomic_add_int(&job->inblock, nblks);
2505 
2506 	/* Complete the job once its last outstanding bio finishes. */
2507 	if (atomic_fetchadd_int(&job->nbio, -1) == 1) {
2509 		if (atomic_load_int(&job->error))
2510 			aio_complete(job, -1, job->error);
2511 		else
2512 			aio_complete(job, atomic_load_long(&job->nbytes), 0);
2513 	}
2514 }
2515 
2516 /* syscall - wait for the next completion of an aio request */
2517 static int
2518 kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
2519     struct timespec *ts, struct aiocb_ops *ops)
2520 {
2521 	struct proc *p = td->td_proc;
2522 	struct timeval atv;
2523 	struct kaioinfo *ki;
2524 	struct kaiocb *job;
2525 	struct aiocb *ujob;
2526 	long error, status;
2527 	int timo;
2528 
2529 	ops->store_aiocb(ujobp, NULL);
2530 
2531 	if (ts == NULL) {
2532 		timo = 0;
2533 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
2534 		timo = -1;
2535 	} else {
2536 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2537 			return (EINVAL);
2538 
2539 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2540 		if (itimerfix(&atv))
2541 			return (EINVAL);
2542 		timo = tvtohz(&atv);
2543 	}
2544 
2545 	if (p->p_aioinfo == NULL)
2546 		aio_init_aioinfo(p);
2547 	ki = p->p_aioinfo;
2548 
2549 	error = 0;
2550 	job = NULL;
2551 	AIO_LOCK(ki);
2552 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2553 		if (timo == -1) {
2554 			error = EWOULDBLOCK;
2555 			break;
2556 		}
2557 		ki->kaio_flags |= KAIO_WAKEUP;
2558 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2559 		    "aiowc", timo);
2560 		if (timo && error == ERESTART)
2561 			error = EINTR;
2562 		if (error)
2563 			break;
2564 	}
2565 
2566 	if (job != NULL) {
2567 		MPASS(job->jobflags & KAIOCB_FINISHED);
2568 		ujob = job->ujob;
2569 		status = job->uaiocb._aiocb_private.status;
2570 		error = job->uaiocb._aiocb_private.error;
2571 		td->td_retval[0] = status;
2572 		td->td_ru.ru_oublock += job->outblock;
2573 		td->td_ru.ru_inblock += job->inblock;
2574 		td->td_ru.ru_msgsnd += job->msgsnd;
2575 		td->td_ru.ru_msgrcv += job->msgrcv;
2576 		aio_free_entry(job);
2577 		AIO_UNLOCK(ki);
2578 		ops->store_aiocb(ujobp, ujob);
2579 		ops->store_error(ujob, error);
2580 		ops->store_status(ujob, status);
2581 	} else
2582 		AIO_UNLOCK(ki);
2583 
2584 	return (error);
2585 }
2586 
2587 int
2588 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2589 {
2590 	struct timespec ts, *tsp;
2591 	int error;
2592 
2593 	if (uap->timeout) {
2594 		/* Get timespec struct. */
2595 		error = copyin(uap->timeout, &ts, sizeof(ts));
2596 		if (error)
2597 			return (error);
2598 		tsp = &ts;
2599 	} else
2600 		tsp = NULL;
2601 
2602 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
2603 }
2604 
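/*
 * Illustrative userland sketch (not part of this file): the
 * FreeBSD-specific aio_waitcomplete() waits for and reaps the next
 * completed request in one call, so no aio_error()/aio_return() pair
 * is needed.  A NULL timeout blocks indefinitely; a zeroed timespec
 * polls.
 *
 *	struct aiocb *donep;
 *	ssize_t n;
 *
 *	n = aio_waitcomplete(&donep, NULL);
 *	if (n == -1)
 *		err(1, "aio_waitcomplete");
 */
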
2605 static int
2606 kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
2607     struct aiocb_ops *ops)
2608 {
2609 	int listop;
2610 
2611 	switch (op) {
2612 	case O_SYNC:
2613 		listop = LIO_SYNC;
2614 		break;
2615 	case O_DSYNC:
2616 		listop = LIO_DSYNC;
2617 		break;
2618 	default:
2619 		return (EINVAL);
2620 	}
2621 
2622 	return (aio_aqueue(td, ujob, NULL, listop, ops));
2623 }
2624 
2625 int
2626 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
2627 {
2628 
2629 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
2630 }
2631 
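/*
 * Illustrative userland sketch (not part of this file): queue an
 * asynchronous flush for a descriptor already stored in cb.aio_fildes.
 * O_DSYNC requests fdatasync() semantics and O_SYNC requests fsync()
 * semantics; completion is reaped like any other request.
 *
 *	if (aio_fsync(O_DSYNC, &cb) == -1)
 *		err(1, "aio_fsync");
 */
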
2632 /* kqueue attach function */
2633 static int
2634 filt_aioattach(struct knote *kn)
2635 {
2636 	struct kaiocb *job;
2637 
2638 	job = (struct kaiocb *)(uintptr_t)kn->kn_sdata;
2639 
2640 	/*
2641 	 * The job pointer must be validated before using it, so
2642 	 * registration is restricted to the kernel; the user cannot
2643 	 * set EV_FLAG1.
2644 	 */
2645 	if ((kn->kn_flags & EV_FLAG1) == 0)
2646 		return (EPERM);
2647 	kn->kn_ptr.p_aio = job;
2648 	kn->kn_flags &= ~EV_FLAG1;
2649 
2650 	knlist_add(&job->klist, kn, 0);
2651 
2652 	return (0);
2653 }
2654 
2655 /* kqueue detach function */
2656 static void
2657 filt_aiodetach(struct knote *kn)
2658 {
2659 	struct knlist *knl;
2660 
2661 	knl = &kn->kn_ptr.p_aio->klist;
2662 	knl->kl_lock(knl->kl_lockarg);
2663 	if (!knlist_empty(knl))
2664 		knlist_remove(knl, kn, 1);
2665 	knl->kl_unlock(knl->kl_lockarg);
2666 }
2667 
2668 /* kqueue filter function */
2669 /*ARGSUSED*/
2670 static int
2671 filt_aio(struct knote *kn, long hint)
2672 {
2673 	struct kaiocb *job = kn->kn_ptr.p_aio;
2674 
2675 	kn->kn_data = job->uaiocb._aiocb_private.error;
2676 	if (!(job->jobflags & KAIOCB_FINISHED))
2677 		return (0);
2678 	kn->kn_flags |= EV_EOF;
2679 	return (1);
2680 }
2681 
2682 /* kqueue attach function */
2683 static int
2684 filt_lioattach(struct knote *kn)
2685 {
2686 	struct aioliojob *lj;
2687 
2688 	lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata;
2689 
2690 	/*
2691 	 * The aioliojob pointer must be validated before using it, so
2692 	 * registration is restricted to the kernel; the user cannot
2693 	 * set EV_FLAG1.
2694 	 */
2695 	if ((kn->kn_flags & EV_FLAG1) == 0)
2696 		return (EPERM);
2697 	kn->kn_ptr.p_lio = lj;
2698 	kn->kn_flags &= ~EV_FLAG1;
2699 
2700 	knlist_add(&lj->klist, kn, 0);
2701 
2702 	return (0);
2703 }
2704 
2705 /* kqueue detach function */
2706 static void
2707 filt_liodetach(struct knote *kn)
2708 {
2709 	struct knlist *knl;
2710 
2711 	knl = &kn->kn_ptr.p_lio->klist;
2712 	knl->kl_lock(knl->kl_lockarg);
2713 	if (!knlist_empty(knl))
2714 		knlist_remove(knl, kn, 1);
2715 	knl->kl_unlock(knl->kl_lockarg);
2716 }
2717 
2718 /* kqueue filter function */
2719 /*ARGSUSED*/
2720 static int
2721 filt_lio(struct knote *kn, long hint)
2722 {
2723 	struct aioliojob *lj = kn->kn_ptr.p_lio;
2724 
2725 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
2726 }
2727 
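/*
 * Illustrative userland sketch (not part of this file): request
 * completion notification through a kqueue instead of polling.  The
 * delivered event has filter EVFILT_AIO, its ident is the aiocb
 * pointer, and its udata carries sigev_value.sival_ptr.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *		(void)aio_return((struct aiocb *)ev.ident);
 */
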
2728 #ifdef COMPAT_FREEBSD32
2729 #include <sys/mount.h>
2730 #include <sys/socket.h>
2731 #include <compat/freebsd32/freebsd32.h>
2732 #include <compat/freebsd32/freebsd32_proto.h>
2733 #include <compat/freebsd32/freebsd32_signal.h>
2734 #include <compat/freebsd32/freebsd32_syscall.h>
2735 #include <compat/freebsd32/freebsd32_util.h>
2736 
2737 struct __aiocb_private32 {
2738 	int32_t	status;
2739 	int32_t	error;
2740 	uint32_t kernelinfo;
2741 };
2742 
2743 #ifdef COMPAT_FREEBSD6
2744 typedef struct oaiocb32 {
2745 	int	aio_fildes;		/* File descriptor */
2746 	uint64_t aio_offset __packed;	/* File offset for I/O */
2747 	uint32_t aio_buf;		/* I/O buffer in process space */
2748 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
2749 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
2750 	int	aio_lio_opcode;		/* LIO opcode */
2751 	int	aio_reqprio;		/* Request priority -- ignored */
2752 	struct	__aiocb_private32 _aiocb_private;
2753 } oaiocb32_t;
2754 #endif
2755 
2756 typedef struct aiocb32 {
2757 	int32_t	aio_fildes;		/* File descriptor */
2758 	uint64_t aio_offset __packed;	/* File offset for I/O */
2759 	union { uint32_t aio_buf; uint32_t aio_iov; };	/* Buffer or iovec list */
2760 	union { uint32_t aio_nbytes; int32_t aio_iovcnt; }; /* Byte/iovec count */
2761 	int	__spare__[2];
2762 	uint32_t __spare2__;
2763 	int	aio_lio_opcode;		/* LIO opcode */
2764 	int	aio_reqprio;		/* Request priority -- ignored */
2765 	struct	__aiocb_private32 _aiocb_private;
2766 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
2767 } aiocb32_t;
2768 
2769 #ifdef COMPAT_FREEBSD6
2770 static int
2771 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
2772 {
2773 
2774 	/*
2775 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
2776 	 * supported by AIO with the old sigevent structure.
2777 	 */
2778 	CP(*osig, *nsig, sigev_notify);
2779 	switch (nsig->sigev_notify) {
2780 	case SIGEV_NONE:
2781 		break;
2782 	case SIGEV_SIGNAL:
2783 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
2784 		break;
2785 	case SIGEV_KEVENT:
2786 		nsig->sigev_notify_kqueue =
2787 		    osig->__sigev_u.__sigev_notify_kqueue;
2788 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
2789 		break;
2790 	default:
2791 		return (EINVAL);
2792 	}
2793 	return (0);
2794 }
2795 
2796 static int
2797 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob,
2798     int type __unused)
2799 {
2800 	struct oaiocb32 job32;
2801 	struct aiocb *kcb = &kjob->uaiocb;
2802 	int error;
2803 
2804 	bzero(kcb, sizeof(struct aiocb));
2805 	error = copyin(ujob, &job32, sizeof(job32));
2806 	if (error)
2807 		return (error);
2808 
2809 	/* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */
2810 
2811 	CP(job32, *kcb, aio_fildes);
2812 	CP(job32, *kcb, aio_offset);
2813 	PTRIN_CP(job32, *kcb, aio_buf);
2814 	CP(job32, *kcb, aio_nbytes);
2815 	CP(job32, *kcb, aio_lio_opcode);
2816 	CP(job32, *kcb, aio_reqprio);
2817 	CP(job32, *kcb, _aiocb_private.status);
2818 	CP(job32, *kcb, _aiocb_private.error);
2819 	PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo);
2820 	return (convert_old_sigevent32(&job32.aio_sigevent,
2821 	    &kcb->aio_sigevent));
2822 }
2823 #endif
2824 
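/*
 * Copy in a 32-bit aiocb from userland and convert it to the native
 * layout, pulling in the iovec array for vectored requests.
 */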
2825 static int
2826 aiocb32_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type)
2827 {
2828 	struct aiocb32 job32;
2829 	struct aiocb *kcb = &kjob->uaiocb;
2830 	struct iovec32 *iov32;
2831 	int error;
2832 
2833 	error = copyin(ujob, &job32, sizeof(job32));
2834 	if (error)
2835 		return (error);
2836 	CP(job32, *kcb, aio_fildes);
2837 	CP(job32, *kcb, aio_offset);
2838 	CP(job32, *kcb, aio_lio_opcode);
2839 	if (type == LIO_READV || type == LIO_WRITEV) {
2840 		iov32 = PTRIN(job32.aio_iov);
2841 		CP(job32, *kcb, aio_iovcnt);
2842 		/* malloc a uio and copy in the iovec */
2843 		error = freebsd32_copyinuio(iov32,
2844 		    kcb->aio_iovcnt, &kjob->uiop);
2845 		if (error)
2846 			return (error);
2847 	} else {
2848 		PTRIN_CP(job32, *kcb, aio_buf);
2849 		CP(job32, *kcb, aio_nbytes);
2850 	}
2851 	CP(job32, *kcb, aio_reqprio);
2852 	CP(job32, *kcb, _aiocb_private.status);
2853 	CP(job32, *kcb, _aiocb_private.error);
2854 	PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo);
2855 	error = convert_sigevent32(&job32.aio_sigevent, &kcb->aio_sigevent);
2856 
2857 	return (error);
2858 }
2859 
2860 static long
2861 aiocb32_fetch_status(struct aiocb *ujob)
2862 {
2863 	struct aiocb32 *ujob32;
2864 
2865 	ujob32 = (struct aiocb32 *)ujob;
2866 	return (fuword32(&ujob32->_aiocb_private.status));
2867 }
2868 
2869 static long
2870 aiocb32_fetch_error(struct aiocb *ujob)
2871 {
2872 	struct aiocb32 *ujob32;
2873 
2874 	ujob32 = (struct aiocb32 *)ujob;
2875 	return (fuword32(&ujob32->_aiocb_private.error));
2876 }
2877 
2878 static int
2879 aiocb32_store_status(struct aiocb *ujob, long status)
2880 {
2881 	struct aiocb32 *ujob32;
2882 
2883 	ujob32 = (struct aiocb32 *)ujob;
2884 	return (suword32(&ujob32->_aiocb_private.status, status));
2885 }
2886 
2887 static int
2888 aiocb32_store_error(struct aiocb *ujob, long error)
2889 {
2890 	struct aiocb32 *ujob32;
2891 
2892 	ujob32 = (struct aiocb32 *)ujob;
2893 	return (suword32(&ujob32->_aiocb_private.error, error));
2894 }
2895 
2896 static int
2897 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
2898 {
2899 	struct aiocb32 *ujob32;
2900 
2901 	ujob32 = (struct aiocb32 *)ujob;
2902 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
2903 }
2904 
2905 static int
2906 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
2907 {
2908 
2909 	return (suword32(ujobp, (long)ujob));
2910 }
2911 
2912 static struct aiocb_ops aiocb32_ops = {
2913 	.aio_copyin = aiocb32_copyin,
2914 	.fetch_status = aiocb32_fetch_status,
2915 	.fetch_error = aiocb32_fetch_error,
2916 	.store_status = aiocb32_store_status,
2917 	.store_error = aiocb32_store_error,
2918 	.store_kernelinfo = aiocb32_store_kernelinfo,
2919 	.store_aiocb = aiocb32_store_aiocb,
2920 };
2921 
2922 #ifdef COMPAT_FREEBSD6
2923 static struct aiocb_ops aiocb32_ops_osigevent = {
2924 	.aio_copyin = aiocb32_copyin_old_sigevent,
2925 	.fetch_status = aiocb32_fetch_status,
2926 	.fetch_error = aiocb32_fetch_error,
2927 	.store_status = aiocb32_store_status,
2928 	.store_error = aiocb32_store_error,
2929 	.store_kernelinfo = aiocb32_store_kernelinfo,
2930 	.store_aiocb = aiocb32_store_aiocb,
2931 };
2932 #endif
2933 
2934 int
2935 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
2936 {
2937 
2938 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2939 }
2940 
2941 int
2942 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
2943 {
2944 	struct timespec32 ts32;
2945 	struct timespec ts, *tsp;
2946 	struct aiocb **ujoblist;
2947 	uint32_t *ujoblist32;
2948 	int error, i;
2949 
2950 	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
2951 		return (EINVAL);
2952 
2953 	if (uap->timeout) {
2954 		/* Get timespec struct. */
2955 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
2956 			return (error);
2957 		CP(ts32, ts, tv_sec);
2958 		CP(ts32, ts, tv_nsec);
2959 		tsp = &ts;
2960 	} else
2961 		tsp = NULL;
2962 
2963 	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
2964 	ujoblist32 = (uint32_t *)ujoblist;
2965 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
2966 	    sizeof(ujoblist32[0]));
2967 	if (error == 0) {
2968 		for (i = uap->nent - 1; i >= 0; i--)
2969 			ujoblist[i] = PTRIN(ujoblist32[i]);
2970 
2971 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2972 	}
2973 	free(ujoblist, M_AIOS);
2974 	return (error);
2975 }
2976 
2977 int
2978 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
2979 {
2980 
2981 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2982 }
2983 
2984 #ifdef COMPAT_FREEBSD6
2985 int
2986 freebsd6_freebsd32_aio_read(struct thread *td,
2987     struct freebsd6_freebsd32_aio_read_args *uap)
2988 {
2989 
2990 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2991 	    &aiocb32_ops_osigevent));
2992 }
2993 #endif
2994 
2995 int
2996 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
2997 {
2998 
2999 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
3000 	    &aiocb32_ops));
3001 }
3002 
3003 int
3004 freebsd32_aio_readv(struct thread *td, struct freebsd32_aio_readv_args *uap)
3005 {
3006 
3007 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READV,
3008 	    &aiocb32_ops));
3009 }
3010 
3011 #ifdef COMPAT_FREEBSD6
3012 int
3013 freebsd6_freebsd32_aio_write(struct thread *td,
3014     struct freebsd6_freebsd32_aio_write_args *uap)
3015 {
3016 
3017 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
3018 	    &aiocb32_ops_osigevent));
3019 }
3020 #endif
3021 
3022 int
3023 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
3024 {
3025 
3026 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
3027 	    &aiocb32_ops));
3028 }
3029 
3030 int
3031 freebsd32_aio_writev(struct thread *td, struct freebsd32_aio_writev_args *uap)
3032 {
3033 
3034 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITEV,
3035 	    &aiocb32_ops));
3036 }
3037 
3038 int
3039 freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
3040 {
3041 
3042 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
3043 	    &aiocb32_ops));
3044 }
3045 
3046 int
3047 freebsd32_aio_waitcomplete(struct thread *td,
3048     struct freebsd32_aio_waitcomplete_args *uap)
3049 {
3050 	struct timespec32 ts32;
3051 	struct timespec ts, *tsp;
3052 	int error;
3053 
3054 	if (uap->timeout) {
3055 		/* Get timespec struct. */
3056 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
3057 		if (error)
3058 			return (error);
3059 		CP(ts32, ts, tv_sec);
3060 		CP(ts32, ts, tv_nsec);
3061 		tsp = &ts;
3062 	} else
3063 		tsp = NULL;
3064 
3065 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
3066 	    &aiocb32_ops));
3067 }
3068 
3069 int
3070 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
3071 {
3072 
3073 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
3074 	    &aiocb32_ops));
3075 }
3076 
3077 #ifdef COMPAT_FREEBSD6
3078 int
3079 freebsd6_freebsd32_lio_listio(struct thread *td,
3080     struct freebsd6_freebsd32_lio_listio_args *uap)
3081 {
3082 	struct aiocb **acb_list;
3083 	struct sigevent *sigp, sig;
3084 	struct osigevent32 osig;
3085 	uint32_t *acb_list32;
3086 	int error, i, nent;
3087 
3088 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
3089 		return (EINVAL);
3090 
3091 	nent = uap->nent;
3092 	if (nent < 0 || nent > max_aio_queue_per_proc)
3093 		return (EINVAL);
3094 
3095 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
3096 		error = copyin(uap->sig, &osig, sizeof(osig));
3097 		if (error)
3098 			return (error);
3099 		error = convert_old_sigevent32(&osig, &sig);
3100 		if (error)
3101 			return (error);
3102 		sigp = &sig;
3103 	} else
3104 		sigp = NULL;
3105 
3106 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
3107 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
3108 	if (error) {
3109 		free(acb_list32, M_LIO);
3110 		return (error);
3111 	}
3112 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3113 	for (i = 0; i < nent; i++)
3114 		acb_list[i] = PTRIN(acb_list32[i]);
3115 	free(acb_list32, M_LIO);
3116 
3117 	error = kern_lio_listio(td, uap->mode,
3118 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3119 	    &aiocb32_ops_osigevent);
3120 	free(acb_list, M_LIO);
3121 	return (error);
3122 }
3123 #endif
3124 
3125 int
3126 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
3127 {
3128 	struct aiocb **acb_list;
3129 	struct sigevent *sigp, sig;
3130 	struct sigevent32 sig32;
3131 	uint32_t *acb_list32;
3132 	int error, i, nent;
3133 
3134 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
3135 		return (EINVAL);
3136 
3137 	nent = uap->nent;
3138 	if (nent < 0 || nent > max_aio_queue_per_proc)
3139 		return (EINVAL);
3140 
3141 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
3142 		error = copyin(uap->sig, &sig32, sizeof(sig32));
3143 		if (error)
3144 			return (error);
3145 		error = convert_sigevent32(&sig32, &sig);
3146 		if (error)
3147 			return (error);
3148 		sigp = &sig;
3149 	} else
3150 		sigp = NULL;
3151 
3152 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
3153 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
3154 	if (error) {
3155 		free(acb_list32, M_LIO);
3156 		return (error);
3157 	}
3158 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
3159 	for (i = 0; i < nent; i++)
3160 		acb_list[i] = PTRIN(acb_list32[i]);
3161 	free(acb_list32, M_LIO);
3162 
3163 	error = kern_lio_listio(td, uap->mode,
3164 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
3165 	    &aiocb32_ops);
3166 	free(acb_list, M_LIO);
3167 	return (error);
3168 }
3169 
3170 #endif
3171