xref: /freebsd/sys/kern/vfs_aio.c (revision 2c8d04d0228871c24017509cf039e7c5d97d97be)
1 /*-
2  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. John S. Dyson's name may not be used to endorse or promote products
10  *    derived from this software without specific prior written permission.
11  *
12  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
13  * bad that happens because of using this software isn't the responsibility
14  * of the author.  This software is distributed AS-IS.
15  */
16 
17 /*
18  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19  */
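
/*
 * A minimal sketch of typical userland usage of this facility, assuming
 * an already-open descriptor fd (see aio(4) for details):
 *
 *	struct aiocb cb = { 0 };
 *	char buf[512];
 *
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;	(or block in aio_suspend() instead of spinning)
 *	ssize_t n = aio_return(&cb);
 *
 * The kernel-side entry points for those calls are implemented below.
 */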
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23 
24 #include "opt_compat.h"
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/malloc.h>
29 #include <sys/bio.h>
30 #include <sys/buf.h>
31 #include <sys/capsicum.h>
32 #include <sys/eventhandler.h>
33 #include <sys/sysproto.h>
34 #include <sys/filedesc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/kthread.h>
38 #include <sys/fcntl.h>
39 #include <sys/file.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/unistd.h>
44 #include <sys/posix4.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/protosw.h>
49 #include <sys/rwlock.h>
50 #include <sys/sema.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/syscall.h>
54 #include <sys/sysent.h>
55 #include <sys/sysctl.h>
56 #include <sys/syslog.h>
57 #include <sys/sx.h>
58 #include <sys/taskqueue.h>
59 #include <sys/vnode.h>
60 #include <sys/conf.h>
61 #include <sys/event.h>
62 #include <sys/mount.h>
63 #include <geom/geom.h>
64 
65 #include <machine/atomic.h>
66 
67 #include <vm/vm.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_extern.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
73 #include <vm/uma.h>
74 #include <sys/aio.h>
75 
76 /*
77  * Counter for allocating reference ids to new jobs.  Wraps to 1 on
78  * overflow. (XXX will be removed soon.)
79  */
80 static u_long jobrefid;
81 
82 /*
83  * Sequence counter for jobs; used to order aio_fsync against earlier I/O.
84  */
85 static uint64_t jobseqno;
86 
87 #ifndef MAX_AIO_PER_PROC
88 #define MAX_AIO_PER_PROC	32
89 #endif
90 
91 #ifndef MAX_AIO_QUEUE_PER_PROC
92 #define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
93 #endif
94 
95 #ifndef MAX_AIO_QUEUE
96 #define	MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
97 #endif
98 
99 #ifndef MAX_BUF_AIO
100 #define MAX_BUF_AIO		16
101 #endif
102 
103 FEATURE(aio, "Asynchronous I/O");
104 
105 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
106 
107 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,
108     "Async IO management");
109 
110 static int enable_aio_unsafe = 0;
111 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
112     "Permit asynchronous IO on all file types, not just known-safe types");
113 
114 static unsigned int unsafe_warningcnt = 1;
115 SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
116     &unsafe_warningcnt, 0,
117     "Warnings that will be triggered upon failed IO requests on unsafe files");
118 
119 static int max_aio_procs = MAX_AIO_PROCS;
120 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
121     "Maximum number of kernel processes to use for handling async IO ");
122 
123 static int num_aio_procs = 0;
124 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
125     "Number of presently active kernel processes for async IO");
126 
127 /*
128  * The code will adjust the actual number of AIO processes towards this
129  * number when it gets a chance.
130  */
131 static int target_aio_procs = TARGET_AIO_PROCS;
132 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
133     0,
134     "Preferred number of ready kernel processes for async IO");
135 
136 static int max_queue_count = MAX_AIO_QUEUE;
137 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
138     "Maximum number of aio requests to queue, globally");
139 
140 static int num_queue_count = 0;
141 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
142     "Number of queued aio requests");
143 
144 static int num_buf_aio = 0;
145 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
146     "Number of aio requests presently handled by the buf subsystem");
147 
148 /* Number of async I/O processes in the process of being started */
149 /* XXX This should be local to aio_aqueue() */
150 static int num_aio_resv_start = 0;
151 
152 static int aiod_lifetime;
153 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
154     "Maximum lifetime for idle aiod");
155 
156 static int max_aio_per_proc = MAX_AIO_PER_PROC;
157 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
158     0,
159     "Maximum active aio requests per process (stored in the process)");
160 
161 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
162 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
163     &max_aio_queue_per_proc, 0,
164     "Maximum queued aio requests per process (stored in the process)");
165 
166 static int max_buf_aio = MAX_BUF_AIO;
167 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
168     "Maximum buf aio requests per process (stored in the process)");
169 
170 #ifdef COMPAT_FREEBSD6
171 typedef struct oaiocb {
172 	int	aio_fildes;		/* File descriptor */
173 	off_t	aio_offset;		/* File offset for I/O */
174 	volatile void *aio_buf;         /* I/O buffer in process space */
175 	size_t	aio_nbytes;		/* Number of bytes for I/O */
176 	struct	osigevent aio_sigevent;	/* Signal to deliver */
177 	int	aio_lio_opcode;		/* LIO opcode */
178 	int	aio_reqprio;		/* Request priority -- ignored */
179 	struct	__aiocb_private	_aiocb_private;
180 } oaiocb_t;
181 #endif
182 
183 /*
184  * Below is a key of the locks used to protect each member of struct
185  * kaiocb, aioliojob, and kaioinfo, and any backends.
186  *
187  * * - need not be protected
188  * a - locked by kaioinfo lock
189  * b - locked by the backend lock; the backend lock can be NULL in some
190  *     cases (BIO requests are of this type), in which case the proc
191  *     lock is reused.
192  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
193  */
194 
195 /*
196  * If the routine that services an AIO request blocks while running in an
197  * AIO kernel process it can starve other I/O requests.  BIO requests
198  * queued via aio_qphysio() complete in GEOM and do not use AIO kernel
199  * processes at all.  Socket I/O requests use a separate pool of
200  * kprocs and also force non-blocking I/O.  Other file I/O requests
201  * use the generic fo_read/fo_write operations which can block.  The
202  * fsync and mlock operations can also block while executing.  Ideally
203  * none of these requests would block while executing.
204  *
205  * Note that the service routines cannot toggle O_NONBLOCK in the file
206  * structure directly while handling a request due to races with
207  * userland threads.
208  */
209 
210 /* jobflags */
211 #define	KAIOCB_QUEUEING		0x01	/* still being set up in aio_aqueue() */
212 #define	KAIOCB_CANCELLED	0x02	/* cancellation has been requested */
213 #define	KAIOCB_CANCELLING	0x04	/* cancel callback is running */
214 #define	KAIOCB_CHECKSYNC	0x08	/* a later aio_fsync() waits on this job */
215 #define	KAIOCB_CLEARED		0x10	/* removed from queue while cancelling */
216 #define	KAIOCB_FINISHED		0x20	/* completion has been recorded */
217 
218 /*
219  * AIO process info
220  */
221 #define AIOP_FREE	0x1			/* proc on free queue */
222 
223 struct aioproc {
224 	int	aioprocflags;			/* (c) AIO proc flags */
225 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
226 	struct	proc *aioproc;			/* (*) the AIO proc */
227 };
228 
229 /*
230  * data-structure for lio signal management
231  */
232 struct aioliojob {
233 	int	lioj_flags;			/* (a) listio flags */
234 	int	lioj_count;			/* (a) count of jobs in this lio */
235 	int	lioj_finished_count;		/* (a) count of finished jobs */
236 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
237 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
238 	struct	knlist klist;			/* (a) list of knotes */
239 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
240 };
241 
242 #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
243 #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
244 #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
245 
246 /*
247  * per process aio data structure
248  */
249 struct kaioinfo {
250 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
251 	int	kaio_flags;		/* (a) per process kaio flags */
252 	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
253 	int	kaio_active_count;	/* (c) number of currently used AIOs */
254 	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
255 	int	kaio_count;		/* (a) size of AIO queue */
256 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
257 	int	kaio_buffer_count;	/* (a) number of physio buffers */
258 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
259 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
260 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
261 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
262 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
263 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
264 	struct	task kaio_task;		/* (*) task to kick aio processes */
265 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
266 };
267 
268 #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
269 #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
270 #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
271 #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
272 
273 #define KAIO_RUNDOWN	0x1	/* process is being run down */
274 #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
275 
276 /*
277  * Operations used to interact with userland aio control blocks.
278  * Different ABIs provide their own operations.
279  */
280 struct aiocb_ops {
281 	int	(*copyin)(struct aiocb *ujob, struct aiocb *kjob);
282 	long	(*fetch_status)(struct aiocb *ujob);
283 	long	(*fetch_error)(struct aiocb *ujob);
284 	int	(*store_status)(struct aiocb *ujob, long status);
285 	int	(*store_error)(struct aiocb *ujob, long error);
286 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
287 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
288 };
289 
290 static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
291 static struct sema aio_newproc_sem;
292 static struct mtx aio_job_mtx;
293 static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
294 static struct unrhdr *aiod_unr;
295 
296 void		aio_init_aioinfo(struct proc *p);
297 static int	aio_onceonly(void);
298 static int	aio_free_entry(struct kaiocb *job);
299 static void	aio_process_rw(struct kaiocb *job);
300 static void	aio_process_sync(struct kaiocb *job);
301 static void	aio_process_mlock(struct kaiocb *job);
302 static void	aio_schedule_fsync(void *context, int pending);
303 static int	aio_newproc(int *);
304 int		aio_aqueue(struct thread *td, struct aiocb *ujob,
305 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
306 static int	aio_queue_file(struct file *fp, struct kaiocb *job);
307 static void	aio_physwakeup(struct bio *bp);
308 static void	aio_proc_rundown(void *arg, struct proc *p);
309 static void	aio_proc_rundown_exec(void *arg, struct proc *p,
310 		    struct image_params *imgp);
311 static int	aio_qphysio(struct proc *p, struct kaiocb *job);
312 static void	aio_daemon(void *param);
313 static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
314 static int	aio_kick(struct proc *userp);
315 static void	aio_kick_nowait(struct proc *userp);
316 static void	aio_kick_helper(void *context, int pending);
317 static int	filt_aioattach(struct knote *kn);
318 static void	filt_aiodetach(struct knote *kn);
319 static int	filt_aio(struct knote *kn, long hint);
320 static int	filt_lioattach(struct knote *kn);
321 static void	filt_liodetach(struct knote *kn);
322 static int	filt_lio(struct knote *kn, long hint);
323 
324 /*
325  * Zones for:
326  * 	kaio	Per process async io info
327  *	aiop	async io process data
328  *	aiocb	async io jobs
329  *	aiol	list io job pointer - internal to aio_suspend XXX
330  *	aiolio	list io jobs
331  */
332 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
333 
334 /* kqueue filters for aio */
335 static struct filterops aio_filtops = {
336 	.f_isfd = 0,
337 	.f_attach = filt_aioattach,
338 	.f_detach = filt_aiodetach,
339 	.f_event = filt_aio,
340 };
341 static struct filterops lio_filtops = {
342 	.f_isfd = 0,
343 	.f_attach = filt_lioattach,
344 	.f_detach = filt_liodetach,
345 	.f_event = filt_lio
346 };
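
/*
 * These filters back the SIGEV_KEVENT completion path.  A sketch of how
 * a userland program selects it, assuming an existing kqueue descriptor
 * kq and a struct aiocb cb:
 *
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_write(&cb);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(ev.udata == &cb)
 *
 * The matching kevent registration is performed in aio_aqueue().
 */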
347 
348 static eventhandler_tag exit_tag, exec_tag;
349 
350 TASKQUEUE_DEFINE_THREAD(aiod_kick);
351 
352 /*
353  * Main operations function for use as a kernel module.
354  */
355 static int
356 aio_modload(struct module *module, int cmd, void *arg)
357 {
358 	int error = 0;
359 
360 	switch (cmd) {
361 	case MOD_LOAD:
362 		aio_onceonly();
363 		break;
364 	case MOD_SHUTDOWN:
365 		break;
366 	default:
367 		error = EOPNOTSUPP;
368 		break;
369 	}
370 	return (error);
371 }
372 
373 static moduledata_t aio_mod = {
374 	"aio",
375 	&aio_modload,
376 	NULL
377 };
378 
379 DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
380 MODULE_VERSION(aio, 1);
381 
382 /*
383  * Startup initialization
384  */
385 static int
386 aio_onceonly(void)
387 {
388 
389 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
390 	    EVENTHANDLER_PRI_ANY);
391 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
392 	    NULL, EVENTHANDLER_PRI_ANY);
393 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
394 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
395 	TAILQ_INIT(&aio_freeproc);
396 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
397 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
398 	TAILQ_INIT(&aio_jobs);
399 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
400 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
401 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
402 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
403 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
404 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
405 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
406 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
407 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
408 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
409 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
410 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
411 	jobrefid = 1;
412 	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
413 	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
414 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
415 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
416 
417 	return (0);
418 }
419 
420 /*
421  * Init the per-process aioinfo structure.  The aioinfo limits are set
422  * per-process for user limit (resource) management.
423  */
424 void
425 aio_init_aioinfo(struct proc *p)
426 {
427 	struct kaioinfo *ki;
428 
429 	ki = uma_zalloc(kaio_zone, M_WAITOK);
430 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
431 	ki->kaio_flags = 0;
432 	ki->kaio_maxactive_count = max_aio_per_proc;
433 	ki->kaio_active_count = 0;
434 	ki->kaio_qallowed_count = max_aio_queue_per_proc;
435 	ki->kaio_count = 0;
436 	ki->kaio_ballowed_count = max_buf_aio;
437 	ki->kaio_buffer_count = 0;
438 	TAILQ_INIT(&ki->kaio_all);
439 	TAILQ_INIT(&ki->kaio_done);
440 	TAILQ_INIT(&ki->kaio_jobqueue);
441 	TAILQ_INIT(&ki->kaio_liojoblist);
442 	TAILQ_INIT(&ki->kaio_syncqueue);
443 	TAILQ_INIT(&ki->kaio_syncready);
444 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
445 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
446 	PROC_LOCK(p);
447 	if (p->p_aioinfo == NULL) {
448 		p->p_aioinfo = ki;
449 		PROC_UNLOCK(p);
450 	} else {
451 		PROC_UNLOCK(p);
452 		mtx_destroy(&ki->kaio_mtx);
453 		uma_zfree(kaio_zone, ki);
454 	}
455 
456 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
457 		aio_newproc(NULL);
458 }
459 
460 static int
461 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
462 {
463 	struct thread *td;
464 	int error;
465 
466 	error = sigev_findtd(p, sigev, &td);
467 	if (error)
468 		return (error);
469 	if (!KSI_ONQ(ksi)) {
470 		ksiginfo_set_sigev(ksi, sigev);
471 		ksi->ksi_code = SI_ASYNCIO;
472 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
473 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
474 	}
475 	PROC_UNLOCK(p);
476 	return (error);
477 }
478 
479 /*
480  * Free a job entry.  The job must already be finished.  The kaioinfo lock
481  * is dropped and reacquired while the entry is freed, so callers iterating
482  * a queue must restart their scan after calling this function.
483  */
484 static int
485 aio_free_entry(struct kaiocb *job)
486 {
487 	struct kaioinfo *ki;
488 	struct aioliojob *lj;
489 	struct proc *p;
490 
491 	p = job->userproc;
492 	MPASS(curproc == p);
493 	ki = p->p_aioinfo;
494 	MPASS(ki != NULL);
495 
496 	AIO_LOCK_ASSERT(ki, MA_OWNED);
497 	MPASS(job->jobflags & KAIOCB_FINISHED);
498 
499 	atomic_subtract_int(&num_queue_count, 1);
500 
501 	ki->kaio_count--;
502 	MPASS(ki->kaio_count >= 0);
503 
504 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
505 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
506 
507 	lj = job->lio;
508 	if (lj) {
509 		lj->lioj_count--;
510 		lj->lioj_finished_count--;
511 
512 		if (lj->lioj_count == 0) {
513 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
514 			/* lio is going away, we need to destroy any knotes */
515 			knlist_delete(&lj->klist, curthread, 1);
516 			PROC_LOCK(p);
517 			sigqueue_take(&lj->lioj_ksi);
518 			PROC_UNLOCK(p);
519 			uma_zfree(aiolio_zone, lj);
520 		}
521 	}
522 
523 	/* job is going away, we need to destroy any knotes */
524 	knlist_delete(&job->klist, curthread, 1);
525 	PROC_LOCK(p);
526 	sigqueue_take(&job->ksi);
527 	PROC_UNLOCK(p);
528 
529 	AIO_UNLOCK(ki);
530 
531 	/*
532 	 * The thread argument here is used to find the owning process
533 	 * and is also passed to fo_close() which may pass it to various
534 	 * places such as devsw close() routines.  Because of that, we
535 	 * need a thread pointer from the process owning the job that is
536 	 * persistent and won't disappear out from under us or move to
537 	 * another process.
538 	 *
539 	 * Currently, all the callers of this function call it to remove
540 	 * a kaiocb from the current process' job list either via a
541 	 * syscall or due to the current process calling exit() or
542 	 * execve().  Thus, we know that p == curproc.  We also know that
543 	 * curthread can't exit since we are curthread.
544 	 *
545 	 * Therefore, we use curthread as the thread to pass to
546 	 * knlist_delete().  This does mean that it is possible for the
547 	 * thread pointer at close time to differ from the thread pointer
548 	 * at open time, but this is already true of file descriptors in
549 	 * a multithreaded process.
550 	 */
551 	if (job->fd_file)
552 		fdrop(job->fd_file, curthread);
553 	crfree(job->cred);
554 	uma_zfree(aiocb_zone, job);
555 	AIO_LOCK(ki);
556 
557 	return (0);
558 }
559 
560 static void
561 aio_proc_rundown_exec(void *arg, struct proc *p,
562     struct image_params *imgp __unused)
563 {
564 	aio_proc_rundown(arg, p);
565 }
566 
567 static int
568 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
569 {
570 	aio_cancel_fn_t *func;
571 	int cancelled;
572 
573 	AIO_LOCK_ASSERT(ki, MA_OWNED);
574 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
575 		return (0);
576 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
577 	job->jobflags |= KAIOCB_CANCELLED;
578 
579 	func = job->cancel_fn;
580 
581 	/*
582 	 * If there is no cancel routine, just leave the job marked as
583 	 * cancelled.  The job should be in active use by a caller, who
584 	 * will either complete it normally or observe the cancellation
585 	 * when it fails to install a cancel routine.
586 	 */
587 	if (func == NULL)
588 		return (0);
589 
590 	/*
591 	 * Set the CANCELLING flag so that aio_complete() will defer
592 	 * completions of this job.  This prevents the job from being
593 	 * freed out from under the cancel callback.  After the
594 	 * callback any deferred completion (whether from the callback
595 	 * or any other source) will be completed.
596 	 */
597 	job->jobflags |= KAIOCB_CANCELLING;
598 	AIO_UNLOCK(ki);
599 	func(job);
600 	AIO_LOCK(ki);
601 	job->jobflags &= ~KAIOCB_CANCELLING;
602 	if (job->jobflags & KAIOCB_FINISHED) {
603 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
604 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
605 		aio_bio_done_notify(p, job);
606 	} else {
607 		/*
608 		 * The cancel callback might have scheduled an
609 		 * operation to cancel this request, but it is
610 		 * only counted as cancelled if the request is
611 		 * cancelled when the callback returns.
612 		 */
613 		cancelled = 0;
614 	}
615 	return (cancelled);
616 }
617 
618 /*
619  * Rundown the jobs for a given process.
620  */
621 static void
622 aio_proc_rundown(void *arg, struct proc *p)
623 {
624 	struct kaioinfo *ki;
625 	struct aioliojob *lj;
626 	struct kaiocb *job, *jobn;
627 
628 	KASSERT(curthread->td_proc == p,
629 	    ("%s: called on non-curproc", __func__));
630 	ki = p->p_aioinfo;
631 	if (ki == NULL)
632 		return;
633 
634 	AIO_LOCK(ki);
635 	ki->kaio_flags |= KAIO_RUNDOWN;
636 
637 restart:
638 
639 	/*
640 	 * Try to cancel all pending requests. This code simulates
641 	 * aio_cancel on all pending I/O requests.
642 	 */
643 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
644 		aio_cancel_job(p, ki, job);
645 	}
646 
647 	/* Wait for all running I/O to be finished */
648 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
649 		ki->kaio_flags |= KAIO_WAKEUP;
650 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
651 		goto restart;
652 	}
653 
654 	/* Free all completed I/O requests. */
655 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
656 		aio_free_entry(job);
657 
658 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
659 		if (lj->lioj_count == 0) {
660 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
661 			knlist_delete(&lj->klist, curthread, 1);
662 			PROC_LOCK(p);
663 			sigqueue_take(&lj->lioj_ksi);
664 			PROC_UNLOCK(p);
665 			uma_zfree(aiolio_zone, lj);
666 		} else {
667 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
668 			    lj->lioj_count, lj->lioj_finished_count);
669 		}
670 	}
671 	AIO_UNLOCK(ki);
672 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
673 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
674 	mtx_destroy(&ki->kaio_mtx);
675 	uma_zfree(kaio_zone, ki);
676 	p->p_aioinfo = NULL;
677 }
678 
679 /*
680  * Select a job to run (called by an AIO daemon).
681  */
682 static struct kaiocb *
683 aio_selectjob(struct aioproc *aiop)
684 {
685 	struct kaiocb *job;
686 	struct kaioinfo *ki;
687 	struct proc *userp;
688 
689 	mtx_assert(&aio_job_mtx, MA_OWNED);
690 restart:
691 	TAILQ_FOREACH(job, &aio_jobs, list) {
692 		userp = job->userproc;
693 		ki = userp->p_aioinfo;
694 
695 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
696 			TAILQ_REMOVE(&aio_jobs, job, list);
697 			if (!aio_clear_cancel_function(job))
698 				goto restart;
699 
700 			/* Account for currently active jobs. */
701 			ki->kaio_active_count++;
702 			break;
703 		}
704 	}
705 	return (job);
706 }
707 
708 /*
709  * Move all data to a permanent storage device.  This code
710  * simulates the fsync syscall.
711  */
712 static int
713 aio_fsync_vnode(struct thread *td, struct vnode *vp)
714 {
715 	struct mount *mp;
716 	int error;
717 
718 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
719 		goto drop;
720 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
721 	if (vp->v_object != NULL) {
722 		VM_OBJECT_WLOCK(vp->v_object);
723 		vm_object_page_clean(vp->v_object, 0, 0, 0);
724 		VM_OBJECT_WUNLOCK(vp->v_object);
725 	}
726 	error = VOP_FSYNC(vp, MNT_WAIT, td);
727 
728 	VOP_UNLOCK(vp, 0);
729 	vn_finished_write(mp);
730 drop:
731 	return (error);
732 }
733 
734 /*
735  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
736  * does the I/O request for the non-physio version of the operations.  The
737  * normal file operations are used, and this code should work in all instances
738  * for every type of file, including pipes, sockets, fifos, and regular files.
739  *
740  * XXX I don't think this works well for sockets, pipes, and fifos.
741  */
742 static void
743 aio_process_rw(struct kaiocb *job)
744 {
745 	struct ucred *td_savedcred;
746 	struct thread *td;
747 	struct aiocb *cb;
748 	struct file *fp;
749 	struct uio auio;
750 	struct iovec aiov;
751 	ssize_t cnt;
752 	long msgsnd_st, msgsnd_end;
753 	long msgrcv_st, msgrcv_end;
754 	long oublock_st, oublock_end;
755 	long inblock_st, inblock_end;
756 	int error;
757 
758 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
759 	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
760 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
761 
762 	aio_switch_vmspace(job);
763 	td = curthread;
764 	td_savedcred = td->td_ucred;
765 	td->td_ucred = job->cred;
766 	cb = &job->uaiocb;
767 	fp = job->fd_file;
768 
769 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
770 	aiov.iov_len = cb->aio_nbytes;
771 
772 	auio.uio_iov = &aiov;
773 	auio.uio_iovcnt = 1;
774 	auio.uio_offset = cb->aio_offset;
775 	auio.uio_resid = cb->aio_nbytes;
776 	cnt = cb->aio_nbytes;
777 	auio.uio_segflg = UIO_USERSPACE;
778 	auio.uio_td = td;
779 
780 	msgrcv_st = td->td_ru.ru_msgrcv;
781 	msgsnd_st = td->td_ru.ru_msgsnd;
782 	inblock_st = td->td_ru.ru_inblock;
783 	oublock_st = td->td_ru.ru_oublock;
784 
785 	/*
786 	 * aio_aqueue() acquires a reference to the file that is
787 	 * released in aio_free_entry().
788 	 */
789 	if (cb->aio_lio_opcode == LIO_READ) {
790 		auio.uio_rw = UIO_READ;
791 		if (auio.uio_resid == 0)
792 			error = 0;
793 		else
794 			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
795 	} else {
796 		if (fp->f_type == DTYPE_VNODE)
797 			bwillwrite();
798 		auio.uio_rw = UIO_WRITE;
799 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
800 	}
801 	msgrcv_end = td->td_ru.ru_msgrcv;
802 	msgsnd_end = td->td_ru.ru_msgsnd;
803 	inblock_end = td->td_ru.ru_inblock;
804 	oublock_end = td->td_ru.ru_oublock;
805 
806 	job->msgrcv = msgrcv_end - msgrcv_st;
807 	job->msgsnd = msgsnd_end - msgsnd_st;
808 	job->inblock = inblock_end - inblock_st;
809 	job->outblock = oublock_end - oublock_st;
810 
811 	if ((error) && (auio.uio_resid != cnt)) {
812 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
813 			error = 0;
814 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
815 			PROC_LOCK(job->userproc);
816 			kern_psignal(job->userproc, SIGPIPE);
817 			PROC_UNLOCK(job->userproc);
818 		}
819 	}
820 
821 	cnt -= auio.uio_resid;
822 	td->td_ucred = td_savedcred;
823 	if (error)
824 		aio_complete(job, -1, error);
825 	else
826 		aio_complete(job, cnt, 0);
827 }
828 
829 static void
830 aio_process_sync(struct kaiocb *job)
831 {
832 	struct thread *td = curthread;
833 	struct ucred *td_savedcred = td->td_ucred;
834 	struct file *fp = job->fd_file;
835 	int error = 0;
836 
837 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
838 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
839 
840 	td->td_ucred = job->cred;
841 	if (fp->f_vnode != NULL)
842 		error = aio_fsync_vnode(td, fp->f_vnode);
843 	td->td_ucred = td_savedcred;
844 	if (error)
845 		aio_complete(job, -1, error);
846 	else
847 		aio_complete(job, 0, 0);
848 }
849 
850 static void
851 aio_process_mlock(struct kaiocb *job)
852 {
853 	struct aiocb *cb = &job->uaiocb;
854 	int error;
855 
856 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
857 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
858 
859 	aio_switch_vmspace(job);
860 	error = vm_mlock(job->userproc, job->cred,
861 	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
862 	if (error)
863 		aio_complete(job, -1, error);
864 	else
865 		aio_complete(job, 0, 0);
866 }
867 
868 static void
869 aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
870 {
871 	struct aioliojob *lj;
872 	struct kaioinfo *ki;
873 	struct kaiocb *sjob, *sjobn;
874 	int lj_done;
875 	bool schedule_fsync;
876 
877 	ki = userp->p_aioinfo;
878 	AIO_LOCK_ASSERT(ki, MA_OWNED);
879 	lj = job->lio;
880 	lj_done = 0;
881 	if (lj) {
882 		lj->lioj_finished_count++;
883 		if (lj->lioj_count == lj->lioj_finished_count)
884 			lj_done = 1;
885 	}
886 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
887 	MPASS(job->jobflags & KAIOCB_FINISHED);
888 
889 	if (ki->kaio_flags & KAIO_RUNDOWN)
890 		goto notification_done;
891 
892 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
893 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
894 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
895 
896 	KNOTE_LOCKED(&job->klist, 1);
897 
898 	if (lj_done) {
899 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
900 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
901 			KNOTE_LOCKED(&lj->klist, 1);
902 		}
903 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
904 		    == LIOJ_SIGNAL
905 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
906 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
907 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
908 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
909 		}
910 	}
911 
912 notification_done:
913 	if (job->jobflags & KAIOCB_CHECKSYNC) {
914 		schedule_fsync = false;
915 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
916 			if (job->fd_file == sjob->fd_file &&
917 			    job->seqno < sjob->seqno) {
918 				if (--sjob->pending == 0) {
919 					TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
920 					    list);
921 					if (!aio_clear_cancel_function(sjob))
922 						continue;
923 					TAILQ_INSERT_TAIL(&ki->kaio_syncready,
924 					    sjob, list);
925 					schedule_fsync = true;
926 				}
927 			}
928 		}
929 		if (schedule_fsync)
930 			taskqueue_enqueue(taskqueue_aiod_kick,
931 			    &ki->kaio_sync_task);
932 	}
933 	if (ki->kaio_flags & KAIO_WAKEUP) {
934 		ki->kaio_flags &= ~KAIO_WAKEUP;
935 		wakeup(&userp->p_aioinfo);
936 	}
937 }
938 
939 static void
940 aio_schedule_fsync(void *context, int pending)
941 {
942 	struct kaioinfo *ki;
943 	struct kaiocb *job;
944 
945 	ki = context;
946 	AIO_LOCK(ki);
947 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
948 		job = TAILQ_FIRST(&ki->kaio_syncready);
949 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
950 		AIO_UNLOCK(ki);
951 		aio_schedule(job, aio_process_sync);
952 		AIO_LOCK(ki);
953 	}
954 	AIO_UNLOCK(ki);
955 }
956 
957 bool
958 aio_cancel_cleared(struct kaiocb *job)
959 {
962 	/*
963 	 * The caller should hold the same queue lock that was held when
964 	 * aio_clear_cancel_function() set this flag, ensuring that this
965 	 * check sees an up-to-date value.  However, there is no way to
966 	 * assert that.
967 	 */
969 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
970 }
971 
972 bool
973 aio_clear_cancel_function(struct kaiocb *job)
974 {
975 	struct kaioinfo *ki;
976 
977 	ki = job->userproc->p_aioinfo;
978 	AIO_LOCK(ki);
979 	MPASS(job->cancel_fn != NULL);
980 	if (job->jobflags & KAIOCB_CANCELLING) {
981 		job->jobflags |= KAIOCB_CLEARED;
982 		AIO_UNLOCK(ki);
983 		return (false);
984 	}
985 	job->cancel_fn = NULL;
986 	AIO_UNLOCK(ki);
987 	return (true);
988 }
989 
990 bool
991 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
992 {
993 	struct kaioinfo *ki;
994 
995 	ki = job->userproc->p_aioinfo;
996 	AIO_LOCK(ki);
997 	if (job->jobflags & KAIOCB_CANCELLED) {
998 		AIO_UNLOCK(ki);
999 		return (false);
1000 	}
1001 	job->cancel_fn = func;
1002 	AIO_UNLOCK(ki);
1003 	return (true);
1004 }
1005 
1006 void
1007 aio_complete(struct kaiocb *job, long status, int error)
1008 {
1009 	struct kaioinfo *ki;
1010 	struct proc *userp;
1011 
1012 	job->uaiocb._aiocb_private.error = error;
1013 	job->uaiocb._aiocb_private.status = status;
1014 
1015 	userp = job->userproc;
1016 	ki = userp->p_aioinfo;
1017 
1018 	AIO_LOCK(ki);
1019 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1020 	    ("duplicate aio_complete"));
1021 	job->jobflags |= KAIOCB_FINISHED;
1022 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1023 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1024 		aio_bio_done_notify(userp, job);
1025 	}
1026 	AIO_UNLOCK(ki);
1027 }
1028 
1029 void
1030 aio_cancel(struct kaiocb *job)
1031 {
1032 
1033 	aio_complete(job, -1, ECANCELED);
1034 }
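
/*
 * A backend supplying its own fo_aio_queue() would typically use the
 * functions above roughly as aio_schedule() does for the generic path.
 * A sketch, where my_cancel_fn is a hypothetical backend routine:
 *
 *	if (!aio_set_cancel_function(job, my_cancel_fn)) {
 *		(cancelled before a cancel routine could be installed)
 *		aio_cancel(job);
 *		return (0);
 *	}
 *	(enqueue job on the backend's own queue; later, when the
 *	 I/O finishes:)
 *	aio_complete(job, nbytes_done, 0);
 *
 * my_cancel_fn() would dequeue the job, checking aio_cancel_cleared()
 * to see whether the queue entry was already removed, and then call
 * aio_cancel() on it.
 */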
1035 
1036 void
1037 aio_switch_vmspace(struct kaiocb *job)
1038 {
1039 
1040 	vmspace_switch_aio(job->userproc->p_vmspace);
1041 }
1042 
1043 /*
1044  * The AIO daemon.  Most of the actual work is done in aio_process_*(),
1045  * but the setup (and address space management) is done in this routine.
1046  */
1047 static void
1048 aio_daemon(void *_id)
1049 {
1050 	struct kaiocb *job;
1051 	struct aioproc *aiop;
1052 	struct kaioinfo *ki;
1053 	struct proc *p;
1054 	struct vmspace *myvm;
1055 	struct thread *td = curthread;
1056 	int id = (intptr_t)_id;
1057 
1058 	/*
1059 	 * Grab an extra reference on the daemon's vmspace so that it
1060 	 * doesn't get freed by jobs that switch to a different
1061 	 * vmspace.
1062 	 */
1063 	p = td->td_proc;
1064 	myvm = vmspace_acquire_ref(p);
1065 
1066 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1067 
1068 	/*
1069 	 * Allocate and ready the aio control info.  There is one aiop structure
1070 	 * per daemon.
1071 	 */
1072 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
1073 	aiop->aioproc = p;
1074 	aiop->aioprocflags = 0;
1075 
1076 	/*
1077 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
1078 	 * and creating too many daemons.)
1079 	 */
1080 	sema_post(&aio_newproc_sem);
1081 
1082 	mtx_lock(&aio_job_mtx);
1083 	for (;;) {
1084 		/*
1085 		 * Take daemon off of free queue
1086 		 */
1087 		if (aiop->aioprocflags & AIOP_FREE) {
1088 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
1089 			aiop->aioprocflags &= ~AIOP_FREE;
1090 		}
1091 
1092 		/*
1093 		 * Check for jobs.
1094 		 */
1095 		while ((job = aio_selectjob(aiop)) != NULL) {
1096 			mtx_unlock(&aio_job_mtx);
1097 
1098 			ki = job->userproc->p_aioinfo;
1099 			job->handle_fn(job);
1100 
1101 			mtx_lock(&aio_job_mtx);
1102 			/* Decrement the active job count. */
1103 			ki->kaio_active_count--;
1104 		}
1105 
1106 		/*
1107 		 * Disconnect from user address space.
1108 		 */
1109 		if (p->p_vmspace != myvm) {
1110 			mtx_unlock(&aio_job_mtx);
1111 			vmspace_switch_aio(myvm);
1112 			mtx_lock(&aio_job_mtx);
1113 			/*
1114 			 * We have to restart to avoid a race; we only sleep
1115 			 * if no job can be selected.
1116 			 */
1117 			continue;
1118 		}
1119 
1120 		mtx_assert(&aio_job_mtx, MA_OWNED);
1121 
1122 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1123 		aiop->aioprocflags |= AIOP_FREE;
1124 
1125 		/*
1126 		 * If daemon is inactive for a long time, allow it to exit,
1127 		 * thereby freeing resources.
1128 		 */
1129 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
1130 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
1131 		    (aiop->aioprocflags & AIOP_FREE) &&
1132 		    num_aio_procs > target_aio_procs)
1133 			break;
1134 	}
1135 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
1136 	num_aio_procs--;
1137 	mtx_unlock(&aio_job_mtx);
1138 	uma_zfree(aiop_zone, aiop);
1139 	free_unr(aiod_unr, id);
1140 	vmspace_free(myvm);
1141 
1142 	KASSERT(p->p_vmspace == myvm,
1143 	    ("AIOD: bad vmspace for exiting daemon"));
1144 	KASSERT(myvm->vm_refcnt > 1,
1145 	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
1146 	kproc_exit(0);
1147 }
1148 
1149 /*
1150  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1151  * AIO daemon modifies its environment itself.
1152  */
1153 static int
1154 aio_newproc(int *start)
1155 {
1156 	int error;
1157 	struct proc *p;
1158 	int id;
1159 
1160 	id = alloc_unr(aiod_unr);
1161 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
1162 		RFNOWAIT, 0, "aiod%d", id);
1163 	if (error == 0) {
1164 		/*
1165 		 * Wait until daemon is started.
1166 		 */
1167 		sema_wait(&aio_newproc_sem);
1168 		mtx_lock(&aio_job_mtx);
1169 		num_aio_procs++;
1170 		if (start != NULL)
1171 			(*start)--;
1172 		mtx_unlock(&aio_job_mtx);
1173 	} else {
1174 		free_unr(aiod_unr, id);
1175 	}
1176 	return (error);
1177 }
1178 
1179 /*
1180  * Try the high-performance, low-overhead physio method for eligible
1181  * VCHR devices.  This method doesn't use an aio helper thread, and
1182  * thus has very low overhead.
1183  *
1184  * Assumes that the caller, aio_aqueue(), has incremented the file
1185  * structure's reference count, preventing its deallocation for the
1186  * duration of this call.
1187  */
1188 static int
1189 aio_qphysio(struct proc *p, struct kaiocb *job)
1190 {
1191 	struct aiocb *cb;
1192 	struct file *fp;
1193 	struct bio *bp;
1194 	struct buf *pbuf;
1195 	struct vnode *vp;
1196 	struct cdevsw *csw;
1197 	struct cdev *dev;
1198 	struct kaioinfo *ki;
1199 	int error, ref, poff;
1200 	vm_prot_t prot;
1201 
1202 	cb = &job->uaiocb;
1203 	fp = job->fd_file;
1204 
1205 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1206 		return (-1);
1207 
1208 	vp = fp->f_vnode;
1209 	if (vp->v_type != VCHR)
1210 		return (-1);
1211 	if (vp->v_bufobj.bo_bsize == 0)
1212 		return (-1);
1213 	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1214 		return (-1);
1215 
1216 	ref = 0;
1217 	csw = devvn_refthread(vp, &dev, &ref);
1218 	if (csw == NULL)
1219 		return (ENXIO);
1220 
1221 	if ((csw->d_flags & D_DISK) == 0) {
1222 		error = -1;
1223 		goto unref;
1224 	}
1225 	if (cb->aio_nbytes > dev->si_iosize_max) {
1226 		error = -1;
1227 		goto unref;
1228 	}
1229 
1230 	ki = p->p_aioinfo;
1231 	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
1232 	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
1233 		if (cb->aio_nbytes > MAXPHYS) {
1234 			error = -1;
1235 			goto unref;
1236 		}
1237 
1238 		pbuf = NULL;
1239 	} else {
1240 		if (cb->aio_nbytes > MAXPHYS - poff) {
1241 			error = -1;
1242 			goto unref;
1243 		}
1244 		if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
1245 			error = -1;
1246 			goto unref;
1247 		}
1248 
1249 		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
1250 		BUF_KERNPROC(pbuf);
1251 		AIO_LOCK(ki);
1252 		ki->kaio_buffer_count++;
1253 		AIO_UNLOCK(ki);
1254 	}
1255 	job->bp = bp = g_alloc_bio();
1256 
1257 	bp->bio_length = cb->aio_nbytes;
1258 	bp->bio_bcount = cb->aio_nbytes;
1259 	bp->bio_done = aio_physwakeup;
1260 	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
1261 	bp->bio_offset = cb->aio_offset;
1262 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1263 	bp->bio_dev = dev;
1264 	bp->bio_caller1 = (void *)job;
1265 
1266 	prot = VM_PROT_READ;
1267 	if (cb->aio_lio_opcode == LIO_READ)
1268 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
1269 	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1270 	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
1271 	    nitems(job->pages));
1272 	if (job->npages < 0) {
1273 		error = EFAULT;
1274 		goto doerror;
1275 	}
1276 	if (pbuf != NULL) {
1277 		pmap_qenter((vm_offset_t)pbuf->b_data,
1278 		    job->pages, job->npages);
1279 		bp->bio_data = pbuf->b_data + poff;
1280 		atomic_add_int(&num_buf_aio, 1);
1281 	} else {
1282 		bp->bio_ma = job->pages;
1283 		bp->bio_ma_n = job->npages;
1284 		bp->bio_ma_offset = poff;
1285 		bp->bio_data = unmapped_buf;
1286 		bp->bio_flags |= BIO_UNMAPPED;
1287 	}
1288 
1289 	/* Perform transfer. */
1290 	csw->d_strategy(bp);
1291 	dev_relthread(dev, ref);
1292 	return (0);
1293 
1294 doerror:
1295 	if (pbuf != NULL) {
1296 		AIO_LOCK(ki);
1297 		ki->kaio_buffer_count--;
1298 		AIO_UNLOCK(ki);
1299 		relpbuf(pbuf, NULL);
1300 		job->pbuf = NULL;
1301 	}
1302 	g_destroy_bio(bp);
1303 	job->bp = NULL;
1304 unref:
1305 	dev_relthread(dev, ref);
1306 	return (error);
1307 }
1308 
1309 #ifdef COMPAT_FREEBSD6
1310 static int
1311 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
1312 {
1313 
1314 	/*
1315 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
1316 	 * supported by AIO with the old sigevent structure.
1317 	 */
1318 	nsig->sigev_notify = osig->sigev_notify;
1319 	switch (nsig->sigev_notify) {
1320 	case SIGEV_NONE:
1321 		break;
1322 	case SIGEV_SIGNAL:
1323 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
1324 		break;
1325 	case SIGEV_KEVENT:
1326 		nsig->sigev_notify_kqueue =
1327 		    osig->__sigev_u.__sigev_notify_kqueue;
1328 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
1329 		break;
1330 	default:
1331 		return (EINVAL);
1332 	}
1333 	return (0);
1334 }
1335 
1336 static int
1337 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
1338 {
1339 	struct oaiocb *ojob;
1340 	int error;
1341 
1342 	bzero(kjob, sizeof(struct aiocb));
1343 	error = copyin(ujob, kjob, sizeof(struct oaiocb));
1344 	if (error)
1345 		return (error);
1346 	ojob = (struct oaiocb *)kjob;
1347 	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
1348 }
1349 #endif
1350 
1351 static int
1352 aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
1353 {
1354 
1355 	return (copyin(ujob, kjob, sizeof(struct aiocb)));
1356 }
1357 
1358 static long
1359 aiocb_fetch_status(struct aiocb *ujob)
1360 {
1361 
1362 	return (fuword(&ujob->_aiocb_private.status));
1363 }
1364 
1365 static long
1366 aiocb_fetch_error(struct aiocb *ujob)
1367 {
1368 
1369 	return (fuword(&ujob->_aiocb_private.error));
1370 }
1371 
1372 static int
1373 aiocb_store_status(struct aiocb *ujob, long status)
1374 {
1375 
1376 	return (suword(&ujob->_aiocb_private.status, status));
1377 }
1378 
1379 static int
1380 aiocb_store_error(struct aiocb *ujob, long error)
1381 {
1382 
1383 	return (suword(&ujob->_aiocb_private.error, error));
1384 }
1385 
1386 static int
1387 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
1388 {
1389 
1390 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
1391 }
1392 
1393 static int
1394 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
1395 {
1396 
1397 	return (suword(ujobp, (long)ujob));
1398 }
1399 
1400 static struct aiocb_ops aiocb_ops = {
1401 	.copyin = aiocb_copyin,
1402 	.fetch_status = aiocb_fetch_status,
1403 	.fetch_error = aiocb_fetch_error,
1404 	.store_status = aiocb_store_status,
1405 	.store_error = aiocb_store_error,
1406 	.store_kernelinfo = aiocb_store_kernelinfo,
1407 	.store_aiocb = aiocb_store_aiocb,
1408 };
1409 
1410 #ifdef COMPAT_FREEBSD6
1411 static struct aiocb_ops aiocb_ops_osigevent = {
1412 	.copyin = aiocb_copyin_old_sigevent,
1413 	.fetch_status = aiocb_fetch_status,
1414 	.fetch_error = aiocb_fetch_error,
1415 	.store_status = aiocb_store_status,
1416 	.store_error = aiocb_store_error,
1417 	.store_kernelinfo = aiocb_store_kernelinfo,
1418 	.store_aiocb = aiocb_store_aiocb,
1419 };
1420 #endif
1421 
1422 /*
1423  * Queue a new AIO request.  The choice between the threaded and the direct
1424  * physio VCHR technique is made here.
1425  */
1426 int
1427 aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
1428 	int type, struct aiocb_ops *ops)
1429 {
1430 	struct proc *p = td->td_proc;
1431 	cap_rights_t rights;
1432 	struct file *fp;
1433 	struct kaiocb *job;
1434 	struct kaioinfo *ki;
1435 	struct kevent kev;
1436 	int opcode;
1437 	int error;
1438 	int fd, kqfd;
1439 	int jid;
1440 	u_short evflags;
1441 
1442 	if (p->p_aioinfo == NULL)
1443 		aio_init_aioinfo(p);
1444 
1445 	ki = p->p_aioinfo;
1446 
1447 	ops->store_status(ujob, -1);
1448 	ops->store_error(ujob, 0);
1449 	ops->store_kernelinfo(ujob, -1);
1450 
1451 	if (num_queue_count >= max_queue_count ||
1452 	    ki->kaio_count >= ki->kaio_qallowed_count) {
1453 		ops->store_error(ujob, EAGAIN);
1454 		return (EAGAIN);
1455 	}
1456 
1457 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1458 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1459 
1460 	error = ops->copyin(ujob, &job->uaiocb);
1461 	if (error) {
1462 		ops->store_error(ujob, error);
1463 		uma_zfree(aiocb_zone, job);
1464 		return (error);
1465 	}
1466 
1467 	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
1468 		uma_zfree(aiocb_zone, job);
1469 		return (EINVAL);
1470 	}
1471 
1472 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1473 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1474 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1475 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1476 		ops->store_error(ujob, EINVAL);
1477 		uma_zfree(aiocb_zone, job);
1478 		return (EINVAL);
1479 	}
1480 
1481 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1482 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1483 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
1484 		uma_zfree(aiocb_zone, job);
1485 		return (EINVAL);
1486 	}
1487 
1488 	ksiginfo_init(&job->ksi);
1489 
1490 	/* Save userspace address of the job info. */
1491 	job->ujob = ujob;
1492 
1493 	/* Get the opcode. */
1494 	if (type != LIO_NOP)
1495 		job->uaiocb.aio_lio_opcode = type;
1496 	opcode = job->uaiocb.aio_lio_opcode;
1497 
1498 	/*
1499 	 * Validate the opcode and fetch the file object for the specified
1500 	 * file descriptor.
1501 	 *
1502 	 * XXXRW: Moved the opcode validation up here so that we don't
1503 	 * retrieve a file descriptor without knowing what the capability
1504 	 * should be.
1505 	 */
1506 	fd = job->uaiocb.aio_fildes;
1507 	switch (opcode) {
1508 	case LIO_WRITE:
1509 		error = fget_write(td, fd,
1510 		    cap_rights_init(&rights, CAP_PWRITE), &fp);
1511 		break;
1512 	case LIO_READ:
1513 		error = fget_read(td, fd,
1514 		    cap_rights_init(&rights, CAP_PREAD), &fp);
1515 		break;
1516 	case LIO_SYNC:
1517 		error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp);
1518 		break;
1519 	case LIO_MLOCK:
1520 		fp = NULL;
1521 		break;
1522 	case LIO_NOP:
1523 		error = fget(td, fd, cap_rights_init(&rights), &fp);
1524 		break;
1525 	default:
1526 		error = EINVAL;
1527 	}
1528 	if (error) {
1529 		uma_zfree(aiocb_zone, job);
1530 		ops->store_error(ujob, error);
1531 		return (error);
1532 	}
1533 
1534 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
1535 		error = EINVAL;
1536 		goto aqueue_fail;
1537 	}
1538 
1539 	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
1540 		error = EINVAL;
1541 		goto aqueue_fail;
1542 	}
1543 
1544 	job->fd_file = fp;
1545 
1546 	mtx_lock(&aio_job_mtx);
1547 	jid = jobrefid++;
1548 	job->seqno = jobseqno++;
1549 	mtx_unlock(&aio_job_mtx);
1550 	error = ops->store_kernelinfo(ujob, jid);
1551 	if (error) {
1552 		error = EINVAL;
1553 		goto aqueue_fail;
1554 	}
1555 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
1556 
1557 	if (opcode == LIO_NOP) {
1558 		fdrop(fp, td);
1559 		uma_zfree(aiocb_zone, job);
1560 		return (0);
1561 	}
1562 
1563 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1564 		goto no_kqueue;
1565 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1566 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1567 		error = EINVAL;
1568 		goto aqueue_fail;
1569 	}
1570 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
1571 	kev.ident = (uintptr_t)job->ujob;
1572 	kev.filter = EVFILT_AIO;
1573 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
1574 	kev.data = (intptr_t)job;
1575 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1576 	error = kqfd_register(kqfd, &kev, td, 1);
1577 	if (error)
1578 		goto aqueue_fail;
1579 
1580 no_kqueue:
1581 
1582 	ops->store_error(ujob, EINPROGRESS);
1583 	job->uaiocb._aiocb_private.error = EINPROGRESS;
1584 	job->userproc = p;
1585 	job->cred = crhold(td->td_ucred);
1586 	job->jobflags = KAIOCB_QUEUEING;
1587 	job->lio = lj;
1588 
1589 	if (opcode == LIO_MLOCK) {
1590 		aio_schedule(job, aio_process_mlock);
1591 		error = 0;
1592 	} else if (fp->f_ops->fo_aio_queue == NULL)
1593 		error = aio_queue_file(fp, job);
1594 	else
1595 		error = fo_aio_queue(fp, job);
1596 	if (error)
1597 		goto aqueue_fail;
1598 
1599 	AIO_LOCK(ki);
1600 	job->jobflags &= ~KAIOCB_QUEUEING;
1601 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1602 	ki->kaio_count++;
1603 	if (lj)
1604 		lj->lioj_count++;
1605 	atomic_add_int(&num_queue_count, 1);
1606 	if (job->jobflags & KAIOCB_FINISHED) {
1607 		/*
1608 		 * The queue callback completed the request synchronously.
1609 		 * The bulk of the completion is deferred in that case
1610 		 * until this point.
1611 		 */
1612 		aio_bio_done_notify(p, job);
1613 	} else
1614 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1615 	AIO_UNLOCK(ki);
1616 	return (0);
1617 
1618 aqueue_fail:
1619 	knlist_delete(&job->klist, curthread, 0);
1620 	if (fp)
1621 		fdrop(fp, td);
1622 	uma_zfree(aiocb_zone, job);
1623 	ops->store_error(ujob, error);
1624 	return (error);
1625 }
1626 
1627 static void
1628 aio_cancel_daemon_job(struct kaiocb *job)
1629 {
1630 
1631 	mtx_lock(&aio_job_mtx);
1632 	if (!aio_cancel_cleared(job))
1633 		TAILQ_REMOVE(&aio_jobs, job, list);
1634 	mtx_unlock(&aio_job_mtx);
1635 	aio_cancel(job);
1636 }
1637 
1638 void
1639 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1640 {
1641 
1642 	mtx_lock(&aio_job_mtx);
1643 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1644 		mtx_unlock(&aio_job_mtx);
1645 		aio_cancel(job);
1646 		return;
1647 	}
1648 	job->handle_fn = func;
1649 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1650 	aio_kick_nowait(job->userproc);
1651 	mtx_unlock(&aio_job_mtx);
1652 }
1653 
1654 static void
1655 aio_cancel_sync(struct kaiocb *job)
1656 {
1657 	struct kaioinfo *ki;
1658 
1659 	ki = job->userproc->p_aioinfo;
1660 	mtx_lock(&aio_job_mtx);
1661 	if (!aio_cancel_cleared(job))
1662 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1663 	mtx_unlock(&aio_job_mtx);
1664 	aio_cancel(job);
1665 }
1666 
1667 int
1668 aio_queue_file(struct file *fp, struct kaiocb *job)
1669 {
1670 	struct aioliojob *lj;
1671 	struct kaioinfo *ki;
1672 	struct kaiocb *job2;
1673 	struct vnode *vp;
1674 	struct mount *mp;
1675 	int error, opcode;
1676 	bool safe;
1677 
1678 	lj = job->lio;
1679 	ki = job->userproc->p_aioinfo;
1680 	opcode = job->uaiocb.aio_lio_opcode;
1681 	if (opcode == LIO_SYNC)
1682 		goto queueit;
1683 
1684 	if ((error = aio_qphysio(job->userproc, job)) == 0)
1685 		goto done;
1686 #if 0
1687 	/*
1688 	 * XXX: This means qphysio() failed with EFAULT.  The current
1689 	 * behavior is to retry the operation via fo_read/fo_write.
1690 	 * Wouldn't it be better to just complete the request with an
1691 	 * error here?
1692 	 */
1693 	if (error > 0)
1694 		goto done;
1695 #endif
1696 queueit:
1697 	safe = false;
1698 	if (fp->f_type == DTYPE_VNODE) {
1699 		vp = fp->f_vnode;
1700 		if (vp->v_type == VREG || vp->v_type == VDIR) {
1701 			mp = fp->f_vnode->v_mount;
1702 			if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
1703 				safe = true;
1704 		}
1705 	}
1706 	if (!(safe || enable_aio_unsafe)) {
1707 		counted_warning(&unsafe_warningcnt,
1708 		    "is attempting to use unsafe AIO requests");
1709 		return (EOPNOTSUPP);
1710 	}
1711 
1712 	if (opcode == LIO_SYNC) {
1713 		AIO_LOCK(ki);
1714 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
1715 			if (job2->fd_file == job->fd_file &&
1716 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
1717 			    job2->seqno < job->seqno) {
1718 				job2->jobflags |= KAIOCB_CHECKSYNC;
1719 				job->pending++;
1720 			}
1721 		}
1722 		if (job->pending != 0) {
1723 			if (!aio_set_cancel_function(job, aio_cancel_sync)) {
1724 				AIO_UNLOCK(ki);
1725 				aio_cancel(job);
1726 				return (0);
1727 			}
1728 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1729 			AIO_UNLOCK(ki);
1730 			return (0);
1731 		}
1732 		AIO_UNLOCK(ki);
1733 	}
1734 
1735 	switch (opcode) {
1736 	case LIO_READ:
1737 	case LIO_WRITE:
1738 		aio_schedule(job, aio_process_rw);
1739 		error = 0;
1740 		break;
1741 	case LIO_SYNC:
1742 		aio_schedule(job, aio_process_sync);
1743 		error = 0;
1744 		break;
1745 	default:
1746 		error = EINVAL;
1747 	}
1748 done:
1749 	return (error);
1750 }
1751 
1752 static void
1753 aio_kick_nowait(struct proc *userp)
1754 {
1755 	struct kaioinfo *ki = userp->p_aioinfo;
1756 	struct aioproc *aiop;
1757 
1758 	mtx_assert(&aio_job_mtx, MA_OWNED);
1759 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1760 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
1761 		aiop->aioprocflags &= ~AIOP_FREE;
1762 		wakeup(aiop->aioproc);
1763 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1764 	    ki->kaio_active_count + num_aio_resv_start <
1765 	    ki->kaio_maxactive_count) {
1766 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
1767 	}
1768 }
1769 
1770 static int
1771 aio_kick(struct proc *userp)
1772 {
1773 	struct kaioinfo *ki = userp->p_aioinfo;
1774 	struct aioproc *aiop;
1775 	int error, ret = 0;
1776 
1777 	mtx_assert(&aio_job_mtx, MA_OWNED);
1778 retryproc:
1779 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1780 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
1781 		aiop->aioprocflags &= ~AIOP_FREE;
1782 		wakeup(aiop->aioproc);
1783 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
1784 	    ki->kaio_active_count + num_aio_resv_start <
1785 	    ki->kaio_maxactive_count) {
1786 		num_aio_resv_start++;
1787 		mtx_unlock(&aio_job_mtx);
1788 		error = aio_newproc(&num_aio_resv_start);
1789 		mtx_lock(&aio_job_mtx);
1790 		if (error) {
1791 			num_aio_resv_start--;
1792 			goto retryproc;
1793 		}
1794 	} else {
1795 		ret = -1;
1796 	}
1797 	return (ret);
1798 }
1799 
1800 static void
1801 aio_kick_helper(void *context, int pending)
1802 {
1803 	struct proc *userp = context;
1804 
1805 	mtx_lock(&aio_job_mtx);
1806 	while (--pending >= 0) {
1807 		if (aio_kick(userp))
1808 			break;
1809 	}
1810 	mtx_unlock(&aio_job_mtx);
1811 }
1812 
1813 /*
1814  * Support the aio_return system call; as a side effect, kernel resources
1815  * are released.
1816  */
1817 static int
1818 kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1819 {
1820 	struct proc *p = td->td_proc;
1821 	struct kaiocb *job;
1822 	struct kaioinfo *ki;
1823 	long status, error;
1824 
1825 	ki = p->p_aioinfo;
1826 	if (ki == NULL)
1827 		return (EINVAL);
1828 	AIO_LOCK(ki);
1829 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
1830 		if (job->ujob == ujob)
1831 			break;
1832 	}
1833 	if (job != NULL) {
1834 		MPASS(job->jobflags & KAIOCB_FINISHED);
1835 		status = job->uaiocb._aiocb_private.status;
1836 		error = job->uaiocb._aiocb_private.error;
1837 		td->td_retval[0] = status;
1838 		td->td_ru.ru_oublock += job->outblock;
1839 		td->td_ru.ru_inblock += job->inblock;
1840 		td->td_ru.ru_msgsnd += job->msgsnd;
1841 		td->td_ru.ru_msgrcv += job->msgrcv;
1842 		aio_free_entry(job);
1843 		AIO_UNLOCK(ki);
1844 		ops->store_error(ujob, error);
1845 		ops->store_status(ujob, status);
1846 	} else {
1847 		error = EINVAL;
1848 		AIO_UNLOCK(ki);
1849 	}
1850 	return (error);
1851 }
1852 
1853 int
1854 sys_aio_return(struct thread *td, struct aio_return_args *uap)
1855 {
1856 
1857 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
1858 }
1859 
1860 /*
1861  * Allow a process to wake up when any of the I/O requests has completed.
1862  */
1863 static int
1864 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
1865     struct timespec *ts)
1866 {
1867 	struct proc *p = td->td_proc;
1868 	struct timeval atv;
1869 	struct kaioinfo *ki;
1870 	struct kaiocb *firstjob, *job;
1871 	int error, i, timo;
1872 
1873 	timo = 0;
1874 	if (ts) {
1875 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
1876 			return (EINVAL);
1877 
1878 		TIMESPEC_TO_TIMEVAL(&atv, ts);
1879 		if (itimerfix(&atv))
1880 			return (EINVAL);
1881 		timo = tvtohz(&atv);
1882 	}
1883 
1884 	ki = p->p_aioinfo;
1885 	if (ki == NULL)
1886 		return (EAGAIN);
1887 
1888 	if (njoblist == 0)
1889 		return (0);
1890 
1891 	AIO_LOCK(ki);
1892 	for (;;) {
1893 		firstjob = NULL;
1894 		error = 0;
1895 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
1896 			for (i = 0; i < njoblist; i++) {
1897 				if (job->ujob == ujoblist[i]) {
1898 					if (firstjob == NULL)
1899 						firstjob = job;
1900 					if (job->jobflags & KAIOCB_FINISHED)
1901 						goto RETURN;
1902 				}
1903 			}
1904 		}
1905 		/* All of the requested jobs have already finished. */
1906 		if (firstjob == NULL)
1907 			break;
1908 
1909 		ki->kaio_flags |= KAIO_WAKEUP;
1910 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
1911 		    "aiospn", timo);
1912 		if (error == ERESTART)
1913 			error = EINTR;
1914 		if (error)
1915 			break;
1916 	}
1917 RETURN:
1918 	AIO_UNLOCK(ki);
1919 	return (error);
1920 }
1921 
1922 int
1923 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1924 {
1925 	struct timespec ts, *tsp;
1926 	struct aiocb **ujoblist;
1927 	int error;
1928 
1929 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
1930 		return (EINVAL);
1931 
1932 	if (uap->timeout) {
1933 		/* Get timespec struct. */
1934 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1935 			return (error);
1936 		tsp = &ts;
1937 	} else
1938 		tsp = NULL;
1939 
1940 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
1941 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
1942 	if (error == 0)
1943 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
1944 	uma_zfree(aiol_zone, ujoblist);
1945 	return (error);
1946 }
1947 
1948 /*
1949  * aio_cancel cancels any non-physio aio operations that are not
1950  * currently in progress.
1951  */
1952 int
1953 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1954 {
1955 	struct proc *p = td->td_proc;
1956 	struct kaioinfo *ki;
1957 	struct kaiocb *job, *jobn;
1958 	struct file *fp;
1959 	cap_rights_t rights;
1960 	int error;
1961 	int cancelled = 0;
1962 	int notcancelled = 0;
1963 	struct vnode *vp;
1964 
1965 	/* Lookup file object. */
1966 	error = fget(td, uap->fd, cap_rights_init(&rights), &fp);
1967 	if (error)
1968 		return (error);
1969 
1970 	ki = p->p_aioinfo;
1971 	if (ki == NULL)
1972 		goto done;
1973 
1974 	if (fp->f_type == DTYPE_VNODE) {
1975 		vp = fp->f_vnode;
1976 		if (vn_isdisk(vp, &error)) {
1977 			fdrop(fp, td);
1978 			td->td_retval[0] = AIO_NOTCANCELED;
1979 			return (0);
1980 		}
1981 	}
1982 
1983 	AIO_LOCK(ki);
1984 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
1985 		if ((uap->fd == job->uaiocb.aio_fildes) &&
1986 		    ((uap->aiocbp == NULL) ||
1987 		     (uap->aiocbp == job->ujob))) {
1988 			if (aio_cancel_job(p, ki, job)) {
1989 				cancelled++;
1990 			} else {
1991 				notcancelled++;
1992 			}
1993 			if (uap->aiocbp != NULL)
1994 				break;
1995 		}
1996 	}
1997 	AIO_UNLOCK(ki);
1998 
1999 done:
2000 	fdrop(fp, td);
2001 
2002 	if (uap->aiocbp != NULL) {
2003 		if (cancelled) {
2004 			td->td_retval[0] = AIO_CANCELED;
2005 			return (0);
2006 		}
2007 	}
2008 
2009 	if (notcancelled) {
2010 		td->td_retval[0] = AIO_NOTCANCELED;
2011 		return (0);
2012 	}
2013 
2014 	if (cancelled) {
2015 		td->td_retval[0] = AIO_CANCELED;
2016 		return (0);
2017 	}
2018 
2019 	td->td_retval[0] = AIO_ALLDONE;
2020 
2021 	return (0);
2022 }
2023 
2024 /*
2025  * aio_error is implemented at the kernel level for compatibility purposes
2026  * only.  For a user mode async implementation, it would be best to do it in
2027  * a userland subroutine.
2028  */
2029 static int
2030 kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2031 {
2032 	struct proc *p = td->td_proc;
2033 	struct kaiocb *job;
2034 	struct kaioinfo *ki;
2035 	int status;
2036 
2037 	ki = p->p_aioinfo;
2038 	if (ki == NULL) {
2039 		td->td_retval[0] = EINVAL;
2040 		return (0);
2041 	}
2042 
2043 	AIO_LOCK(ki);
2044 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
2045 		if (job->ujob == ujob) {
2046 			if (job->jobflags & KAIOCB_FINISHED)
2047 				td->td_retval[0] =
2048 					job->uaiocb._aiocb_private.error;
2049 			else
2050 				td->td_retval[0] = EINPROGRESS;
2051 			AIO_UNLOCK(ki);
2052 			return (0);
2053 		}
2054 	}
2055 	AIO_UNLOCK(ki);
2056 
2057 	/*
2058 	 * Hack for a failed aio_aqueue(): report its recorded error.
2059 	 */
2060 	status = ops->fetch_status(ujob);
2061 	if (status == -1) {
2062 		td->td_retval[0] = ops->fetch_error(ujob);
2063 		return (0);
2064 	}
2065 
2066 	td->td_retval[0] = EINVAL;
2067 	return (0);
2068 }
2069 
2070 int
2071 sys_aio_error(struct thread *td, struct aio_error_args *uap)
2072 {
2073 
2074 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
2075 }
2076 
2077 /* syscall - asynchronous read from a file (REALTIME) */
2078 #ifdef COMPAT_FREEBSD6
2079 int
2080 freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
2081 {
2082 
2083 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2084 	    &aiocb_ops_osigevent));
2085 }
2086 #endif
2087 
2088 int
2089 sys_aio_read(struct thread *td, struct aio_read_args *uap)
2090 {
2091 
2092 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2093 }
2094 
2095 /* syscall - asynchronous write to a file (REALTIME) */
2096 #ifdef COMPAT_FREEBSD6
2097 int
2098 freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
2099 {
2100 
2101 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2102 	    &aiocb_ops_osigevent));
2103 }
2104 #endif
2105 
2106 int
2107 sys_aio_write(struct thread *td, struct aio_write_args *uap)
2108 {
2109 
2110 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
2111 }
2112 
2113 int
2114 sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
2115 {
2116 
2117 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
2118 }
2119 
2120 static int
2121 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
2122     struct aiocb **acb_list, int nent, struct sigevent *sig,
2123     struct aiocb_ops *ops)
2124 {
2125 	struct proc *p = td->td_proc;
2126 	struct aiocb *job;
2127 	struct kaioinfo *ki;
2128 	struct aioliojob *lj;
2129 	struct kevent kev;
2130 	int error;
2131 	int nerror;
2132 	int i;
2133 
2134 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2135 		return (EINVAL);
2136 
2137 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2138 		return (EINVAL);
2139 
2140 	if (p->p_aioinfo == NULL)
2141 		aio_init_aioinfo(p);
2142 
2143 	ki = p->p_aioinfo;
2144 
2145 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
2146 	lj->lioj_flags = 0;
2147 	lj->lioj_count = 0;
2148 	lj->lioj_finished_count = 0;
2149 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
2150 	ksiginfo_init(&lj->lioj_ksi);
2151 
2152 	/*
2153 	 * Set up the completion signal.
2154 	 */
2155 	if (sig && (mode == LIO_NOWAIT)) {
2156 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
2157 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2158 			/* Assume only new style KEVENT */
2159 			kev.filter = EVFILT_LIO;
2160 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
2161 			kev.ident = (uintptr_t)uacb_list; /* something unique */
2162 			kev.data = (intptr_t)lj;
2163 			/* pass user defined sigval data */
2164 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
2165 			error = kqfd_register(
2166 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
2167 			if (error) {
2168 				uma_zfree(aiolio_zone, lj);
2169 				return (error);
2170 			}
2171 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
2172 			;
2173 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2174 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
2175 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
2176 					uma_zfree(aiolio_zone, lj);
2177 					return (EINVAL);
2178 				}
2179 				lj->lioj_flags |= LIOJ_SIGNAL;
2180 		} else {
2181 			uma_zfree(aiolio_zone, lj);
2182 			return (EINVAL);
2183 		}
2184 	}
2185 
2186 	AIO_LOCK(ki);
2187 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
2188 	/*
2189 	 * Take an extra reference on the aiocb count so that the lio
2190 	 * cannot be freed by other threads doing aio_waitcomplete() or
2191 	 * aio_return(), and so that no completion event is sent until
2192 	 * we have queued all of the tasks.
2193 	 */
2194 	lj->lioj_count = 1;
2195 	AIO_UNLOCK(ki);
2196 
2197 	/*
2198 	 * Queue up each of the listed I/O requests.
2199 	 */
2200 	nerror = 0;
2201 	for (i = 0; i < nent; i++) {
2202 		job = acb_list[i];
2203 		if (job != NULL) {
2204 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
2205 			if (error != 0)
2206 				nerror++;
2207 		}
2208 	}
2209 
2210 	error = 0;
2211 	AIO_LOCK(ki);
2212 	if (mode == LIO_WAIT) {
2213 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2214 			ki->kaio_flags |= KAIO_WAKEUP;
2215 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
2216 			    PRIBIO | PCATCH, "aiospn", 0);
2217 			if (error == ERESTART)
2218 				error = EINTR;
2219 			if (error)
2220 				break;
2221 		}
2222 	} else {
2223 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
2224 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
2225 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
2226 				KNOTE_LOCKED(&lj->klist, 1);
2227 			}
2228 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
2229 			    == LIOJ_SIGNAL
2230 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
2231 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
2232 				aio_sendsig(p, &lj->lioj_signal,
2233 					    &lj->lioj_ksi);
2234 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2235 			}
2236 		}
2237 	}
2238 	lj->lioj_count--;
2239 	if (lj->lioj_count == 0) {
2240 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
2241 		knlist_delete(&lj->klist, curthread, 1);
2242 		PROC_LOCK(p);
2243 		sigqueue_take(&lj->lioj_ksi);
2244 		PROC_UNLOCK(p);
2245 		AIO_UNLOCK(ki);
2246 		uma_zfree(aiolio_zone, lj);
2247 	} else
2248 		AIO_UNLOCK(ki);
2249 
2250 	if (nerror)
2251 		return (EIO);
2252 	return (error);
2253 }
2254 
2255 /* syscall - list directed I/O (REALTIME) */
2256 #ifdef COMPAT_FREEBSD6
2257 int
2258 freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
2259 {
2260 	struct aiocb **acb_list;
2261 	struct sigevent *sigp, sig;
2262 	struct osigevent osig;
2263 	int error, nent;
2264 
2265 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2266 		return (EINVAL);
2267 
2268 	nent = uap->nent;
2269 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2270 		return (EINVAL);
2271 
2272 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2273 		error = copyin(uap->sig, &osig, sizeof(osig));
2274 		if (error)
2275 			return (error);
2276 		error = convert_old_sigevent(&osig, &sig);
2277 		if (error)
2278 			return (error);
2279 		sigp = &sig;
2280 	} else
2281 		sigp = NULL;
2282 
2283 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2284 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2285 	if (error == 0)
2286 		error = kern_lio_listio(td, uap->mode,
2287 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2288 		    &aiocb_ops_osigevent);
2289 	free(acb_list, M_LIO);
2290 	return (error);
2291 }
2292 #endif
2293 
2294 /* syscall - list directed I/O (REALTIME) */
2295 int
2296 sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
2297 {
2298 	struct aiocb **acb_list;
2299 	struct sigevent *sigp, sig;
2300 	int error, nent;
2301 
2302 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2303 		return (EINVAL);
2304 
2305 	nent = uap->nent;
2306 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2307 		return (EINVAL);
2308 
2309 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2310 		error = copyin(uap->sig, &sig, sizeof(sig));
2311 		if (error)
2312 			return (error);
2313 		sigp = &sig;
2314 	} else
2315 		sigp = NULL;
2316 
2317 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2318 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
2319 	if (error == 0)
2320 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
2321 		    nent, sigp, &aiocb_ops);
2322 	free(acb_list, M_LIO);
2323 	return (error);
2324 }
2325 
2326 static void
2327 aio_physwakeup(struct bio *bp)
2328 {
2329 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2330 	struct proc *userp;
2331 	struct kaioinfo *ki;
2332 	size_t nbytes;
2333 	int error, nblks;
2334 
2335 	/* Release mapping into kernel space. */
2336 	userp = job->userproc;
2337 	ki = userp->p_aioinfo;
2338 	if (job->pbuf) {
2339 		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
2340 		relpbuf(job->pbuf, NULL);
2341 		job->pbuf = NULL;
2342 		atomic_subtract_int(&num_buf_aio, 1);
2343 		AIO_LOCK(ki);
2344 		ki->kaio_buffer_count--;
2345 		AIO_UNLOCK(ki);
2346 	}
2347 	vm_page_unhold_pages(job->pages, job->npages);
2348 
2349 	bp = job->bp;
2350 	job->bp = NULL;
2351 	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
2352 	error = 0;
2353 	if (bp->bio_flags & BIO_ERROR)
2354 		error = bp->bio_error;
2355 	nblks = btodb(nbytes);
2356 	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
2357 		job->outblock += nblks;
2358 	else
2359 		job->inblock += nblks;
2360 
2361 	if (error)
2362 		aio_complete(job, -1, error);
2363 	else
2364 		aio_complete(job, nbytes, 0);
2365 
2366 	g_destroy_bio(bp);
2367 }
2368 
2369 /* syscall - wait for the next completion of an aio request */
2370 static int
2371 kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
2372     struct timespec *ts, struct aiocb_ops *ops)
2373 {
2374 	struct proc *p = td->td_proc;
2375 	struct timeval atv;
2376 	struct kaioinfo *ki;
2377 	struct kaiocb *job;
2378 	struct aiocb *ujob;
2379 	long error, status;
2380 	int timo;
2381 
2382 	ops->store_aiocb(ujobp, NULL);
2383 
2384 	if (ts == NULL) {
2385 		timo = 0;
2386 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
2387 		timo = -1;
2388 	} else {
2389 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2390 			return (EINVAL);
2391 
2392 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2393 		if (itimerfix(&atv))
2394 			return (EINVAL);
2395 		timo = tvtohz(&atv);
2396 	}
2397 
2398 	if (p->p_aioinfo == NULL)
2399 		aio_init_aioinfo(p);
2400 	ki = p->p_aioinfo;
2401 
2402 	error = 0;
2403 	job = NULL;
2404 	AIO_LOCK(ki);
2405 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2406 		if (timo == -1) {
2407 			error = EWOULDBLOCK;
2408 			break;
2409 		}
2410 		ki->kaio_flags |= KAIO_WAKEUP;
2411 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
2412 		    "aiowc", timo);
2413 		if (timo && error == ERESTART)
2414 			error = EINTR;
2415 		if (error)
2416 			break;
2417 	}
2418 
2419 	if (job != NULL) {
2420 		MPASS(job->jobflags & KAIOCB_FINISHED);
2421 		ujob = job->ujob;
2422 		status = job->uaiocb._aiocb_private.status;
2423 		error = job->uaiocb._aiocb_private.error;
2424 		td->td_retval[0] = status;
2425 		td->td_ru.ru_oublock += job->outblock;
2426 		td->td_ru.ru_inblock += job->inblock;
2427 		td->td_ru.ru_msgsnd += job->msgsnd;
2428 		td->td_ru.ru_msgrcv += job->msgrcv;
2429 		aio_free_entry(job);
2430 		AIO_UNLOCK(ki);
2431 		ops->store_aiocb(ujobp, ujob);
2432 		ops->store_error(ujob, error);
2433 		ops->store_status(ujob, status);
2434 	} else
2435 		AIO_UNLOCK(ki);
2436 
2437 	return (error);
2438 }
2439 
2440 int
2441 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2442 {
2443 	struct timespec ts, *tsp;
2444 	int error;
2445 
2446 	if (uap->timeout) {
2447 		/* Get timespec struct. */
2448 		error = copyin(uap->timeout, &ts, sizeof(ts));
2449 		if (error)
2450 			return (error);
2451 		tsp = &ts;
2452 	} else
2453 		tsp = NULL;
2454 
2455 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
2456 }
2457 
2458 static int
2459 kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
2460     struct aiocb_ops *ops)
2461 {
2462 	struct proc *p = td->td_proc;
2463 	struct kaioinfo *ki;
2464 
2465 	if (op != O_SYNC) /* XXX lack of O_DSYNC */
2466 		return (EINVAL);
2467 	ki = p->p_aioinfo;
2468 	if (ki == NULL)
2469 		aio_init_aioinfo(p);
2470 	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
2471 }
2472 
2473 int
2474 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
2475 {
2476 
2477 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
2478 }
2479 
2480 /* kqueue attach function */
2481 static int
2482 filt_aioattach(struct knote *kn)
2483 {
2484 	struct kaiocb *job = (struct kaiocb *)kn->kn_sdata;
2485 
2486 	/*
2487 	 * The job pointer must be validated before using it, so
2488 	 * registration is restricted to the kernel; the user cannot
2489 	 * set EV_FLAG1.
2490 	 */
2491 	if ((kn->kn_flags & EV_FLAG1) == 0)
2492 		return (EPERM);
2493 	kn->kn_ptr.p_aio = job;
2494 	kn->kn_flags &= ~EV_FLAG1;
2495 
2496 	knlist_add(&job->klist, kn, 0);
2497 
2498 	return (0);
2499 }
2500 
2501 /* kqueue detach function */
2502 static void
2503 filt_aiodetach(struct knote *kn)
2504 {
2505 	struct knlist *knl;
2506 
2507 	knl = &kn->kn_ptr.p_aio->klist;
2508 	knl->kl_lock(knl->kl_lockarg);
2509 	if (!knlist_empty(knl))
2510 		knlist_remove(knl, kn, 1);
2511 	knl->kl_unlock(knl->kl_lockarg);
2512 }
2513 
2514 /* kqueue filter function */
2515 /*ARGSUSED*/
2516 static int
2517 filt_aio(struct knote *kn, long hint)
2518 {
2519 	struct kaiocb *job = kn->kn_ptr.p_aio;
2520 
2521 	kn->kn_data = job->uaiocb._aiocb_private.error;
2522 	if (!(job->jobflags & KAIOCB_FINISHED))
2523 		return (0);
2524 	kn->kn_flags |= EV_EOF;
2525 	return (1);
2526 }
2527 
2528 /* kqueue attach function */
2529 static int
2530 filt_lioattach(struct knote *kn)
2531 {
2532 	struct aioliojob *lj = (struct aioliojob *)kn->kn_sdata;
2533 
2534 	/*
2535 	 * The aioliojob pointer must be validated before using it, so
2536 	 * registration is restricted to the kernel; the user cannot
2537 	 * set EV_FLAG1.
2538 	 */
2539 	if ((kn->kn_flags & EV_FLAG1) == 0)
2540 		return (EPERM);
2541 	kn->kn_ptr.p_lio = lj;
2542 	kn->kn_flags &= ~EV_FLAG1;
2543 
2544 	knlist_add(&lj->klist, kn, 0);
2545 
2546 	return (0);
2547 }
2548 
2549 /* kqueue detach function */
2550 static void
2551 filt_liodetach(struct knote *kn)
2552 {
2553 	struct knlist *knl;
2554 
2555 	knl = &kn->kn_ptr.p_lio->klist;
2556 	knl->kl_lock(knl->kl_lockarg);
2557 	if (!knlist_empty(knl))
2558 		knlist_remove(knl, kn, 1);
2559 	knl->kl_unlock(knl->kl_lockarg);
2560 }
2561 
2562 /* kqueue filter function */
2563 /*ARGSUSED*/
2564 static int
2565 filt_lio(struct knote *kn, long hint)
2566 {
2567 	struct aioliojob *lj = kn->kn_ptr.p_lio;
2568 
2569 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
2570 }
2571 
2572 #ifdef COMPAT_FREEBSD32
2573 #include <sys/mount.h>
2574 #include <sys/socket.h>
2575 #include <compat/freebsd32/freebsd32.h>
2576 #include <compat/freebsd32/freebsd32_proto.h>
2577 #include <compat/freebsd32/freebsd32_signal.h>
2578 #include <compat/freebsd32/freebsd32_syscall.h>
2579 #include <compat/freebsd32/freebsd32_util.h>
2580 
2581 struct __aiocb_private32 {
2582 	int32_t	status;
2583 	int32_t	error;
2584 	uint32_t kernelinfo;
2585 };
2586 
2587 #ifdef COMPAT_FREEBSD6
2588 typedef struct oaiocb32 {
2589 	int	aio_fildes;		/* File descriptor */
2590 	uint64_t aio_offset __packed;	/* File offset for I/O */
2591 	uint32_t aio_buf;		/* I/O buffer in process space */
2592 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
2593 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
2594 	int	aio_lio_opcode;		/* LIO opcode */
2595 	int	aio_reqprio;		/* Request priority -- ignored */
2596 	struct	__aiocb_private32 _aiocb_private;
2597 } oaiocb32_t;
2598 #endif
2599 
2600 typedef struct aiocb32 {
2601 	int32_t	aio_fildes;		/* File descriptor */
2602 	uint64_t aio_offset __packed;	/* File offset for I/O */
2603 	uint32_t aio_buf;		/* I/O buffer in process space */
2604 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
2605 	int	__spare__[2];
2606 	uint32_t __spare2__;
2607 	int	aio_lio_opcode;		/* LIO opcode */
2608 	int	aio_reqprio;		/* Request priority -- ignored */
2609 	struct	__aiocb_private32 _aiocb_private;
2610 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
2611 } aiocb32_t;
2612 
2613 #ifdef COMPAT_FREEBSD6
2614 static int
2615 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
2616 {
2617 
2618 	/*
2619 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
2620 	 * supported by AIO with the old sigevent structure.
2621 	 */
2622 	CP(*osig, *nsig, sigev_notify);
2623 	switch (nsig->sigev_notify) {
2624 	case SIGEV_NONE:
2625 		break;
2626 	case SIGEV_SIGNAL:
2627 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
2628 		break;
2629 	case SIGEV_KEVENT:
2630 		nsig->sigev_notify_kqueue =
2631 		    osig->__sigev_u.__sigev_notify_kqueue;
2632 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
2633 		break;
2634 	default:
2635 		return (EINVAL);
2636 	}
2637 	return (0);
2638 }
2639 
2640 static int
2641 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
2642 {
2643 	struct oaiocb32 job32;
2644 	int error;
2645 
2646 	bzero(kjob, sizeof(struct aiocb));
2647 	error = copyin(ujob, &job32, sizeof(job32));
2648 	if (error)
2649 		return (error);
2650 
2651 	CP(job32, *kjob, aio_fildes);
2652 	CP(job32, *kjob, aio_offset);
2653 	PTRIN_CP(job32, *kjob, aio_buf);
2654 	CP(job32, *kjob, aio_nbytes);
2655 	CP(job32, *kjob, aio_lio_opcode);
2656 	CP(job32, *kjob, aio_reqprio);
2657 	CP(job32, *kjob, _aiocb_private.status);
2658 	CP(job32, *kjob, _aiocb_private.error);
2659 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
2660 	return (convert_old_sigevent32(&job32.aio_sigevent,
2661 	    &kjob->aio_sigevent));
2662 }
2663 #endif
2664 
2665 static int
2666 aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
2667 {
2668 	struct aiocb32 job32;
2669 	int error;
2670 
2671 	error = copyin(ujob, &job32, sizeof(job32));
2672 	if (error)
2673 		return (error);
2674 	CP(job32, *kjob, aio_fildes);
2675 	CP(job32, *kjob, aio_offset);
2676 	PTRIN_CP(job32, *kjob, aio_buf);
2677 	CP(job32, *kjob, aio_nbytes);
2678 	CP(job32, *kjob, aio_lio_opcode);
2679 	CP(job32, *kjob, aio_reqprio);
2680 	CP(job32, *kjob, _aiocb_private.status);
2681 	CP(job32, *kjob, _aiocb_private.error);
2682 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
2683 	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
2684 }
2685 
2686 static long
2687 aiocb32_fetch_status(struct aiocb *ujob)
2688 {
2689 	struct aiocb32 *ujob32;
2690 
2691 	ujob32 = (struct aiocb32 *)ujob;
2692 	return (fuword32(&ujob32->_aiocb_private.status));
2693 }
2694 
2695 static long
2696 aiocb32_fetch_error(struct aiocb *ujob)
2697 {
2698 	struct aiocb32 *ujob32;
2699 
2700 	ujob32 = (struct aiocb32 *)ujob;
2701 	return (fuword32(&ujob32->_aiocb_private.error));
2702 }
2703 
2704 static int
2705 aiocb32_store_status(struct aiocb *ujob, long status)
2706 {
2707 	struct aiocb32 *ujob32;
2708 
2709 	ujob32 = (struct aiocb32 *)ujob;
2710 	return (suword32(&ujob32->_aiocb_private.status, status));
2711 }
2712 
2713 static int
2714 aiocb32_store_error(struct aiocb *ujob, long error)
2715 {
2716 	struct aiocb32 *ujob32;
2717 
2718 	ujob32 = (struct aiocb32 *)ujob;
2719 	return (suword32(&ujob32->_aiocb_private.error, error));
2720 }
2721 
2722 static int
2723 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
2724 {
2725 	struct aiocb32 *ujob32;
2726 
2727 	ujob32 = (struct aiocb32 *)ujob;
2728 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
2729 }
2730 
2731 static int
2732 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
2733 {
2734 
2735 	return (suword32(ujobp, (long)ujob));
2736 }
2737 
2738 static struct aiocb_ops aiocb32_ops = {
2739 	.copyin = aiocb32_copyin,
2740 	.fetch_status = aiocb32_fetch_status,
2741 	.fetch_error = aiocb32_fetch_error,
2742 	.store_status = aiocb32_store_status,
2743 	.store_error = aiocb32_store_error,
2744 	.store_kernelinfo = aiocb32_store_kernelinfo,
2745 	.store_aiocb = aiocb32_store_aiocb,
2746 };
2747 
2748 #ifdef COMPAT_FREEBSD6
2749 static struct aiocb_ops aiocb32_ops_osigevent = {
2750 	.copyin = aiocb32_copyin_old_sigevent,
2751 	.fetch_status = aiocb32_fetch_status,
2752 	.fetch_error = aiocb32_fetch_error,
2753 	.store_status = aiocb32_store_status,
2754 	.store_error = aiocb32_store_error,
2755 	.store_kernelinfo = aiocb32_store_kernelinfo,
2756 	.store_aiocb = aiocb32_store_aiocb,
2757 };
2758 #endif
2759 
2760 int
2761 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
2762 {
2763 
2764 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2765 }
2766 
2767 int
2768 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
2769 {
2770 	struct timespec32 ts32;
2771 	struct timespec ts, *tsp;
2772 	struct aiocb **ujoblist;
2773 	uint32_t *ujoblist32;
2774 	int error, i;
2775 
2776 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
2777 		return (EINVAL);
2778 
2779 	if (uap->timeout) {
2780 		/* Get timespec struct. */
2781 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
2782 			return (error);
2783 		CP(ts32, ts, tv_sec);
2784 		CP(ts32, ts, tv_nsec);
2785 		tsp = &ts;
2786 	} else
2787 		tsp = NULL;
2788 
2789 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
2790 	ujoblist32 = (uint32_t *)ujoblist;
2791 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
2792 	    sizeof(ujoblist32[0]));
2793 	if (error == 0) {
2794 		for (i = uap->nent - 1; i >= 0; i--)
2795 			ujoblist[i] = PTRIN(ujoblist32[i]);
2796 
2797 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
2798 	}
2799 	uma_zfree(aiol_zone, ujoblist);
2800 	return (error);
2801 }
2802 
2803 int
2804 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
2805 {
2806 
2807 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
2808 }
2809 
2810 #ifdef COMPAT_FREEBSD6
2811 int
2812 freebsd6_freebsd32_aio_read(struct thread *td,
2813     struct freebsd6_freebsd32_aio_read_args *uap)
2814 {
2815 
2816 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2817 	    &aiocb32_ops_osigevent));
2818 }
2819 #endif
2820 
2821 int
2822 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
2823 {
2824 
2825 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
2826 	    &aiocb32_ops));
2827 }
2828 
2829 #ifdef COMPAT_FREEBSD6
2830 int
2831 freebsd6_freebsd32_aio_write(struct thread *td,
2832     struct freebsd6_freebsd32_aio_write_args *uap)
2833 {
2834 
2835 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2836 	    &aiocb32_ops_osigevent));
2837 }
2838 #endif
2839 
2840 int
2841 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
2842 {
2843 
2844 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
2845 	    &aiocb32_ops));
2846 }
2847 
2848 int
2849 freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
2850 {
2851 
2852 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
2853 	    &aiocb32_ops));
2854 }
2855 
2856 int
2857 freebsd32_aio_waitcomplete(struct thread *td,
2858     struct freebsd32_aio_waitcomplete_args *uap)
2859 {
2860 	struct timespec32 ts32;
2861 	struct timespec ts, *tsp;
2862 	int error;
2863 
2864 	if (uap->timeout) {
2865 		/* Get timespec struct. */
2866 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
2867 		if (error)
2868 			return (error);
2869 		CP(ts32, ts, tv_sec);
2870 		CP(ts32, ts, tv_nsec);
2871 		tsp = &ts;
2872 	} else
2873 		tsp = NULL;
2874 
2875 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
2876 	    &aiocb32_ops));
2877 }
2878 
2879 int
2880 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
2881 {
2882 
2883 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
2884 	    &aiocb32_ops));
2885 }
2886 
2887 #ifdef COMPAT_FREEBSD6
2888 int
2889 freebsd6_freebsd32_lio_listio(struct thread *td,
2890     struct freebsd6_freebsd32_lio_listio_args *uap)
2891 {
2892 	struct aiocb **acb_list;
2893 	struct sigevent *sigp, sig;
2894 	struct osigevent32 osig;
2895 	uint32_t *acb_list32;
2896 	int error, i, nent;
2897 
2898 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2899 		return (EINVAL);
2900 
2901 	nent = uap->nent;
2902 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2903 		return (EINVAL);
2904 
2905 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2906 		error = copyin(uap->sig, &osig, sizeof(osig));
2907 		if (error)
2908 			return (error);
2909 		error = convert_old_sigevent32(&osig, &sig);
2910 		if (error)
2911 			return (error);
2912 		sigp = &sig;
2913 	} else
2914 		sigp = NULL;
2915 
2916 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
2917 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
2918 	if (error) {
2919 		free(acb_list32, M_LIO);
2920 		return (error);
2921 	}
2922 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2923 	for (i = 0; i < nent; i++)
2924 		acb_list[i] = PTRIN(acb_list32[i]);
2925 	free(acb_list32, M_LIO);
2926 
2927 	error = kern_lio_listio(td, uap->mode,
2928 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2929 	    &aiocb32_ops_osigevent);
2930 	free(acb_list, M_LIO);
2931 	return (error);
2932 }
2933 #endif
2934 
2935 int
2936 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
2937 {
2938 	struct aiocb **acb_list;
2939 	struct sigevent *sigp, sig;
2940 	struct sigevent32 sig32;
2941 	uint32_t *acb_list32;
2942 	int error, i, nent;
2943 
2944 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
2945 		return (EINVAL);
2946 
2947 	nent = uap->nent;
2948 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2949 		return (EINVAL);
2950 
2951 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
2952 		error = copyin(uap->sig, &sig32, sizeof(sig32));
2953 		if (error)
2954 			return (error);
2955 		error = convert_sigevent32(&sig32, &sig);
2956 		if (error)
2957 			return (error);
2958 		sigp = &sig;
2959 	} else
2960 		sigp = NULL;
2961 
2962 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
2963 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
2964 	if (error) {
2965 		free(acb_list32, M_LIO);
2966 		return (error);
2967 	}
2968 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
2969 	for (i = 0; i < nent; i++)
2970 		acb_list[i] = PTRIN(acb_list32[i]);
2971 	free(acb_list32, M_LIO);
2972 
2973 	error = kern_lio_listio(td, uap->mode,
2974 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
2975 	    &aiocb32_ops);
2976 	free(acb_list, M_LIO);
2977 	return (error);
2978 }
2979 
2980 #endif
2981