xref: /freebsd/sys/kern/vfs_aio.c (revision 4d805eacfa529ea9d1f01b2fbfa8de694a41cdfa)
19454b2d8SWarner Losh /*-
2ee877a35SJohn Dyson  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
3ee877a35SJohn Dyson  *
4ee877a35SJohn Dyson  * Redistribution and use in source and binary forms, with or without
5ee877a35SJohn Dyson  * modification, are permitted provided that the following conditions
6ee877a35SJohn Dyson  * are met:
7ee877a35SJohn Dyson  * 1. Redistributions of source code must retain the above copyright
8ee877a35SJohn Dyson  *    notice, this list of conditions and the following disclaimer.
9ee877a35SJohn Dyson  * 2. John S. Dyson's name may not be used to endorse or promote products
10ee877a35SJohn Dyson  *    derived from this software without specific prior written permission.
11ee877a35SJohn Dyson  *
12ee877a35SJohn Dyson  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
13ee877a35SJohn Dyson  * bad that happens because of using this software isn't the responsibility
14ee877a35SJohn Dyson  * of the author.  This software is distributed AS-IS.
15ee877a35SJohn Dyson  */
16ee877a35SJohn Dyson 
17ee877a35SJohn Dyson /*
188a6472b7SPeter Dufault  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19ee877a35SJohn Dyson  */
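/*
 * For orientation only (illustrative sketch, not part of the original
 * source): a minimal userland consumer of this facility, using the
 * standard <aio.h> interface, might look roughly like the following.
 * The descriptor "fd" and the error handling are assumptions.
 *
 *	struct aiocb cb;
 *	char buf[512];
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;			// or block in aio_suspend()
 *	ssize_t n = aio_return(&cb);
 */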
20ee877a35SJohn Dyson 
21677b542eSDavid E. O'Brien #include <sys/cdefs.h>
22677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
23677b542eSDavid E. O'Brien 
243858a1f4SJohn Baldwin #include "opt_compat.h"
253858a1f4SJohn Baldwin 
26ee877a35SJohn Dyson #include <sys/param.h>
27ee877a35SJohn Dyson #include <sys/systm.h>
28f591779bSSeigo Tanimura #include <sys/malloc.h>
299626b608SPoul-Henning Kamp #include <sys/bio.h>
30a5c9bce7SBruce Evans #include <sys/buf.h>
314a144410SRobert Watson #include <sys/capsicum.h>
3275b8b3b2SJohn Baldwin #include <sys/eventhandler.h>
33ee877a35SJohn Dyson #include <sys/sysproto.h>
34ee877a35SJohn Dyson #include <sys/filedesc.h>
35ee877a35SJohn Dyson #include <sys/kernel.h>
3677409fe1SPoul-Henning Kamp #include <sys/module.h>
37c9a970a7SAlan Cox #include <sys/kthread.h>
38ee877a35SJohn Dyson #include <sys/fcntl.h>
39ee877a35SJohn Dyson #include <sys/file.h>
40104a9b7eSAlexander Kabaev #include <sys/limits.h>
41fdebd4f0SBruce Evans #include <sys/lock.h>
4235e0e5b3SJohn Baldwin #include <sys/mutex.h>
43ee877a35SJohn Dyson #include <sys/unistd.h>
446aeb05d7STom Rhodes #include <sys/posix4.h>
45ee877a35SJohn Dyson #include <sys/proc.h>
462d2f8ae7SBruce Evans #include <sys/resourcevar.h>
47ee877a35SJohn Dyson #include <sys/signalvar.h>
48bfbbc4aaSJason Evans #include <sys/protosw.h>
4989f6b863SAttilio Rao #include <sys/rwlock.h>
501ce91824SDavid Xu #include <sys/sema.h>
511ce91824SDavid Xu #include <sys/socket.h>
52bfbbc4aaSJason Evans #include <sys/socketvar.h>
5321d56e9cSAlfred Perlstein #include <sys/syscall.h>
5421d56e9cSAlfred Perlstein #include <sys/sysent.h>
55a624e84fSJohn Dyson #include <sys/sysctl.h>
56ee99e978SBruce Evans #include <sys/sx.h>
571ce91824SDavid Xu #include <sys/taskqueue.h>
58fd3bf775SJohn Dyson #include <sys/vnode.h>
59fd3bf775SJohn Dyson #include <sys/conf.h>
60cb679c38SJonathan Lemon #include <sys/event.h>
6199eee864SDavid Xu #include <sys/mount.h>
62f743d981SAlexander Motin #include <geom/geom.h>
63ee877a35SJohn Dyson 
641ce91824SDavid Xu #include <machine/atomic.h>
651ce91824SDavid Xu 
66ee877a35SJohn Dyson #include <vm/vm.h>
67f743d981SAlexander Motin #include <vm/vm_page.h>
68ee877a35SJohn Dyson #include <vm/vm_extern.h>
692244ea07SJohn Dyson #include <vm/pmap.h>
702244ea07SJohn Dyson #include <vm/vm_map.h>
7199eee864SDavid Xu #include <vm/vm_object.h>
72c897b813SJeff Roberson #include <vm/uma.h>
73ee877a35SJohn Dyson #include <sys/aio.h>
745aaef07cSJohn Dyson 
75eb8e6d52SEivind Eklund /*
76eb8e6d52SEivind Eklund  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
7799eee864SDavid Xu  * overflow. (XXX will be removed soon.)
78eb8e6d52SEivind Eklund  */
7999eee864SDavid Xu static u_long jobrefid;
802244ea07SJohn Dyson 
8199eee864SDavid Xu /*
8299eee864SDavid Xu  * Job sequence number counter, used to order aio_fsync requests.
8399eee864SDavid Xu  */
8499eee864SDavid Xu static uint64_t jobseqno;
8599eee864SDavid Xu 
8684af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC
872244ea07SJohn Dyson #define MAX_AIO_PER_PROC	32
8884af4da6SJohn Dyson #endif
8984af4da6SJohn Dyson 
9084af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC
912244ea07SJohn Dyson #define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
9284af4da6SJohn Dyson #endif
9384af4da6SJohn Dyson 
9484af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE
952244ea07SJohn Dyson #define	MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
9684af4da6SJohn Dyson #endif
9784af4da6SJohn Dyson 
9884af4da6SJohn Dyson #ifndef MAX_BUF_AIO
9984af4da6SJohn Dyson #define MAX_BUF_AIO		16
10084af4da6SJohn Dyson #endif
10184af4da6SJohn Dyson 
102e603be7aSRobert Watson FEATURE(aio, "Asynchronous I/O");
103e603be7aSRobert Watson 
1043858a1f4SJohn Baldwin static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
1053858a1f4SJohn Baldwin 
1060dd6c035SJohn Baldwin static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,
1070dd6c035SJohn Baldwin     "Async IO management");
108eb8e6d52SEivind Eklund 
109f3215338SJohn Baldwin static int enable_aio_unsafe = 0;
110f3215338SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
111f3215338SJohn Baldwin     "Permit asynchronous IO on all file types, not just known-safe types");
112f3215338SJohn Baldwin 
113303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS;
1140dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
11539314b7dSJohn Baldwin     "Maximum number of kernel processes to use for handling async IO");
116a624e84fSJohn Dyson 
117eb8e6d52SEivind Eklund static int num_aio_procs = 0;
1180dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
11939314b7dSJohn Baldwin     "Number of presently active kernel processes for async IO");
120a624e84fSJohn Dyson 
121eb8e6d52SEivind Eklund /*
122eb8e6d52SEivind Eklund  * The code will adjust the actual number of AIO processes towards this
123eb8e6d52SEivind Eklund  * number when it gets a chance.
124eb8e6d52SEivind Eklund  */
125eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
126eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
1270dd6c035SJohn Baldwin     0,
1280dd6c035SJohn Baldwin     "Preferred number of ready kernel processes for async IO");
129a624e84fSJohn Dyson 
130eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
131eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
132eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
133a624e84fSJohn Dyson 
134eb8e6d52SEivind Eklund static int num_queue_count = 0;
135eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
136eb8e6d52SEivind Eklund     "Number of queued aio requests");
137a624e84fSJohn Dyson 
138eb8e6d52SEivind Eklund static int num_buf_aio = 0;
139eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
140eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
141fd3bf775SJohn Dyson 
14239314b7dSJohn Baldwin /* Number of async I/O processes in the process of being started */
143a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
144eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
145fd3bf775SJohn Dyson 
146eb8e6d52SEivind Eklund static int aiod_lifetime;
147eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
148eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
14984af4da6SJohn Dyson 
150eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
151eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
1520dd6c035SJohn Baldwin     0,
1530dd6c035SJohn Baldwin     "Maximum active aio requests per process (stored in the process)");
154eb8e6d52SEivind Eklund 
155eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
156eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
157eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
158eb8e6d52SEivind Eklund     "Maximum queued aio requests per process (stored in the process)");
159eb8e6d52SEivind Eklund 
160eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
161eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
162eb8e6d52SEivind Eklund     "Maximum buf aio requests per process (stored in the process)");
163eb8e6d52SEivind Eklund 
164399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
1650972628aSDavid Xu typedef struct oaiocb {
1660972628aSDavid Xu 	int	aio_fildes;		/* File descriptor */
1670972628aSDavid Xu 	off_t	aio_offset;		/* File offset for I/O */
1680972628aSDavid Xu 	volatile void *aio_buf;         /* I/O buffer in process space */
1690972628aSDavid Xu 	size_t	aio_nbytes;		/* Number of bytes for I/O */
1700972628aSDavid Xu 	struct	osigevent aio_sigevent;	/* Signal to deliver */
1710972628aSDavid Xu 	int	aio_lio_opcode;		/* LIO opcode */
1720972628aSDavid Xu 	int	aio_reqprio;		/* Request priority -- ignored */
1730972628aSDavid Xu 	struct	__aiocb_private	_aiocb_private;
1740972628aSDavid Xu } oaiocb_t;
175399e8c17SJohn Baldwin #endif
1760972628aSDavid Xu 
1771aa4c324SDavid Xu /*
1785652770dSJohn Baldwin  * Below is a key of locks used to protect each member of struct kaiocb,
1791aa4c324SDavid Xu  * aioliojob, and kaioinfo, as well as any backends.
1801aa4c324SDavid Xu  *
1811aa4c324SDavid Xu  * * - need not be protected
182759ccccaSDavid Xu  * a - locked by kaioinfo lock
1831aa4c324SDavid Xu  * b - locked by backend lock, the backend lock can be null in some cases,
1841aa4c324SDavid Xu  *     for example, BIO belongs to this type, in this case, proc lock is
1851aa4c324SDavid Xu  *     reused.
1861aa4c324SDavid Xu  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
1871aa4c324SDavid Xu  */
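/*
 * Added example (derived from the annotations in this file, not from the
 * original source): the per-process kaio_jobqueue below is tagged (a) and
 * may only be walked or modified with AIO_LOCK(ki) held, while the global
 * aio_jobs list is tagged (c) and requires aio_job_mtx.
 */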
1881aa4c324SDavid Xu 
1891aa4c324SDavid Xu /*
190f3215338SJohn Baldwin  * If the routine that services an AIO request blocks while running in an
191f3215338SJohn Baldwin  * AIO kernel process it can starve other I/O requests.  BIO requests
192f3215338SJohn Baldwin  * queued via aio_qphysio() complete in GEOM and do not use AIO kernel
193f3215338SJohn Baldwin  * processes at all.  Socket I/O requests use a separate pool of
194f3215338SJohn Baldwin  * kprocs and also force non-blocking I/O.  Other file I/O requests
195f3215338SJohn Baldwin  * use the generic fo_read/fo_write operations which can block.  The
196f3215338SJohn Baldwin  * fsync and mlock operations can also block while executing.  Ideally
197f3215338SJohn Baldwin  * none of these requests would block while executing.
198f3215338SJohn Baldwin  *
199f3215338SJohn Baldwin  * Note that the service routines cannot toggle O_NONBLOCK in the file
200f3215338SJohn Baldwin  * structure directly while handling a request due to races with
201f3215338SJohn Baldwin  * userland threads.
2021aa4c324SDavid Xu  */
2031aa4c324SDavid Xu 
20448dac059SAlan Cox /* jobflags */
205f3215338SJohn Baldwin #define	KAIOCB_QUEUEING		0x01
206f3215338SJohn Baldwin #define	KAIOCB_CANCELLED	0x02
207f3215338SJohn Baldwin #define	KAIOCB_CANCELLING	0x04
2085652770dSJohn Baldwin #define	KAIOCB_CHECKSYNC	0x08
209f3215338SJohn Baldwin #define	KAIOCB_CLEARED		0x10
210f3215338SJohn Baldwin #define	KAIOCB_FINISHED		0x20
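/*
 * Added annotation (derived from how the flags are used below, not from
 * the original source): QUEUEING marks a job still being set up by
 * aio_aqueue(); CANCELLED records that cancellation was requested;
 * CANCELLING is set while a cancel callback is running so that
 * aio_complete() defers the completion notification; CHECKSYNC makes
 * completion re-examine the aio_fsync queue; CLEARED notes that the
 * cancel function was cleared while a cancel was in progress; FINISHED
 * means the job's status and error have been recorded.
 */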
21148dac059SAlan Cox 
2122244ea07SJohn Dyson /*
2132244ea07SJohn Dyson  * AIO process info
2142244ea07SJohn Dyson  */
21584af4da6SJohn Dyson #define AIOP_FREE	0x1			/* proc on free queue */
21684af4da6SJohn Dyson 
21739314b7dSJohn Baldwin struct aioproc {
21839314b7dSJohn Baldwin 	int	aioprocflags;			/* (c) AIO proc flags */
21939314b7dSJohn Baldwin 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
22039314b7dSJohn Baldwin 	struct	proc *aioproc;			/* (*) the AIO proc */
2212244ea07SJohn Dyson };
2222244ea07SJohn Dyson 
22384af4da6SJohn Dyson /*
22484af4da6SJohn Dyson  * data-structure for lio signal management
22584af4da6SJohn Dyson  */
2261ce91824SDavid Xu struct aioliojob {
2271aa4c324SDavid Xu 	int	lioj_flags;			/* (a) listio flags */
2281aa4c324SDavid Xu 	int	lioj_count;			/* (a) count of jobs in this lio */
2291aa4c324SDavid Xu 	int	lioj_finished_count;		/* (a) count of finished jobs */
2301aa4c324SDavid Xu 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
2311aa4c324SDavid Xu 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
2321aa4c324SDavid Xu 	struct	knlist klist;			/* (a) list of knotes */
2331aa4c324SDavid Xu 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
23484af4da6SJohn Dyson };
2351ce91824SDavid Xu 
23684af4da6SJohn Dyson #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
23784af4da6SJohn Dyson #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
23869cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
23984af4da6SJohn Dyson 
24084af4da6SJohn Dyson /*
24184af4da6SJohn Dyson  * per process aio data structure
24284af4da6SJohn Dyson  */
2432244ea07SJohn Dyson struct kaioinfo {
244759ccccaSDavid Xu 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
2451aa4c324SDavid Xu 	int	kaio_flags;		/* (a) per process kaio flags */
2461aa4c324SDavid Xu 	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
2471aa4c324SDavid Xu 	int	kaio_active_count;	/* (c) number of currently used AIOs */
2481aa4c324SDavid Xu 	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
2491aa4c324SDavid Xu 	int	kaio_count;		/* (a) size of AIO queue */
2501aa4c324SDavid Xu 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
2511aa4c324SDavid Xu 	int	kaio_buffer_count;	/* (a) number of physio buffers */
2525652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
2535652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
2541aa4c324SDavid Xu 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
2555652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
2565652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
257f3215338SJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
25839314b7dSJohn Baldwin 	struct	task kaio_task;		/* (*) task to kick aio processes */
259f3215338SJohn Baldwin 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
2602244ea07SJohn Dyson };
2612244ea07SJohn Dyson 
262759ccccaSDavid Xu #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
263759ccccaSDavid Xu #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
264759ccccaSDavid Xu #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
265759ccccaSDavid Xu #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
266759ccccaSDavid Xu 
26784af4da6SJohn Dyson #define KAIO_RUNDOWN	0x1	/* process is being run down */
2680dd6c035SJohn Baldwin #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
269fd3bf775SJohn Dyson 
2703858a1f4SJohn Baldwin /*
2713858a1f4SJohn Baldwin  * Operations used to interact with userland aio control blocks.
2723858a1f4SJohn Baldwin  * Different ABIs provide their own operations.
2733858a1f4SJohn Baldwin  */
2743858a1f4SJohn Baldwin struct aiocb_ops {
2753858a1f4SJohn Baldwin 	int	(*copyin)(struct aiocb *ujob, struct aiocb *kjob);
2763858a1f4SJohn Baldwin 	long	(*fetch_status)(struct aiocb *ujob);
2773858a1f4SJohn Baldwin 	long	(*fetch_error)(struct aiocb *ujob);
2783858a1f4SJohn Baldwin 	int	(*store_status)(struct aiocb *ujob, long status);
2793858a1f4SJohn Baldwin 	int	(*store_error)(struct aiocb *ujob, long error);
2803858a1f4SJohn Baldwin 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
2813858a1f4SJohn Baldwin 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
2823858a1f4SJohn Baldwin };
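/*
 * Illustrative sketch (the handler names are placeholders, not taken from
 * the portion of the file shown here): each ABI supplies a table of these
 * operations, e.g. for the native aiocb layout something along the lines
 * of:
 *
 *	static struct aiocb_ops aiocb_ops = {
 *		.copyin = aiocb_copyin,
 *		.fetch_status = aiocb_fetch_status,
 *		.fetch_error = aiocb_fetch_error,
 *		.store_status = aiocb_store_status,
 *		.store_error = aiocb_store_error,
 *		.store_kernelinfo = aiocb_store_kernelinfo,
 *		.store_aiocb = aiocb_store_aiocb,
 *	};
 *
 * aio_aqueue() then goes through the table instead of touching the
 * userland control block directly, so a compat ABI (e.g. 32-bit) can
 * supply its own copyin/copyout helpers.
 */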
2833858a1f4SJohn Baldwin 
28439314b7dSJohn Baldwin static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
2851ce91824SDavid Xu static struct sema aio_newproc_sem;
2861ce91824SDavid Xu static struct mtx aio_job_mtx;
2875652770dSJohn Baldwin static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
2881ce91824SDavid Xu static struct unrhdr *aiod_unr;
2892244ea07SJohn Dyson 
2906a1162d4SAlexander Leidinger void		aio_init_aioinfo(struct proc *p);
291723d37c0SKonstantin Belousov static int	aio_onceonly(void);
2925652770dSJohn Baldwin static int	aio_free_entry(struct kaiocb *job);
2935652770dSJohn Baldwin static void	aio_process_rw(struct kaiocb *job);
2945652770dSJohn Baldwin static void	aio_process_sync(struct kaiocb *job);
2955652770dSJohn Baldwin static void	aio_process_mlock(struct kaiocb *job);
296f3215338SJohn Baldwin static void	aio_schedule_fsync(void *context, int pending);
2971ce91824SDavid Xu static int	aio_newproc(int *);
2985652770dSJohn Baldwin int		aio_aqueue(struct thread *td, struct aiocb *ujob,
2993858a1f4SJohn Baldwin 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
300f3215338SJohn Baldwin static int	aio_queue_file(struct file *fp, struct kaiocb *job);
301f743d981SAlexander Motin static void	aio_physwakeup(struct bio *bp);
30275b8b3b2SJohn Baldwin static void	aio_proc_rundown(void *arg, struct proc *p);
3030dd6c035SJohn Baldwin static void	aio_proc_rundown_exec(void *arg, struct proc *p,
3040dd6c035SJohn Baldwin 		    struct image_params *imgp);
3055652770dSJohn Baldwin static int	aio_qphysio(struct proc *p, struct kaiocb *job);
3061ce91824SDavid Xu static void	aio_daemon(void *param);
307f3215338SJohn Baldwin static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
308dbbccfe9SDavid Xu static int	aio_kick(struct proc *userp);
30999eee864SDavid Xu static void	aio_kick_nowait(struct proc *userp);
31099eee864SDavid Xu static void	aio_kick_helper(void *context, int pending);
31121d56e9cSAlfred Perlstein static int	filt_aioattach(struct knote *kn);
31221d56e9cSAlfred Perlstein static void	filt_aiodetach(struct knote *kn);
31321d56e9cSAlfred Perlstein static int	filt_aio(struct knote *kn, long hint);
31469cd28daSDoug Ambrisko static int	filt_lioattach(struct knote *kn);
31569cd28daSDoug Ambrisko static void	filt_liodetach(struct knote *kn);
31669cd28daSDoug Ambrisko static int	filt_lio(struct knote *kn, long hint);
3172244ea07SJohn Dyson 
318eb8e6d52SEivind Eklund /*
319eb8e6d52SEivind Eklund  * Zones for:
320eb8e6d52SEivind Eklund  * 	kaio	Per process async io info
32139314b7dSJohn Baldwin  *	aiop	async io process data
322eb8e6d52SEivind Eklund  *	aiocb	async io jobs
323eb8e6d52SEivind Eklund  *	aiol	list io job pointer - internal to aio_suspend XXX
324eb8e6d52SEivind Eklund  *	aiolio	list io jobs
325eb8e6d52SEivind Eklund  */
326c897b813SJeff Roberson static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
327fd3bf775SJohn Dyson 
328eb8e6d52SEivind Eklund /* kqueue filters for aio */
329e76d823bSRobert Watson static struct filterops aio_filtops = {
330e76d823bSRobert Watson 	.f_isfd = 0,
331e76d823bSRobert Watson 	.f_attach = filt_aioattach,
332e76d823bSRobert Watson 	.f_detach = filt_aiodetach,
333e76d823bSRobert Watson 	.f_event = filt_aio,
334e76d823bSRobert Watson };
335e76d823bSRobert Watson static struct filterops lio_filtops = {
336e76d823bSRobert Watson 	.f_isfd = 0,
337e76d823bSRobert Watson 	.f_attach = filt_lioattach,
338e76d823bSRobert Watson 	.f_detach = filt_liodetach,
339e76d823bSRobert Watson 	.f_event = filt_lio
340e76d823bSRobert Watson };
34121d56e9cSAlfred Perlstein 
34275b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag;
34375b8b3b2SJohn Baldwin 
344c85650caSJohn Baldwin TASKQUEUE_DEFINE_THREAD(aiod_kick);
3451ce91824SDavid Xu 
346eb8e6d52SEivind Eklund /*
347eb8e6d52SEivind Eklund  * Main operations function for use as a kernel module.
348eb8e6d52SEivind Eklund  */
34921d56e9cSAlfred Perlstein static int
35021d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg)
35121d56e9cSAlfred Perlstein {
35221d56e9cSAlfred Perlstein 	int error = 0;
35321d56e9cSAlfred Perlstein 
35421d56e9cSAlfred Perlstein 	switch (cmd) {
35521d56e9cSAlfred Perlstein 	case MOD_LOAD:
35621d56e9cSAlfred Perlstein 		aio_onceonly();
35721d56e9cSAlfred Perlstein 		break;
35821d56e9cSAlfred Perlstein 	case MOD_SHUTDOWN:
35921d56e9cSAlfred Perlstein 		break;
36021d56e9cSAlfred Perlstein 	default:
361f3215338SJohn Baldwin 		error = EOPNOTSUPP;
36221d56e9cSAlfred Perlstein 		break;
36321d56e9cSAlfred Perlstein 	}
36421d56e9cSAlfred Perlstein 	return (error);
36521d56e9cSAlfred Perlstein }
36621d56e9cSAlfred Perlstein 
36721d56e9cSAlfred Perlstein static moduledata_t aio_mod = {
36821d56e9cSAlfred Perlstein 	"aio",
36921d56e9cSAlfred Perlstein 	&aio_modload,
37021d56e9cSAlfred Perlstein 	NULL
37121d56e9cSAlfred Perlstein };
37221d56e9cSAlfred Perlstein 
373399e8c17SJohn Baldwin DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
37421d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1);
37521d56e9cSAlfred Perlstein 
376fd3bf775SJohn Dyson /*
3772244ea07SJohn Dyson  * Startup initialization
3782244ea07SJohn Dyson  */
379723d37c0SKonstantin Belousov static int
38021d56e9cSAlfred Perlstein aio_onceonly(void)
381fd3bf775SJohn Dyson {
38221d56e9cSAlfred Perlstein 
38375b8b3b2SJohn Baldwin 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
38475b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
3850dd6c035SJohn Baldwin 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
3860dd6c035SJohn Baldwin 	    NULL, EVENTHANDLER_PRI_ANY);
38721d56e9cSAlfred Perlstein 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
38869cd28daSDoug Ambrisko 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
3892244ea07SJohn Dyson 	TAILQ_INIT(&aio_freeproc);
3901ce91824SDavid Xu 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
3911ce91824SDavid Xu 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
3922244ea07SJohn Dyson 	TAILQ_INIT(&aio_jobs);
3931ce91824SDavid Xu 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
394c897b813SJeff Roberson 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
395c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
39639314b7dSJohn Baldwin 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
397c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
3985652770dSJohn Baldwin 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
399c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
400c897b813SJeff Roberson 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
401c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4021ce91824SDavid Xu 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
403c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
40484af4da6SJohn Dyson 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
405fd3bf775SJohn Dyson 	jobrefid = 1;
406399e8c17SJohn Baldwin 	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
407c844abc9SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
40886d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
40986d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
410723d37c0SKonstantin Belousov 
411723d37c0SKonstantin Belousov 	return (0);
4122244ea07SJohn Dyson }
4132244ea07SJohn Dyson 
414eb8e6d52SEivind Eklund /*
415bfbbc4aaSJason Evans  * Init the per-process aioinfo structure.  The aioinfo limits are set
416bfbbc4aaSJason Evans  * per-process for user limit (resource) management.
4172244ea07SJohn Dyson  */
4186a1162d4SAlexander Leidinger void
419fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p)
420fd3bf775SJohn Dyson {
4212244ea07SJohn Dyson 	struct kaioinfo *ki;
422ac41f2efSAlfred Perlstein 
423a163d034SWarner Losh 	ki = uma_zalloc(kaio_zone, M_WAITOK);
4249889bbacSKonstantin Belousov 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
42584af4da6SJohn Dyson 	ki->kaio_flags = 0;
426a624e84fSJohn Dyson 	ki->kaio_maxactive_count = max_aio_per_proc;
4272244ea07SJohn Dyson 	ki->kaio_active_count = 0;
428a624e84fSJohn Dyson 	ki->kaio_qallowed_count = max_aio_queue_per_proc;
4291ce91824SDavid Xu 	ki->kaio_count = 0;
43084af4da6SJohn Dyson 	ki->kaio_ballowed_count = max_buf_aio;
431fd3bf775SJohn Dyson 	ki->kaio_buffer_count = 0;
4321ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_all);
4331ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_done);
4342244ea07SJohn Dyson 	TAILQ_INIT(&ki->kaio_jobqueue);
43584af4da6SJohn Dyson 	TAILQ_INIT(&ki->kaio_liojoblist);
43699eee864SDavid Xu 	TAILQ_INIT(&ki->kaio_syncqueue);
437f3215338SJohn Baldwin 	TAILQ_INIT(&ki->kaio_syncready);
43899eee864SDavid Xu 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
439f3215338SJohn Baldwin 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
4403999ebe3SAlan Cox 	PROC_LOCK(p);
4413999ebe3SAlan Cox 	if (p->p_aioinfo == NULL) {
4423999ebe3SAlan Cox 		p->p_aioinfo = ki;
4433999ebe3SAlan Cox 		PROC_UNLOCK(p);
4443999ebe3SAlan Cox 	} else {
4453999ebe3SAlan Cox 		PROC_UNLOCK(p);
446759ccccaSDavid Xu 		mtx_destroy(&ki->kaio_mtx);
4473999ebe3SAlan Cox 		uma_zfree(kaio_zone, ki);
4482244ea07SJohn Dyson 	}
449bfbbc4aaSJason Evans 
45022035f47SOleksandr Tymoshenko 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
4511ce91824SDavid Xu 		aio_newproc(NULL);
4522244ea07SJohn Dyson }
4532244ea07SJohn Dyson 
4544c0fb2cfSDavid Xu static int
4554c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
4564c0fb2cfSDavid Xu {
457cf7d9a8cSDavid Xu 	struct thread *td;
458cf7d9a8cSDavid Xu 	int error;
459759ccccaSDavid Xu 
460cf7d9a8cSDavid Xu 	error = sigev_findtd(p, sigev, &td);
461cf7d9a8cSDavid Xu 	if (error)
462cf7d9a8cSDavid Xu 		return (error);
4634c0fb2cfSDavid Xu 	if (!KSI_ONQ(ksi)) {
464cf7d9a8cSDavid Xu 		ksiginfo_set_sigev(ksi, sigev);
4654c0fb2cfSDavid Xu 		ksi->ksi_code = SI_ASYNCIO;
4664c0fb2cfSDavid Xu 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
467cf7d9a8cSDavid Xu 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
4684c0fb2cfSDavid Xu 	}
469759ccccaSDavid Xu 	PROC_UNLOCK(p);
470cf7d9a8cSDavid Xu 	return (error);
4714c0fb2cfSDavid Xu }
4724c0fb2cfSDavid Xu 
4732244ea07SJohn Dyson /*
474bfbbc4aaSJason Evans  * Free a job entry.  The job must already have completed (KAIOCB_FINISHED
475bfbbc4aaSJason Evans  * is set); the kaioinfo lock is held on entry and is dropped and reacquired
476bfbbc4aaSJason Evans  * around the final teardown of the entry.
4772244ea07SJohn Dyson  */
47888ed460eSAlan Cox static int
4795652770dSJohn Baldwin aio_free_entry(struct kaiocb *job)
480fd3bf775SJohn Dyson {
4812244ea07SJohn Dyson 	struct kaioinfo *ki;
4821ce91824SDavid Xu 	struct aioliojob *lj;
4832244ea07SJohn Dyson 	struct proc *p;
4842244ea07SJohn Dyson 
4855652770dSJohn Baldwin 	p = job->userproc;
4861ce91824SDavid Xu 	MPASS(curproc == p);
4872244ea07SJohn Dyson 	ki = p->p_aioinfo;
4881ce91824SDavid Xu 	MPASS(ki != NULL);
4891ce91824SDavid Xu 
490759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
491f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
492759ccccaSDavid Xu 
4931ce91824SDavid Xu 	atomic_subtract_int(&num_queue_count, 1);
4941ce91824SDavid Xu 
4951ce91824SDavid Xu 	ki->kaio_count--;
4961ce91824SDavid Xu 	MPASS(ki->kaio_count >= 0);
4971ce91824SDavid Xu 
4985652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
4995652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
50027b8220dSDavid Xu 
5015652770dSJohn Baldwin 	lj = job->lio;
50284af4da6SJohn Dyson 	if (lj) {
5031ce91824SDavid Xu 		lj->lioj_count--;
5041ce91824SDavid Xu 		lj->lioj_finished_count--;
5051ce91824SDavid Xu 
506a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
5071ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
5081ce91824SDavid Xu 			/* lio is going away, we need to destroy any knotes */
5091ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
510759ccccaSDavid Xu 			PROC_LOCK(p);
5111ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
512759ccccaSDavid Xu 			PROC_UNLOCK(p);
5131ce91824SDavid Xu 			uma_zfree(aiolio_zone, lj);
51484af4da6SJohn Dyson 		}
51584af4da6SJohn Dyson 	}
5161ce91824SDavid Xu 
5175652770dSJohn Baldwin 	/* job is going away, we need to destroy any knotes */
5185652770dSJohn Baldwin 	knlist_delete(&job->klist, curthread, 1);
519759ccccaSDavid Xu 	PROC_LOCK(p);
5205652770dSJohn Baldwin 	sigqueue_take(&job->ksi);
521759ccccaSDavid Xu 	PROC_UNLOCK(p);
5221ce91824SDavid Xu 
5235652770dSJohn Baldwin 	MPASS(job->bp == NULL);
524759ccccaSDavid Xu 	AIO_UNLOCK(ki);
5252a522eb9SJohn Baldwin 
5262a522eb9SJohn Baldwin 	/*
5272a522eb9SJohn Baldwin 	 * The thread argument here is used to find the owning process
5282a522eb9SJohn Baldwin 	 * and is also passed to fo_close() which may pass it to various
5292a522eb9SJohn Baldwin 	 * places such as devsw close() routines.  Because of that, we
5302a522eb9SJohn Baldwin 	 * need a thread pointer from the process owning the job that is
5312a522eb9SJohn Baldwin 	 * persistent and won't disappear out from under us or move to
5322a522eb9SJohn Baldwin 	 * another process.
5332a522eb9SJohn Baldwin 	 *
5342a522eb9SJohn Baldwin 	 * Currently, all the callers of this function call it to remove
5355652770dSJohn Baldwin 	 * a kaiocb from the current process' job list either via a
5362a522eb9SJohn Baldwin 	 * syscall or due to the current process calling exit() or
5372a522eb9SJohn Baldwin 	 * execve().  Thus, we know that p == curproc.  We also know that
5382a522eb9SJohn Baldwin 	 * curthread can't exit since we are curthread.
5392a522eb9SJohn Baldwin 	 *
5402a522eb9SJohn Baldwin 	 * Therefore, we use curthread as the thread to pass to
5412a522eb9SJohn Baldwin 	 * knlist_delete().  This does mean that it is possible for the
5422a522eb9SJohn Baldwin 	 * thread pointer at close time to differ from the thread pointer
5432a522eb9SJohn Baldwin 	 * at open time, but this is already true of file descriptors in
5442a522eb9SJohn Baldwin 	 * a multithreaded process.
545b40ce416SJulian Elischer 	 */
5465652770dSJohn Baldwin 	if (job->fd_file)
5475652770dSJohn Baldwin 		fdrop(job->fd_file, curthread);
5485652770dSJohn Baldwin 	crfree(job->cred);
5495652770dSJohn Baldwin 	uma_zfree(aiocb_zone, job);
550759ccccaSDavid Xu 	AIO_LOCK(ki);
5511ce91824SDavid Xu 
552ac41f2efSAlfred Perlstein 	return (0);
5532244ea07SJohn Dyson }
5542244ea07SJohn Dyson 
555993182e5SAlexander Leidinger static void
5560dd6c035SJohn Baldwin aio_proc_rundown_exec(void *arg, struct proc *p,
5570dd6c035SJohn Baldwin     struct image_params *imgp __unused)
558993182e5SAlexander Leidinger {
559993182e5SAlexander Leidinger 	aio_proc_rundown(arg, p);
560993182e5SAlexander Leidinger }
561993182e5SAlexander Leidinger 
562f3215338SJohn Baldwin static int
563f3215338SJohn Baldwin aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
564f3215338SJohn Baldwin {
565f3215338SJohn Baldwin 	aio_cancel_fn_t *func;
566f3215338SJohn Baldwin 	int cancelled;
567f3215338SJohn Baldwin 
568f3215338SJohn Baldwin 	AIO_LOCK_ASSERT(ki, MA_OWNED);
569f3215338SJohn Baldwin 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
570f3215338SJohn Baldwin 		return (0);
571f3215338SJohn Baldwin 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
572f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLED;
573f3215338SJohn Baldwin 
574f3215338SJohn Baldwin 	func = job->cancel_fn;
575f3215338SJohn Baldwin 
576f3215338SJohn Baldwin 	/*
577f3215338SJohn Baldwin 	 * If there is no cancel routine, just leave the job marked as
578f3215338SJohn Baldwin 	 * cancelled.  The job should be in active use by a caller that will
579f3215338SJohn Baldwin 	 * complete it normally, or will complete it when its attempt to
580f3215338SJohn Baldwin 	 * install a cancel routine fails.
581f3215338SJohn Baldwin 	 */
582f3215338SJohn Baldwin 	if (func == NULL)
583f3215338SJohn Baldwin 		return (0);
584f3215338SJohn Baldwin 
585f3215338SJohn Baldwin 	/*
586f3215338SJohn Baldwin 	 * Set the CANCELLING flag so that aio_complete() will defer
587f3215338SJohn Baldwin 	 * completions of this job.  This prevents the job from being
588f3215338SJohn Baldwin 	 * freed out from under the cancel callback.  After the
589f3215338SJohn Baldwin 	 * callback any deferred completion (whether from the callback
590f3215338SJohn Baldwin 	 * or any other source) will be completed.
591f3215338SJohn Baldwin 	 */
592f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLING;
593f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
594f3215338SJohn Baldwin 	func(job);
595f3215338SJohn Baldwin 	AIO_LOCK(ki);
596f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_CANCELLING;
597f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
598f3215338SJohn Baldwin 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
599f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
600f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
601f3215338SJohn Baldwin 	} else {
602f3215338SJohn Baldwin 		/*
603f3215338SJohn Baldwin 		 * The cancel callback might have scheduled an
604f3215338SJohn Baldwin 		 * operation to cancel this request, but it is
605f3215338SJohn Baldwin 		 * only counted as cancelled if the request is
606f3215338SJohn Baldwin 		 * cancelled when the callback returns.
607f3215338SJohn Baldwin 		 */
608f3215338SJohn Baldwin 		cancelled = 0;
609f3215338SJohn Baldwin 	}
610f3215338SJohn Baldwin 	return (cancelled);
611f3215338SJohn Baldwin }
612f3215338SJohn Baldwin 
6132244ea07SJohn Dyson /*
6142244ea07SJohn Dyson  * Rundown the jobs for a given process.
6152244ea07SJohn Dyson  */
61621d56e9cSAlfred Perlstein static void
61775b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p)
618fd3bf775SJohn Dyson {
6192244ea07SJohn Dyson 	struct kaioinfo *ki;
6201ce91824SDavid Xu 	struct aioliojob *lj;
6215652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
6222244ea07SJohn Dyson 
6232a522eb9SJohn Baldwin 	KASSERT(curthread->td_proc == p,
6242a522eb9SJohn Baldwin 	    ("%s: called on non-curproc", __func__));
6252244ea07SJohn Dyson 	ki = p->p_aioinfo;
6262244ea07SJohn Dyson 	if (ki == NULL)
6272244ea07SJohn Dyson 		return;
6282244ea07SJohn Dyson 
629759ccccaSDavid Xu 	AIO_LOCK(ki);
63027b8220dSDavid Xu 	ki->kaio_flags |= KAIO_RUNDOWN;
6311ce91824SDavid Xu 
6321ce91824SDavid Xu restart:
633a624e84fSJohn Dyson 
634bfbbc4aaSJason Evans 	/*
6351ce91824SDavid Xu 	 * Try to cancel all pending requests. This code simulates
6361ce91824SDavid Xu 	 * aio_cancel on all pending I/O requests.
637bfbbc4aaSJason Evans 	 */
6385652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
639f3215338SJohn Baldwin 		aio_cancel_job(p, ki, job);
6402244ea07SJohn Dyson 	}
64184af4da6SJohn Dyson 
6421ce91824SDavid Xu 	/* Wait for all running I/O to be finished */
643f3215338SJohn Baldwin 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
64484af4da6SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
645759ccccaSDavid Xu 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
6461ce91824SDavid Xu 		goto restart;
64784af4da6SJohn Dyson 	}
64884af4da6SJohn Dyson 
6491ce91824SDavid Xu 	/* Free all completed I/O requests. */
6505652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
6515652770dSJohn Baldwin 		aio_free_entry(job);
65284af4da6SJohn Dyson 
6531ce91824SDavid Xu 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
654a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
65584af4da6SJohn Dyson 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
6561ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
657759ccccaSDavid Xu 			PROC_LOCK(p);
6581ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
659759ccccaSDavid Xu 			PROC_UNLOCK(p);
660c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
661f4f0ecefSJohn Dyson 		} else {
662a9bf5e37SDavid Xu 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
663a9bf5e37SDavid Xu 			    lj->lioj_count, lj->lioj_finished_count);
66484af4da6SJohn Dyson 		}
665f4f0ecefSJohn Dyson 	}
666759ccccaSDavid Xu 	AIO_UNLOCK(ki);
667c85650caSJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
668f3215338SJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
6695114048bSKonstantin Belousov 	mtx_destroy(&ki->kaio_mtx);
670c897b813SJeff Roberson 	uma_zfree(kaio_zone, ki);
671a624e84fSJohn Dyson 	p->p_aioinfo = NULL;
6722244ea07SJohn Dyson }
6732244ea07SJohn Dyson 
6742244ea07SJohn Dyson /*
675bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
6762244ea07SJohn Dyson  */
6775652770dSJohn Baldwin static struct kaiocb *
67839314b7dSJohn Baldwin aio_selectjob(struct aioproc *aiop)
679fd3bf775SJohn Dyson {
6805652770dSJohn Baldwin 	struct kaiocb *job;
681bfbbc4aaSJason Evans 	struct kaioinfo *ki;
682bfbbc4aaSJason Evans 	struct proc *userp;
6832244ea07SJohn Dyson 
6841ce91824SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
685f3215338SJohn Baldwin restart:
6865652770dSJohn Baldwin 	TAILQ_FOREACH(job, &aio_jobs, list) {
6875652770dSJohn Baldwin 		userp = job->userproc;
6882244ea07SJohn Dyson 		ki = userp->p_aioinfo;
6892244ea07SJohn Dyson 
6902244ea07SJohn Dyson 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
6915652770dSJohn Baldwin 			TAILQ_REMOVE(&aio_jobs, job, list);
692f3215338SJohn Baldwin 			if (!aio_clear_cancel_function(job))
693f3215338SJohn Baldwin 				goto restart;
694f3215338SJohn Baldwin 
6951ce91824SDavid Xu 			/* Account for currently active jobs. */
6961ce91824SDavid Xu 			ki->kaio_active_count++;
6971ce91824SDavid Xu 			break;
6981ce91824SDavid Xu 		}
6991ce91824SDavid Xu 	}
7005652770dSJohn Baldwin 	return (job);
7012244ea07SJohn Dyson }
7022244ea07SJohn Dyson 
7032244ea07SJohn Dyson /*
7040dd6c035SJohn Baldwin  * Move all data to a permanent storage device.  This code
7050dd6c035SJohn Baldwin  * simulates the fsync syscall.
70699eee864SDavid Xu  */
70799eee864SDavid Xu static int
70899eee864SDavid Xu aio_fsync_vnode(struct thread *td, struct vnode *vp)
70999eee864SDavid Xu {
71099eee864SDavid Xu 	struct mount *mp;
71199eee864SDavid Xu 	int error;
71299eee864SDavid Xu 
71399eee864SDavid Xu 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
71499eee864SDavid Xu 		goto drop;
715cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
71699eee864SDavid Xu 	if (vp->v_object != NULL) {
71789f6b863SAttilio Rao 		VM_OBJECT_WLOCK(vp->v_object);
71899eee864SDavid Xu 		vm_object_page_clean(vp->v_object, 0, 0, 0);
71989f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(vp->v_object);
72099eee864SDavid Xu 	}
72199eee864SDavid Xu 	error = VOP_FSYNC(vp, MNT_WAIT, td);
72299eee864SDavid Xu 
72322db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
72499eee864SDavid Xu 	vn_finished_write(mp);
72599eee864SDavid Xu drop:
72699eee864SDavid Xu 	return (error);
72799eee864SDavid Xu }
72899eee864SDavid Xu 
72999eee864SDavid Xu /*
730f95c13dbSGleb Smirnoff  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
731f95c13dbSGleb Smirnoff  * does the I/O request for the non-physio version of the operations.  The
732f95c13dbSGleb Smirnoff  * normal vn operations are used, and this code should work in all instances
733f95c13dbSGleb Smirnoff  * for every type of file, including pipes, sockets, fifos, and regular files.
7341ce91824SDavid Xu  *
7351aa4c324SDavid Xu  * XXX I don't think it works well for socket, pipe, and fifo.
7362244ea07SJohn Dyson  */
73788ed460eSAlan Cox static void
7385652770dSJohn Baldwin aio_process_rw(struct kaiocb *job)
739fd3bf775SJohn Dyson {
740f8f750c5SRobert Watson 	struct ucred *td_savedcred;
741b40ce416SJulian Elischer 	struct thread *td;
7422244ea07SJohn Dyson 	struct aiocb *cb;
7432244ea07SJohn Dyson 	struct file *fp;
7442244ea07SJohn Dyson 	struct uio auio;
7452244ea07SJohn Dyson 	struct iovec aiov;
746bb430bc7SJohn Baldwin 	ssize_t cnt;
7472244ea07SJohn Dyson 	int error;
748fd3bf775SJohn Dyson 	int oublock_st, oublock_end;
749fd3bf775SJohn Dyson 	int inblock_st, inblock_end;
7502244ea07SJohn Dyson 
7515652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
7525652770dSJohn Baldwin 	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
7535652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
754f95c13dbSGleb Smirnoff 
755f3215338SJohn Baldwin 	aio_switch_vmspace(job);
756b40ce416SJulian Elischer 	td = curthread;
757f8f750c5SRobert Watson 	td_savedcred = td->td_ucred;
7585652770dSJohn Baldwin 	td->td_ucred = job->cred;
7595652770dSJohn Baldwin 	cb = &job->uaiocb;
7605652770dSJohn Baldwin 	fp = job->fd_file;
761bfbbc4aaSJason Evans 
76291369fc7SAlan Cox 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
7632244ea07SJohn Dyson 	aiov.iov_len = cb->aio_nbytes;
7642244ea07SJohn Dyson 
7652244ea07SJohn Dyson 	auio.uio_iov = &aiov;
7662244ea07SJohn Dyson 	auio.uio_iovcnt = 1;
7679b16adc1SAlan Cox 	auio.uio_offset = cb->aio_offset;
7682244ea07SJohn Dyson 	auio.uio_resid = cb->aio_nbytes;
7692244ea07SJohn Dyson 	cnt = cb->aio_nbytes;
7702244ea07SJohn Dyson 	auio.uio_segflg = UIO_USERSPACE;
771b40ce416SJulian Elischer 	auio.uio_td = td;
7722244ea07SJohn Dyson 
7731c4bcd05SJeff Roberson 	inblock_st = td->td_ru.ru_inblock;
7741c4bcd05SJeff Roberson 	oublock_st = td->td_ru.ru_oublock;
775279d7226SMatthew Dillon 	/*
776a9bf5e37SDavid Xu 	 * aio_aqueue() acquires a reference to the file that is
7779b16adc1SAlan Cox 	 * released in aio_free_entry().
778279d7226SMatthew Dillon 	 */
7792244ea07SJohn Dyson 	if (cb->aio_lio_opcode == LIO_READ) {
7802244ea07SJohn Dyson 		auio.uio_rw = UIO_READ;
7815114048bSKonstantin Belousov 		if (auio.uio_resid == 0)
7825114048bSKonstantin Belousov 			error = 0;
7835114048bSKonstantin Belousov 		else
784b40ce416SJulian Elischer 			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
7852244ea07SJohn Dyson 	} else {
7866d53aa62SDavid Xu 		if (fp->f_type == DTYPE_VNODE)
7876d53aa62SDavid Xu 			bwillwrite();
7882244ea07SJohn Dyson 		auio.uio_rw = UIO_WRITE;
789b40ce416SJulian Elischer 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
7902244ea07SJohn Dyson 	}
7911c4bcd05SJeff Roberson 	inblock_end = td->td_ru.ru_inblock;
7921c4bcd05SJeff Roberson 	oublock_end = td->td_ru.ru_oublock;
793fd3bf775SJohn Dyson 
7945652770dSJohn Baldwin 	job->inputcharge = inblock_end - inblock_st;
7955652770dSJohn Baldwin 	job->outputcharge = oublock_end - oublock_st;
7962244ea07SJohn Dyson 
797bfbbc4aaSJason Evans 	if ((error) && (auio.uio_resid != cnt)) {
7982244ea07SJohn Dyson 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
7992244ea07SJohn Dyson 			error = 0;
80019eb87d2SJohn Baldwin 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
8015652770dSJohn Baldwin 			PROC_LOCK(job->userproc);
8025652770dSJohn Baldwin 			kern_psignal(job->userproc, SIGPIPE);
8035652770dSJohn Baldwin 			PROC_UNLOCK(job->userproc);
80419eb87d2SJohn Baldwin 		}
8052244ea07SJohn Dyson 	}
8062244ea07SJohn Dyson 
8072244ea07SJohn Dyson 	cnt -= auio.uio_resid;
808f8f750c5SRobert Watson 	td->td_ucred = td_savedcred;
809f3215338SJohn Baldwin 	aio_complete(job, cnt, error);
8102244ea07SJohn Dyson }
8112244ea07SJohn Dyson 
81269cd28daSDoug Ambrisko static void
8135652770dSJohn Baldwin aio_process_sync(struct kaiocb *job)
814f95c13dbSGleb Smirnoff {
815f95c13dbSGleb Smirnoff 	struct thread *td = curthread;
816f95c13dbSGleb Smirnoff 	struct ucred *td_savedcred = td->td_ucred;
8175652770dSJohn Baldwin 	struct file *fp = job->fd_file;
818f95c13dbSGleb Smirnoff 	int error = 0;
819f95c13dbSGleb Smirnoff 
8205652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
8215652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
822f95c13dbSGleb Smirnoff 
8235652770dSJohn Baldwin 	td->td_ucred = job->cred;
824f95c13dbSGleb Smirnoff 	if (fp->f_vnode != NULL)
825f95c13dbSGleb Smirnoff 		error = aio_fsync_vnode(td, fp->f_vnode);
826f95c13dbSGleb Smirnoff 	td->td_ucred = td_savedcred;
827f3215338SJohn Baldwin 	aio_complete(job, 0, error);
828f95c13dbSGleb Smirnoff }
829f95c13dbSGleb Smirnoff 
830f95c13dbSGleb Smirnoff static void
8315652770dSJohn Baldwin aio_process_mlock(struct kaiocb *job)
8326160e12cSGleb Smirnoff {
8335652770dSJohn Baldwin 	struct aiocb *cb = &job->uaiocb;
8346160e12cSGleb Smirnoff 	int error;
8356160e12cSGleb Smirnoff 
8365652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
8375652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
8386160e12cSGleb Smirnoff 
839f3215338SJohn Baldwin 	aio_switch_vmspace(job);
8405652770dSJohn Baldwin 	error = vm_mlock(job->userproc, job->cred,
8416160e12cSGleb Smirnoff 	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
842f3215338SJohn Baldwin 	aio_complete(job, 0, error);
8436160e12cSGleb Smirnoff }
8446160e12cSGleb Smirnoff 
8456160e12cSGleb Smirnoff static void
846f3215338SJohn Baldwin aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
8471ce91824SDavid Xu {
8481ce91824SDavid Xu 	struct aioliojob *lj;
84969cd28daSDoug Ambrisko 	struct kaioinfo *ki;
8505652770dSJohn Baldwin 	struct kaiocb *sjob, *sjobn;
8511ce91824SDavid Xu 	int lj_done;
852f3215338SJohn Baldwin 	bool schedule_fsync;
85369cd28daSDoug Ambrisko 
85469cd28daSDoug Ambrisko 	ki = userp->p_aioinfo;
855759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
8565652770dSJohn Baldwin 	lj = job->lio;
85769cd28daSDoug Ambrisko 	lj_done = 0;
85869cd28daSDoug Ambrisko 	if (lj) {
8591ce91824SDavid Xu 		lj->lioj_finished_count++;
8601ce91824SDavid Xu 		if (lj->lioj_count == lj->lioj_finished_count)
86169cd28daSDoug Ambrisko 			lj_done = 1;
86269cd28daSDoug Ambrisko 	}
8635652770dSJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
864f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
86527b8220dSDavid Xu 
86627b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_RUNDOWN)
86727b8220dSDavid Xu 		goto notification_done;
86827b8220dSDavid Xu 
8695652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
8705652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
8715652770dSJohn Baldwin 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
8721ce91824SDavid Xu 
8735652770dSJohn Baldwin 	KNOTE_LOCKED(&job->klist, 1);
8741ce91824SDavid Xu 
87569cd28daSDoug Ambrisko 	if (lj_done) {
8761ce91824SDavid Xu 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
87769cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
8781ce91824SDavid Xu 			KNOTE_LOCKED(&lj->klist, 1);
87969cd28daSDoug Ambrisko 		}
8801ce91824SDavid Xu 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
88169cd28daSDoug Ambrisko 		    == LIOJ_SIGNAL
8824c0fb2cfSDavid Xu 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
8834c0fb2cfSDavid Xu 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
8844c0fb2cfSDavid Xu 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
88569cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
88669cd28daSDoug Ambrisko 		}
88769cd28daSDoug Ambrisko 	}
88827b8220dSDavid Xu 
88927b8220dSDavid Xu notification_done:
8905652770dSJohn Baldwin 	if (job->jobflags & KAIOCB_CHECKSYNC) {
891f3215338SJohn Baldwin 		schedule_fsync = false;
8925652770dSJohn Baldwin 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
8935652770dSJohn Baldwin 			if (job->fd_file == sjob->fd_file &&
8945652770dSJohn Baldwin 			    job->seqno < sjob->seqno) {
8955652770dSJohn Baldwin 				if (--sjob->pending == 0) {
8965652770dSJohn Baldwin 					TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
8970dd6c035SJohn Baldwin 					    list);
898f3215338SJohn Baldwin 					if (!aio_clear_cancel_function(sjob))
899f3215338SJohn Baldwin 						continue;
900f3215338SJohn Baldwin 					TAILQ_INSERT_TAIL(&ki->kaio_syncready,
901f3215338SJohn Baldwin 					    sjob, list);
902f3215338SJohn Baldwin 					schedule_fsync = true;
90399eee864SDavid Xu 				}
90499eee864SDavid Xu 			}
90599eee864SDavid Xu 		}
906f3215338SJohn Baldwin 		if (schedule_fsync)
907f3215338SJohn Baldwin 			taskqueue_enqueue(taskqueue_aiod_kick,
908f3215338SJohn Baldwin 			    &ki->kaio_sync_task);
90999eee864SDavid Xu 	}
91027b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_WAKEUP) {
91169cd28daSDoug Ambrisko 		ki->kaio_flags &= ~KAIO_WAKEUP;
9121ce91824SDavid Xu 		wakeup(&userp->p_aioinfo);
91369cd28daSDoug Ambrisko 	}
91469cd28daSDoug Ambrisko }
91569cd28daSDoug Ambrisko 
9168a4dc40fSJohn Baldwin static void
917f3215338SJohn Baldwin aio_schedule_fsync(void *context, int pending)
918f3215338SJohn Baldwin {
919f3215338SJohn Baldwin 	struct kaioinfo *ki;
920f3215338SJohn Baldwin 	struct kaiocb *job;
921f3215338SJohn Baldwin 
922f3215338SJohn Baldwin 	ki = context;
923f3215338SJohn Baldwin 	AIO_LOCK(ki);
924f3215338SJohn Baldwin 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
925f3215338SJohn Baldwin 		job = TAILQ_FIRST(&ki->kaio_syncready);
926f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
927f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
928f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
929f3215338SJohn Baldwin 		AIO_LOCK(ki);
930f3215338SJohn Baldwin 	}
931f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
932f3215338SJohn Baldwin }
933f3215338SJohn Baldwin 
934f3215338SJohn Baldwin bool
935f3215338SJohn Baldwin aio_cancel_cleared(struct kaiocb *job)
936f3215338SJohn Baldwin {
937f3215338SJohn Baldwin 	struct kaioinfo *ki;
938f3215338SJohn Baldwin 
939f3215338SJohn Baldwin 	/*
940f3215338SJohn Baldwin 	 * The caller should hold the same queue lock that was held when
941f3215338SJohn Baldwin 	 * aio_clear_cancel_function() was called and set this flag,
942f3215338SJohn Baldwin 	 * ensuring that this check sees an up-to-date value.  However,
943f3215338SJohn Baldwin 	 * there is no way to assert that.
944f3215338SJohn Baldwin 	 */
945f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
946f3215338SJohn Baldwin 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
947f3215338SJohn Baldwin }
948f3215338SJohn Baldwin 
949f3215338SJohn Baldwin bool
950f3215338SJohn Baldwin aio_clear_cancel_function(struct kaiocb *job)
951f3215338SJohn Baldwin {
952f3215338SJohn Baldwin 	struct kaioinfo *ki;
953f3215338SJohn Baldwin 
954f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
955f3215338SJohn Baldwin 	AIO_LOCK(ki);
956f3215338SJohn Baldwin 	MPASS(job->cancel_fn != NULL);
957f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLING) {
958f3215338SJohn Baldwin 		job->jobflags |= KAIOCB_CLEARED;
959f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
960f3215338SJohn Baldwin 		return (false);
961f3215338SJohn Baldwin 	}
962f3215338SJohn Baldwin 	job->cancel_fn = NULL;
963f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
964f3215338SJohn Baldwin 	return (true);
965f3215338SJohn Baldwin }
966f3215338SJohn Baldwin 
967f3215338SJohn Baldwin bool
968f3215338SJohn Baldwin aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
969f3215338SJohn Baldwin {
970f3215338SJohn Baldwin 	struct kaioinfo *ki;
971f3215338SJohn Baldwin 
972f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
973f3215338SJohn Baldwin 	AIO_LOCK(ki);
974f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLED) {
975f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
976f3215338SJohn Baldwin 		return (false);
977f3215338SJohn Baldwin 	}
978f3215338SJohn Baldwin 	job->cancel_fn = func;
979f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
980f3215338SJohn Baldwin 	return (true);
981f3215338SJohn Baldwin }
982f3215338SJohn Baldwin 
983f3215338SJohn Baldwin void
984f3215338SJohn Baldwin aio_complete(struct kaiocb *job, long status, int error)
985f3215338SJohn Baldwin {
986f3215338SJohn Baldwin 	struct kaioinfo *ki;
987f3215338SJohn Baldwin 	struct proc *userp;
988f3215338SJohn Baldwin 
989f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.error = error;
990f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.status = status;
991f3215338SJohn Baldwin 
992f3215338SJohn Baldwin 	userp = job->userproc;
993f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
994f3215338SJohn Baldwin 
995f3215338SJohn Baldwin 	AIO_LOCK(ki);
996f3215338SJohn Baldwin 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
997f3215338SJohn Baldwin 	    ("duplicate aio_complete"));
998f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_FINISHED;
999f3215338SJohn Baldwin 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1000f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1001f3215338SJohn Baldwin 		aio_bio_done_notify(userp, job);
1002f3215338SJohn Baldwin 	}
1003f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1004f3215338SJohn Baldwin }
1005f3215338SJohn Baldwin 
1006f3215338SJohn Baldwin void
1007f3215338SJohn Baldwin aio_cancel(struct kaiocb *job)
1008f3215338SJohn Baldwin {
1009f3215338SJohn Baldwin 
1010f3215338SJohn Baldwin 	aio_complete(job, -1, ECANCELED);
1011f3215338SJohn Baldwin }
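/*
 * Illustrative sketch (hypothetical backend, not part of this file): a
 * file type with its own asynchronous completion path would typically
 * drive a request through the helpers above roughly as follows:
 *
 *	static void foo_cancel(struct kaiocb *job)
 *	{
 *		// stop or dequeue the in-flight request, then:
 *		aio_cancel(job);
 *	}
 *
 *	if (!aio_set_cancel_function(job, foo_cancel)) {
 *		// the request was cancelled before it could be started
 *		aio_cancel(job);
 *		return (0);
 *	}
 *	// ...start the I/O; when it finishes:
 *	aio_complete(job, nbytes, error);
 */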
1012f3215338SJohn Baldwin 
1013f3215338SJohn Baldwin void
10145652770dSJohn Baldwin aio_switch_vmspace(struct kaiocb *job)
10158a4dc40fSJohn Baldwin {
10168a4dc40fSJohn Baldwin 
10175652770dSJohn Baldwin 	vmspace_switch_aio(job->userproc->p_vmspace);
10188a4dc40fSJohn Baldwin }
10198a4dc40fSJohn Baldwin 
10202244ea07SJohn Dyson /*
1021f95c13dbSGleb Smirnoff  * The AIO daemon.  Most of the actual work is done in aio_process_*,
102284af4da6SJohn Dyson  * but the setup (and address space management) is done in this routine.
10232244ea07SJohn Dyson  */
10242244ea07SJohn Dyson static void
10251ce91824SDavid Xu aio_daemon(void *_id)
10262244ea07SJohn Dyson {
10275652770dSJohn Baldwin 	struct kaiocb *job;
102839314b7dSJohn Baldwin 	struct aioproc *aiop;
1029bfbbc4aaSJason Evans 	struct kaioinfo *ki;
1030f3215338SJohn Baldwin 	struct proc *p;
10318a4dc40fSJohn Baldwin 	struct vmspace *myvm;
1032b40ce416SJulian Elischer 	struct thread *td = curthread;
10331ce91824SDavid Xu 	int id = (intptr_t)_id;
10342244ea07SJohn Dyson 
10352244ea07SJohn Dyson 	/*
10368a4dc40fSJohn Baldwin 	 * Grab an extra reference on the daemon's vmspace so that it
10378a4dc40fSJohn Baldwin 	 * doesn't get freed by jobs that switch to a different
10388a4dc40fSJohn Baldwin 	 * vmspace.
10392244ea07SJohn Dyson 	 */
10408a4dc40fSJohn Baldwin 	p = td->td_proc;
10418a4dc40fSJohn Baldwin 	myvm = vmspace_acquire_ref(p);
1042fd3bf775SJohn Dyson 
10438a4dc40fSJohn Baldwin 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1044fd3bf775SJohn Dyson 
1045fd3bf775SJohn Dyson 	/*
1046bfbbc4aaSJason Evans 	 * Allocate and ready the aio control info.  There is one aiop structure
1047bfbbc4aaSJason Evans 	 * per daemon.
1048fd3bf775SJohn Dyson 	 */
1049a163d034SWarner Losh 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
105039314b7dSJohn Baldwin 	aiop->aioproc = p;
105139314b7dSJohn Baldwin 	aiop->aioprocflags = 0;
1052bfbbc4aaSJason Evans 
1053fd3bf775SJohn Dyson 	/*
1054fd3bf775SJohn Dyson 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
1055b40ce416SJulian Elischer 	 * and creating too many daemons.)
1056fd3bf775SJohn Dyson 	 */
10571ce91824SDavid Xu 	sema_post(&aio_newproc_sem);
10582244ea07SJohn Dyson 
10591ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
1060bfbbc4aaSJason Evans 	for (;;) {
1061fd3bf775SJohn Dyson 		/*
1062fd3bf775SJohn Dyson 		 * Take daemon off of free queue
1063fd3bf775SJohn Dyson 		 */
106439314b7dSJohn Baldwin 		if (aiop->aioprocflags & AIOP_FREE) {
10652244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
106639314b7dSJohn Baldwin 			aiop->aioprocflags &= ~AIOP_FREE;
10672244ea07SJohn Dyson 		}
10682244ea07SJohn Dyson 
1069fd3bf775SJohn Dyson 		/*
1070bfbbc4aaSJason Evans 		 * Check for jobs.
1071fd3bf775SJohn Dyson 		 */
10725652770dSJohn Baldwin 		while ((job = aio_selectjob(aiop)) != NULL) {
10731ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
10742244ea07SJohn Dyson 
1075f3215338SJohn Baldwin 			ki = job->userproc->p_aioinfo;
1076f3215338SJohn Baldwin 			job->handle_fn(job);
107784af4da6SJohn Dyson 
10789b84335cSDavid Xu 			mtx_lock(&aio_job_mtx);
10799b84335cSDavid Xu 			/* Decrement the active job count. */
10809b84335cSDavid Xu 			ki->kaio_active_count--;
10812244ea07SJohn Dyson 		}
10822244ea07SJohn Dyson 
1083fd3bf775SJohn Dyson 		/*
1084bfbbc4aaSJason Evans 		 * Disconnect from user address space.
1085fd3bf775SJohn Dyson 		 */
10868a4dc40fSJohn Baldwin 		if (p->p_vmspace != myvm) {
10871ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
10888a4dc40fSJohn Baldwin 			vmspace_switch_aio(myvm);
10891ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
10901ce91824SDavid Xu 			 * We have to restart to avoid a race; we only sleep if
10911ce91824SDavid Xu 			 * We have to restart to avoid race, we only sleep if
10928a4dc40fSJohn Baldwin 			 * no job can be selected.
10931ce91824SDavid Xu 			 */
10941ce91824SDavid Xu 			continue;
1095fd3bf775SJohn Dyson 		}
1096fd3bf775SJohn Dyson 
10971ce91824SDavid Xu 		mtx_assert(&aio_job_mtx, MA_OWNED);
10981ce91824SDavid Xu 
1099fd3bf775SJohn Dyson 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
110039314b7dSJohn Baldwin 		aiop->aioprocflags |= AIOP_FREE;
1101fd3bf775SJohn Dyson 
1102fd3bf775SJohn Dyson 		/*
1103bfbbc4aaSJason Evans 		 * If daemon is inactive for a long time, allow it to exit,
1104bfbbc4aaSJason Evans 		 * thereby freeing resources.
1105fd3bf775SJohn Dyson 		 */
110639314b7dSJohn Baldwin 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
11078a4dc40fSJohn Baldwin 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
110839314b7dSJohn Baldwin 		    (aiop->aioprocflags & AIOP_FREE) &&
11098a4dc40fSJohn Baldwin 		    num_aio_procs > target_aio_procs)
11108a4dc40fSJohn Baldwin 			break;
11118a4dc40fSJohn Baldwin 	}
1112fd3bf775SJohn Dyson 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
111384af4da6SJohn Dyson 	num_aio_procs--;
11141ce91824SDavid Xu 	mtx_unlock(&aio_job_mtx);
11151ce91824SDavid Xu 	uma_zfree(aiop_zone, aiop);
11161ce91824SDavid Xu 	free_unr(aiod_unr, id);
11178a4dc40fSJohn Baldwin 	vmspace_free(myvm);
11188a4dc40fSJohn Baldwin 
11198a4dc40fSJohn Baldwin 	KASSERT(p->p_vmspace == myvm,
11208a4dc40fSJohn Baldwin 	    ("AIOD: bad vmspace for exiting daemon"));
11218a4dc40fSJohn Baldwin 	KASSERT(myvm->vm_refcnt > 1,
11228a4dc40fSJohn Baldwin 	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
11233745c395SJulian Elischer 	kproc_exit(0);
1124fd3bf775SJohn Dyson }
11252244ea07SJohn Dyson 
11262244ea07SJohn Dyson /*
1127bfbbc4aaSJason Evans  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1128bfbbc4aaSJason Evans  * AIO daemon modifies its environment itself.
11292244ea07SJohn Dyson  */
11302244ea07SJohn Dyson static int
11311ce91824SDavid Xu aio_newproc(int *start)
1132fd3bf775SJohn Dyson {
11332244ea07SJohn Dyson 	int error;
1134c9a970a7SAlan Cox 	struct proc *p;
11351ce91824SDavid Xu 	int id;
11362244ea07SJohn Dyson 
11371ce91824SDavid Xu 	id = alloc_unr(aiod_unr);
11383745c395SJulian Elischer 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
11391ce91824SDavid Xu 		RFNOWAIT, 0, "aiod%d", id);
11401ce91824SDavid Xu 	if (error == 0) {
1141fd3bf775SJohn Dyson 		/*
11421ce91824SDavid Xu 		 * Wait until daemon is started.
1143fd3bf775SJohn Dyson 		 */
11441ce91824SDavid Xu 		sema_wait(&aio_newproc_sem);
11451ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
114684af4da6SJohn Dyson 		num_aio_procs++;
11471ce91824SDavid Xu 		if (start != NULL)
11487f34b521SDavid Xu 			(*start)--;
11491ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
11501ce91824SDavid Xu 	} else {
11511ce91824SDavid Xu 		free_unr(aiod_unr, id);
11521ce91824SDavid Xu 	}
1153ac41f2efSAlfred Perlstein 	return (error);
11542244ea07SJohn Dyson }
11552244ea07SJohn Dyson 
11562244ea07SJohn Dyson /*
115788ed460eSAlan Cox  * Try the high-performance physio method for eligible VCHR devices.
115888ed460eSAlan Cox  * This method doesn't use an aio helper thread, and thus has very
115988ed460eSAlan Cox  * low overhead.
116088ed460eSAlan Cox  *
1161a9bf5e37SDavid Xu  * Assumes that the caller, aio_aqueue(), has incremented the file
116288ed460eSAlan Cox  * structure's reference count, preventing its deallocation for the
116388ed460eSAlan Cox  * duration of this call.
1164fd3bf775SJohn Dyson  */
116588ed460eSAlan Cox static int
11665652770dSJohn Baldwin aio_qphysio(struct proc *p, struct kaiocb *job)
1167fd3bf775SJohn Dyson {
1168fd3bf775SJohn Dyson 	struct aiocb *cb;
1169fd3bf775SJohn Dyson 	struct file *fp;
1170f743d981SAlexander Motin 	struct bio *bp;
1171f743d981SAlexander Motin 	struct buf *pbuf;
1172fd3bf775SJohn Dyson 	struct vnode *vp;
1173f3215a60SKonstantin Belousov 	struct cdevsw *csw;
1174f3215a60SKonstantin Belousov 	struct cdev *dev;
1175fd3bf775SJohn Dyson 	struct kaioinfo *ki;
1176*4d805eacSJohn Baldwin 	int error, ref, poff;
1177f743d981SAlexander Motin 	vm_prot_t prot;
1178fd3bf775SJohn Dyson 
11795652770dSJohn Baldwin 	cb = &job->uaiocb;
11805652770dSJohn Baldwin 	fp = job->fd_file;
1181fd3bf775SJohn Dyson 
11826160e12cSGleb Smirnoff 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1183008626c3SPoul-Henning Kamp 		return (-1);
1184fd3bf775SJohn Dyson 
11853b6d9652SPoul-Henning Kamp 	vp = fp->f_vnode;
1186f743d981SAlexander Motin 	if (vp->v_type != VCHR)
1187f582ac06SBrian Feldman 		return (-1);
1188ad8de0f2SDavid Xu 	if (vp->v_bufobj.bo_bsize == 0)
1189ad8de0f2SDavid Xu 		return (-1);
11905d9d81e7SPoul-Henning Kamp 	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1191008626c3SPoul-Henning Kamp 		return (-1);
1192fd3bf775SJohn Dyson 
1193f3215a60SKonstantin Belousov 	ref = 0;
1194f3215a60SKonstantin Belousov 	csw = devvn_refthread(vp, &dev, &ref);
1195f3215a60SKonstantin Belousov 	if (csw == NULL)
1196f3215a60SKonstantin Belousov 		return (ENXIO);
1197f743d981SAlexander Motin 
1198f743d981SAlexander Motin 	if ((csw->d_flags & D_DISK) == 0) {
1199f743d981SAlexander Motin 		error = -1;
1200f743d981SAlexander Motin 		goto unref;
1201f743d981SAlexander Motin 	}
1202f3215a60SKonstantin Belousov 	if (cb->aio_nbytes > dev->si_iosize_max) {
1203f3215a60SKonstantin Belousov 		error = -1;
1204f3215a60SKonstantin Belousov 		goto unref;
1205f3215a60SKonstantin Belousov 	}
1206f3215a60SKonstantin Belousov 
1207f743d981SAlexander Motin 	ki = p->p_aioinfo;
1208f743d981SAlexander Motin 	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
1209*4d805eacSJohn Baldwin 	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
1210f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS) {
1211f743d981SAlexander Motin 			error = -1;
1212f743d981SAlexander Motin 			goto unref;
1213f743d981SAlexander Motin 		}
1214*4d805eacSJohn Baldwin 
1215*4d805eacSJohn Baldwin 		pbuf = NULL;
1216f743d981SAlexander Motin 	} else {
1217f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS - poff) {
1218f743d981SAlexander Motin 			error = -1;
1219f743d981SAlexander Motin 			goto unref;
1220f743d981SAlexander Motin 		}
1221f743d981SAlexander Motin 		if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
1222f743d981SAlexander Motin 			error = -1;
1223f743d981SAlexander Motin 			goto unref;
1224f743d981SAlexander Motin 		}
1225*4d805eacSJohn Baldwin 
12265652770dSJohn Baldwin 		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
1227f743d981SAlexander Motin 		BUF_KERNPROC(pbuf);
1228759ccccaSDavid Xu 		AIO_LOCK(ki);
12291ce91824SDavid Xu 		ki->kaio_buffer_count++;
1230759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1231*4d805eacSJohn Baldwin 	}
1232*4d805eacSJohn Baldwin 	job->bp = bp = g_alloc_bio();
12331ce91824SDavid Xu 
1234f743d981SAlexander Motin 	bp->bio_length = cb->aio_nbytes;
1235f743d981SAlexander Motin 	bp->bio_bcount = cb->aio_nbytes;
1236f743d981SAlexander Motin 	bp->bio_done = aio_physwakeup;
1237f743d981SAlexander Motin 	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
1238f743d981SAlexander Motin 	bp->bio_offset = cb->aio_offset;
1239f743d981SAlexander Motin 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1240f743d981SAlexander Motin 	bp->bio_dev = dev;
12415652770dSJohn Baldwin 	bp->bio_caller1 = (void *)job;
1242f743d981SAlexander Motin 
1243f743d981SAlexander Motin 	prot = VM_PROT_READ;
1244f743d981SAlexander Motin 	if (cb->aio_lio_opcode == LIO_READ)
1245f743d981SAlexander Motin 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
1246*4d805eacSJohn Baldwin 	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
12475652770dSJohn Baldwin 	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
1248*4d805eacSJohn Baldwin 	    nitems(job->pages));
1249*4d805eacSJohn Baldwin 	if (job->npages < 0) {
1250f743d981SAlexander Motin 		error = EFAULT;
1251f743d981SAlexander Motin 		goto doerror;
1252f743d981SAlexander Motin 	}
1253*4d805eacSJohn Baldwin 	if (pbuf != NULL) {
1254f743d981SAlexander Motin 		pmap_qenter((vm_offset_t)pbuf->b_data,
12555652770dSJohn Baldwin 		    job->pages, job->npages);
1256f743d981SAlexander Motin 		bp->bio_data = pbuf->b_data + poff;
1257*4d805eacSJohn Baldwin 		atomic_add_int(&num_buf_aio, 1);
1258f743d981SAlexander Motin 	} else {
12595652770dSJohn Baldwin 		bp->bio_ma = job->pages;
12605652770dSJohn Baldwin 		bp->bio_ma_n = job->npages;
1261f743d981SAlexander Motin 		bp->bio_ma_offset = poff;
1262f743d981SAlexander Motin 		bp->bio_data = unmapped_buf;
1263f743d981SAlexander Motin 		bp->bio_flags |= BIO_UNMAPPED;
1264f743d981SAlexander Motin 	}
1265f743d981SAlexander Motin 
1266bfbbc4aaSJason Evans 	/* Perform transfer. */
1267f743d981SAlexander Motin 	csw->d_strategy(bp);
1268f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1269ac41f2efSAlfred Perlstein 	return (0);
1270fd3bf775SJohn Dyson 
1271fd3bf775SJohn Dyson doerror:
1272*4d805eacSJohn Baldwin 	if (pbuf != NULL) {
1273759ccccaSDavid Xu 		AIO_LOCK(ki);
1274fd3bf775SJohn Dyson 		ki->kaio_buffer_count--;
1275759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1276f743d981SAlexander Motin 		relpbuf(pbuf, NULL);
12775652770dSJohn Baldwin 		job->pbuf = NULL;
1278f743d981SAlexander Motin 	}
1279f743d981SAlexander Motin 	g_destroy_bio(bp);
12805652770dSJohn Baldwin 	job->bp = NULL;
1281f3215a60SKonstantin Belousov unref:
1282f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1283fd3bf775SJohn Dyson 	return (error);
1284fd3bf775SJohn Dyson }
1285fd3bf775SJohn Dyson 
1286399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
12873858a1f4SJohn Baldwin static int
12883858a1f4SJohn Baldwin convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
12893858a1f4SJohn Baldwin {
12903858a1f4SJohn Baldwin 
12913858a1f4SJohn Baldwin 	/*
12923858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
12933858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
12943858a1f4SJohn Baldwin 	 */
12953858a1f4SJohn Baldwin 	nsig->sigev_notify = osig->sigev_notify;
12963858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
12973858a1f4SJohn Baldwin 	case SIGEV_NONE:
12983858a1f4SJohn Baldwin 		break;
12993858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
13003858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
13013858a1f4SJohn Baldwin 		break;
13023858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
13033858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
13043858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
13053858a1f4SJohn Baldwin 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
13063858a1f4SJohn Baldwin 		break;
13073858a1f4SJohn Baldwin 	default:
13083858a1f4SJohn Baldwin 		return (EINVAL);
13093858a1f4SJohn Baldwin 	}
13103858a1f4SJohn Baldwin 	return (0);
13113858a1f4SJohn Baldwin }
13123858a1f4SJohn Baldwin 
13133858a1f4SJohn Baldwin static int
13143858a1f4SJohn Baldwin aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
13153858a1f4SJohn Baldwin {
13163858a1f4SJohn Baldwin 	struct oaiocb *ojob;
13173858a1f4SJohn Baldwin 	int error;
13183858a1f4SJohn Baldwin 
13193858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
13203858a1f4SJohn Baldwin 	error = copyin(ujob, kjob, sizeof(struct oaiocb));
13213858a1f4SJohn Baldwin 	if (error)
13223858a1f4SJohn Baldwin 		return (error);
13233858a1f4SJohn Baldwin 	ojob = (struct oaiocb *)kjob;
13243858a1f4SJohn Baldwin 	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
13253858a1f4SJohn Baldwin }
1326399e8c17SJohn Baldwin #endif
13273858a1f4SJohn Baldwin 
13283858a1f4SJohn Baldwin static int
13293858a1f4SJohn Baldwin aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
13303858a1f4SJohn Baldwin {
13313858a1f4SJohn Baldwin 
13323858a1f4SJohn Baldwin 	return (copyin(ujob, kjob, sizeof(struct aiocb)));
13333858a1f4SJohn Baldwin }
13343858a1f4SJohn Baldwin 
13353858a1f4SJohn Baldwin static long
13363858a1f4SJohn Baldwin aiocb_fetch_status(struct aiocb *ujob)
13373858a1f4SJohn Baldwin {
13383858a1f4SJohn Baldwin 
13393858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.status));
13403858a1f4SJohn Baldwin }
13413858a1f4SJohn Baldwin 
13423858a1f4SJohn Baldwin static long
13433858a1f4SJohn Baldwin aiocb_fetch_error(struct aiocb *ujob)
13443858a1f4SJohn Baldwin {
13453858a1f4SJohn Baldwin 
13463858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.error));
13473858a1f4SJohn Baldwin }
13483858a1f4SJohn Baldwin 
13493858a1f4SJohn Baldwin static int
13503858a1f4SJohn Baldwin aiocb_store_status(struct aiocb *ujob, long status)
13513858a1f4SJohn Baldwin {
13523858a1f4SJohn Baldwin 
13533858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.status, status));
13543858a1f4SJohn Baldwin }
13553858a1f4SJohn Baldwin 
13563858a1f4SJohn Baldwin static int
13573858a1f4SJohn Baldwin aiocb_store_error(struct aiocb *ujob, long error)
13583858a1f4SJohn Baldwin {
13593858a1f4SJohn Baldwin 
13603858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.error, error));
13613858a1f4SJohn Baldwin }
13623858a1f4SJohn Baldwin 
13633858a1f4SJohn Baldwin static int
13643858a1f4SJohn Baldwin aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
13653858a1f4SJohn Baldwin {
13663858a1f4SJohn Baldwin 
13673858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
13683858a1f4SJohn Baldwin }
13693858a1f4SJohn Baldwin 
13703858a1f4SJohn Baldwin static int
13713858a1f4SJohn Baldwin aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
13723858a1f4SJohn Baldwin {
13733858a1f4SJohn Baldwin 
13743858a1f4SJohn Baldwin 	return (suword(ujobp, (long)ujob));
13753858a1f4SJohn Baldwin }
13763858a1f4SJohn Baldwin 
13773858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops = {
13783858a1f4SJohn Baldwin 	.copyin = aiocb_copyin,
13793858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
13803858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
13813858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
13823858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
13833858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
13843858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
13853858a1f4SJohn Baldwin };
13863858a1f4SJohn Baldwin 
1387399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
13883858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops_osigevent = {
13893858a1f4SJohn Baldwin 	.copyin = aiocb_copyin_old_sigevent,
13903858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
13913858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
13923858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
13933858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
13943858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
13953858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
13963858a1f4SJohn Baldwin };
1397399e8c17SJohn Baldwin #endif
13983858a1f4SJohn Baldwin 
1399bfbbc4aaSJason Evans /*
1400bfbbc4aaSJason Evans  * Queue a new AIO request.  The choice between the threaded and the direct
1401bfbbc4aaSJason Evans  * physio (VCHR) technique is made in this code.
14022244ea07SJohn Dyson  */
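/*
 * An illustrative userland sketch of the kind of request this routine
 * queues; it is not part of this file, error handling is trimmed, and
 * "fd" and "buf" are placeholders.  With SIGEV_NONE the completion is
 * harvested later via aio_error()/aio_return() or aio_waitcomplete().
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	cb.aio_sigevent.sigev_notify = SIGEV_NONE;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 */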
14036a1162d4SAlexander Leidinger int
14045652770dSJohn Baldwin aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
14053858a1f4SJohn Baldwin 	int type, struct aiocb_ops *ops)
1406fd3bf775SJohn Dyson {
1407b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
14087008be5bSPawel Jakub Dawidek 	cap_rights_t rights;
14092244ea07SJohn Dyson 	struct file *fp;
1410f3215338SJohn Baldwin 	struct kaiocb *job;
14112244ea07SJohn Dyson 	struct kaioinfo *ki;
1412c6fa9f78SAlan Cox 	struct kevent kev;
14131ce91824SDavid Xu 	int opcode;
14141ce91824SDavid Xu 	int error;
14154db71d27SJohn-Mark Gurney 	int fd, kqfd;
14161ce91824SDavid Xu 	int jid;
1417fde80935SDavid Xu 	u_short evflags;
14182244ea07SJohn Dyson 
1419a9bf5e37SDavid Xu 	if (p->p_aioinfo == NULL)
1420a9bf5e37SDavid Xu 		aio_init_aioinfo(p);
1421a9bf5e37SDavid Xu 
14221ce91824SDavid Xu 	ki = p->p_aioinfo;
14231ce91824SDavid Xu 
14245652770dSJohn Baldwin 	ops->store_status(ujob, -1);
14255652770dSJohn Baldwin 	ops->store_error(ujob, 0);
14265652770dSJohn Baldwin 	ops->store_kernelinfo(ujob, -1);
1427a9bf5e37SDavid Xu 
1428a9bf5e37SDavid Xu 	if (num_queue_count >= max_queue_count ||
1429a9bf5e37SDavid Xu 	    ki->kaio_count >= ki->kaio_qallowed_count) {
14305652770dSJohn Baldwin 		ops->store_error(ujob, EAGAIN);
1431a9bf5e37SDavid Xu 		return (EAGAIN);
1432a9bf5e37SDavid Xu 	}
1433a9bf5e37SDavid Xu 
14345652770dSJohn Baldwin 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
14355652770dSJohn Baldwin 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1436fd3bf775SJohn Dyson 
14375652770dSJohn Baldwin 	error = ops->copyin(ujob, &job->uaiocb);
14382244ea07SJohn Dyson 	if (error) {
14395652770dSJohn Baldwin 		ops->store_error(ujob, error);
14405652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1441ac41f2efSAlfred Perlstein 		return (error);
14422244ea07SJohn Dyson 	}
144368d71118SDavid Xu 
1444bb430bc7SJohn Baldwin 	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
14455652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1446434ea137SGleb Smirnoff 		return (EINVAL);
1447434ea137SGleb Smirnoff 	}
1448434ea137SGleb Smirnoff 
14495652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
14505652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
14515652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
14525652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
14535652770dSJohn Baldwin 		ops->store_error(ujob, EINVAL);
14545652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
145568d71118SDavid Xu 		return (EINVAL);
145668d71118SDavid Xu 	}
145768d71118SDavid Xu 
14585652770dSJohn Baldwin 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
14595652770dSJohn Baldwin 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
14605652770dSJohn Baldwin 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
14615652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1462ac41f2efSAlfred Perlstein 		return (EINVAL);
14632f3cf918SAlfred Perlstein 	}
14642244ea07SJohn Dyson 
14655652770dSJohn Baldwin 	ksiginfo_init(&job->ksi);
14664c0fb2cfSDavid Xu 
1467bfbbc4aaSJason Evans 	/* Save userspace address of the job info. */
14685652770dSJohn Baldwin 	job->ujob = ujob;
146911783b14SJohn Dyson 
1470bfbbc4aaSJason Evans 	/* Get the opcode. */
1471bfbbc4aaSJason Evans 	if (type != LIO_NOP)
14725652770dSJohn Baldwin 		job->uaiocb.aio_lio_opcode = type;
14735652770dSJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
14742244ea07SJohn Dyson 
1475a9d2f8d8SRobert Watson 	/*
1476a9d2f8d8SRobert Watson 	 * Validate the opcode and fetch the file object for the specified
1477a9d2f8d8SRobert Watson 	 * file descriptor.
1478a9d2f8d8SRobert Watson 	 *
1479a9d2f8d8SRobert Watson 	 * XXXRW: Moved the opcode validation up here so that we don't
1480a9d2f8d8SRobert Watson 	 * retrieve a file descriptor without knowing what the capability
1481a9d2f8d8SRobert Watson 	 * should be.
1482a9d2f8d8SRobert Watson 	 */
14835652770dSJohn Baldwin 	fd = job->uaiocb.aio_fildes;
14842a522eb9SJohn Baldwin 	switch (opcode) {
14852a522eb9SJohn Baldwin 	case LIO_WRITE:
14867008be5bSPawel Jakub Dawidek 		error = fget_write(td, fd,
14877008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PWRITE), &fp);
14882a522eb9SJohn Baldwin 		break;
14892a522eb9SJohn Baldwin 	case LIO_READ:
14907008be5bSPawel Jakub Dawidek 		error = fget_read(td, fd,
14917008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PREAD), &fp);
1492a9d2f8d8SRobert Watson 		break;
1493a9d2f8d8SRobert Watson 	case LIO_SYNC:
14947008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp);
1495a9d2f8d8SRobert Watson 		break;
14966160e12cSGleb Smirnoff 	case LIO_MLOCK:
14976160e12cSGleb Smirnoff 		fp = NULL;
14986160e12cSGleb Smirnoff 		break;
1499a9d2f8d8SRobert Watson 	case LIO_NOP:
15007008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights), &fp);
15012a522eb9SJohn Baldwin 		break;
15022a522eb9SJohn Baldwin 	default:
1503a9d2f8d8SRobert Watson 		error = EINVAL;
15042a522eb9SJohn Baldwin 	}
15052a522eb9SJohn Baldwin 	if (error) {
15065652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
15075652770dSJohn Baldwin 		ops->store_error(ujob, error);
1508af56abaaSJohn Baldwin 		return (error);
15092244ea07SJohn Dyson 	}
151099eee864SDavid Xu 
151199eee864SDavid Xu 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
151299eee864SDavid Xu 		error = EINVAL;
151399eee864SDavid Xu 		goto aqueue_fail;
151499eee864SDavid Xu 	}
15152244ea07SJohn Dyson 
15165652770dSJohn Baldwin 	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
1517ae124fc4SAlan Cox 		error = EINVAL;
1518ae124fc4SAlan Cox 		goto aqueue_fail;
15192244ea07SJohn Dyson 	}
15201ce91824SDavid Xu 
15215652770dSJohn Baldwin 	job->fd_file = fp;
15221ce91824SDavid Xu 
152399eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
152499eee864SDavid Xu 	jid = jobrefid++;
15255652770dSJohn Baldwin 	job->seqno = jobseqno++;
152699eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
15275652770dSJohn Baldwin 	error = ops->store_kernelinfo(ujob, jid);
15281ce91824SDavid Xu 	if (error) {
15291ce91824SDavid Xu 		error = EINVAL;
15301ce91824SDavid Xu 		goto aqueue_fail;
15311ce91824SDavid Xu 	}
15325652770dSJohn Baldwin 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
15332244ea07SJohn Dyson 
15342244ea07SJohn Dyson 	if (opcode == LIO_NOP) {
1535a5c0b1c0SAlan Cox 		fdrop(fp, td);
15365652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1537ac41f2efSAlfred Perlstein 		return (0);
15382244ea07SJohn Dyson 	}
15392244ea07SJohn Dyson 
15405652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1541cb679c38SJonathan Lemon 		goto no_kqueue;
15425652770dSJohn Baldwin 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1543fde80935SDavid Xu 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1544fde80935SDavid Xu 		error = EINVAL;
1545fde80935SDavid Xu 		goto aqueue_fail;
1546fde80935SDavid Xu 	}
15475652770dSJohn Baldwin 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
15485652770dSJohn Baldwin 	kev.ident = (uintptr_t)job->ujob;
1549cb679c38SJonathan Lemon 	kev.filter = EVFILT_AIO;
1550fde80935SDavid Xu 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
15515652770dSJohn Baldwin 	kev.data = (intptr_t)job;
15525652770dSJohn Baldwin 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
15534db71d27SJohn-Mark Gurney 	error = kqfd_register(kqfd, &kev, td, 1);
1554f3215338SJohn Baldwin 	if (error)
1555f3215338SJohn Baldwin 		goto aqueue_fail;
1556f3215338SJohn Baldwin 
1557cb679c38SJonathan Lemon no_kqueue:
1558cb679c38SJonathan Lemon 
15595652770dSJohn Baldwin 	ops->store_error(ujob, EINPROGRESS);
15605652770dSJohn Baldwin 	job->uaiocb._aiocb_private.error = EINPROGRESS;
15615652770dSJohn Baldwin 	job->userproc = p;
15625652770dSJohn Baldwin 	job->cred = crhold(td->td_ucred);
1563f3215338SJohn Baldwin 	job->jobflags = KAIOCB_QUEUEING;
15645652770dSJohn Baldwin 	job->lio = lj;
15652244ea07SJohn Dyson 
1566f3215338SJohn Baldwin 	if (opcode == LIO_MLOCK) {
1567f3215338SJohn Baldwin 		aio_schedule(job, aio_process_mlock);
1568f3215338SJohn Baldwin 		error = 0;
1569f3215338SJohn Baldwin 	} else if (fp->f_ops->fo_aio_queue == NULL)
1570f3215338SJohn Baldwin 		error = aio_queue_file(fp, job);
1571f3215338SJohn Baldwin 	else
1572f3215338SJohn Baldwin 		error = fo_aio_queue(fp, job);
1573f3215338SJohn Baldwin 	if (error)
1574f3215338SJohn Baldwin 		goto aqueue_fail;
1575f3215338SJohn Baldwin 
1576f3215338SJohn Baldwin 	AIO_LOCK(ki);
1577f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_QUEUEING;
1578f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1579f3215338SJohn Baldwin 	ki->kaio_count++;
1580f3215338SJohn Baldwin 	if (lj)
1581f3215338SJohn Baldwin 		lj->lioj_count++;
1582f3215338SJohn Baldwin 	atomic_add_int(&num_queue_count, 1);
1583f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
1584f3215338SJohn Baldwin 		/*
1585f3215338SJohn Baldwin 		 * The queue callback completed the request synchronously.
1586f3215338SJohn Baldwin 		 * The bulk of the completion is deferred in that case
1587f3215338SJohn Baldwin 		 * until this point.
1588f3215338SJohn Baldwin 		 */
1589f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
1590f3215338SJohn Baldwin 	} else
1591f3215338SJohn Baldwin 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1592f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1593f3215338SJohn Baldwin 	return (0);
1594f3215338SJohn Baldwin 
1595f3215338SJohn Baldwin aqueue_fail:
1596f3215338SJohn Baldwin 	knlist_delete(&job->klist, curthread, 0);
1597f3215338SJohn Baldwin 	if (fp)
1598f3215338SJohn Baldwin 		fdrop(fp, td);
1599f3215338SJohn Baldwin 	uma_zfree(aiocb_zone, job);
1600f3215338SJohn Baldwin 	ops->store_error(ujob, error);
1601f3215338SJohn Baldwin 	return (error);
1602f3215338SJohn Baldwin }
1603f3215338SJohn Baldwin 
1604f3215338SJohn Baldwin static void
1605f3215338SJohn Baldwin aio_cancel_daemon_job(struct kaiocb *job)
1606f3215338SJohn Baldwin {
1607f3215338SJohn Baldwin 
1608f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1609f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1610f3215338SJohn Baldwin 		TAILQ_REMOVE(&aio_jobs, job, list);
1611f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1612f3215338SJohn Baldwin 	aio_cancel(job);
1613f3215338SJohn Baldwin }
1614f3215338SJohn Baldwin 
1615f3215338SJohn Baldwin void
1616f3215338SJohn Baldwin aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1617f3215338SJohn Baldwin {
1618f3215338SJohn Baldwin 
1619f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1620f3215338SJohn Baldwin 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1621f3215338SJohn Baldwin 		mtx_unlock(&aio_job_mtx);
1622f3215338SJohn Baldwin 		aio_cancel(job);
1623f3215338SJohn Baldwin 		return;
1624f3215338SJohn Baldwin 	}
1625f3215338SJohn Baldwin 	job->handle_fn = func;
1626f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1627f3215338SJohn Baldwin 	aio_kick_nowait(job->userproc);
1628f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1629f3215338SJohn Baldwin }
1630f3215338SJohn Baldwin 
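/*
 * An illustrative sketch, not part of this file, of how a file type can
 * use aio_schedule() from its fo_aio_queue method: the request is handed
 * off to the AIO daemons and later finished with aio_complete().  The
 * names foo_aio_queue(), foo_aio_process() and foo_do_io() are
 * hypothetical; foo_do_io() stands for whatever routine performs the
 * transfer described by job->uaiocb.
 *
 *	static void
 *	foo_aio_process(struct kaiocb *job)
 *	{
 *		ssize_t done;
 *
 *		done = foo_do_io(job);
 *		aio_complete(job, done, 0);
 *	}
 *
 *	static int
 *	foo_aio_queue(struct file *fp, struct kaiocb *job)
 *	{
 *
 *		aio_schedule(job, foo_aio_process);
 *		return (0);
 *	}
 */
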
1631f3215338SJohn Baldwin static void
1632f3215338SJohn Baldwin aio_cancel_sync(struct kaiocb *job)
1633f3215338SJohn Baldwin {
1634f3215338SJohn Baldwin 	struct kaioinfo *ki;
1635f3215338SJohn Baldwin 
1636f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1637f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1638f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1639f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1640f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1641f3215338SJohn Baldwin 	aio_cancel(job);
1642f3215338SJohn Baldwin }
1643f3215338SJohn Baldwin 
1644f3215338SJohn Baldwin int
1645f3215338SJohn Baldwin aio_queue_file(struct file *fp, struct kaiocb *job)
1646f3215338SJohn Baldwin {
1647f3215338SJohn Baldwin 	struct aioliojob *lj;
1648f3215338SJohn Baldwin 	struct kaioinfo *ki;
1649f3215338SJohn Baldwin 	struct kaiocb *job2;
1650f3215338SJohn Baldwin 	int error, opcode;
1651f3215338SJohn Baldwin 
1652f3215338SJohn Baldwin 	lj = job->lio;
1653f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1654f3215338SJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
165599eee864SDavid Xu 	if (opcode == LIO_SYNC)
165699eee864SDavid Xu 		goto queueit;
165799eee864SDavid Xu 
1658f3215338SJohn Baldwin 	if ((error = aio_qphysio(job->userproc, job)) == 0)
1659279d7226SMatthew Dillon 		goto done;
16601ce91824SDavid Xu #if 0
1661f3215338SJohn Baldwin 	/*
1662f3215338SJohn Baldwin 	 * XXX: This means qphysio() failed with EFAULT.  The current
1663f3215338SJohn Baldwin 	 * behavior is to retry the operation via fo_read/fo_write.
1664f3215338SJohn Baldwin 	 * Wouldn't it be better to just complete the request with an
1665f3215338SJohn Baldwin 	 * error here?
1666f3215338SJohn Baldwin 	 */
1667f3215338SJohn Baldwin 	if (error > 0)
1668279d7226SMatthew Dillon 		goto done;
16691ce91824SDavid Xu #endif
167099eee864SDavid Xu queueit:
1671f3215338SJohn Baldwin 	if (!enable_aio_unsafe)
1672f3215338SJohn Baldwin 		return (EOPNOTSUPP);
167384af4da6SJohn Dyson 
167499eee864SDavid Xu 	if (opcode == LIO_SYNC) {
1675f3215338SJohn Baldwin 		AIO_LOCK(ki);
16765652770dSJohn Baldwin 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
16775652770dSJohn Baldwin 			if (job2->fd_file == job->fd_file &&
16785652770dSJohn Baldwin 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
16795652770dSJohn Baldwin 			    job2->seqno < job->seqno) {
16805652770dSJohn Baldwin 				job2->jobflags |= KAIOCB_CHECKSYNC;
16815652770dSJohn Baldwin 				job->pending++;
1682dbbccfe9SDavid Xu 			}
1683dbbccfe9SDavid Xu 		}
16845652770dSJohn Baldwin 		if (job->pending != 0) {
1685f3215338SJohn Baldwin 			if (!aio_set_cancel_function(job, aio_cancel_sync)) {
1686f3215338SJohn Baldwin 				AIO_UNLOCK(ki);
1687f3215338SJohn Baldwin 				aio_cancel(job);
1688f3215338SJohn Baldwin 				return (0);
1689f3215338SJohn Baldwin 			}
16905652770dSJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1691759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1692f3215338SJohn Baldwin 			return (0);
1693dbbccfe9SDavid Xu 		}
1694759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1695f3215338SJohn Baldwin 	}
1696f3215338SJohn Baldwin 
1697f3215338SJohn Baldwin 	switch (opcode) {
1698f3215338SJohn Baldwin 	case LIO_READ:
1699f3215338SJohn Baldwin 	case LIO_WRITE:
1700f3215338SJohn Baldwin 		aio_schedule(job, aio_process_rw);
17011ce91824SDavid Xu 		error = 0;
1702f3215338SJohn Baldwin 		break;
1703f3215338SJohn Baldwin 	case LIO_SYNC:
1704f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
1705f3215338SJohn Baldwin 		error = 0;
1706f3215338SJohn Baldwin 		break;
1707f3215338SJohn Baldwin 	default:
1708f3215338SJohn Baldwin 		error = EINVAL;
1709f3215338SJohn Baldwin 	}
171099eee864SDavid Xu done:
171199eee864SDavid Xu 	return (error);
171299eee864SDavid Xu }
171399eee864SDavid Xu 
171499eee864SDavid Xu static void
171599eee864SDavid Xu aio_kick_nowait(struct proc *userp)
171699eee864SDavid Xu {
171799eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
171839314b7dSJohn Baldwin 	struct aioproc *aiop;
171999eee864SDavid Xu 
172099eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
172199eee864SDavid Xu 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
172299eee864SDavid Xu 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
172339314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
172439314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17250dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
17260dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
17270dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1728c85650caSJohn Baldwin 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
172999eee864SDavid Xu 	}
173099eee864SDavid Xu }
173199eee864SDavid Xu 
1732dbbccfe9SDavid Xu static int
173399eee864SDavid Xu aio_kick(struct proc *userp)
173499eee864SDavid Xu {
173599eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
173639314b7dSJohn Baldwin 	struct aioproc *aiop;
1737dbbccfe9SDavid Xu 	int error, ret = 0;
173899eee864SDavid Xu 
173999eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
174099eee864SDavid Xu retryproc:
1741d254af07SMatthew Dillon 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
17422244ea07SJohn Dyson 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
174339314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
174439314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17450dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
17460dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
17470dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1748fd3bf775SJohn Dyson 		num_aio_resv_start++;
17491ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
17501ce91824SDavid Xu 		error = aio_newproc(&num_aio_resv_start);
17511ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
17521ce91824SDavid Xu 		if (error) {
175384af4da6SJohn Dyson 			num_aio_resv_start--;
17542244ea07SJohn Dyson 			goto retryproc;
1755fd3bf775SJohn Dyson 		}
1756dbbccfe9SDavid Xu 	} else {
1757dbbccfe9SDavid Xu 		ret = -1;
17581ce91824SDavid Xu 	}
1759dbbccfe9SDavid Xu 	return (ret);
176099eee864SDavid Xu }
17611ce91824SDavid Xu 
176299eee864SDavid Xu static void
176399eee864SDavid Xu aio_kick_helper(void *context, int pending)
176499eee864SDavid Xu {
176599eee864SDavid Xu 	struct proc *userp = context;
176699eee864SDavid Xu 
176799eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
1768dbbccfe9SDavid Xu 	while (--pending >= 0) {
1769dbbccfe9SDavid Xu 		if (aio_kick(userp))
1770dbbccfe9SDavid Xu 			break;
1771dbbccfe9SDavid Xu 	}
177299eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
17732244ea07SJohn Dyson }
17742244ea07SJohn Dyson 
1775fd3bf775SJohn Dyson /*
1776bfbbc4aaSJason Evans  * Support the aio_return system call; as a side effect, kernel resources are
1777bfbbc4aaSJason Evans  * released.
17782244ea07SJohn Dyson  */
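/*
 * An illustrative userland sketch, not part of this file: after
 * submitting "cb" (a struct aiocb), its result is typically harvested by
 * polling aio_error() until the request leaves EINPROGRESS and then
 * collecting the transfer count with aio_return().  aio_return() may be
 * called only once per completed request; it is what releases the
 * kernel's bookkeeping via the code below.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	ssize_t n;
 *
 *	while (aio_error(&cb) == EINPROGRESS)
 *		usleep(1000);
 *	if ((n = aio_return(&cb)) == -1)
 *		err(1, "aio_return");
 */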
17793858a1f4SJohn Baldwin static int
17805652770dSJohn Baldwin kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1781fd3bf775SJohn Dyson {
1782b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
17835652770dSJohn Baldwin 	struct kaiocb *job;
17842244ea07SJohn Dyson 	struct kaioinfo *ki;
1785bb430bc7SJohn Baldwin 	long status, error;
17862244ea07SJohn Dyson 
1787c0bf5caaSAlan Cox 	ki = p->p_aioinfo;
1788c0bf5caaSAlan Cox 	if (ki == NULL)
1789ac41f2efSAlfred Perlstein 		return (EINVAL);
1790759ccccaSDavid Xu 	AIO_LOCK(ki);
17915652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
17925652770dSJohn Baldwin 		if (job->ujob == ujob)
1793c0bf5caaSAlan Cox 			break;
1794c0bf5caaSAlan Cox 	}
17955652770dSJohn Baldwin 	if (job != NULL) {
1796f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
17975652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
17985652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
17991ce91824SDavid Xu 		td->td_retval[0] = status;
18005652770dSJohn Baldwin 		if (job->uaiocb.aio_lio_opcode == LIO_WRITE) {
18015652770dSJohn Baldwin 			td->td_ru.ru_oublock += job->outputcharge;
18025652770dSJohn Baldwin 			job->outputcharge = 0;
18035652770dSJohn Baldwin 		} else if (job->uaiocb.aio_lio_opcode == LIO_READ) {
18045652770dSJohn Baldwin 			td->td_ru.ru_inblock += job->inputcharge;
18055652770dSJohn Baldwin 			job->inputcharge = 0;
180669cd28daSDoug Ambrisko 		}
18075652770dSJohn Baldwin 		aio_free_entry(job);
1808759ccccaSDavid Xu 		AIO_UNLOCK(ki);
18095652770dSJohn Baldwin 		ops->store_error(ujob, error);
18105652770dSJohn Baldwin 		ops->store_status(ujob, status);
181155a122bfSDavid Xu 	} else {
18121ce91824SDavid Xu 		error = EINVAL;
1813759ccccaSDavid Xu 		AIO_UNLOCK(ki);
181455a122bfSDavid Xu 	}
18151ce91824SDavid Xu 	return (error);
18162244ea07SJohn Dyson }
18172244ea07SJohn Dyson 
18183858a1f4SJohn Baldwin int
18198451d0ddSKip Macy sys_aio_return(struct thread *td, struct aio_return_args *uap)
18203858a1f4SJohn Baldwin {
18213858a1f4SJohn Baldwin 
18223858a1f4SJohn Baldwin 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
18233858a1f4SJohn Baldwin }
18243858a1f4SJohn Baldwin 
18252244ea07SJohn Dyson /*
1826bfbbc4aaSJason Evans  * Allow a process to wake up when any of the I/O requests are completed.
18272244ea07SJohn Dyson  */
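/*
 * An illustrative userland sketch, not part of this file: waiting up to
 * one second for either of two outstanding requests, "cb1" and "cb2", to
 * complete.  aio_suspend() returns -1 with errno set to EAGAIN if the
 * timeout expires first.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <time.h>
 *
 *	const struct aiocb *list[2] = { &cb1, &cb2 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (aio_suspend(list, 2, &ts) == -1)
 *		err(1, "aio_suspend");
 */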
18283858a1f4SJohn Baldwin static int
18293858a1f4SJohn Baldwin kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
18303858a1f4SJohn Baldwin     struct timespec *ts)
1831fd3bf775SJohn Dyson {
1832b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18334a11ca4eSPoul-Henning Kamp 	struct timeval atv;
18342244ea07SJohn Dyson 	struct kaioinfo *ki;
18355652770dSJohn Baldwin 	struct kaiocb *firstjob, *job;
18363858a1f4SJohn Baldwin 	int error, i, timo;
18372244ea07SJohn Dyson 
18382244ea07SJohn Dyson 	timo = 0;
18393858a1f4SJohn Baldwin 	if (ts) {
18403858a1f4SJohn Baldwin 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
18412244ea07SJohn Dyson 			return (EINVAL);
18422244ea07SJohn Dyson 
18433858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
18442244ea07SJohn Dyson 		if (itimerfix(&atv))
18452244ea07SJohn Dyson 			return (EINVAL);
1846227ee8a1SPoul-Henning Kamp 		timo = tvtohz(&atv);
18472244ea07SJohn Dyson 	}
18482244ea07SJohn Dyson 
18492244ea07SJohn Dyson 	ki = p->p_aioinfo;
18502244ea07SJohn Dyson 	if (ki == NULL)
1851ac41f2efSAlfred Perlstein 		return (EAGAIN);
18522244ea07SJohn Dyson 
18533858a1f4SJohn Baldwin 	if (njoblist == 0)
1854ac41f2efSAlfred Perlstein 		return (0);
18552244ea07SJohn Dyson 
1856759ccccaSDavid Xu 	AIO_LOCK(ki);
18571ce91824SDavid Xu 	for (;;) {
18585652770dSJohn Baldwin 		firstjob = NULL;
18591ce91824SDavid Xu 		error = 0;
18605652770dSJohn Baldwin 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
186184af4da6SJohn Dyson 			for (i = 0; i < njoblist; i++) {
18625652770dSJohn Baldwin 				if (job->ujob == ujoblist[i]) {
18635652770dSJohn Baldwin 					if (firstjob == NULL)
18645652770dSJohn Baldwin 						firstjob = job;
1865f3215338SJohn Baldwin 					if (job->jobflags & KAIOCB_FINISHED)
18661ce91824SDavid Xu 						goto RETURN;
186784af4da6SJohn Dyson 				}
186884af4da6SJohn Dyson 			}
186984af4da6SJohn Dyson 		}
18701ce91824SDavid Xu 		/* All tasks were finished. */
18715652770dSJohn Baldwin 		if (firstjob == NULL)
18721ce91824SDavid Xu 			break;
18732244ea07SJohn Dyson 
1874fd3bf775SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
1875759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
18761ce91824SDavid Xu 		    "aiospn", timo);
18771ce91824SDavid Xu 		if (error == ERESTART)
18781ce91824SDavid Xu 			error = EINTR;
18791ce91824SDavid Xu 		if (error)
18801ce91824SDavid Xu 			break;
18812244ea07SJohn Dyson 	}
18821ce91824SDavid Xu RETURN:
1883759ccccaSDavid Xu 	AIO_UNLOCK(ki);
18843858a1f4SJohn Baldwin 	return (error);
18853858a1f4SJohn Baldwin }
18863858a1f4SJohn Baldwin 
18873858a1f4SJohn Baldwin int
18888451d0ddSKip Macy sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
18893858a1f4SJohn Baldwin {
18903858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
18913858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
18923858a1f4SJohn Baldwin 	int error;
18933858a1f4SJohn Baldwin 
18943858a1f4SJohn Baldwin 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
18953858a1f4SJohn Baldwin 		return (EINVAL);
18963858a1f4SJohn Baldwin 
18973858a1f4SJohn Baldwin 	if (uap->timeout) {
18983858a1f4SJohn Baldwin 		/* Get timespec struct. */
18993858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
19003858a1f4SJohn Baldwin 			return (error);
19013858a1f4SJohn Baldwin 		tsp = &ts;
19023858a1f4SJohn Baldwin 	} else
19033858a1f4SJohn Baldwin 		tsp = NULL;
19043858a1f4SJohn Baldwin 
19053858a1f4SJohn Baldwin 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
19063858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
19073858a1f4SJohn Baldwin 	if (error == 0)
19083858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
19091ce91824SDavid Xu 	uma_zfree(aiol_zone, ujoblist);
19101ce91824SDavid Xu 	return (error);
19112244ea07SJohn Dyson }
1912ee877a35SJohn Dyson 
1913ee877a35SJohn Dyson /*
1914dd85920aSJason Evans  * aio_cancel cancels any non-physio aio operations not currently in
1915dd85920aSJason Evans  * progress.
1916ee877a35SJohn Dyson  */
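/*
 * An illustrative userland sketch, not part of this file: cancelling one
 * outstanding request "cb" and acting on the three possible results.
 * AIO_CANCELED means the request was cancelled (aio_return() then yields
 * -1 with ECANCELED), AIO_NOTCANCELED means it is still in progress, and
 * AIO_ALLDONE means it had already completed.  Passing NULL instead of
 * &cb asks to cancel every request on the descriptor, which is the
 * uap->aiocbp == NULL case handled below.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *
 *	switch (aio_cancel(cb.aio_fildes, &cb)) {
 *	case AIO_CANCELED:
 *	case AIO_ALLDONE:
 *		(void)aio_return(&cb);
 *		break;
 *	case AIO_NOTCANCELED:
 *		break;
 *	default:
 *		err(1, "aio_cancel");
 *	}
 */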
1917ee877a35SJohn Dyson int
19188451d0ddSKip Macy sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1919fd3bf775SJohn Dyson {
1920b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
1921dd85920aSJason Evans 	struct kaioinfo *ki;
19225652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
1923dd85920aSJason Evans 	struct file *fp;
1924f131759fSMateusz Guzik 	cap_rights_t rights;
19251ce91824SDavid Xu 	int error;
1926dd85920aSJason Evans 	int cancelled = 0;
1927dd85920aSJason Evans 	int notcancelled = 0;
1928dd85920aSJason Evans 	struct vnode *vp;
1929dd85920aSJason Evans 
19302a522eb9SJohn Baldwin 	/* Lookup file object. */
1931f131759fSMateusz Guzik 	error = fget(td, uap->fd, cap_rights_init(&rights), &fp);
19322a522eb9SJohn Baldwin 	if (error)
19332a522eb9SJohn Baldwin 		return (error);
1934dd85920aSJason Evans 
19351ce91824SDavid Xu 	ki = p->p_aioinfo;
19361ce91824SDavid Xu 	if (ki == NULL)
19371ce91824SDavid Xu 		goto done;
19381ce91824SDavid Xu 
1939dd85920aSJason Evans 	if (fp->f_type == DTYPE_VNODE) {
19403b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
1941dd85920aSJason Evans 		if (vn_isdisk(vp, &error)) {
19422a522eb9SJohn Baldwin 			fdrop(fp, td);
1943b40ce416SJulian Elischer 			td->td_retval[0] = AIO_NOTCANCELED;
1944ac41f2efSAlfred Perlstein 			return (0);
1945dd85920aSJason Evans 		}
1946dd85920aSJason Evans 	}
1947dd85920aSJason Evans 
1948759ccccaSDavid Xu 	AIO_LOCK(ki);
19495652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
19505652770dSJohn Baldwin 		if ((uap->fd == job->uaiocb.aio_fildes) &&
1951dd85920aSJason Evans 		    ((uap->aiocbp == NULL) ||
19525652770dSJohn Baldwin 		     (uap->aiocbp == job->ujob))) {
1953f3215338SJohn Baldwin 			if (aio_cancel_job(p, ki, job)) {
19541ce91824SDavid Xu 				cancelled++;
1955dd85920aSJason Evans 			} else {
1956dd85920aSJason Evans 				notcancelled++;
1957dd85920aSJason Evans 			}
19581aa4c324SDavid Xu 			if (uap->aiocbp != NULL)
19591aa4c324SDavid Xu 				break;
1960dd85920aSJason Evans 		}
1961dd85920aSJason Evans 	}
1962759ccccaSDavid Xu 	AIO_UNLOCK(ki);
19631ce91824SDavid Xu 
1964ad49abc0SAlan Cox done:
19652a522eb9SJohn Baldwin 	fdrop(fp, td);
19661aa4c324SDavid Xu 
19671aa4c324SDavid Xu 	if (uap->aiocbp != NULL) {
1968dd85920aSJason Evans 		if (cancelled) {
1969b40ce416SJulian Elischer 			td->td_retval[0] = AIO_CANCELED;
1970ac41f2efSAlfred Perlstein 			return (0);
1971dd85920aSJason Evans 		}
19721aa4c324SDavid Xu 	}
19731aa4c324SDavid Xu 
19741aa4c324SDavid Xu 	if (notcancelled) {
19751aa4c324SDavid Xu 		td->td_retval[0] = AIO_NOTCANCELED;
19761aa4c324SDavid Xu 		return (0);
19771aa4c324SDavid Xu 	}
19781aa4c324SDavid Xu 
19791aa4c324SDavid Xu 	if (cancelled) {
19801aa4c324SDavid Xu 		td->td_retval[0] = AIO_CANCELED;
19811aa4c324SDavid Xu 		return (0);
19821aa4c324SDavid Xu 	}
19831aa4c324SDavid Xu 
1984b40ce416SJulian Elischer 	td->td_retval[0] = AIO_ALLDONE;
1985dd85920aSJason Evans 
1986ac41f2efSAlfred Perlstein 	return (0);
1987ee877a35SJohn Dyson }
1988ee877a35SJohn Dyson 
1989ee877a35SJohn Dyson /*
1990873fbcd7SRobert Watson  * aio_error is implemented at the kernel level for compatibility purposes
1991873fbcd7SRobert Watson  * only.  For a user-mode async implementation, it would be best to do it in
1992873fbcd7SRobert Watson  * a userland subroutine.
1993ee877a35SJohn Dyson  */
19943858a1f4SJohn Baldwin static int
19955652770dSJohn Baldwin kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1996fd3bf775SJohn Dyson {
1997b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
19985652770dSJohn Baldwin 	struct kaiocb *job;
19992244ea07SJohn Dyson 	struct kaioinfo *ki;
20001ce91824SDavid Xu 	int status;
2001ee877a35SJohn Dyson 
20022244ea07SJohn Dyson 	ki = p->p_aioinfo;
20031ce91824SDavid Xu 	if (ki == NULL) {
20041ce91824SDavid Xu 		td->td_retval[0] = EINVAL;
20051ce91824SDavid Xu 		return (0);
20061ce91824SDavid Xu 	}
2007ee877a35SJohn Dyson 
2008759ccccaSDavid Xu 	AIO_LOCK(ki);
20095652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
20105652770dSJohn Baldwin 		if (job->ujob == ujob) {
2011f3215338SJohn Baldwin 			if (job->jobflags & KAIOCB_FINISHED)
20121ce91824SDavid Xu 				td->td_retval[0] =
20135652770dSJohn Baldwin 					job->uaiocb._aiocb_private.error;
20141ce91824SDavid Xu 			else
2015b40ce416SJulian Elischer 				td->td_retval[0] = EINPROGRESS;
2016759ccccaSDavid Xu 			AIO_UNLOCK(ki);
2017ac41f2efSAlfred Perlstein 			return (0);
20182244ea07SJohn Dyson 		}
20192244ea07SJohn Dyson 	}
2020759ccccaSDavid Xu 	AIO_UNLOCK(ki);
202184af4da6SJohn Dyson 
20222244ea07SJohn Dyson 	/*
2023a9bf5e37SDavid Xu 	 * Hack for failure of aio_aqueue.
20242244ea07SJohn Dyson 	 */
20255652770dSJohn Baldwin 	status = ops->fetch_status(ujob);
20261ce91824SDavid Xu 	if (status == -1) {
20275652770dSJohn Baldwin 		td->td_retval[0] = ops->fetch_error(ujob);
20281ce91824SDavid Xu 		return (0);
20291ce91824SDavid Xu 	}
20301ce91824SDavid Xu 
20311ce91824SDavid Xu 	td->td_retval[0] = EINVAL;
20321ce91824SDavid Xu 	return (0);
2033ee877a35SJohn Dyson }
2034ee877a35SJohn Dyson 
20353858a1f4SJohn Baldwin int
20368451d0ddSKip Macy sys_aio_error(struct thread *td, struct aio_error_args *uap)
20373858a1f4SJohn Baldwin {
20383858a1f4SJohn Baldwin 
20393858a1f4SJohn Baldwin 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
20403858a1f4SJohn Baldwin }
20413858a1f4SJohn Baldwin 
2042eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */
2043399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2044ee877a35SJohn Dyson int
2045399e8c17SJohn Baldwin freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
20460972628aSDavid Xu {
20470972628aSDavid Xu 
20483858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
20493858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
20500972628aSDavid Xu }
2051399e8c17SJohn Baldwin #endif
20520972628aSDavid Xu 
20530972628aSDavid Xu int
20548451d0ddSKip Macy sys_aio_read(struct thread *td, struct aio_read_args *uap)
2055fd3bf775SJohn Dyson {
205621d56e9cSAlfred Perlstein 
20573858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2058ee877a35SJohn Dyson }
2059ee877a35SJohn Dyson 
2060eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */
2061399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2062ee877a35SJohn Dyson int
2063399e8c17SJohn Baldwin freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
20640972628aSDavid Xu {
20650972628aSDavid Xu 
20663858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
20673858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
20680972628aSDavid Xu }
2069399e8c17SJohn Baldwin #endif
20700972628aSDavid Xu 
20710972628aSDavid Xu int
20728451d0ddSKip Macy sys_aio_write(struct thread *td, struct aio_write_args *uap)
2073fd3bf775SJohn Dyson {
207421d56e9cSAlfred Perlstein 
20753858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
20760972628aSDavid Xu }
20770972628aSDavid Xu 
20786160e12cSGleb Smirnoff int
20796160e12cSGleb Smirnoff sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
20806160e12cSGleb Smirnoff {
20816160e12cSGleb Smirnoff 
20826160e12cSGleb Smirnoff 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
20836160e12cSGleb Smirnoff }
20846160e12cSGleb Smirnoff 
20850972628aSDavid Xu static int
20863858a1f4SJohn Baldwin kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
20873858a1f4SJohn Baldwin     struct aiocb **acb_list, int nent, struct sigevent *sig,
20883858a1f4SJohn Baldwin     struct aiocb_ops *ops)
20890972628aSDavid Xu {
2090b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
20915652770dSJohn Baldwin 	struct aiocb *job;
20922244ea07SJohn Dyson 	struct kaioinfo *ki;
20931ce91824SDavid Xu 	struct aioliojob *lj;
209469cd28daSDoug Ambrisko 	struct kevent kev;
20951ce91824SDavid Xu 	int error;
2096fd3bf775SJohn Dyson 	int nerror;
2097ee877a35SJohn Dyson 	int i;
2098ee877a35SJohn Dyson 
20993858a1f4SJohn Baldwin 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2100ac41f2efSAlfred Perlstein 		return (EINVAL);
21012244ea07SJohn Dyson 
2102ae3b195fSTim J. Robbins 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2103ac41f2efSAlfred Perlstein 		return (EINVAL);
21042244ea07SJohn Dyson 
2105bfbbc4aaSJason Evans 	if (p->p_aioinfo == NULL)
21062244ea07SJohn Dyson 		aio_init_aioinfo(p);
21072244ea07SJohn Dyson 
21082244ea07SJohn Dyson 	ki = p->p_aioinfo;
21092244ea07SJohn Dyson 
2110a163d034SWarner Losh 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
211184af4da6SJohn Dyson 	lj->lioj_flags = 0;
21121ce91824SDavid Xu 	lj->lioj_count = 0;
21131ce91824SDavid Xu 	lj->lioj_finished_count = 0;
2114d8b0556cSKonstantin Belousov 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
21154c0fb2cfSDavid Xu 	ksiginfo_init(&lj->lioj_ksi);
211669cd28daSDoug Ambrisko 
211784af4da6SJohn Dyson 	/*
2118bfbbc4aaSJason Evans 	 * Setup signal.
211984af4da6SJohn Dyson 	 */
21203858a1f4SJohn Baldwin 	if (sig && (mode == LIO_NOWAIT)) {
21213858a1f4SJohn Baldwin 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
212269cd28daSDoug Ambrisko 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
212369cd28daSDoug Ambrisko 			/* Assume only new style KEVENT */
212469cd28daSDoug Ambrisko 			kev.filter = EVFILT_LIO;
212569cd28daSDoug Ambrisko 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
21263858a1f4SJohn Baldwin 			kev.ident = (uintptr_t)uacb_list; /* something unique */
212769cd28daSDoug Ambrisko 			kev.data = (intptr_t)lj;
21281ce91824SDavid Xu 			/* pass user defined sigval data */
21291ce91824SDavid Xu 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
21304db71d27SJohn-Mark Gurney 			error = kqfd_register(
21314db71d27SJohn-Mark Gurney 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
213269cd28daSDoug Ambrisko 			if (error) {
213369cd28daSDoug Ambrisko 				uma_zfree(aiolio_zone, lj);
213469cd28daSDoug Ambrisko 				return (error);
213569cd28daSDoug Ambrisko 			}
21361ce91824SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
21371ce91824SDavid Xu 			;
213868d71118SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
213968d71118SDavid Xu 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
214068d71118SDavid Xu 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
214169cd28daSDoug Ambrisko 					uma_zfree(aiolio_zone, lj);
214269cd28daSDoug Ambrisko 					return (EINVAL);
214368d71118SDavid Xu 				}
214484af4da6SJohn Dyson 				lj->lioj_flags |= LIOJ_SIGNAL;
214568d71118SDavid Xu 		} else {
214668d71118SDavid Xu 			uma_zfree(aiolio_zone, lj);
214768d71118SDavid Xu 			return (EINVAL);
21484d752b01SAlan Cox 		}
21491ce91824SDavid Xu 	}
215069cd28daSDoug Ambrisko 
2151759ccccaSDavid Xu 	AIO_LOCK(ki);
21522f3cf918SAlfred Perlstein 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
21532244ea07SJohn Dyson 	/*
21541ce91824SDavid Xu 	 * Add an extra aiocb count to keep the lio from being freed
21551ce91824SDavid Xu 	 * by other threads doing aio_waitcomplete or aio_return,
21561ce91824SDavid Xu 	 * and to prevent the event from being sent until we have
21571ce91824SDavid Xu 	 * queued all tasks.
21581ce91824SDavid Xu 	 */
21591ce91824SDavid Xu 	lj->lioj_count = 1;
2160759ccccaSDavid Xu 	AIO_UNLOCK(ki);
21611ce91824SDavid Xu 
21621ce91824SDavid Xu 	/*
2163bfbbc4aaSJason Evans 	 * Get pointers to the list of I/O requests.
21642244ea07SJohn Dyson 	 */
2165fd3bf775SJohn Dyson 	nerror = 0;
21663858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++) {
21675652770dSJohn Baldwin 		job = acb_list[i];
21685652770dSJohn Baldwin 		if (job != NULL) {
21695652770dSJohn Baldwin 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
21701ce91824SDavid Xu 			if (error != 0)
2171fd3bf775SJohn Dyson 				nerror++;
2172fd3bf775SJohn Dyson 		}
2173fd3bf775SJohn Dyson 	}
21742244ea07SJohn Dyson 
21751ce91824SDavid Xu 	error = 0;
2176759ccccaSDavid Xu 	AIO_LOCK(ki);
21773858a1f4SJohn Baldwin 	if (mode == LIO_WAIT) {
21781ce91824SDavid Xu 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2179fd3bf775SJohn Dyson 			ki->kaio_flags |= KAIO_WAKEUP;
2180759ccccaSDavid Xu 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
21811ce91824SDavid Xu 			    PRIBIO | PCATCH, "aiospn", 0);
21821ce91824SDavid Xu 			if (error == ERESTART)
21831ce91824SDavid Xu 				error = EINTR;
21841ce91824SDavid Xu 			if (error)
21851ce91824SDavid Xu 				break;
21861ce91824SDavid Xu 		}
21871ce91824SDavid Xu 	} else {
21881ce91824SDavid Xu 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
21891ce91824SDavid Xu 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
21901ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
21911ce91824SDavid Xu 				KNOTE_LOCKED(&lj->klist, 1);
21921ce91824SDavid Xu 			}
21931ce91824SDavid Xu 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
21941ce91824SDavid Xu 			    == LIOJ_SIGNAL
21951ce91824SDavid Xu 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
21961ce91824SDavid Xu 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
21971ce91824SDavid Xu 				aio_sendsig(p, &lj->lioj_signal,
21981ce91824SDavid Xu 					    &lj->lioj_ksi);
21991ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
22002244ea07SJohn Dyson 			}
22012244ea07SJohn Dyson 		}
22021ce91824SDavid Xu 	}
22031ce91824SDavid Xu 	lj->lioj_count--;
22041ce91824SDavid Xu 	if (lj->lioj_count == 0) {
22051ce91824SDavid Xu 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
22061ce91824SDavid Xu 		knlist_delete(&lj->klist, curthread, 1);
2207759ccccaSDavid Xu 		PROC_LOCK(p);
22081ce91824SDavid Xu 		sigqueue_take(&lj->lioj_ksi);
22091ce91824SDavid Xu 		PROC_UNLOCK(p);
2210759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22111ce91824SDavid Xu 		uma_zfree(aiolio_zone, lj);
22121ce91824SDavid Xu 	} else
2213759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22142244ea07SJohn Dyson 
22151ce91824SDavid Xu 	if (nerror)
22161ce91824SDavid Xu 		return (EIO);
22171ce91824SDavid Xu 	return (error);
2218ee877a35SJohn Dyson }
2219fd3bf775SJohn Dyson 
22203858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
2221399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
22223858a1f4SJohn Baldwin int
2223399e8c17SJohn Baldwin freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
22243858a1f4SJohn Baldwin {
22253858a1f4SJohn Baldwin 	struct aiocb **acb_list;
22263858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
22273858a1f4SJohn Baldwin 	struct osigevent osig;
22283858a1f4SJohn Baldwin 	int error, nent;
22293858a1f4SJohn Baldwin 
22303858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
22313858a1f4SJohn Baldwin 		return (EINVAL);
22323858a1f4SJohn Baldwin 
22333858a1f4SJohn Baldwin 	nent = uap->nent;
22343858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
22353858a1f4SJohn Baldwin 		return (EINVAL);
22363858a1f4SJohn Baldwin 
22373858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
22383858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
22393858a1f4SJohn Baldwin 		if (error)
22403858a1f4SJohn Baldwin 			return (error);
22413858a1f4SJohn Baldwin 		error = convert_old_sigevent(&osig, &sig);
22423858a1f4SJohn Baldwin 		if (error)
22433858a1f4SJohn Baldwin 			return (error);
22443858a1f4SJohn Baldwin 		sigp = &sig;
22453858a1f4SJohn Baldwin 	} else
22463858a1f4SJohn Baldwin 		sigp = NULL;
22473858a1f4SJohn Baldwin 
22483858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
22493858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
22503858a1f4SJohn Baldwin 	if (error == 0)
22513858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode,
22523858a1f4SJohn Baldwin 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
22533858a1f4SJohn Baldwin 		    &aiocb_ops_osigevent);
22543858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
22553858a1f4SJohn Baldwin 	return (error);
22563858a1f4SJohn Baldwin }
2257399e8c17SJohn Baldwin #endif
22583858a1f4SJohn Baldwin 
22593858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
22603858a1f4SJohn Baldwin int
22618451d0ddSKip Macy sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
22623858a1f4SJohn Baldwin {
22633858a1f4SJohn Baldwin 	struct aiocb **acb_list;
22643858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
22653858a1f4SJohn Baldwin 	int error, nent;
22663858a1f4SJohn Baldwin 
22673858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
22683858a1f4SJohn Baldwin 		return (EINVAL);
22693858a1f4SJohn Baldwin 
22703858a1f4SJohn Baldwin 	nent = uap->nent;
22713858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
22723858a1f4SJohn Baldwin 		return (EINVAL);
22733858a1f4SJohn Baldwin 
22743858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
22753858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig, sizeof(sig));
22763858a1f4SJohn Baldwin 		if (error)
22773858a1f4SJohn Baldwin 			return (error);
22783858a1f4SJohn Baldwin 		sigp = &sig;
22793858a1f4SJohn Baldwin 	} else
22803858a1f4SJohn Baldwin 		sigp = NULL;
22813858a1f4SJohn Baldwin 
22823858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
22833858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
22843858a1f4SJohn Baldwin 	if (error == 0)
22853858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
22863858a1f4SJohn Baldwin 		    nent, sigp, &aiocb_ops);
22873858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
22883858a1f4SJohn Baldwin 	return (error);
22893858a1f4SJohn Baldwin }
22903858a1f4SJohn Baldwin 
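/*
 * Illustrative userland sketch (not kernel code): a minimal caller of the
 * native lio_listio(2) implemented above.  The path, buffer sizes, and error
 * handling are placeholders chosen only to make the request flow concrete.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		static char buf[2][4096];
 *		struct aiocb cb[2], *list[2];
 *		int fd, i;
 *
 *		if ((fd = open("/tmp/lio_demo", O_RDWR | O_CREAT, 0644)) == -1)
 *			err(1, "open");
 *		for (i = 0; i < 2; i++) {
 *			memset(&cb[i], 0, sizeof(cb[i]));
 *			cb[i].aio_fildes = fd;
 *			cb[i].aio_buf = buf[i];
 *			cb[i].aio_nbytes = sizeof(buf[i]);
 *			cb[i].aio_offset = (off_t)i * sizeof(buf[i]);
 *			cb[i].aio_lio_opcode = LIO_WRITE;
 *			list[i] = &cb[i];
 *		}
 *
 * LIO_WAIT makes the syscall return only once every request in the list has
 * completed; with LIO_NOWAIT the optional sigevent argument instead reports
 * completion of the whole list asynchronously.
 *
 *		if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *			err(1, "lio_listio");
 *		for (i = 0; i < 2; i++)
 *			printf("request %d: %zd bytes\n", i, aio_return(&cb[i]));
 *		close(fd);
 *		return (0);
 *	}
 */
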
2291fd3bf775SJohn Dyson static void
2292f743d981SAlexander Motin aio_physwakeup(struct bio *bp)
2293fd3bf775SJohn Dyson {
22945652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
22951ce91824SDavid Xu 	struct proc *userp;
229627b8220dSDavid Xu 	struct kaioinfo *ki;
2297f3215338SJohn Baldwin 	size_t nbytes;
2298f3215338SJohn Baldwin 	int error, nblks;
22991ce91824SDavid Xu 
2300f743d981SAlexander Motin 	/* Release mapping into kernel space. */
2301f3215338SJohn Baldwin 	userp = job->userproc;
2302f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
23035652770dSJohn Baldwin 	if (job->pbuf) {
23045652770dSJohn Baldwin 		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
23055652770dSJohn Baldwin 		relpbuf(job->pbuf, NULL);
23065652770dSJohn Baldwin 		job->pbuf = NULL;
2307f743d981SAlexander Motin 		atomic_subtract_int(&num_buf_aio, 1);
2308f3215338SJohn Baldwin 		AIO_LOCK(ki);
2309f3215338SJohn Baldwin 		ki->kaio_buffer_count--;
2310f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
2311f743d981SAlexander Motin 	}
23125652770dSJohn Baldwin 	vm_page_unhold_pages(job->pages, job->npages);
2313f743d981SAlexander Motin 
23145652770dSJohn Baldwin 	bp = job->bp;
23155652770dSJohn Baldwin 	job->bp = NULL;
2316f3215338SJohn Baldwin 	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
2317f3215338SJohn Baldwin 	error = 0;
2318f743d981SAlexander Motin 	if (bp->bio_flags & BIO_ERROR)
2319f3215338SJohn Baldwin 		error = bp->bio_error;
2320f3215338SJohn Baldwin 	nblks = btodb(nbytes);
23215652770dSJohn Baldwin 	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
23225652770dSJohn Baldwin 		job->outputcharge += nblks;
23231ce91824SDavid Xu 	else
23245652770dSJohn Baldwin 		job->inputcharge += nblks;
2325f3215338SJohn Baldwin 
2326f3215338SJohn Baldwin 	aio_complete(job, nbytes, error);
23271ce91824SDavid Xu 
2328f743d981SAlexander Motin 	g_destroy_bio(bp);
232984af4da6SJohn Dyson }
2330bfbbc4aaSJason Evans 
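/*
 * aio_physwakeup() above runs as the completion callback of a struct bio
 * issued for raw-device AIO.  A heavily condensed, illustrative sketch of how
 * the submission side typically arms it; only the fields this handler
 * actually consumes are shown, and device selection and buffer mapping are
 * elided:
 *
 *	bp = g_alloc_bio();
 *	bp->bio_done = aio_physwakeup;		(invoked when the I/O finishes)
 *	bp->bio_caller1 = (void *)job;		(read back above via bio_caller1)
 *	job->bp = bp;
 *	... fill in bio_cmd, bio_offset, bio_length and bio_data, then hand
 *	    bp to the device's strategy routine ...
 */
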
2331eb8e6d52SEivind Eklund /* syscall - wait for the next completion of an aio request */
23323858a1f4SJohn Baldwin static int
23335652770dSJohn Baldwin kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
23343858a1f4SJohn Baldwin     struct timespec *ts, struct aiocb_ops *ops)
2335bfbbc4aaSJason Evans {
2336b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
2337bfbbc4aaSJason Evans 	struct timeval atv;
2338bfbbc4aaSJason Evans 	struct kaioinfo *ki;
23395652770dSJohn Baldwin 	struct kaiocb *job;
23405652770dSJohn Baldwin 	struct aiocb *ujob;
2341bb430bc7SJohn Baldwin 	long error, status;
2342bb430bc7SJohn Baldwin 	int timo;
2343bfbbc4aaSJason Evans 
23445652770dSJohn Baldwin 	ops->store_aiocb(ujobp, NULL);
2345dd85920aSJason Evans 
234638d68e2dSPawel Jakub Dawidek 	if (ts == NULL) {
2347bfbbc4aaSJason Evans 		timo = 0;
234838d68e2dSPawel Jakub Dawidek 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
234938d68e2dSPawel Jakub Dawidek 		timo = -1;
235038d68e2dSPawel Jakub Dawidek 	} else {
23513858a1f4SJohn Baldwin 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2352bfbbc4aaSJason Evans 			return (EINVAL);
2353bfbbc4aaSJason Evans 
23543858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2355bfbbc4aaSJason Evans 		if (itimerfix(&atv))
2356bfbbc4aaSJason Evans 			return (EINVAL);
2357bfbbc4aaSJason Evans 		timo = tvtohz(&atv);
2358bfbbc4aaSJason Evans 	}
2359bfbbc4aaSJason Evans 
23608213baf0SChristian S.J. Peron 	if (p->p_aioinfo == NULL)
2361323fe565SDavid Xu 		aio_init_aioinfo(p);
23628213baf0SChristian S.J. Peron 	ki = p->p_aioinfo;
2363bfbbc4aaSJason Evans 
23641ce91824SDavid Xu 	error = 0;
23655652770dSJohn Baldwin 	job = NULL;
2366759ccccaSDavid Xu 	AIO_LOCK(ki);
23675652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
236838d68e2dSPawel Jakub Dawidek 		if (timo == -1) {
236938d68e2dSPawel Jakub Dawidek 			error = EWOULDBLOCK;
237038d68e2dSPawel Jakub Dawidek 			break;
237138d68e2dSPawel Jakub Dawidek 		}
23721ce91824SDavid Xu 		ki->kaio_flags |= KAIO_WAKEUP;
2373759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
23741ce91824SDavid Xu 		    "aiowc", timo);
237527b8220dSDavid Xu 		if (timo && error == ERESTART)
23761ce91824SDavid Xu 			error = EINTR;
23771ce91824SDavid Xu 		if (error)
23781ce91824SDavid Xu 			break;
23791ce91824SDavid Xu 	}
23801ce91824SDavid Xu 
23815652770dSJohn Baldwin 	if (job != NULL) {
2382f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
23835652770dSJohn Baldwin 		ujob = job->ujob;
23845652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
23855652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
23861ce91824SDavid Xu 		td->td_retval[0] = status;
23875652770dSJohn Baldwin 		if (job->uaiocb.aio_lio_opcode == LIO_WRITE) {
23885652770dSJohn Baldwin 			td->td_ru.ru_oublock += job->outputcharge;
23895652770dSJohn Baldwin 			job->outputcharge = 0;
23905652770dSJohn Baldwin 		} else if (job->uaiocb.aio_lio_opcode == LIO_READ) {
23915652770dSJohn Baldwin 			td->td_ru.ru_inblock += job->inputcharge;
23925652770dSJohn Baldwin 			job->inputcharge = 0;
2393bfbbc4aaSJason Evans 		}
23945652770dSJohn Baldwin 		aio_free_entry(job);
2395759ccccaSDavid Xu 		AIO_UNLOCK(ki);
23965652770dSJohn Baldwin 		ops->store_aiocb(ujobp, ujob);
23975652770dSJohn Baldwin 		ops->store_error(ujob, error);
23985652770dSJohn Baldwin 		ops->store_status(ujob, status);
23991ce91824SDavid Xu 	} else
2400759ccccaSDavid Xu 		AIO_UNLOCK(ki);
2401bfbbc4aaSJason Evans 
2402ac41f2efSAlfred Perlstein 	return (error);
2403bfbbc4aaSJason Evans }
2404cb679c38SJonathan Lemon 
240599eee864SDavid Xu int
24068451d0ddSKip Macy sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
24073858a1f4SJohn Baldwin {
24083858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
24093858a1f4SJohn Baldwin 	int error;
24103858a1f4SJohn Baldwin 
24113858a1f4SJohn Baldwin 	if (uap->timeout) {
24123858a1f4SJohn Baldwin 		/* Get timespec struct. */
24133858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts, sizeof(ts));
24143858a1f4SJohn Baldwin 		if (error)
24153858a1f4SJohn Baldwin 			return (error);
24163858a1f4SJohn Baldwin 		tsp = &ts;
24173858a1f4SJohn Baldwin 	} else
24183858a1f4SJohn Baldwin 		tsp = NULL;
24193858a1f4SJohn Baldwin 
24203858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
24213858a1f4SJohn Baldwin }
24223858a1f4SJohn Baldwin 
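/*
 * Illustrative userland sketch (not kernel code): queue one request and then
 * reap it with the FreeBSD-specific aio_waitcomplete(2) path above.  The file
 * name is an arbitrary placeholder.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		static char buf[8192];
 *		struct aiocb cb, *done;
 *		ssize_t len;
 *		int fd;
 *
 *		if ((fd = open("/etc/motd", O_RDONLY)) == -1)
 *			err(1, "open");
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = fd;
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = sizeof(buf);
 *		if (aio_read(&cb) == -1)
 *			err(1, "aio_read");
 *
 * A NULL timeout sleeps until the next request owned by the process
 * completes; a zero timespec polls and fails with EWOULDBLOCK if nothing has
 * finished, matching the timo handling in kern_aio_waitcomplete() above.
 *
 *		if ((len = aio_waitcomplete(&done, NULL)) == -1)
 *			err(1, "aio_waitcomplete");
 *		printf("%zd bytes read, done == &cb: %d\n", len, done == &cb);
 *		close(fd);
 *		return (0);
 *	}
 */
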
24233858a1f4SJohn Baldwin static int
24245652770dSJohn Baldwin kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
24253858a1f4SJohn Baldwin     struct aiocb_ops *ops)
242699eee864SDavid Xu {
242799eee864SDavid Xu 	struct proc *p = td->td_proc;
242899eee864SDavid Xu 	struct kaioinfo *ki;
242999eee864SDavid Xu 
24303858a1f4SJohn Baldwin 	if (op != O_SYNC) /* XXX lack of O_DSYNC */
243199eee864SDavid Xu 		return (EINVAL);
243299eee864SDavid Xu 	ki = p->p_aioinfo;
243399eee864SDavid Xu 	if (ki == NULL)
243499eee864SDavid Xu 		aio_init_aioinfo(p);
24355652770dSJohn Baldwin 	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
24363858a1f4SJohn Baldwin }
24373858a1f4SJohn Baldwin 
24383858a1f4SJohn Baldwin int
24398451d0ddSKip Macy sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
24403858a1f4SJohn Baldwin {
24413858a1f4SJohn Baldwin 
24423858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
244399eee864SDavid Xu }
244499eee864SDavid Xu 
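/*
 * Illustrative userland fragment (not kernel code), assuming "fd" already has
 * asynchronous writes queued: aio_fsync(2) enqueues a sync of the I/O already
 * queued on fd.  As the check in kern_aio_fsync() above shows, only O_SYNC is
 * accepted at this revision (there is no O_DSYNC support yet).
 *
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	if (aio_fsync(O_SYNC, &cb) == -1)
 *		err(1, "aio_fsync");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		usleep(1000);
 *	if (aio_return(&cb) == -1)
 *		warn("asynchronous fsync failed");
 */
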
2445eb8e6d52SEivind Eklund /* kqueue attach function */
2446cb679c38SJonathan Lemon static int
2447cb679c38SJonathan Lemon filt_aioattach(struct knote *kn)
2448cb679c38SJonathan Lemon {
24495652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)kn->kn_sdata;
2450cb679c38SJonathan Lemon 
2451cb679c38SJonathan Lemon 	/*
24525652770dSJohn Baldwin 	 * The job pointer must be validated before using it, so
2453cb679c38SJonathan Lemon 	 * registration is restricted to the kernel; the user cannot
2454cb679c38SJonathan Lemon 	 * set EV_FLAG1.
2455cb679c38SJonathan Lemon 	 */
2456cb679c38SJonathan Lemon 	if ((kn->kn_flags & EV_FLAG1) == 0)
2457cb679c38SJonathan Lemon 		return (EPERM);
24585652770dSJohn Baldwin 	kn->kn_ptr.p_aio = job;
2459cb679c38SJonathan Lemon 	kn->kn_flags &= ~EV_FLAG1;
2460cb679c38SJonathan Lemon 
24615652770dSJohn Baldwin 	knlist_add(&job->klist, kn, 0);
2462cb679c38SJonathan Lemon 
2463cb679c38SJonathan Lemon 	return (0);
2464cb679c38SJonathan Lemon }
2465cb679c38SJonathan Lemon 
2466eb8e6d52SEivind Eklund /* kqueue detach function */
2467cb679c38SJonathan Lemon static void
2468cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn)
2469cb679c38SJonathan Lemon {
24708e9fc278SDoug Ambrisko 	struct knlist *knl;
2471cb679c38SJonathan Lemon 
24728e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_aio->klist;
24738e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
24748e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
24758e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
24768e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
2477cb679c38SJonathan Lemon }
2478cb679c38SJonathan Lemon 
2479eb8e6d52SEivind Eklund /* kqueue filter function */
2480cb679c38SJonathan Lemon /*ARGSUSED*/
2481cb679c38SJonathan Lemon static int
2482cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint)
2483cb679c38SJonathan Lemon {
24845652770dSJohn Baldwin 	struct kaiocb *job = kn->kn_ptr.p_aio;
2485cb679c38SJonathan Lemon 
24865652770dSJohn Baldwin 	kn->kn_data = job->uaiocb._aiocb_private.error;
2487f3215338SJohn Baldwin 	if (!(job->jobflags & KAIOCB_FINISHED))
2488cb679c38SJonathan Lemon 		return (0);
2489cb679c38SJonathan Lemon 	kn->kn_flags |= EV_EOF;
2490cb679c38SJonathan Lemon 	return (1);
2491cb679c38SJonathan Lemon }
249269cd28daSDoug Ambrisko 
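/*
 * Illustrative userland fragment (not kernel code): completion notification
 * through EVFILT_AIO.  The aiocb is registered for kqueue delivery via
 * SIGEV_KEVENT at submission time; the filter functions above are what the
 * kernel attaches on the process's behalf (user code never sets EV_FLAG1
 * itself).  "fd" and "buf" are assumed to be set up as in the earlier
 * sketches.
 *
 *	#include <sys/event.h>
 *
 *	struct aiocb cb;
 *	struct kevent ev;
 *	int kq;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
 *		err(1, "kevent");
 *
 * The returned event has filter EVFILT_AIO and ident set to the aiocb's
 * address; filt_aio() above places the request's error value in the event's
 * data field.
 *
 *	printf("%zd bytes\n", aio_return((struct aiocb *)ev.ident));
 */
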
249369cd28daSDoug Ambrisko /* kqueue attach function */
249469cd28daSDoug Ambrisko static int
249569cd28daSDoug Ambrisko filt_lioattach(struct knote *kn)
249669cd28daSDoug Ambrisko {
24971ce91824SDavid Xu 	struct aioliojob *lj = (struct aioliojob *)kn->kn_sdata;
249869cd28daSDoug Ambrisko 
249969cd28daSDoug Ambrisko 	/*
25001ce91824SDavid Xu 	 * The aioliojob pointer must be validated before using it, so
250169cd28daSDoug Ambrisko 	 * registration is restricted to the kernel; the user cannot
250269cd28daSDoug Ambrisko 	 * set EV_FLAG1.
250369cd28daSDoug Ambrisko 	 */
250469cd28daSDoug Ambrisko 	if ((kn->kn_flags & EV_FLAG1) == 0)
250569cd28daSDoug Ambrisko 		return (EPERM);
2506a8afa221SJean-Sébastien Pédron 	kn->kn_ptr.p_lio = lj;
250769cd28daSDoug Ambrisko 	kn->kn_flags &= ~EV_FLAG1;
250869cd28daSDoug Ambrisko 
250969cd28daSDoug Ambrisko 	knlist_add(&lj->klist, kn, 0);
251069cd28daSDoug Ambrisko 
251169cd28daSDoug Ambrisko 	return (0);
251269cd28daSDoug Ambrisko }
251369cd28daSDoug Ambrisko 
251469cd28daSDoug Ambrisko /* kqueue detach function */
251569cd28daSDoug Ambrisko static void
251669cd28daSDoug Ambrisko filt_liodetach(struct knote *kn)
251769cd28daSDoug Ambrisko {
25188e9fc278SDoug Ambrisko 	struct knlist *knl;
251969cd28daSDoug Ambrisko 
25208e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_lio->klist;
25218e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25228e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25238e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25248e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
252569cd28daSDoug Ambrisko }
252669cd28daSDoug Ambrisko 
252769cd28daSDoug Ambrisko /* kqueue filter function */
252869cd28daSDoug Ambrisko /*ARGSUSED*/
252969cd28daSDoug Ambrisko static int
253069cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint)
253169cd28daSDoug Ambrisko {
2532a8afa221SJean-Sébastien Pédron 	struct aioliojob *lj = kn->kn_ptr.p_lio;
25331ce91824SDavid Xu 
253469cd28daSDoug Ambrisko 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
253569cd28daSDoug Ambrisko }
25363858a1f4SJohn Baldwin 
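/*
 * Illustrative userland fragment (not kernel code): the LIO analogue of the
 * sketch above.  Passing a SIGEV_KEVENT sigevent to lio_listio(LIO_NOWAIT,
 * ...) yields a single EVFILT_LIO event once the entire list has completed,
 * which is the LIOJ_KEVENT_POSTED state that filt_lio() tests for.  "list"
 * and "kq" are assumed to be set up as in the earlier sketches.
 *
 *	struct sigevent sev;
 *	struct kevent ev;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_KEVENT;
 *	sev.sigev_notify_kqueue = kq;
 *	sev.sigev_value.sival_ptr = list;
 *	if (lio_listio(LIO_NOWAIT, list, 2, &sev) == -1)
 *		err(1, "lio_listio");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
 *		err(1, "kevent");
 *
 * On return, ev.filter is EVFILT_LIO; per-request status is then collected
 * with aio_error()/aio_return() on each list entry.
 */
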
2537841c0c7eSNathan Whitehorn #ifdef COMPAT_FREEBSD32
2538399e8c17SJohn Baldwin #include <sys/mount.h>
2539399e8c17SJohn Baldwin #include <sys/socket.h>
2540399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32.h>
2541399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_proto.h>
2542399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h>
2543399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_syscall.h>
2544399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_util.h>
25453858a1f4SJohn Baldwin 
25463858a1f4SJohn Baldwin struct __aiocb_private32 {
25473858a1f4SJohn Baldwin 	int32_t	status;
25483858a1f4SJohn Baldwin 	int32_t	error;
25493858a1f4SJohn Baldwin 	uint32_t kernelinfo;
25503858a1f4SJohn Baldwin };
25513858a1f4SJohn Baldwin 
2552399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
25533858a1f4SJohn Baldwin typedef struct oaiocb32 {
25543858a1f4SJohn Baldwin 	int	aio_fildes;		/* File descriptor */
25553858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
25563858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
25573858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
25583858a1f4SJohn Baldwin 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
25593858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
25603858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
25613858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
25623858a1f4SJohn Baldwin } oaiocb32_t;
2563399e8c17SJohn Baldwin #endif
25643858a1f4SJohn Baldwin 
25653858a1f4SJohn Baldwin typedef struct aiocb32 {
25663858a1f4SJohn Baldwin 	int32_t	aio_fildes;		/* File descriptor */
25673858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
25683858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
25693858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
25703858a1f4SJohn Baldwin 	int	__spare__[2];
25713858a1f4SJohn Baldwin 	uint32_t __spare2__;
25723858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
25733858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
25743858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
25753858a1f4SJohn Baldwin 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
25763858a1f4SJohn Baldwin } aiocb32_t;
25773858a1f4SJohn Baldwin 
2578399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
25793858a1f4SJohn Baldwin static int
25803858a1f4SJohn Baldwin convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
25813858a1f4SJohn Baldwin {
25823858a1f4SJohn Baldwin 
25833858a1f4SJohn Baldwin 	/*
25843858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
25853858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
25863858a1f4SJohn Baldwin 	 */
25873858a1f4SJohn Baldwin 	CP(*osig, *nsig, sigev_notify);
25883858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
25893858a1f4SJohn Baldwin 	case SIGEV_NONE:
25903858a1f4SJohn Baldwin 		break;
25913858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
25923858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
25933858a1f4SJohn Baldwin 		break;
25943858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
25953858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
25963858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
25973858a1f4SJohn Baldwin 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
25983858a1f4SJohn Baldwin 		break;
25993858a1f4SJohn Baldwin 	default:
26003858a1f4SJohn Baldwin 		return (EINVAL);
26013858a1f4SJohn Baldwin 	}
26023858a1f4SJohn Baldwin 	return (0);
26033858a1f4SJohn Baldwin }
26043858a1f4SJohn Baldwin 
26053858a1f4SJohn Baldwin static int
26063858a1f4SJohn Baldwin aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
26073858a1f4SJohn Baldwin {
26083858a1f4SJohn Baldwin 	struct oaiocb32 job32;
26093858a1f4SJohn Baldwin 	int error;
26103858a1f4SJohn Baldwin 
26113858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
26123858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26133858a1f4SJohn Baldwin 	if (error)
26143858a1f4SJohn Baldwin 		return (error);
26153858a1f4SJohn Baldwin 
26163858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26173858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26183858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26193858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26203858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26213858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26223858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26233858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26243858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26253858a1f4SJohn Baldwin 	return (convert_old_sigevent32(&job32.aio_sigevent,
26263858a1f4SJohn Baldwin 	    &kjob->aio_sigevent));
26273858a1f4SJohn Baldwin }
2628399e8c17SJohn Baldwin #endif
26293858a1f4SJohn Baldwin 
26303858a1f4SJohn Baldwin static int
26313858a1f4SJohn Baldwin aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
26323858a1f4SJohn Baldwin {
26333858a1f4SJohn Baldwin 	struct aiocb32 job32;
26343858a1f4SJohn Baldwin 	int error;
26353858a1f4SJohn Baldwin 
26363858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26373858a1f4SJohn Baldwin 	if (error)
26383858a1f4SJohn Baldwin 		return (error);
26393858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26403858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26413858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26423858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26433858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26443858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26453858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26463858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26473858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26483858a1f4SJohn Baldwin 	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
26493858a1f4SJohn Baldwin }
26503858a1f4SJohn Baldwin 
26513858a1f4SJohn Baldwin static long
26523858a1f4SJohn Baldwin aiocb32_fetch_status(struct aiocb *ujob)
26533858a1f4SJohn Baldwin {
26543858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26553858a1f4SJohn Baldwin 
26563858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26573858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.status));
26583858a1f4SJohn Baldwin }
26593858a1f4SJohn Baldwin 
26603858a1f4SJohn Baldwin static long
26613858a1f4SJohn Baldwin aiocb32_fetch_error(struct aiocb *ujob)
26623858a1f4SJohn Baldwin {
26633858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26643858a1f4SJohn Baldwin 
26653858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26663858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.error));
26673858a1f4SJohn Baldwin }
26683858a1f4SJohn Baldwin 
26693858a1f4SJohn Baldwin static int
26703858a1f4SJohn Baldwin aiocb32_store_status(struct aiocb *ujob, long status)
26713858a1f4SJohn Baldwin {
26723858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26733858a1f4SJohn Baldwin 
26743858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26753858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.status, status));
26763858a1f4SJohn Baldwin }
26773858a1f4SJohn Baldwin 
26783858a1f4SJohn Baldwin static int
26793858a1f4SJohn Baldwin aiocb32_store_error(struct aiocb *ujob, long error)
26803858a1f4SJohn Baldwin {
26813858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26823858a1f4SJohn Baldwin 
26833858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26843858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.error, error));
26853858a1f4SJohn Baldwin }
26863858a1f4SJohn Baldwin 
26873858a1f4SJohn Baldwin static int
26883858a1f4SJohn Baldwin aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
26893858a1f4SJohn Baldwin {
26903858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26913858a1f4SJohn Baldwin 
26923858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26933858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
26943858a1f4SJohn Baldwin }
26953858a1f4SJohn Baldwin 
26963858a1f4SJohn Baldwin static int
26973858a1f4SJohn Baldwin aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
26983858a1f4SJohn Baldwin {
26993858a1f4SJohn Baldwin 
27003858a1f4SJohn Baldwin 	return (suword32(ujobp, (long)ujob));
27013858a1f4SJohn Baldwin }
27023858a1f4SJohn Baldwin 
27033858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops = {
27043858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin,
27053858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27063858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27073858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27083858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27093858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27103858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27113858a1f4SJohn Baldwin };
27123858a1f4SJohn Baldwin 
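/*
 * The aiocb_ops vectors keep the generic queueing and completion code
 * ABI-neutral: every access to the userland control block goes through the
 * table, so the same kern_*() paths serve native, 32-bit, and (with the
 * *_osigevent variants) old-sigevent callers.  A condensed, illustrative
 * sketch of the consumer side, loosely modelled on aio_aqueue() rather than
 * its literal body; the zone name is assumed here:
 *
 *	static int
 *	example_queue(struct thread *td, struct aiocb *ujob,
 *	    struct aiocb_ops *ops)
 *	{
 *		struct kaiocb *job;
 *		int error;
 *
 *		job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
 *
 * ops->copyin() hides whether the source uses the native or the 32-bit
 * layout; the store_*() hooks likewise write back in the caller's format.
 *
 *		error = ops->copyin(ujob, &job->uaiocb);
 *		if (error) {
 *			ops->store_error(ujob, error);
 *			uma_zfree(aiocb_zone, job);
 *			return (error);
 *		}
 *		ops->store_error(ujob, EINPROGRESS);
 *		ops->store_status(ujob, -1);
 *		... validate, charge limits, and enqueue the request ...
 *	}
 */
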
2713399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
27143858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops_osigevent = {
27153858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin_old_sigevent,
27163858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27173858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27183858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27193858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27203858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27213858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27223858a1f4SJohn Baldwin };
2723399e8c17SJohn Baldwin #endif
27243858a1f4SJohn Baldwin 
27253858a1f4SJohn Baldwin int
27263858a1f4SJohn Baldwin freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
27273858a1f4SJohn Baldwin {
27283858a1f4SJohn Baldwin 
27293858a1f4SJohn Baldwin 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
27303858a1f4SJohn Baldwin }
27313858a1f4SJohn Baldwin 
27323858a1f4SJohn Baldwin int
27333858a1f4SJohn Baldwin freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
27343858a1f4SJohn Baldwin {
27353858a1f4SJohn Baldwin 	struct timespec32 ts32;
27363858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
27373858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
27383858a1f4SJohn Baldwin 	uint32_t *ujoblist32;
27393858a1f4SJohn Baldwin 	int error, i;
27403858a1f4SJohn Baldwin 
27413858a1f4SJohn Baldwin 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
27423858a1f4SJohn Baldwin 		return (EINVAL);
27433858a1f4SJohn Baldwin 
27443858a1f4SJohn Baldwin 	if (uap->timeout) {
27453858a1f4SJohn Baldwin 		/* Get timespec struct. */
27463858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
27473858a1f4SJohn Baldwin 			return (error);
27483858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
27493858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
27503858a1f4SJohn Baldwin 		tsp = &ts;
27513858a1f4SJohn Baldwin 	} else
27523858a1f4SJohn Baldwin 		tsp = NULL;
27533858a1f4SJohn Baldwin 
27543858a1f4SJohn Baldwin 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
27553858a1f4SJohn Baldwin 	ujoblist32 = (uint32_t *)ujoblist;
27563858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
27573858a1f4SJohn Baldwin 	    sizeof(ujoblist32[0]));
27583858a1f4SJohn Baldwin 	if (error == 0) {
27593858a1f4SJohn Baldwin 		for (i = uap->nent - 1; i >= 0; i--)
27603858a1f4SJohn Baldwin 			ujoblist[i] = PTRIN(ujoblist32[i]);
27613858a1f4SJohn Baldwin 
27623858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
27633858a1f4SJohn Baldwin 	}
27643858a1f4SJohn Baldwin 	uma_zfree(aiol_zone, ujoblist);
27653858a1f4SJohn Baldwin 	return (error);
27663858a1f4SJohn Baldwin }
27673858a1f4SJohn Baldwin 
27683858a1f4SJohn Baldwin int
27693858a1f4SJohn Baldwin freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
27703858a1f4SJohn Baldwin {
27713858a1f4SJohn Baldwin 
27723858a1f4SJohn Baldwin 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
27733858a1f4SJohn Baldwin }
27743858a1f4SJohn Baldwin 
2775399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
27763858a1f4SJohn Baldwin int
2777399e8c17SJohn Baldwin freebsd6_freebsd32_aio_read(struct thread *td,
2778399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_read_args *uap)
27793858a1f4SJohn Baldwin {
27803858a1f4SJohn Baldwin 
27813858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
27823858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
27833858a1f4SJohn Baldwin }
2784399e8c17SJohn Baldwin #endif
27853858a1f4SJohn Baldwin 
27863858a1f4SJohn Baldwin int
27873858a1f4SJohn Baldwin freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
27883858a1f4SJohn Baldwin {
27893858a1f4SJohn Baldwin 
27903858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
27913858a1f4SJohn Baldwin 	    &aiocb32_ops));
27923858a1f4SJohn Baldwin }
27933858a1f4SJohn Baldwin 
2794399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
27953858a1f4SJohn Baldwin int
2796399e8c17SJohn Baldwin freebsd6_freebsd32_aio_write(struct thread *td,
2797399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_write_args *uap)
27983858a1f4SJohn Baldwin {
27993858a1f4SJohn Baldwin 
28003858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28013858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28023858a1f4SJohn Baldwin }
2803399e8c17SJohn Baldwin #endif
28043858a1f4SJohn Baldwin 
28053858a1f4SJohn Baldwin int
28063858a1f4SJohn Baldwin freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
28073858a1f4SJohn Baldwin {
28083858a1f4SJohn Baldwin 
28093858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28103858a1f4SJohn Baldwin 	    &aiocb32_ops));
28113858a1f4SJohn Baldwin }
28123858a1f4SJohn Baldwin 
28133858a1f4SJohn Baldwin int
28146160e12cSGleb Smirnoff freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
28156160e12cSGleb Smirnoff {
28166160e12cSGleb Smirnoff 
28176160e12cSGleb Smirnoff 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
28186160e12cSGleb Smirnoff 	    &aiocb32_ops));
28196160e12cSGleb Smirnoff }
28206160e12cSGleb Smirnoff 
28216160e12cSGleb Smirnoff int
28223858a1f4SJohn Baldwin freebsd32_aio_waitcomplete(struct thread *td,
28233858a1f4SJohn Baldwin     struct freebsd32_aio_waitcomplete_args *uap)
28243858a1f4SJohn Baldwin {
2825e588eeb1SJohn Baldwin 	struct timespec32 ts32;
28263858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
28273858a1f4SJohn Baldwin 	int error;
28283858a1f4SJohn Baldwin 
28293858a1f4SJohn Baldwin 	if (uap->timeout) {
28303858a1f4SJohn Baldwin 		/* Get timespec struct. */
28313858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
28323858a1f4SJohn Baldwin 		if (error)
28333858a1f4SJohn Baldwin 			return (error);
28343858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28353858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28363858a1f4SJohn Baldwin 		tsp = &ts;
28373858a1f4SJohn Baldwin 	} else
28383858a1f4SJohn Baldwin 		tsp = NULL;
28393858a1f4SJohn Baldwin 
28403858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
28413858a1f4SJohn Baldwin 	    &aiocb32_ops));
28423858a1f4SJohn Baldwin }
28433858a1f4SJohn Baldwin 
28443858a1f4SJohn Baldwin int
28453858a1f4SJohn Baldwin freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
28463858a1f4SJohn Baldwin {
28473858a1f4SJohn Baldwin 
28483858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
28493858a1f4SJohn Baldwin 	    &aiocb32_ops));
28503858a1f4SJohn Baldwin }
28513858a1f4SJohn Baldwin 
2852399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28533858a1f4SJohn Baldwin int
2854399e8c17SJohn Baldwin freebsd6_freebsd32_lio_listio(struct thread *td,
2855399e8c17SJohn Baldwin     struct freebsd6_freebsd32_lio_listio_args *uap)
28563858a1f4SJohn Baldwin {
28573858a1f4SJohn Baldwin 	struct aiocb **acb_list;
28583858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
28593858a1f4SJohn Baldwin 	struct osigevent32 osig;
28603858a1f4SJohn Baldwin 	uint32_t *acb_list32;
28613858a1f4SJohn Baldwin 	int error, i, nent;
28623858a1f4SJohn Baldwin 
28633858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
28643858a1f4SJohn Baldwin 		return (EINVAL);
28653858a1f4SJohn Baldwin 
28663858a1f4SJohn Baldwin 	nent = uap->nent;
28673858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
28683858a1f4SJohn Baldwin 		return (EINVAL);
28693858a1f4SJohn Baldwin 
28703858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
28713858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
28723858a1f4SJohn Baldwin 		if (error)
28733858a1f4SJohn Baldwin 			return (error);
28743858a1f4SJohn Baldwin 		error = convert_old_sigevent32(&osig, &sig);
28753858a1f4SJohn Baldwin 		if (error)
28763858a1f4SJohn Baldwin 			return (error);
28773858a1f4SJohn Baldwin 		sigp = &sig;
28783858a1f4SJohn Baldwin 	} else
28793858a1f4SJohn Baldwin 		sigp = NULL;
28803858a1f4SJohn Baldwin 
28813858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
28823858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
28833858a1f4SJohn Baldwin 	if (error) {
28843858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
28853858a1f4SJohn Baldwin 		return (error);
28863858a1f4SJohn Baldwin 	}
28873858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
28883858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
28893858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
28903858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
28913858a1f4SJohn Baldwin 
28923858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
28933858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
28943858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent);
28953858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
28963858a1f4SJohn Baldwin 	return (error);
28973858a1f4SJohn Baldwin }
2898399e8c17SJohn Baldwin #endif
28993858a1f4SJohn Baldwin 
29003858a1f4SJohn Baldwin int
29013858a1f4SJohn Baldwin freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
29023858a1f4SJohn Baldwin {
29033858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29043858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29053858a1f4SJohn Baldwin 	struct sigevent32 sig32;
29063858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29073858a1f4SJohn Baldwin 	int error, i, nent;
29083858a1f4SJohn Baldwin 
29093858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29103858a1f4SJohn Baldwin 		return (EINVAL);
29113858a1f4SJohn Baldwin 
29123858a1f4SJohn Baldwin 	nent = uap->nent;
29133858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
29143858a1f4SJohn Baldwin 		return (EINVAL);
29153858a1f4SJohn Baldwin 
29163858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29173858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig32, sizeof(sig32));
29183858a1f4SJohn Baldwin 		if (error)
29193858a1f4SJohn Baldwin 			return (error);
29203858a1f4SJohn Baldwin 		error = convert_sigevent32(&sig32, &sig);
29213858a1f4SJohn Baldwin 		if (error)
29223858a1f4SJohn Baldwin 			return (error);
29233858a1f4SJohn Baldwin 		sigp = &sig;
29243858a1f4SJohn Baldwin 	} else
29253858a1f4SJohn Baldwin 		sigp = NULL;
29263858a1f4SJohn Baldwin 
29273858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29283858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29293858a1f4SJohn Baldwin 	if (error) {
29303858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29313858a1f4SJohn Baldwin 		return (error);
29323858a1f4SJohn Baldwin 	}
29333858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29343858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29353858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29363858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29373858a1f4SJohn Baldwin 
29383858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29393858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29403858a1f4SJohn Baldwin 	    &aiocb32_ops);
29413858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29423858a1f4SJohn Baldwin 	return (error);
29433858a1f4SJohn Baldwin }
29443858a1f4SJohn Baldwin 
29453858a1f4SJohn Baldwin #endif
2946