xref: /freebsd/sys/kern/vfs_aio.c (revision 005ce8e4e6041b626ba4b050db839e6764f44043)
19454b2d8SWarner Losh /*-
2ee877a35SJohn Dyson  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
3ee877a35SJohn Dyson  *
4ee877a35SJohn Dyson  * Redistribution and use in source and binary forms, with or without
5ee877a35SJohn Dyson  * modification, are permitted provided that the following conditions
6ee877a35SJohn Dyson  * are met:
7ee877a35SJohn Dyson  * 1. Redistributions of source code must retain the above copyright
8ee877a35SJohn Dyson  *    notice, this list of conditions and the following disclaimer.
9ee877a35SJohn Dyson  * 2. John S. Dyson's name may not be used to endorse or promote products
10ee877a35SJohn Dyson  *    derived from this software without specific prior written permission.
11ee877a35SJohn Dyson  *
12ee877a35SJohn Dyson  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
13ee877a35SJohn Dyson  * bad that happens because of using this software isn't the responsibility
14ee877a35SJohn Dyson  * of the author.  This software is distributed AS-IS.
15ee877a35SJohn Dyson  */
16ee877a35SJohn Dyson 
17ee877a35SJohn Dyson /*
188a6472b7SPeter Dufault  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19ee877a35SJohn Dyson  */
20ee877a35SJohn Dyson 
21677b542eSDavid E. O'Brien #include <sys/cdefs.h>
22677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
23677b542eSDavid E. O'Brien 
243858a1f4SJohn Baldwin #include "opt_compat.h"
253858a1f4SJohn Baldwin 
26ee877a35SJohn Dyson #include <sys/param.h>
27ee877a35SJohn Dyson #include <sys/systm.h>
28f591779bSSeigo Tanimura #include <sys/malloc.h>
299626b608SPoul-Henning Kamp #include <sys/bio.h>
30a5c9bce7SBruce Evans #include <sys/buf.h>
314a144410SRobert Watson #include <sys/capsicum.h>
3275b8b3b2SJohn Baldwin #include <sys/eventhandler.h>
33ee877a35SJohn Dyson #include <sys/sysproto.h>
34ee877a35SJohn Dyson #include <sys/filedesc.h>
35ee877a35SJohn Dyson #include <sys/kernel.h>
3677409fe1SPoul-Henning Kamp #include <sys/module.h>
37c9a970a7SAlan Cox #include <sys/kthread.h>
38ee877a35SJohn Dyson #include <sys/fcntl.h>
39ee877a35SJohn Dyson #include <sys/file.h>
40104a9b7eSAlexander Kabaev #include <sys/limits.h>
41fdebd4f0SBruce Evans #include <sys/lock.h>
4235e0e5b3SJohn Baldwin #include <sys/mutex.h>
43ee877a35SJohn Dyson #include <sys/unistd.h>
446aeb05d7STom Rhodes #include <sys/posix4.h>
45ee877a35SJohn Dyson #include <sys/proc.h>
462d2f8ae7SBruce Evans #include <sys/resourcevar.h>
47ee877a35SJohn Dyson #include <sys/signalvar.h>
48bfbbc4aaSJason Evans #include <sys/protosw.h>
4989f6b863SAttilio Rao #include <sys/rwlock.h>
501ce91824SDavid Xu #include <sys/sema.h>
511ce91824SDavid Xu #include <sys/socket.h>
52bfbbc4aaSJason Evans #include <sys/socketvar.h>
5321d56e9cSAlfred Perlstein #include <sys/syscall.h>
5421d56e9cSAlfred Perlstein #include <sys/sysent.h>
55a624e84fSJohn Dyson #include <sys/sysctl.h>
569c20dc99SJohn Baldwin #include <sys/syslog.h>
57ee99e978SBruce Evans #include <sys/sx.h>
581ce91824SDavid Xu #include <sys/taskqueue.h>
59fd3bf775SJohn Dyson #include <sys/vnode.h>
60fd3bf775SJohn Dyson #include <sys/conf.h>
61cb679c38SJonathan Lemon #include <sys/event.h>
6299eee864SDavid Xu #include <sys/mount.h>
63f743d981SAlexander Motin #include <geom/geom.h>
64ee877a35SJohn Dyson 
651ce91824SDavid Xu #include <machine/atomic.h>
661ce91824SDavid Xu 
67ee877a35SJohn Dyson #include <vm/vm.h>
68f743d981SAlexander Motin #include <vm/vm_page.h>
69ee877a35SJohn Dyson #include <vm/vm_extern.h>
702244ea07SJohn Dyson #include <vm/pmap.h>
712244ea07SJohn Dyson #include <vm/vm_map.h>
7299eee864SDavid Xu #include <vm/vm_object.h>
73c897b813SJeff Roberson #include <vm/uma.h>
74ee877a35SJohn Dyson #include <sys/aio.h>
755aaef07cSJohn Dyson 
76eb8e6d52SEivind Eklund /*
77eb8e6d52SEivind Eklund  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
7899eee864SDavid Xu  * overflow. (XXX will be removed soon.)
79eb8e6d52SEivind Eklund  */
8099eee864SDavid Xu static u_long jobrefid;
812244ea07SJohn Dyson 
8299eee864SDavid Xu /*
8399eee864SDavid Xu  * Counter for aio_fsync.
8499eee864SDavid Xu  */
8599eee864SDavid Xu static uint64_t jobseqno;
8699eee864SDavid Xu 
8784af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC
882244ea07SJohn Dyson #define MAX_AIO_PER_PROC	32
8984af4da6SJohn Dyson #endif
9084af4da6SJohn Dyson 
9184af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC
922244ea07SJohn Dyson #define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
9384af4da6SJohn Dyson #endif
9484af4da6SJohn Dyson 
9584af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE
962244ea07SJohn Dyson #define	MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
9784af4da6SJohn Dyson #endif
9884af4da6SJohn Dyson 
9984af4da6SJohn Dyson #ifndef MAX_BUF_AIO
10084af4da6SJohn Dyson #define MAX_BUF_AIO		16
10184af4da6SJohn Dyson #endif
10284af4da6SJohn Dyson 
103e603be7aSRobert Watson FEATURE(aio, "Asynchronous I/O");
104e603be7aSRobert Watson 
1053858a1f4SJohn Baldwin static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
1063858a1f4SJohn Baldwin 
1070dd6c035SJohn Baldwin static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,
1080dd6c035SJohn Baldwin     "Async IO management");
109eb8e6d52SEivind Eklund 
110f3215338SJohn Baldwin static int enable_aio_unsafe = 0;
111f3215338SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
112f3215338SJohn Baldwin     "Permit asynchronous IO on all file types, not just known-safe types");
113f3215338SJohn Baldwin 
1149c20dc99SJohn Baldwin static unsigned int unsafe_warningcnt = 1;
1159c20dc99SJohn Baldwin SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
1169c20dc99SJohn Baldwin     &unsafe_warningcnt, 0,
1179c20dc99SJohn Baldwin     "Warnings that will be triggered upon failed IO requests on unsafe files");
1189c20dc99SJohn Baldwin 
119303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS;
1200dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
12139314b7dSJohn Baldwin     "Maximum number of kernel processes to use for handling async IO ");
122a624e84fSJohn Dyson 
123eb8e6d52SEivind Eklund static int num_aio_procs = 0;
1240dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
12539314b7dSJohn Baldwin     "Number of presently active kernel processes for async IO");
126a624e84fSJohn Dyson 
127eb8e6d52SEivind Eklund /*
128eb8e6d52SEivind Eklund  * The code will adjust the actual number of AIO processes towards this
129eb8e6d52SEivind Eklund  * number when it gets a chance.
130eb8e6d52SEivind Eklund  */
131eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
132eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
1330dd6c035SJohn Baldwin     0,
1340dd6c035SJohn Baldwin     "Preferred number of ready kernel processes for async IO");
135a624e84fSJohn Dyson 
136eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
137eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
138eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
139a624e84fSJohn Dyson 
140eb8e6d52SEivind Eklund static int num_queue_count = 0;
141eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
142eb8e6d52SEivind Eklund     "Number of queued aio requests");
143a624e84fSJohn Dyson 
144eb8e6d52SEivind Eklund static int num_buf_aio = 0;
145eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
146eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
147fd3bf775SJohn Dyson 
14839314b7dSJohn Baldwin /* Number of async I/O processes in the process of being started */
149a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
150eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
151fd3bf775SJohn Dyson 
152eb8e6d52SEivind Eklund static int aiod_lifetime;
153eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
154eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
15584af4da6SJohn Dyson 
156eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
157eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
1580dd6c035SJohn Baldwin     0,
1590dd6c035SJohn Baldwin     "Maximum active aio requests per process (stored in the process)");
160eb8e6d52SEivind Eklund 
161eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
162eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
163eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
164eb8e6d52SEivind Eklund     "Maximum queued aio requests per process (stored in the process)");
165eb8e6d52SEivind Eklund 
166eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
167eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
168eb8e6d52SEivind Eklund     "Maximum buf aio requests per process (stored in the process)");
169eb8e6d52SEivind Eklund 
170399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
1710972628aSDavid Xu typedef struct oaiocb {
1720972628aSDavid Xu 	int	aio_fildes;		/* File descriptor */
1730972628aSDavid Xu 	off_t	aio_offset;		/* File offset for I/O */
1740972628aSDavid Xu 	volatile void *aio_buf;         /* I/O buffer in process space */
1750972628aSDavid Xu 	size_t	aio_nbytes;		/* Number of bytes for I/O */
1760972628aSDavid Xu 	struct	osigevent aio_sigevent;	/* Signal to deliver */
1770972628aSDavid Xu 	int	aio_lio_opcode;		/* LIO opcode */
1780972628aSDavid Xu 	int	aio_reqprio;		/* Request priority -- ignored */
1790972628aSDavid Xu 	struct	__aiocb_private	_aiocb_private;
1800972628aSDavid Xu } oaiocb_t;
181399e8c17SJohn Baldwin #endif
1820972628aSDavid Xu 
1831aa4c324SDavid Xu /*
1845652770dSJohn Baldwin  * Below is a key of locks used to protect each member of struct kaiocb
1851aa4c324SDavid Xu  * aioliojob and kaioinfo and any backends.
1861aa4c324SDavid Xu  *
1871aa4c324SDavid Xu  * * - need not protected
188759ccccaSDavid Xu  * a - locked by kaioinfo lock
1891aa4c324SDavid Xu  * b - locked by backend lock, the backend lock can be null in some cases,
1901aa4c324SDavid Xu  *     for example, BIO belongs to this type, in this case, proc lock is
1911aa4c324SDavid Xu  *     reused.
1921aa4c324SDavid Xu  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
1931aa4c324SDavid Xu  */
1941aa4c324SDavid Xu 
1951aa4c324SDavid Xu /*
196f3215338SJohn Baldwin  * If the routine that services an AIO request blocks while running in an
197f3215338SJohn Baldwin  * AIO kernel process it can starve other I/O requests.  BIO requests
198f3215338SJohn Baldwin  * queued via aio_qphysio() complete in GEOM and do not use AIO kernel
199f3215338SJohn Baldwin  * processes at all.  Socket I/O requests use a separate pool of
200f3215338SJohn Baldwin  * kprocs and also force non-blocking I/O.  Other file I/O requests
201f3215338SJohn Baldwin  * use the generic fo_read/fo_write operations which can block.  The
202f3215338SJohn Baldwin  * fsync and mlock operations can also block while executing.  Ideally
203f3215338SJohn Baldwin  * none of these requests would block while executing.
204f3215338SJohn Baldwin  *
205f3215338SJohn Baldwin  * Note that the service routines cannot toggle O_NONBLOCK in the file
206f3215338SJohn Baldwin  * structure directly while handling a request due to races with
207f3215338SJohn Baldwin  * userland threads.
2081aa4c324SDavid Xu  */
2091aa4c324SDavid Xu 
21048dac059SAlan Cox /* jobflags */
211f3215338SJohn Baldwin #define	KAIOCB_QUEUEING		0x01
212f3215338SJohn Baldwin #define	KAIOCB_CANCELLED	0x02
213f3215338SJohn Baldwin #define	KAIOCB_CANCELLING	0x04
2145652770dSJohn Baldwin #define	KAIOCB_CHECKSYNC	0x08
215f3215338SJohn Baldwin #define	KAIOCB_CLEARED		0x10
216f3215338SJohn Baldwin #define	KAIOCB_FINISHED		0x20
21748dac059SAlan Cox 
2182244ea07SJohn Dyson /*
2192244ea07SJohn Dyson  * AIO process info
2202244ea07SJohn Dyson  */
22184af4da6SJohn Dyson #define AIOP_FREE	0x1			/* proc on free queue */
22284af4da6SJohn Dyson 
22339314b7dSJohn Baldwin struct aioproc {
22439314b7dSJohn Baldwin 	int	aioprocflags;			/* (c) AIO proc flags */
22539314b7dSJohn Baldwin 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
22639314b7dSJohn Baldwin 	struct	proc *aioproc;			/* (*) the AIO proc */
2272244ea07SJohn Dyson };
2282244ea07SJohn Dyson 
22984af4da6SJohn Dyson /*
23084af4da6SJohn Dyson  * data-structure for lio signal management
23184af4da6SJohn Dyson  */
2321ce91824SDavid Xu struct aioliojob {
2331aa4c324SDavid Xu 	int	lioj_flags;			/* (a) listio flags */
2341aa4c324SDavid Xu 	int	lioj_count;			/* (a) listio flags */
2351aa4c324SDavid Xu 	int	lioj_finished_count;		/* (a) listio flags */
2361aa4c324SDavid Xu 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
2371aa4c324SDavid Xu 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
2381aa4c324SDavid Xu 	struct	knlist klist;			/* (a) list of knotes */
2391aa4c324SDavid Xu 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
24084af4da6SJohn Dyson };
2411ce91824SDavid Xu 
24284af4da6SJohn Dyson #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
24384af4da6SJohn Dyson #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
24469cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
24584af4da6SJohn Dyson 
24684af4da6SJohn Dyson /*
24784af4da6SJohn Dyson  * per process aio data structure
24884af4da6SJohn Dyson  */
2492244ea07SJohn Dyson struct kaioinfo {
250759ccccaSDavid Xu 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
2511aa4c324SDavid Xu 	int	kaio_flags;		/* (a) per process kaio flags */
2521aa4c324SDavid Xu 	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
2531aa4c324SDavid Xu 	int	kaio_active_count;	/* (c) number of currently used AIOs */
2541aa4c324SDavid Xu 	int	kaio_qallowed_count;	/* (*) maxiumu size of AIO queue */
2551aa4c324SDavid Xu 	int	kaio_count;		/* (a) size of AIO queue */
2561aa4c324SDavid Xu 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
2571aa4c324SDavid Xu 	int	kaio_buffer_count;	/* (a) number of physio buffers */
2585652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
2595652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
2601aa4c324SDavid Xu 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
2615652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
2625652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
263f3215338SJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
26439314b7dSJohn Baldwin 	struct	task kaio_task;		/* (*) task to kick aio processes */
265f3215338SJohn Baldwin 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
2662244ea07SJohn Dyson };
2672244ea07SJohn Dyson 
268759ccccaSDavid Xu #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
269759ccccaSDavid Xu #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
270759ccccaSDavid Xu #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
271759ccccaSDavid Xu #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
272759ccccaSDavid Xu 
27384af4da6SJohn Dyson #define KAIO_RUNDOWN	0x1	/* process is being run down */
2740dd6c035SJohn Baldwin #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
275fd3bf775SJohn Dyson 
2763858a1f4SJohn Baldwin /*
2773858a1f4SJohn Baldwin  * Operations used to interact with userland aio control blocks.
2783858a1f4SJohn Baldwin  * Different ABIs provide their own operations.
2793858a1f4SJohn Baldwin  */
2803858a1f4SJohn Baldwin struct aiocb_ops {
2813858a1f4SJohn Baldwin 	int	(*copyin)(struct aiocb *ujob, struct aiocb *kjob);
2823858a1f4SJohn Baldwin 	long	(*fetch_status)(struct aiocb *ujob);
2833858a1f4SJohn Baldwin 	long	(*fetch_error)(struct aiocb *ujob);
2843858a1f4SJohn Baldwin 	int	(*store_status)(struct aiocb *ujob, long status);
2853858a1f4SJohn Baldwin 	int	(*store_error)(struct aiocb *ujob, long error);
2863858a1f4SJohn Baldwin 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
2873858a1f4SJohn Baldwin 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
2883858a1f4SJohn Baldwin };
2893858a1f4SJohn Baldwin 
29039314b7dSJohn Baldwin static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
2911ce91824SDavid Xu static struct sema aio_newproc_sem;
2921ce91824SDavid Xu static struct mtx aio_job_mtx;
2935652770dSJohn Baldwin static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
2941ce91824SDavid Xu static struct unrhdr *aiod_unr;
2952244ea07SJohn Dyson 
2966a1162d4SAlexander Leidinger void		aio_init_aioinfo(struct proc *p);
297723d37c0SKonstantin Belousov static int	aio_onceonly(void);
2985652770dSJohn Baldwin static int	aio_free_entry(struct kaiocb *job);
2995652770dSJohn Baldwin static void	aio_process_rw(struct kaiocb *job);
3005652770dSJohn Baldwin static void	aio_process_sync(struct kaiocb *job);
3015652770dSJohn Baldwin static void	aio_process_mlock(struct kaiocb *job);
302f3215338SJohn Baldwin static void	aio_schedule_fsync(void *context, int pending);
3031ce91824SDavid Xu static int	aio_newproc(int *);
3045652770dSJohn Baldwin int		aio_aqueue(struct thread *td, struct aiocb *ujob,
3053858a1f4SJohn Baldwin 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
306f3215338SJohn Baldwin static int	aio_queue_file(struct file *fp, struct kaiocb *job);
307f743d981SAlexander Motin static void	aio_physwakeup(struct bio *bp);
30875b8b3b2SJohn Baldwin static void	aio_proc_rundown(void *arg, struct proc *p);
3090dd6c035SJohn Baldwin static void	aio_proc_rundown_exec(void *arg, struct proc *p,
3100dd6c035SJohn Baldwin 		    struct image_params *imgp);
3115652770dSJohn Baldwin static int	aio_qphysio(struct proc *p, struct kaiocb *job);
3121ce91824SDavid Xu static void	aio_daemon(void *param);
313f3215338SJohn Baldwin static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
314*005ce8e4SJohn Baldwin static bool	aio_clear_cancel_function_locked(struct kaiocb *job);
315dbbccfe9SDavid Xu static int	aio_kick(struct proc *userp);
31699eee864SDavid Xu static void	aio_kick_nowait(struct proc *userp);
31799eee864SDavid Xu static void	aio_kick_helper(void *context, int pending);
31821d56e9cSAlfred Perlstein static int	filt_aioattach(struct knote *kn);
31921d56e9cSAlfred Perlstein static void	filt_aiodetach(struct knote *kn);
32021d56e9cSAlfred Perlstein static int	filt_aio(struct knote *kn, long hint);
32169cd28daSDoug Ambrisko static int	filt_lioattach(struct knote *kn);
32269cd28daSDoug Ambrisko static void	filt_liodetach(struct knote *kn);
32369cd28daSDoug Ambrisko static int	filt_lio(struct knote *kn, long hint);
3242244ea07SJohn Dyson 
325eb8e6d52SEivind Eklund /*
326eb8e6d52SEivind Eklund  * Zones for:
327eb8e6d52SEivind Eklund  * 	kaio	Per process async io info
32839314b7dSJohn Baldwin  *	aiop	async io process data
329eb8e6d52SEivind Eklund  *	aiocb	async io jobs
330eb8e6d52SEivind Eklund  *	aiol	list io job pointer - internal to aio_suspend XXX
331eb8e6d52SEivind Eklund  *	aiolio	list io jobs
332eb8e6d52SEivind Eklund  */
333c897b813SJeff Roberson static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
334fd3bf775SJohn Dyson 
335eb8e6d52SEivind Eklund /* kqueue filters for aio */
336e76d823bSRobert Watson static struct filterops aio_filtops = {
337e76d823bSRobert Watson 	.f_isfd = 0,
338e76d823bSRobert Watson 	.f_attach = filt_aioattach,
339e76d823bSRobert Watson 	.f_detach = filt_aiodetach,
340e76d823bSRobert Watson 	.f_event = filt_aio,
341e76d823bSRobert Watson };
342e76d823bSRobert Watson static struct filterops lio_filtops = {
343e76d823bSRobert Watson 	.f_isfd = 0,
344e76d823bSRobert Watson 	.f_attach = filt_lioattach,
345e76d823bSRobert Watson 	.f_detach = filt_liodetach,
346e76d823bSRobert Watson 	.f_event = filt_lio
347e76d823bSRobert Watson };
34821d56e9cSAlfred Perlstein 
34975b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag;
35075b8b3b2SJohn Baldwin 
351c85650caSJohn Baldwin TASKQUEUE_DEFINE_THREAD(aiod_kick);
3521ce91824SDavid Xu 
353eb8e6d52SEivind Eklund /*
354eb8e6d52SEivind Eklund  * Main operations function for use as a kernel module.
355eb8e6d52SEivind Eklund  */
35621d56e9cSAlfred Perlstein static int
35721d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg)
35821d56e9cSAlfred Perlstein {
35921d56e9cSAlfred Perlstein 	int error = 0;
36021d56e9cSAlfred Perlstein 
36121d56e9cSAlfred Perlstein 	switch (cmd) {
36221d56e9cSAlfred Perlstein 	case MOD_LOAD:
36321d56e9cSAlfred Perlstein 		aio_onceonly();
36421d56e9cSAlfred Perlstein 		break;
36521d56e9cSAlfred Perlstein 	case MOD_SHUTDOWN:
36621d56e9cSAlfred Perlstein 		break;
36721d56e9cSAlfred Perlstein 	default:
368f3215338SJohn Baldwin 		error = EOPNOTSUPP;
36921d56e9cSAlfred Perlstein 		break;
37021d56e9cSAlfred Perlstein 	}
37121d56e9cSAlfred Perlstein 	return (error);
37221d56e9cSAlfred Perlstein }
37321d56e9cSAlfred Perlstein 
37421d56e9cSAlfred Perlstein static moduledata_t aio_mod = {
37521d56e9cSAlfred Perlstein 	"aio",
37621d56e9cSAlfred Perlstein 	&aio_modload,
37721d56e9cSAlfred Perlstein 	NULL
37821d56e9cSAlfred Perlstein };
37921d56e9cSAlfred Perlstein 
380399e8c17SJohn Baldwin DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
38121d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1);
38221d56e9cSAlfred Perlstein 
383fd3bf775SJohn Dyson /*
3842244ea07SJohn Dyson  * Startup initialization
3852244ea07SJohn Dyson  */
386723d37c0SKonstantin Belousov static int
38721d56e9cSAlfred Perlstein aio_onceonly(void)
388fd3bf775SJohn Dyson {
38921d56e9cSAlfred Perlstein 
39075b8b3b2SJohn Baldwin 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
39175b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
3920dd6c035SJohn Baldwin 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
3930dd6c035SJohn Baldwin 	    NULL, EVENTHANDLER_PRI_ANY);
39421d56e9cSAlfred Perlstein 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
39569cd28daSDoug Ambrisko 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
3962244ea07SJohn Dyson 	TAILQ_INIT(&aio_freeproc);
3971ce91824SDavid Xu 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
3981ce91824SDavid Xu 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
3992244ea07SJohn Dyson 	TAILQ_INIT(&aio_jobs);
4001ce91824SDavid Xu 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
401c897b813SJeff Roberson 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
402c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
40339314b7dSJohn Baldwin 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
404c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4055652770dSJohn Baldwin 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
406c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
407c897b813SJeff Roberson 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
408c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4091ce91824SDavid Xu 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
410c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
41184af4da6SJohn Dyson 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
412fd3bf775SJohn Dyson 	jobrefid = 1;
413399e8c17SJohn Baldwin 	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
414c844abc9SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
41586d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
41686d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
417723d37c0SKonstantin Belousov 
418723d37c0SKonstantin Belousov 	return (0);
4192244ea07SJohn Dyson }
4202244ea07SJohn Dyson 
421eb8e6d52SEivind Eklund /*
422bfbbc4aaSJason Evans  * Init the per-process aioinfo structure.  The aioinfo limits are set
423bfbbc4aaSJason Evans  * per-process for user limit (resource) management.
4242244ea07SJohn Dyson  */
4256a1162d4SAlexander Leidinger void
426fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p)
427fd3bf775SJohn Dyson {
4282244ea07SJohn Dyson 	struct kaioinfo *ki;
429ac41f2efSAlfred Perlstein 
430a163d034SWarner Losh 	ki = uma_zalloc(kaio_zone, M_WAITOK);
4319889bbacSKonstantin Belousov 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
43284af4da6SJohn Dyson 	ki->kaio_flags = 0;
433a624e84fSJohn Dyson 	ki->kaio_maxactive_count = max_aio_per_proc;
4342244ea07SJohn Dyson 	ki->kaio_active_count = 0;
435a624e84fSJohn Dyson 	ki->kaio_qallowed_count = max_aio_queue_per_proc;
4361ce91824SDavid Xu 	ki->kaio_count = 0;
43784af4da6SJohn Dyson 	ki->kaio_ballowed_count = max_buf_aio;
438fd3bf775SJohn Dyson 	ki->kaio_buffer_count = 0;
4391ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_all);
4401ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_done);
4412244ea07SJohn Dyson 	TAILQ_INIT(&ki->kaio_jobqueue);
44284af4da6SJohn Dyson 	TAILQ_INIT(&ki->kaio_liojoblist);
44399eee864SDavid Xu 	TAILQ_INIT(&ki->kaio_syncqueue);
444f3215338SJohn Baldwin 	TAILQ_INIT(&ki->kaio_syncready);
44599eee864SDavid Xu 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
446f3215338SJohn Baldwin 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
4473999ebe3SAlan Cox 	PROC_LOCK(p);
4483999ebe3SAlan Cox 	if (p->p_aioinfo == NULL) {
4493999ebe3SAlan Cox 		p->p_aioinfo = ki;
4503999ebe3SAlan Cox 		PROC_UNLOCK(p);
4513999ebe3SAlan Cox 	} else {
4523999ebe3SAlan Cox 		PROC_UNLOCK(p);
453759ccccaSDavid Xu 		mtx_destroy(&ki->kaio_mtx);
4543999ebe3SAlan Cox 		uma_zfree(kaio_zone, ki);
4552244ea07SJohn Dyson 	}
456bfbbc4aaSJason Evans 
45722035f47SOleksandr Tymoshenko 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
4581ce91824SDavid Xu 		aio_newproc(NULL);
4592244ea07SJohn Dyson }
4602244ea07SJohn Dyson 
4614c0fb2cfSDavid Xu static int
4624c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
4634c0fb2cfSDavid Xu {
464cf7d9a8cSDavid Xu 	struct thread *td;
465cf7d9a8cSDavid Xu 	int error;
466759ccccaSDavid Xu 
467cf7d9a8cSDavid Xu 	error = sigev_findtd(p, sigev, &td);
468cf7d9a8cSDavid Xu 	if (error)
469cf7d9a8cSDavid Xu 		return (error);
4704c0fb2cfSDavid Xu 	if (!KSI_ONQ(ksi)) {
471cf7d9a8cSDavid Xu 		ksiginfo_set_sigev(ksi, sigev);
4724c0fb2cfSDavid Xu 		ksi->ksi_code = SI_ASYNCIO;
4734c0fb2cfSDavid Xu 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
474cf7d9a8cSDavid Xu 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
4754c0fb2cfSDavid Xu 	}
476759ccccaSDavid Xu 	PROC_UNLOCK(p);
477cf7d9a8cSDavid Xu 	return (error);
4784c0fb2cfSDavid Xu }
4794c0fb2cfSDavid Xu 
4802244ea07SJohn Dyson /*
481bfbbc4aaSJason Evans  * Free a job entry.  Wait for completion if it is currently active, but don't
482bfbbc4aaSJason Evans  * delay forever.  If we delay, we return a flag that says that we have to
483bfbbc4aaSJason Evans  * restart the queue scan.
4842244ea07SJohn Dyson  */
48588ed460eSAlan Cox static int
4865652770dSJohn Baldwin aio_free_entry(struct kaiocb *job)
487fd3bf775SJohn Dyson {
4882244ea07SJohn Dyson 	struct kaioinfo *ki;
4891ce91824SDavid Xu 	struct aioliojob *lj;
4902244ea07SJohn Dyson 	struct proc *p;
4912244ea07SJohn Dyson 
4925652770dSJohn Baldwin 	p = job->userproc;
4931ce91824SDavid Xu 	MPASS(curproc == p);
4942244ea07SJohn Dyson 	ki = p->p_aioinfo;
4951ce91824SDavid Xu 	MPASS(ki != NULL);
4961ce91824SDavid Xu 
497759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
498f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
499759ccccaSDavid Xu 
5001ce91824SDavid Xu 	atomic_subtract_int(&num_queue_count, 1);
5011ce91824SDavid Xu 
5021ce91824SDavid Xu 	ki->kaio_count--;
5031ce91824SDavid Xu 	MPASS(ki->kaio_count >= 0);
5041ce91824SDavid Xu 
5055652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
5065652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
50727b8220dSDavid Xu 
5085652770dSJohn Baldwin 	lj = job->lio;
50984af4da6SJohn Dyson 	if (lj) {
5101ce91824SDavid Xu 		lj->lioj_count--;
5111ce91824SDavid Xu 		lj->lioj_finished_count--;
5121ce91824SDavid Xu 
513a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
5141ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
5151ce91824SDavid Xu 			/* lio is going away, we need to destroy any knotes */
5161ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
517759ccccaSDavid Xu 			PROC_LOCK(p);
5181ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
519759ccccaSDavid Xu 			PROC_UNLOCK(p);
5201ce91824SDavid Xu 			uma_zfree(aiolio_zone, lj);
52184af4da6SJohn Dyson 		}
52284af4da6SJohn Dyson 	}
5231ce91824SDavid Xu 
5245652770dSJohn Baldwin 	/* job is going away, we need to destroy any knotes */
5255652770dSJohn Baldwin 	knlist_delete(&job->klist, curthread, 1);
526759ccccaSDavid Xu 	PROC_LOCK(p);
5275652770dSJohn Baldwin 	sigqueue_take(&job->ksi);
528759ccccaSDavid Xu 	PROC_UNLOCK(p);
5291ce91824SDavid Xu 
530759ccccaSDavid Xu 	AIO_UNLOCK(ki);
5312a522eb9SJohn Baldwin 
5322a522eb9SJohn Baldwin 	/*
5332a522eb9SJohn Baldwin 	 * The thread argument here is used to find the owning process
5342a522eb9SJohn Baldwin 	 * and is also passed to fo_close() which may pass it to various
5352a522eb9SJohn Baldwin 	 * places such as devsw close() routines.  Because of that, we
5362a522eb9SJohn Baldwin 	 * need a thread pointer from the process owning the job that is
5372a522eb9SJohn Baldwin 	 * persistent and won't disappear out from under us or move to
5382a522eb9SJohn Baldwin 	 * another process.
5392a522eb9SJohn Baldwin 	 *
5402a522eb9SJohn Baldwin 	 * Currently, all the callers of this function call it to remove
5415652770dSJohn Baldwin 	 * a kaiocb from the current process' job list either via a
5422a522eb9SJohn Baldwin 	 * syscall or due to the current process calling exit() or
5432a522eb9SJohn Baldwin 	 * execve().  Thus, we know that p == curproc.  We also know that
5442a522eb9SJohn Baldwin 	 * curthread can't exit since we are curthread.
5452a522eb9SJohn Baldwin 	 *
5462a522eb9SJohn Baldwin 	 * Therefore, we use curthread as the thread to pass to
5472a522eb9SJohn Baldwin 	 * knlist_delete().  This does mean that it is possible for the
5482a522eb9SJohn Baldwin 	 * thread pointer at close time to differ from the thread pointer
5492a522eb9SJohn Baldwin 	 * at open time, but this is already true of file descriptors in
5502a522eb9SJohn Baldwin 	 * a multithreaded process.
551b40ce416SJulian Elischer 	 */
5525652770dSJohn Baldwin 	if (job->fd_file)
5535652770dSJohn Baldwin 		fdrop(job->fd_file, curthread);
5545652770dSJohn Baldwin 	crfree(job->cred);
5555652770dSJohn Baldwin 	uma_zfree(aiocb_zone, job);
556759ccccaSDavid Xu 	AIO_LOCK(ki);
5571ce91824SDavid Xu 
558ac41f2efSAlfred Perlstein 	return (0);
5592244ea07SJohn Dyson }
5602244ea07SJohn Dyson 
561993182e5SAlexander Leidinger static void
5620dd6c035SJohn Baldwin aio_proc_rundown_exec(void *arg, struct proc *p,
5630dd6c035SJohn Baldwin     struct image_params *imgp __unused)
564993182e5SAlexander Leidinger {
565993182e5SAlexander Leidinger    	aio_proc_rundown(arg, p);
566993182e5SAlexander Leidinger }
567993182e5SAlexander Leidinger 
568f3215338SJohn Baldwin static int
569f3215338SJohn Baldwin aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
570f3215338SJohn Baldwin {
571f3215338SJohn Baldwin 	aio_cancel_fn_t *func;
572f3215338SJohn Baldwin 	int cancelled;
573f3215338SJohn Baldwin 
574f3215338SJohn Baldwin 	AIO_LOCK_ASSERT(ki, MA_OWNED);
575f3215338SJohn Baldwin 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
576f3215338SJohn Baldwin 		return (0);
577f3215338SJohn Baldwin 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
578f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLED;
579f3215338SJohn Baldwin 
580f3215338SJohn Baldwin 	func = job->cancel_fn;
581f3215338SJohn Baldwin 
582f3215338SJohn Baldwin 	/*
583f3215338SJohn Baldwin 	 * If there is no cancel routine, just leave the job marked as
584f3215338SJohn Baldwin 	 * cancelled.  The job should be in active use by a caller who
585f3215338SJohn Baldwin 	 * should complete it normally or when it fails to install a
586f3215338SJohn Baldwin 	 * cancel routine.
587f3215338SJohn Baldwin 	 */
588f3215338SJohn Baldwin 	if (func == NULL)
589f3215338SJohn Baldwin 		return (0);
590f3215338SJohn Baldwin 
591f3215338SJohn Baldwin 	/*
592f3215338SJohn Baldwin 	 * Set the CANCELLING flag so that aio_complete() will defer
593f3215338SJohn Baldwin 	 * completions of this job.  This prevents the job from being
594f3215338SJohn Baldwin 	 * freed out from under the cancel callback.  After the
595f3215338SJohn Baldwin 	 * callback any deferred completion (whether from the callback
596f3215338SJohn Baldwin 	 * or any other source) will be completed.
597f3215338SJohn Baldwin 	 */
598f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLING;
599f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
600f3215338SJohn Baldwin 	func(job);
601f3215338SJohn Baldwin 	AIO_LOCK(ki);
602f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_CANCELLING;
603f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
604f3215338SJohn Baldwin 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
605f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
606f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
607f3215338SJohn Baldwin 	} else {
608f3215338SJohn Baldwin 		/*
609f3215338SJohn Baldwin 		 * The cancel callback might have scheduled an
610f3215338SJohn Baldwin 		 * operation to cancel this request, but it is
611f3215338SJohn Baldwin 		 * only counted as cancelled if the request is
612f3215338SJohn Baldwin 		 * cancelled when the callback returns.
613f3215338SJohn Baldwin 		 */
614f3215338SJohn Baldwin 		cancelled = 0;
615f3215338SJohn Baldwin 	}
616f3215338SJohn Baldwin 	return (cancelled);
617f3215338SJohn Baldwin }
618f3215338SJohn Baldwin 
6192244ea07SJohn Dyson /*
6202244ea07SJohn Dyson  * Rundown the jobs for a given process.
6212244ea07SJohn Dyson  */
62221d56e9cSAlfred Perlstein static void
62375b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p)
624fd3bf775SJohn Dyson {
6252244ea07SJohn Dyson 	struct kaioinfo *ki;
6261ce91824SDavid Xu 	struct aioliojob *lj;
6275652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
6282244ea07SJohn Dyson 
6292a522eb9SJohn Baldwin 	KASSERT(curthread->td_proc == p,
6302a522eb9SJohn Baldwin 	    ("%s: called on non-curproc", __func__));
6312244ea07SJohn Dyson 	ki = p->p_aioinfo;
6322244ea07SJohn Dyson 	if (ki == NULL)
6332244ea07SJohn Dyson 		return;
6342244ea07SJohn Dyson 
635759ccccaSDavid Xu 	AIO_LOCK(ki);
63627b8220dSDavid Xu 	ki->kaio_flags |= KAIO_RUNDOWN;
6371ce91824SDavid Xu 
6381ce91824SDavid Xu restart:
639a624e84fSJohn Dyson 
640bfbbc4aaSJason Evans 	/*
6411ce91824SDavid Xu 	 * Try to cancel all pending requests. This code simulates
6421ce91824SDavid Xu 	 * aio_cancel on all pending I/O requests.
643bfbbc4aaSJason Evans 	 */
6445652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
645f3215338SJohn Baldwin 		aio_cancel_job(p, ki, job);
6462244ea07SJohn Dyson 	}
64784af4da6SJohn Dyson 
6481ce91824SDavid Xu 	/* Wait for all running I/O to be finished */
649f3215338SJohn Baldwin 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
65084af4da6SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
651759ccccaSDavid Xu 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
6521ce91824SDavid Xu 		goto restart;
65384af4da6SJohn Dyson 	}
65484af4da6SJohn Dyson 
6551ce91824SDavid Xu 	/* Free all completed I/O requests. */
6565652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
6575652770dSJohn Baldwin 		aio_free_entry(job);
65884af4da6SJohn Dyson 
6591ce91824SDavid Xu 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
660a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
66184af4da6SJohn Dyson 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
6621ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
663759ccccaSDavid Xu 			PROC_LOCK(p);
6641ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
665759ccccaSDavid Xu 			PROC_UNLOCK(p);
666c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
667f4f0ecefSJohn Dyson 		} else {
668a9bf5e37SDavid Xu 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
669a9bf5e37SDavid Xu 			    lj->lioj_count, lj->lioj_finished_count);
67084af4da6SJohn Dyson 		}
671f4f0ecefSJohn Dyson 	}
672759ccccaSDavid Xu 	AIO_UNLOCK(ki);
673c85650caSJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
674f3215338SJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
6755114048bSKonstantin Belousov 	mtx_destroy(&ki->kaio_mtx);
676c897b813SJeff Roberson 	uma_zfree(kaio_zone, ki);
677a624e84fSJohn Dyson 	p->p_aioinfo = NULL;
6782244ea07SJohn Dyson }
6792244ea07SJohn Dyson 
6802244ea07SJohn Dyson /*
681bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
6822244ea07SJohn Dyson  */
6835652770dSJohn Baldwin static struct kaiocb *
68439314b7dSJohn Baldwin aio_selectjob(struct aioproc *aiop)
685fd3bf775SJohn Dyson {
6865652770dSJohn Baldwin 	struct kaiocb *job;
687bfbbc4aaSJason Evans 	struct kaioinfo *ki;
688bfbbc4aaSJason Evans 	struct proc *userp;
6892244ea07SJohn Dyson 
6901ce91824SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
691f3215338SJohn Baldwin restart:
6925652770dSJohn Baldwin 	TAILQ_FOREACH(job, &aio_jobs, list) {
6935652770dSJohn Baldwin 		userp = job->userproc;
6942244ea07SJohn Dyson 		ki = userp->p_aioinfo;
6952244ea07SJohn Dyson 
6962244ea07SJohn Dyson 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
6975652770dSJohn Baldwin 			TAILQ_REMOVE(&aio_jobs, job, list);
698f3215338SJohn Baldwin 			if (!aio_clear_cancel_function(job))
699f3215338SJohn Baldwin 				goto restart;
700f3215338SJohn Baldwin 
7011ce91824SDavid Xu 			/* Account for currently active jobs. */
7021ce91824SDavid Xu 			ki->kaio_active_count++;
7031ce91824SDavid Xu 			break;
7041ce91824SDavid Xu 		}
7051ce91824SDavid Xu 	}
7065652770dSJohn Baldwin 	return (job);
7072244ea07SJohn Dyson }
7082244ea07SJohn Dyson 
7092244ea07SJohn Dyson /*
7100dd6c035SJohn Baldwin  * Move all data to a permanent storage device.  This code
7110dd6c035SJohn Baldwin  * simulates the fsync syscall.
71299eee864SDavid Xu  */
71399eee864SDavid Xu static int
71499eee864SDavid Xu aio_fsync_vnode(struct thread *td, struct vnode *vp)
71599eee864SDavid Xu {
71699eee864SDavid Xu 	struct mount *mp;
71799eee864SDavid Xu 	int error;
71899eee864SDavid Xu 
71999eee864SDavid Xu 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
72099eee864SDavid Xu 		goto drop;
721cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
72299eee864SDavid Xu 	if (vp->v_object != NULL) {
72389f6b863SAttilio Rao 		VM_OBJECT_WLOCK(vp->v_object);
72499eee864SDavid Xu 		vm_object_page_clean(vp->v_object, 0, 0, 0);
72589f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(vp->v_object);
72699eee864SDavid Xu 	}
72799eee864SDavid Xu 	error = VOP_FSYNC(vp, MNT_WAIT, td);
72899eee864SDavid Xu 
72922db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
73099eee864SDavid Xu 	vn_finished_write(mp);
73199eee864SDavid Xu drop:
73299eee864SDavid Xu 	return (error);
73399eee864SDavid Xu }
73499eee864SDavid Xu 
73599eee864SDavid Xu /*
736f95c13dbSGleb Smirnoff  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
737f95c13dbSGleb Smirnoff  * does the I/O request for the non-physio version of the operations.  The
738f95c13dbSGleb Smirnoff  * normal vn operations are used, and this code should work in all instances
739f95c13dbSGleb Smirnoff  * for every type of file, including pipes, sockets, fifos, and regular files.
7401ce91824SDavid Xu  *
7411aa4c324SDavid Xu  * XXX I don't think it works well for socket, pipe, and fifo.
7422244ea07SJohn Dyson  */
74388ed460eSAlan Cox static void
7445652770dSJohn Baldwin aio_process_rw(struct kaiocb *job)
745fd3bf775SJohn Dyson {
746f8f750c5SRobert Watson 	struct ucred *td_savedcred;
747b40ce416SJulian Elischer 	struct thread *td;
7482244ea07SJohn Dyson 	struct aiocb *cb;
7492244ea07SJohn Dyson 	struct file *fp;
7502244ea07SJohn Dyson 	struct uio auio;
7512244ea07SJohn Dyson 	struct iovec aiov;
752bb430bc7SJohn Baldwin 	ssize_t cnt;
753b1012d80SJohn Baldwin 	long msgsnd_st, msgsnd_end;
754b1012d80SJohn Baldwin 	long msgrcv_st, msgrcv_end;
755b1012d80SJohn Baldwin 	long oublock_st, oublock_end;
756b1012d80SJohn Baldwin 	long inblock_st, inblock_end;
7572244ea07SJohn Dyson 	int error;
7582244ea07SJohn Dyson 
7595652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
7605652770dSJohn Baldwin 	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
7615652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
762f95c13dbSGleb Smirnoff 
763f3215338SJohn Baldwin 	aio_switch_vmspace(job);
764b40ce416SJulian Elischer 	td = curthread;
765f8f750c5SRobert Watson 	td_savedcred = td->td_ucred;
7665652770dSJohn Baldwin 	td->td_ucred = job->cred;
7675652770dSJohn Baldwin 	cb = &job->uaiocb;
7685652770dSJohn Baldwin 	fp = job->fd_file;
769bfbbc4aaSJason Evans 
77091369fc7SAlan Cox 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
7712244ea07SJohn Dyson 	aiov.iov_len = cb->aio_nbytes;
7722244ea07SJohn Dyson 
7732244ea07SJohn Dyson 	auio.uio_iov = &aiov;
7742244ea07SJohn Dyson 	auio.uio_iovcnt = 1;
7759b16adc1SAlan Cox 	auio.uio_offset = cb->aio_offset;
7762244ea07SJohn Dyson 	auio.uio_resid = cb->aio_nbytes;
7772244ea07SJohn Dyson 	cnt = cb->aio_nbytes;
7782244ea07SJohn Dyson 	auio.uio_segflg = UIO_USERSPACE;
779b40ce416SJulian Elischer 	auio.uio_td = td;
7802244ea07SJohn Dyson 
781b1012d80SJohn Baldwin 	msgrcv_st = td->td_ru.ru_msgrcv;
782b1012d80SJohn Baldwin 	msgsnd_st = td->td_ru.ru_msgsnd;
7831c4bcd05SJeff Roberson 	inblock_st = td->td_ru.ru_inblock;
7841c4bcd05SJeff Roberson 	oublock_st = td->td_ru.ru_oublock;
785b1012d80SJohn Baldwin 
786279d7226SMatthew Dillon 	/*
787a9bf5e37SDavid Xu 	 * aio_aqueue() acquires a reference to the file that is
7889b16adc1SAlan Cox 	 * released in aio_free_entry().
789279d7226SMatthew Dillon 	 */
7902244ea07SJohn Dyson 	if (cb->aio_lio_opcode == LIO_READ) {
7912244ea07SJohn Dyson 		auio.uio_rw = UIO_READ;
7925114048bSKonstantin Belousov 		if (auio.uio_resid == 0)
7935114048bSKonstantin Belousov 			error = 0;
7945114048bSKonstantin Belousov 		else
795b40ce416SJulian Elischer 			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
7962244ea07SJohn Dyson 	} else {
7976d53aa62SDavid Xu 		if (fp->f_type == DTYPE_VNODE)
7986d53aa62SDavid Xu 			bwillwrite();
7992244ea07SJohn Dyson 		auio.uio_rw = UIO_WRITE;
800b40ce416SJulian Elischer 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8012244ea07SJohn Dyson 	}
802b1012d80SJohn Baldwin 	msgrcv_end = td->td_ru.ru_msgrcv;
803b1012d80SJohn Baldwin 	msgsnd_end = td->td_ru.ru_msgsnd;
8041c4bcd05SJeff Roberson 	inblock_end = td->td_ru.ru_inblock;
8051c4bcd05SJeff Roberson 	oublock_end = td->td_ru.ru_oublock;
806fd3bf775SJohn Dyson 
807b1012d80SJohn Baldwin 	job->msgrcv = msgrcv_end - msgrcv_st;
808b1012d80SJohn Baldwin 	job->msgsnd = msgsnd_end - msgsnd_st;
809b1012d80SJohn Baldwin 	job->inblock = inblock_end - inblock_st;
810b1012d80SJohn Baldwin 	job->outblock = oublock_end - oublock_st;
8112244ea07SJohn Dyson 
812bfbbc4aaSJason Evans 	if ((error) && (auio.uio_resid != cnt)) {
8132244ea07SJohn Dyson 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
8142244ea07SJohn Dyson 			error = 0;
81519eb87d2SJohn Baldwin 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
8165652770dSJohn Baldwin 			PROC_LOCK(job->userproc);
8175652770dSJohn Baldwin 			kern_psignal(job->userproc, SIGPIPE);
8185652770dSJohn Baldwin 			PROC_UNLOCK(job->userproc);
81919eb87d2SJohn Baldwin 		}
8202244ea07SJohn Dyson 	}
8212244ea07SJohn Dyson 
8222244ea07SJohn Dyson 	cnt -= auio.uio_resid;
823f8f750c5SRobert Watson 	td->td_ucred = td_savedcred;
824f0ec1740SJohn Baldwin 	if (error)
825f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
826f0ec1740SJohn Baldwin 	else
827f0ec1740SJohn Baldwin 		aio_complete(job, cnt, 0);
8282244ea07SJohn Dyson }
8292244ea07SJohn Dyson 
83069cd28daSDoug Ambrisko static void
8315652770dSJohn Baldwin aio_process_sync(struct kaiocb *job)
832f95c13dbSGleb Smirnoff {
833f95c13dbSGleb Smirnoff 	struct thread *td = curthread;
834f95c13dbSGleb Smirnoff 	struct ucred *td_savedcred = td->td_ucred;
8355652770dSJohn Baldwin 	struct file *fp = job->fd_file;
836f95c13dbSGleb Smirnoff 	int error = 0;
837f95c13dbSGleb Smirnoff 
8385652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
8395652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
840f95c13dbSGleb Smirnoff 
8415652770dSJohn Baldwin 	td->td_ucred = job->cred;
842f95c13dbSGleb Smirnoff 	if (fp->f_vnode != NULL)
843f95c13dbSGleb Smirnoff 		error = aio_fsync_vnode(td, fp->f_vnode);
844f95c13dbSGleb Smirnoff 	td->td_ucred = td_savedcred;
845f0ec1740SJohn Baldwin 	if (error)
846f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
847f0ec1740SJohn Baldwin 	else
848f0ec1740SJohn Baldwin 		aio_complete(job, 0, 0);
849f95c13dbSGleb Smirnoff }
850f95c13dbSGleb Smirnoff 
851f95c13dbSGleb Smirnoff static void
8525652770dSJohn Baldwin aio_process_mlock(struct kaiocb *job)
8536160e12cSGleb Smirnoff {
8545652770dSJohn Baldwin 	struct aiocb *cb = &job->uaiocb;
8556160e12cSGleb Smirnoff 	int error;
8566160e12cSGleb Smirnoff 
8575652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
8585652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
8596160e12cSGleb Smirnoff 
860f3215338SJohn Baldwin 	aio_switch_vmspace(job);
8615652770dSJohn Baldwin 	error = vm_mlock(job->userproc, job->cred,
8626160e12cSGleb Smirnoff 	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
863f0ec1740SJohn Baldwin 	if (error)
864f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
865f0ec1740SJohn Baldwin 	else
866f0ec1740SJohn Baldwin 		aio_complete(job, 0, 0);
8676160e12cSGleb Smirnoff }
8686160e12cSGleb Smirnoff 
8696160e12cSGleb Smirnoff static void
870f3215338SJohn Baldwin aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
8711ce91824SDavid Xu {
8721ce91824SDavid Xu 	struct aioliojob *lj;
87369cd28daSDoug Ambrisko 	struct kaioinfo *ki;
8745652770dSJohn Baldwin 	struct kaiocb *sjob, *sjobn;
8751ce91824SDavid Xu 	int lj_done;
876f3215338SJohn Baldwin 	bool schedule_fsync;
87769cd28daSDoug Ambrisko 
87869cd28daSDoug Ambrisko 	ki = userp->p_aioinfo;
879759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
8805652770dSJohn Baldwin 	lj = job->lio;
88169cd28daSDoug Ambrisko 	lj_done = 0;
88269cd28daSDoug Ambrisko 	if (lj) {
8831ce91824SDavid Xu 		lj->lioj_finished_count++;
8841ce91824SDavid Xu 		if (lj->lioj_count == lj->lioj_finished_count)
88569cd28daSDoug Ambrisko 			lj_done = 1;
88669cd28daSDoug Ambrisko 	}
8875652770dSJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
888f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
88927b8220dSDavid Xu 
89027b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_RUNDOWN)
89127b8220dSDavid Xu 		goto notification_done;
89227b8220dSDavid Xu 
8935652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
8945652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
8955652770dSJohn Baldwin 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
8961ce91824SDavid Xu 
8975652770dSJohn Baldwin 	KNOTE_LOCKED(&job->klist, 1);
8981ce91824SDavid Xu 
89969cd28daSDoug Ambrisko 	if (lj_done) {
9001ce91824SDavid Xu 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
90169cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
9021ce91824SDavid Xu 			KNOTE_LOCKED(&lj->klist, 1);
90369cd28daSDoug Ambrisko 		}
9041ce91824SDavid Xu 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
90569cd28daSDoug Ambrisko 		    == LIOJ_SIGNAL
9064c0fb2cfSDavid Xu 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
9074c0fb2cfSDavid Xu 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
9084c0fb2cfSDavid Xu 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
90969cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
91069cd28daSDoug Ambrisko 		}
91169cd28daSDoug Ambrisko 	}
91227b8220dSDavid Xu 
91327b8220dSDavid Xu notification_done:
9145652770dSJohn Baldwin 	if (job->jobflags & KAIOCB_CHECKSYNC) {
915f3215338SJohn Baldwin 		schedule_fsync = false;
9165652770dSJohn Baldwin 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
917b9a53e16SJohn Baldwin 			if (job->fd_file != sjob->fd_file ||
918b9a53e16SJohn Baldwin 			    job->seqno >= sjob->seqno)
919b9a53e16SJohn Baldwin 				continue;
920b9a53e16SJohn Baldwin 			if (--sjob->pending > 0)
921b9a53e16SJohn Baldwin 				continue;
922b9a53e16SJohn Baldwin 			TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
923*005ce8e4SJohn Baldwin 			if (!aio_clear_cancel_function_locked(sjob))
924f3215338SJohn Baldwin 				continue;
925b9a53e16SJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
926f3215338SJohn Baldwin 			schedule_fsync = true;
92799eee864SDavid Xu 		}
928f3215338SJohn Baldwin 		if (schedule_fsync)
929f3215338SJohn Baldwin 			taskqueue_enqueue(taskqueue_aiod_kick,
930f3215338SJohn Baldwin 			    &ki->kaio_sync_task);
93199eee864SDavid Xu 	}
93227b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_WAKEUP) {
93369cd28daSDoug Ambrisko 		ki->kaio_flags &= ~KAIO_WAKEUP;
9341ce91824SDavid Xu 		wakeup(&userp->p_aioinfo);
93569cd28daSDoug Ambrisko 	}
93669cd28daSDoug Ambrisko }
93769cd28daSDoug Ambrisko 
9388a4dc40fSJohn Baldwin static void
939f3215338SJohn Baldwin aio_schedule_fsync(void *context, int pending)
940f3215338SJohn Baldwin {
941f3215338SJohn Baldwin 	struct kaioinfo *ki;
942f3215338SJohn Baldwin 	struct kaiocb *job;
943f3215338SJohn Baldwin 
944f3215338SJohn Baldwin 	ki = context;
945f3215338SJohn Baldwin 	AIO_LOCK(ki);
946f3215338SJohn Baldwin 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
947f3215338SJohn Baldwin 		job = TAILQ_FIRST(&ki->kaio_syncready);
948f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
949f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
950f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
951f3215338SJohn Baldwin 		AIO_LOCK(ki);
952f3215338SJohn Baldwin 	}
953f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
954f3215338SJohn Baldwin }
955f3215338SJohn Baldwin 
956f3215338SJohn Baldwin bool
957f3215338SJohn Baldwin aio_cancel_cleared(struct kaiocb *job)
958f3215338SJohn Baldwin {
959f3215338SJohn Baldwin 	struct kaioinfo *ki;
960f3215338SJohn Baldwin 
961f3215338SJohn Baldwin 	/*
962f3215338SJohn Baldwin 	 * The caller should hold the same queue lock held when
963f3215338SJohn Baldwin 	 * aio_clear_cancel_function() was called and set this flag
964f3215338SJohn Baldwin 	 * ensuring this check sees an up-to-date value.  However,
965f3215338SJohn Baldwin 	 * there is no way to assert that.
966f3215338SJohn Baldwin 	 */
967f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
968f3215338SJohn Baldwin 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
969f3215338SJohn Baldwin }
970f3215338SJohn Baldwin 
971*005ce8e4SJohn Baldwin static bool
972*005ce8e4SJohn Baldwin aio_clear_cancel_function_locked(struct kaiocb *job)
973*005ce8e4SJohn Baldwin {
974*005ce8e4SJohn Baldwin 
975*005ce8e4SJohn Baldwin 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
976*005ce8e4SJohn Baldwin 	MPASS(job->cancel_fn != NULL);
977*005ce8e4SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLING) {
978*005ce8e4SJohn Baldwin 		job->jobflags |= KAIOCB_CLEARED;
979*005ce8e4SJohn Baldwin 		return (false);
980*005ce8e4SJohn Baldwin 	}
981*005ce8e4SJohn Baldwin 	job->cancel_fn = NULL;
982*005ce8e4SJohn Baldwin 	return (true);
983*005ce8e4SJohn Baldwin }
984*005ce8e4SJohn Baldwin 
985f3215338SJohn Baldwin bool
986f3215338SJohn Baldwin aio_clear_cancel_function(struct kaiocb *job)
987f3215338SJohn Baldwin {
988f3215338SJohn Baldwin 	struct kaioinfo *ki;
989*005ce8e4SJohn Baldwin 	bool ret;
990f3215338SJohn Baldwin 
991f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
992f3215338SJohn Baldwin 	AIO_LOCK(ki);
993*005ce8e4SJohn Baldwin 	ret = aio_clear_cancel_function_locked(job);
994f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
995*005ce8e4SJohn Baldwin 	return (ret);
996f3215338SJohn Baldwin }
997*005ce8e4SJohn Baldwin 
998*005ce8e4SJohn Baldwin static bool
999*005ce8e4SJohn Baldwin aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
1000*005ce8e4SJohn Baldwin {
1001*005ce8e4SJohn Baldwin 
1002*005ce8e4SJohn Baldwin 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
1003*005ce8e4SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLED)
1004*005ce8e4SJohn Baldwin 		return (false);
1005*005ce8e4SJohn Baldwin 	job->cancel_fn = func;
1006f3215338SJohn Baldwin 	return (true);
1007f3215338SJohn Baldwin }
1008f3215338SJohn Baldwin 
1009f3215338SJohn Baldwin bool
1010f3215338SJohn Baldwin aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1011f3215338SJohn Baldwin {
1012f3215338SJohn Baldwin 	struct kaioinfo *ki;
1013*005ce8e4SJohn Baldwin 	bool ret;
1014f3215338SJohn Baldwin 
1015f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1016f3215338SJohn Baldwin 	AIO_LOCK(ki);
1017*005ce8e4SJohn Baldwin 	ret = aio_set_cancel_function_locked(job, func);
1018f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1019*005ce8e4SJohn Baldwin 	return (ret);
1020f3215338SJohn Baldwin }
1021f3215338SJohn Baldwin 
1022f3215338SJohn Baldwin void
1023f3215338SJohn Baldwin aio_complete(struct kaiocb *job, long status, int error)
1024f3215338SJohn Baldwin {
1025f3215338SJohn Baldwin 	struct kaioinfo *ki;
1026f3215338SJohn Baldwin 	struct proc *userp;
1027f3215338SJohn Baldwin 
1028f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.error = error;
1029f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.status = status;
1030f3215338SJohn Baldwin 
1031f3215338SJohn Baldwin 	userp = job->userproc;
1032f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
1033f3215338SJohn Baldwin 
1034f3215338SJohn Baldwin 	AIO_LOCK(ki);
1035f3215338SJohn Baldwin 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1036f3215338SJohn Baldwin 	    ("duplicate aio_complete"));
1037f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_FINISHED;
1038f3215338SJohn Baldwin 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1039f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1040f3215338SJohn Baldwin 		aio_bio_done_notify(userp, job);
1041f3215338SJohn Baldwin 	}
1042f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1043f3215338SJohn Baldwin }
1044f3215338SJohn Baldwin 
1045f3215338SJohn Baldwin void
1046f3215338SJohn Baldwin aio_cancel(struct kaiocb *job)
1047f3215338SJohn Baldwin {
1048f3215338SJohn Baldwin 
1049f3215338SJohn Baldwin 	aio_complete(job, -1, ECANCELED);
1050f3215338SJohn Baldwin }
1051f3215338SJohn Baldwin 
1052f3215338SJohn Baldwin void
10535652770dSJohn Baldwin aio_switch_vmspace(struct kaiocb *job)
10548a4dc40fSJohn Baldwin {
10558a4dc40fSJohn Baldwin 
10565652770dSJohn Baldwin 	vmspace_switch_aio(job->userproc->p_vmspace);
10578a4dc40fSJohn Baldwin }
10588a4dc40fSJohn Baldwin 
10592244ea07SJohn Dyson /*
1060f95c13dbSGleb Smirnoff  * The AIO daemon, most of the actual work is done in aio_process_*,
106184af4da6SJohn Dyson  * but the setup (and address space mgmt) is done in this routine.
10622244ea07SJohn Dyson  */
10632244ea07SJohn Dyson static void
10641ce91824SDavid Xu aio_daemon(void *_id)
10652244ea07SJohn Dyson {
10665652770dSJohn Baldwin 	struct kaiocb *job;
106739314b7dSJohn Baldwin 	struct aioproc *aiop;
1068bfbbc4aaSJason Evans 	struct kaioinfo *ki;
1069f3215338SJohn Baldwin 	struct proc *p;
10708a4dc40fSJohn Baldwin 	struct vmspace *myvm;
1071b40ce416SJulian Elischer 	struct thread *td = curthread;
10721ce91824SDavid Xu 	int id = (intptr_t)_id;
10732244ea07SJohn Dyson 
10742244ea07SJohn Dyson 	/*
10758a4dc40fSJohn Baldwin 	 * Grab an extra reference on the daemon's vmspace so that it
10768a4dc40fSJohn Baldwin 	 * doesn't get freed by jobs that switch to a different
10778a4dc40fSJohn Baldwin 	 * vmspace.
10782244ea07SJohn Dyson 	 */
10798a4dc40fSJohn Baldwin 	p = td->td_proc;
10808a4dc40fSJohn Baldwin 	myvm = vmspace_acquire_ref(p);
1081fd3bf775SJohn Dyson 
10828a4dc40fSJohn Baldwin 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1083fd3bf775SJohn Dyson 
1084fd3bf775SJohn Dyson 	/*
1085bfbbc4aaSJason Evans 	 * Allocate and ready the aio control info.  There is one aiop structure
1086bfbbc4aaSJason Evans 	 * per daemon.
1087fd3bf775SJohn Dyson 	 */
1088a163d034SWarner Losh 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
108939314b7dSJohn Baldwin 	aiop->aioproc = p;
109039314b7dSJohn Baldwin 	aiop->aioprocflags = 0;
1091bfbbc4aaSJason Evans 
1092fd3bf775SJohn Dyson 	/*
1093fd3bf775SJohn Dyson 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
1094b40ce416SJulian Elischer 	 * and creating too many daemons.)
1095fd3bf775SJohn Dyson 	 */
10961ce91824SDavid Xu 	sema_post(&aio_newproc_sem);
10972244ea07SJohn Dyson 
10981ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
1099bfbbc4aaSJason Evans 	for (;;) {
1100fd3bf775SJohn Dyson 		/*
1101fd3bf775SJohn Dyson 		 * Take daemon off of free queue
1102fd3bf775SJohn Dyson 		 */
110339314b7dSJohn Baldwin 		if (aiop->aioprocflags & AIOP_FREE) {
11042244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
110539314b7dSJohn Baldwin 			aiop->aioprocflags &= ~AIOP_FREE;
11062244ea07SJohn Dyson 		}
11072244ea07SJohn Dyson 
1108fd3bf775SJohn Dyson 		/*
1109bfbbc4aaSJason Evans 		 * Check for jobs.
1110fd3bf775SJohn Dyson 		 */
11115652770dSJohn Baldwin 		while ((job = aio_selectjob(aiop)) != NULL) {
11121ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11132244ea07SJohn Dyson 
1114f3215338SJohn Baldwin 			ki = job->userproc->p_aioinfo;
1115f3215338SJohn Baldwin 			job->handle_fn(job);
111684af4da6SJohn Dyson 
11179b84335cSDavid Xu 			mtx_lock(&aio_job_mtx);
11189b84335cSDavid Xu 			/* Decrement the active job count. */
11199b84335cSDavid Xu 			ki->kaio_active_count--;
11202244ea07SJohn Dyson 		}
11212244ea07SJohn Dyson 
1122fd3bf775SJohn Dyson 		/*
1123bfbbc4aaSJason Evans 		 * Disconnect from user address space.
1124fd3bf775SJohn Dyson 		 */
11258a4dc40fSJohn Baldwin 		if (p->p_vmspace != myvm) {
11261ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11278a4dc40fSJohn Baldwin 			vmspace_switch_aio(myvm);
11281ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
11291ce91824SDavid Xu 			/*
11301ce91824SDavid Xu 			 * We have to restart to avoid race, we only sleep if
11318a4dc40fSJohn Baldwin 			 * no job can be selected.
11321ce91824SDavid Xu 			 */
11331ce91824SDavid Xu 			continue;
1134fd3bf775SJohn Dyson 		}
1135fd3bf775SJohn Dyson 
11361ce91824SDavid Xu 		mtx_assert(&aio_job_mtx, MA_OWNED);
11371ce91824SDavid Xu 
1138fd3bf775SJohn Dyson 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
113939314b7dSJohn Baldwin 		aiop->aioprocflags |= AIOP_FREE;
1140fd3bf775SJohn Dyson 
1141fd3bf775SJohn Dyson 		/*
1142bfbbc4aaSJason Evans 		 * If daemon is inactive for a long time, allow it to exit,
1143bfbbc4aaSJason Evans 		 * thereby freeing resources.
1144fd3bf775SJohn Dyson 		 */
114539314b7dSJohn Baldwin 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
11468a4dc40fSJohn Baldwin 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
114739314b7dSJohn Baldwin 		    (aiop->aioprocflags & AIOP_FREE) &&
11488a4dc40fSJohn Baldwin 		    num_aio_procs > target_aio_procs)
11498a4dc40fSJohn Baldwin 			break;
11508a4dc40fSJohn Baldwin 	}
1151fd3bf775SJohn Dyson 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
115284af4da6SJohn Dyson 	num_aio_procs--;
11531ce91824SDavid Xu 	mtx_unlock(&aio_job_mtx);
11541ce91824SDavid Xu 	uma_zfree(aiop_zone, aiop);
11551ce91824SDavid Xu 	free_unr(aiod_unr, id);
11568a4dc40fSJohn Baldwin 	vmspace_free(myvm);
11578a4dc40fSJohn Baldwin 
11588a4dc40fSJohn Baldwin 	KASSERT(p->p_vmspace == myvm,
11598a4dc40fSJohn Baldwin 	    ("AIOD: bad vmspace for exiting daemon"));
11608a4dc40fSJohn Baldwin 	KASSERT(myvm->vm_refcnt > 1,
11618a4dc40fSJohn Baldwin 	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
11623745c395SJulian Elischer 	kproc_exit(0);
1163fd3bf775SJohn Dyson }
11642244ea07SJohn Dyson 
11652244ea07SJohn Dyson /*
1166bfbbc4aaSJason Evans  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1167bfbbc4aaSJason Evans  * AIO daemon modifies its environment itself.
11682244ea07SJohn Dyson  */
11692244ea07SJohn Dyson static int
11701ce91824SDavid Xu aio_newproc(int *start)
1171fd3bf775SJohn Dyson {
11722244ea07SJohn Dyson 	int error;
1173c9a970a7SAlan Cox 	struct proc *p;
11741ce91824SDavid Xu 	int id;
11752244ea07SJohn Dyson 
11761ce91824SDavid Xu 	id = alloc_unr(aiod_unr);
11773745c395SJulian Elischer 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
11781ce91824SDavid Xu 		RFNOWAIT, 0, "aiod%d", id);
11791ce91824SDavid Xu 	if (error == 0) {
1180fd3bf775SJohn Dyson 		/*
11811ce91824SDavid Xu 		 * Wait until daemon is started.
1182fd3bf775SJohn Dyson 		 */
11831ce91824SDavid Xu 		sema_wait(&aio_newproc_sem);
11841ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
118584af4da6SJohn Dyson 		num_aio_procs++;
11861ce91824SDavid Xu 		if (start != NULL)
11877f34b521SDavid Xu 			(*start)--;
11881ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
11891ce91824SDavid Xu 	} else {
11901ce91824SDavid Xu 		free_unr(aiod_unr, id);
11911ce91824SDavid Xu 	}
1192ac41f2efSAlfred Perlstein 	return (error);
11932244ea07SJohn Dyson }
11942244ea07SJohn Dyson 
11952244ea07SJohn Dyson /*
119688ed460eSAlan Cox  * Try the high-performance, low-overhead physio method for eligible
119788ed460eSAlan Cox  * VCHR devices.  This method doesn't use an aio helper thread, and
119888ed460eSAlan Cox  * thus has very low overhead.
119988ed460eSAlan Cox  *
1200a9bf5e37SDavid Xu  * Assumes that the caller, aio_aqueue(), has incremented the file
120188ed460eSAlan Cox  * structure's reference count, preventing its deallocation for the
120288ed460eSAlan Cox  * duration of this call.
1203fd3bf775SJohn Dyson  */
120488ed460eSAlan Cox static int
12055652770dSJohn Baldwin aio_qphysio(struct proc *p, struct kaiocb *job)
1206fd3bf775SJohn Dyson {
1207fd3bf775SJohn Dyson 	struct aiocb *cb;
1208fd3bf775SJohn Dyson 	struct file *fp;
1209f743d981SAlexander Motin 	struct bio *bp;
1210f743d981SAlexander Motin 	struct buf *pbuf;
1211fd3bf775SJohn Dyson 	struct vnode *vp;
1212f3215a60SKonstantin Belousov 	struct cdevsw *csw;
1213f3215a60SKonstantin Belousov 	struct cdev *dev;
1214fd3bf775SJohn Dyson 	struct kaioinfo *ki;
12154d805eacSJohn Baldwin 	int error, ref, poff;
1216f743d981SAlexander Motin 	vm_prot_t prot;
1217fd3bf775SJohn Dyson 
12185652770dSJohn Baldwin 	cb = &job->uaiocb;
12195652770dSJohn Baldwin 	fp = job->fd_file;
1220fd3bf775SJohn Dyson 
12216160e12cSGleb Smirnoff 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1222008626c3SPoul-Henning Kamp 		return (-1);
1223fd3bf775SJohn Dyson 
12243b6d9652SPoul-Henning Kamp 	vp = fp->f_vnode;
1225f743d981SAlexander Motin 	if (vp->v_type != VCHR)
1226f582ac06SBrian Feldman 		return (-1);
1227ad8de0f2SDavid Xu 	if (vp->v_bufobj.bo_bsize == 0)
1228ad8de0f2SDavid Xu 		return (-1);
12295d9d81e7SPoul-Henning Kamp 	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1230008626c3SPoul-Henning Kamp 		return (-1);
1231fd3bf775SJohn Dyson 
1232f3215a60SKonstantin Belousov 	ref = 0;
1233f3215a60SKonstantin Belousov 	csw = devvn_refthread(vp, &dev, &ref);
1234f3215a60SKonstantin Belousov 	if (csw == NULL)
1235f3215a60SKonstantin Belousov 		return (ENXIO);
1236f743d981SAlexander Motin 
1237f743d981SAlexander Motin 	if ((csw->d_flags & D_DISK) == 0) {
1238f743d981SAlexander Motin 		error = -1;
1239f743d981SAlexander Motin 		goto unref;
1240f743d981SAlexander Motin 	}
1241f3215a60SKonstantin Belousov 	if (cb->aio_nbytes > dev->si_iosize_max) {
1242f3215a60SKonstantin Belousov 		error = -1;
1243f3215a60SKonstantin Belousov 		goto unref;
1244f3215a60SKonstantin Belousov 	}
1245f3215a60SKonstantin Belousov 
1246f743d981SAlexander Motin 	ki = p->p_aioinfo;
1247f743d981SAlexander Motin 	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
12484d805eacSJohn Baldwin 	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
1249f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS) {
1250f743d981SAlexander Motin 			error = -1;
1251f743d981SAlexander Motin 			goto unref;
1252f743d981SAlexander Motin 		}
12534d805eacSJohn Baldwin 
12544d805eacSJohn Baldwin 		pbuf = NULL;
1255f743d981SAlexander Motin 	} else {
1256f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS - poff) {
1257f743d981SAlexander Motin 			error = -1;
1258f743d981SAlexander Motin 			goto unref;
1259f743d981SAlexander Motin 		}
1260f743d981SAlexander Motin 		if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
1261f743d981SAlexander Motin 			error = -1;
1262f743d981SAlexander Motin 			goto unref;
1263f743d981SAlexander Motin 		}
12644d805eacSJohn Baldwin 
12655652770dSJohn Baldwin 		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
1266f743d981SAlexander Motin 		BUF_KERNPROC(pbuf);
1267759ccccaSDavid Xu 		AIO_LOCK(ki);
12681ce91824SDavid Xu 		ki->kaio_buffer_count++;
1269759ccccaSDavid Xu 		AIO_UNLOCK(ki);
12704d805eacSJohn Baldwin 	}
12714d805eacSJohn Baldwin 	job->bp = bp = g_alloc_bio();
12721ce91824SDavid Xu 
1273f743d981SAlexander Motin 	bp->bio_length = cb->aio_nbytes;
1274f743d981SAlexander Motin 	bp->bio_bcount = cb->aio_nbytes;
1275f743d981SAlexander Motin 	bp->bio_done = aio_physwakeup;
1276f743d981SAlexander Motin 	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
1277f743d981SAlexander Motin 	bp->bio_offset = cb->aio_offset;
1278f743d981SAlexander Motin 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1279f743d981SAlexander Motin 	bp->bio_dev = dev;
12805652770dSJohn Baldwin 	bp->bio_caller1 = (void *)job;
1281f743d981SAlexander Motin 
1282f743d981SAlexander Motin 	prot = VM_PROT_READ;
1283f743d981SAlexander Motin 	if (cb->aio_lio_opcode == LIO_READ)
1284f743d981SAlexander Motin 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
12854d805eacSJohn Baldwin 	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
12865652770dSJohn Baldwin 	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
12874d805eacSJohn Baldwin 	    nitems(job->pages));
12884d805eacSJohn Baldwin 	if (job->npages < 0) {
1289f743d981SAlexander Motin 		error = EFAULT;
1290f743d981SAlexander Motin 		goto doerror;
1291f743d981SAlexander Motin 	}
12924d805eacSJohn Baldwin 	if (pbuf != NULL) {
1293f743d981SAlexander Motin 		pmap_qenter((vm_offset_t)pbuf->b_data,
12945652770dSJohn Baldwin 		    job->pages, job->npages);
1295f743d981SAlexander Motin 		bp->bio_data = pbuf->b_data + poff;
12964d805eacSJohn Baldwin 		atomic_add_int(&num_buf_aio, 1);
1297f743d981SAlexander Motin 	} else {
12985652770dSJohn Baldwin 		bp->bio_ma = job->pages;
12995652770dSJohn Baldwin 		bp->bio_ma_n = job->npages;
1300f743d981SAlexander Motin 		bp->bio_ma_offset = poff;
1301f743d981SAlexander Motin 		bp->bio_data = unmapped_buf;
1302f743d981SAlexander Motin 		bp->bio_flags |= BIO_UNMAPPED;
1303f743d981SAlexander Motin 	}
1304f743d981SAlexander Motin 
1305bfbbc4aaSJason Evans 	/* Perform transfer. */
1306f743d981SAlexander Motin 	csw->d_strategy(bp);
1307f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1308ac41f2efSAlfred Perlstein 	return (0);
1309fd3bf775SJohn Dyson 
1310fd3bf775SJohn Dyson doerror:
13114d805eacSJohn Baldwin 	if (pbuf != NULL) {
1312759ccccaSDavid Xu 		AIO_LOCK(ki);
1313fd3bf775SJohn Dyson 		ki->kaio_buffer_count--;
1314759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1315f743d981SAlexander Motin 		relpbuf(pbuf, NULL);
13165652770dSJohn Baldwin 		job->pbuf = NULL;
1317f743d981SAlexander Motin 	}
1318f743d981SAlexander Motin 	g_destroy_bio(bp);
13195652770dSJohn Baldwin 	job->bp = NULL;
1320f3215a60SKonstantin Belousov unref:
1321f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1322fd3bf775SJohn Dyson 	return (error);
1323fd3bf775SJohn Dyson }
1324fd3bf775SJohn Dyson 
1325399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
13263858a1f4SJohn Baldwin static int
13273858a1f4SJohn Baldwin convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
13283858a1f4SJohn Baldwin {
13293858a1f4SJohn Baldwin 
13303858a1f4SJohn Baldwin 	/*
13313858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
13323858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
13333858a1f4SJohn Baldwin 	 */
13343858a1f4SJohn Baldwin 	nsig->sigev_notify = osig->sigev_notify;
13353858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
13363858a1f4SJohn Baldwin 	case SIGEV_NONE:
13373858a1f4SJohn Baldwin 		break;
13383858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
13393858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
13403858a1f4SJohn Baldwin 		break;
13413858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
13423858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
13433858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
13443858a1f4SJohn Baldwin 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
13453858a1f4SJohn Baldwin 		break;
13463858a1f4SJohn Baldwin 	default:
13473858a1f4SJohn Baldwin 		return (EINVAL);
13483858a1f4SJohn Baldwin 	}
13493858a1f4SJohn Baldwin 	return (0);
13503858a1f4SJohn Baldwin }
13513858a1f4SJohn Baldwin 
13523858a1f4SJohn Baldwin static int
13533858a1f4SJohn Baldwin aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
13543858a1f4SJohn Baldwin {
13553858a1f4SJohn Baldwin 	struct oaiocb *ojob;
13563858a1f4SJohn Baldwin 	int error;
13573858a1f4SJohn Baldwin 
13583858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
13593858a1f4SJohn Baldwin 	error = copyin(ujob, kjob, sizeof(struct oaiocb));
13603858a1f4SJohn Baldwin 	if (error)
13613858a1f4SJohn Baldwin 		return (error);
13623858a1f4SJohn Baldwin 	ojob = (struct oaiocb *)kjob;
13633858a1f4SJohn Baldwin 	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
13643858a1f4SJohn Baldwin }
1365399e8c17SJohn Baldwin #endif
13663858a1f4SJohn Baldwin 
13673858a1f4SJohn Baldwin static int
13683858a1f4SJohn Baldwin aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
13693858a1f4SJohn Baldwin {
13703858a1f4SJohn Baldwin 
13713858a1f4SJohn Baldwin 	return (copyin(ujob, kjob, sizeof(struct aiocb)));
13723858a1f4SJohn Baldwin }
13733858a1f4SJohn Baldwin 
13743858a1f4SJohn Baldwin static long
13753858a1f4SJohn Baldwin aiocb_fetch_status(struct aiocb *ujob)
13763858a1f4SJohn Baldwin {
13773858a1f4SJohn Baldwin 
13783858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.status));
13793858a1f4SJohn Baldwin }
13803858a1f4SJohn Baldwin 
13813858a1f4SJohn Baldwin static long
13823858a1f4SJohn Baldwin aiocb_fetch_error(struct aiocb *ujob)
13833858a1f4SJohn Baldwin {
13843858a1f4SJohn Baldwin 
13853858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.error));
13863858a1f4SJohn Baldwin }
13873858a1f4SJohn Baldwin 
13883858a1f4SJohn Baldwin static int
13893858a1f4SJohn Baldwin aiocb_store_status(struct aiocb *ujob, long status)
13903858a1f4SJohn Baldwin {
13913858a1f4SJohn Baldwin 
13923858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.status, status));
13933858a1f4SJohn Baldwin }
13943858a1f4SJohn Baldwin 
13953858a1f4SJohn Baldwin static int
13963858a1f4SJohn Baldwin aiocb_store_error(struct aiocb *ujob, long error)
13973858a1f4SJohn Baldwin {
13983858a1f4SJohn Baldwin 
13993858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.error, error));
14003858a1f4SJohn Baldwin }
14013858a1f4SJohn Baldwin 
14023858a1f4SJohn Baldwin static int
14033858a1f4SJohn Baldwin aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
14043858a1f4SJohn Baldwin {
14053858a1f4SJohn Baldwin 
14063858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
14073858a1f4SJohn Baldwin }
14083858a1f4SJohn Baldwin 
14093858a1f4SJohn Baldwin static int
14103858a1f4SJohn Baldwin aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
14113858a1f4SJohn Baldwin {
14123858a1f4SJohn Baldwin 
14133858a1f4SJohn Baldwin 	return (suword(ujobp, (long)ujob));
14143858a1f4SJohn Baldwin }
14153858a1f4SJohn Baldwin 
14163858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops = {
14173858a1f4SJohn Baldwin 	.copyin = aiocb_copyin,
14183858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14193858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14203858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14213858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14223858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14233858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14243858a1f4SJohn Baldwin };
14253858a1f4SJohn Baldwin 
1426399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
14273858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops_osigevent = {
14283858a1f4SJohn Baldwin 	.copyin = aiocb_copyin_old_sigevent,
14293858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14303858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14313858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14323858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14333858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14343858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14353858a1f4SJohn Baldwin };
1436399e8c17SJohn Baldwin #endif
14373858a1f4SJohn Baldwin 
1438bfbbc4aaSJason Evans /*
1439bfbbc4aaSJason Evans  * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
1440bfbbc4aaSJason Evans  * technique is done in this code.
14412244ea07SJohn Dyson  */
14426a1162d4SAlexander Leidinger int
14435652770dSJohn Baldwin aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
14443858a1f4SJohn Baldwin 	int type, struct aiocb_ops *ops)
1445fd3bf775SJohn Dyson {
1446b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
14477008be5bSPawel Jakub Dawidek 	cap_rights_t rights;
14482244ea07SJohn Dyson 	struct file *fp;
1449f3215338SJohn Baldwin 	struct kaiocb *job;
14502244ea07SJohn Dyson 	struct kaioinfo *ki;
1451c6fa9f78SAlan Cox 	struct kevent kev;
14521ce91824SDavid Xu 	int opcode;
14531ce91824SDavid Xu 	int error;
14544db71d27SJohn-Mark Gurney 	int fd, kqfd;
14551ce91824SDavid Xu 	int jid;
1456fde80935SDavid Xu 	u_short evflags;
14572244ea07SJohn Dyson 
1458a9bf5e37SDavid Xu 	if (p->p_aioinfo == NULL)
1459a9bf5e37SDavid Xu 		aio_init_aioinfo(p);
1460a9bf5e37SDavid Xu 
14611ce91824SDavid Xu 	ki = p->p_aioinfo;
14621ce91824SDavid Xu 
14635652770dSJohn Baldwin 	ops->store_status(ujob, -1);
14645652770dSJohn Baldwin 	ops->store_error(ujob, 0);
14655652770dSJohn Baldwin 	ops->store_kernelinfo(ujob, -1);
1466a9bf5e37SDavid Xu 
1467a9bf5e37SDavid Xu 	if (num_queue_count >= max_queue_count ||
1468a9bf5e37SDavid Xu 	    ki->kaio_count >= ki->kaio_qallowed_count) {
14695652770dSJohn Baldwin 		ops->store_error(ujob, EAGAIN);
1470a9bf5e37SDavid Xu 		return (EAGAIN);
1471a9bf5e37SDavid Xu 	}
1472a9bf5e37SDavid Xu 
14735652770dSJohn Baldwin 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
14745652770dSJohn Baldwin 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1475fd3bf775SJohn Dyson 
14765652770dSJohn Baldwin 	error = ops->copyin(ujob, &job->uaiocb);
14772244ea07SJohn Dyson 	if (error) {
14785652770dSJohn Baldwin 		ops->store_error(ujob, error);
14795652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1480ac41f2efSAlfred Perlstein 		return (error);
14812244ea07SJohn Dyson 	}
148268d71118SDavid Xu 
1483bb430bc7SJohn Baldwin 	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
14845652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1485434ea137SGleb Smirnoff 		return (EINVAL);
1486434ea137SGleb Smirnoff 	}
1487434ea137SGleb Smirnoff 
14885652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
14895652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
14905652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
14915652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
14925652770dSJohn Baldwin 		ops->store_error(ujob, EINVAL);
14935652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
149468d71118SDavid Xu 		return (EINVAL);
149568d71118SDavid Xu 	}
149668d71118SDavid Xu 
14975652770dSJohn Baldwin 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
14985652770dSJohn Baldwin 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
14995652770dSJohn Baldwin 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
15005652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1501ac41f2efSAlfred Perlstein 		return (EINVAL);
15022f3cf918SAlfred Perlstein 	}
15032244ea07SJohn Dyson 
15045652770dSJohn Baldwin 	ksiginfo_init(&job->ksi);
15054c0fb2cfSDavid Xu 
1506bfbbc4aaSJason Evans 	/* Save userspace address of the job info. */
15075652770dSJohn Baldwin 	job->ujob = ujob;
150811783b14SJohn Dyson 
1509bfbbc4aaSJason Evans 	/* Get the opcode. */
1510bfbbc4aaSJason Evans 	if (type != LIO_NOP)
15115652770dSJohn Baldwin 		job->uaiocb.aio_lio_opcode = type;
15125652770dSJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
15132244ea07SJohn Dyson 
1514a9d2f8d8SRobert Watson 	/*
1515a9d2f8d8SRobert Watson 	 * Validate the opcode and fetch the file object for the specified
1516a9d2f8d8SRobert Watson 	 * file descriptor.
1517a9d2f8d8SRobert Watson 	 *
1518a9d2f8d8SRobert Watson 	 * XXXRW: Moved the opcode validation up here so that we don't
1519a9d2f8d8SRobert Watson 	 * retrieve a file descriptor without knowing what the capabiltity
1520a9d2f8d8SRobert Watson 	 * should be.
1521a9d2f8d8SRobert Watson 	 */
15225652770dSJohn Baldwin 	fd = job->uaiocb.aio_fildes;
15232a522eb9SJohn Baldwin 	switch (opcode) {
15242a522eb9SJohn Baldwin 	case LIO_WRITE:
15257008be5bSPawel Jakub Dawidek 		error = fget_write(td, fd,
15267008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PWRITE), &fp);
15272a522eb9SJohn Baldwin 		break;
15282a522eb9SJohn Baldwin 	case LIO_READ:
15297008be5bSPawel Jakub Dawidek 		error = fget_read(td, fd,
15307008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PREAD), &fp);
1531a9d2f8d8SRobert Watson 		break;
1532a9d2f8d8SRobert Watson 	case LIO_SYNC:
15337008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp);
1534a9d2f8d8SRobert Watson 		break;
15356160e12cSGleb Smirnoff 	case LIO_MLOCK:
15366160e12cSGleb Smirnoff 		fp = NULL;
15376160e12cSGleb Smirnoff 		break;
1538a9d2f8d8SRobert Watson 	case LIO_NOP:
15397008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights), &fp);
15402a522eb9SJohn Baldwin 		break;
15412a522eb9SJohn Baldwin 	default:
1542a9d2f8d8SRobert Watson 		error = EINVAL;
15432a522eb9SJohn Baldwin 	}
15442a522eb9SJohn Baldwin 	if (error) {
15455652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
15465652770dSJohn Baldwin 		ops->store_error(ujob, error);
1547af56abaaSJohn Baldwin 		return (error);
15482244ea07SJohn Dyson 	}
154999eee864SDavid Xu 
155099eee864SDavid Xu 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
155199eee864SDavid Xu 		error = EINVAL;
155299eee864SDavid Xu 		goto aqueue_fail;
155399eee864SDavid Xu 	}
15542244ea07SJohn Dyson 
15555652770dSJohn Baldwin 	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
1556ae124fc4SAlan Cox 		error = EINVAL;
1557ae124fc4SAlan Cox 		goto aqueue_fail;
15582244ea07SJohn Dyson 	}
15591ce91824SDavid Xu 
15605652770dSJohn Baldwin 	job->fd_file = fp;
15611ce91824SDavid Xu 
156299eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
156399eee864SDavid Xu 	jid = jobrefid++;
15645652770dSJohn Baldwin 	job->seqno = jobseqno++;
156599eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
15665652770dSJohn Baldwin 	error = ops->store_kernelinfo(ujob, jid);
15671ce91824SDavid Xu 	if (error) {
15681ce91824SDavid Xu 		error = EINVAL;
15691ce91824SDavid Xu 		goto aqueue_fail;
15701ce91824SDavid Xu 	}
15715652770dSJohn Baldwin 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
15722244ea07SJohn Dyson 
15732244ea07SJohn Dyson 	if (opcode == LIO_NOP) {
1574a5c0b1c0SAlan Cox 		fdrop(fp, td);
15755652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1576ac41f2efSAlfred Perlstein 		return (0);
15772244ea07SJohn Dyson 	}
15782244ea07SJohn Dyson 
15795652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1580cb679c38SJonathan Lemon 		goto no_kqueue;
15815652770dSJohn Baldwin 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1582fde80935SDavid Xu 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1583fde80935SDavid Xu 		error = EINVAL;
1584fde80935SDavid Xu 		goto aqueue_fail;
1585fde80935SDavid Xu 	}
15865652770dSJohn Baldwin 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
15875652770dSJohn Baldwin 	kev.ident = (uintptr_t)job->ujob;
1588cb679c38SJonathan Lemon 	kev.filter = EVFILT_AIO;
1589fde80935SDavid Xu 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
15905652770dSJohn Baldwin 	kev.data = (intptr_t)job;
15915652770dSJohn Baldwin 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
15924db71d27SJohn-Mark Gurney 	error = kqfd_register(kqfd, &kev, td, 1);
1593f3215338SJohn Baldwin 	if (error)
1594f3215338SJohn Baldwin 		goto aqueue_fail;
1595f3215338SJohn Baldwin 
1596cb679c38SJonathan Lemon no_kqueue:
1597cb679c38SJonathan Lemon 
15985652770dSJohn Baldwin 	ops->store_error(ujob, EINPROGRESS);
15995652770dSJohn Baldwin 	job->uaiocb._aiocb_private.error = EINPROGRESS;
16005652770dSJohn Baldwin 	job->userproc = p;
16015652770dSJohn Baldwin 	job->cred = crhold(td->td_ucred);
1602f3215338SJohn Baldwin 	job->jobflags = KAIOCB_QUEUEING;
16035652770dSJohn Baldwin 	job->lio = lj;
16042244ea07SJohn Dyson 
1605f3215338SJohn Baldwin 	if (opcode == LIO_MLOCK) {
1606f3215338SJohn Baldwin 		aio_schedule(job, aio_process_mlock);
1607f3215338SJohn Baldwin 		error = 0;
1608f3215338SJohn Baldwin 	} else if (fp->f_ops->fo_aio_queue == NULL)
1609f3215338SJohn Baldwin 		error = aio_queue_file(fp, job);
1610f3215338SJohn Baldwin 	else
1611f3215338SJohn Baldwin 		error = fo_aio_queue(fp, job);
1612f3215338SJohn Baldwin 	if (error)
1613f3215338SJohn Baldwin 		goto aqueue_fail;
1614f3215338SJohn Baldwin 
1615f3215338SJohn Baldwin 	AIO_LOCK(ki);
1616f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_QUEUEING;
1617f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1618f3215338SJohn Baldwin 	ki->kaio_count++;
1619f3215338SJohn Baldwin 	if (lj)
1620f3215338SJohn Baldwin 		lj->lioj_count++;
1621f3215338SJohn Baldwin 	atomic_add_int(&num_queue_count, 1);
1622f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
1623f3215338SJohn Baldwin 		/*
1624f3215338SJohn Baldwin 		 * The queue callback completed the request synchronously.
1625f3215338SJohn Baldwin 		 * The bulk of the completion is deferred in that case
1626f3215338SJohn Baldwin 		 * until this point.
1627f3215338SJohn Baldwin 		 */
1628f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
1629f3215338SJohn Baldwin 	} else
1630f3215338SJohn Baldwin 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1631f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1632f3215338SJohn Baldwin 	return (0);
1633f3215338SJohn Baldwin 
1634f3215338SJohn Baldwin aqueue_fail:
1635f3215338SJohn Baldwin 	knlist_delete(&job->klist, curthread, 0);
1636f3215338SJohn Baldwin 	if (fp)
1637f3215338SJohn Baldwin 		fdrop(fp, td);
1638f3215338SJohn Baldwin 	uma_zfree(aiocb_zone, job);
1639f3215338SJohn Baldwin 	ops->store_error(ujob, error);
1640f3215338SJohn Baldwin 	return (error);
1641f3215338SJohn Baldwin }
1642f3215338SJohn Baldwin 
1643f3215338SJohn Baldwin static void
1644f3215338SJohn Baldwin aio_cancel_daemon_job(struct kaiocb *job)
1645f3215338SJohn Baldwin {
1646f3215338SJohn Baldwin 
1647f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1648f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1649f3215338SJohn Baldwin 		TAILQ_REMOVE(&aio_jobs, job, list);
1650f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1651f3215338SJohn Baldwin 	aio_cancel(job);
1652f3215338SJohn Baldwin }
1653f3215338SJohn Baldwin 
1654f3215338SJohn Baldwin void
1655f3215338SJohn Baldwin aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1656f3215338SJohn Baldwin {
1657f3215338SJohn Baldwin 
1658f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1659f3215338SJohn Baldwin 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1660f3215338SJohn Baldwin 		mtx_unlock(&aio_job_mtx);
1661f3215338SJohn Baldwin 		aio_cancel(job);
1662f3215338SJohn Baldwin 		return;
1663f3215338SJohn Baldwin 	}
1664f3215338SJohn Baldwin 	job->handle_fn = func;
1665f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1666f3215338SJohn Baldwin 	aio_kick_nowait(job->userproc);
1667f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1668f3215338SJohn Baldwin }
1669f3215338SJohn Baldwin 
1670f3215338SJohn Baldwin static void
1671f3215338SJohn Baldwin aio_cancel_sync(struct kaiocb *job)
1672f3215338SJohn Baldwin {
1673f3215338SJohn Baldwin 	struct kaioinfo *ki;
1674f3215338SJohn Baldwin 
1675f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1676*005ce8e4SJohn Baldwin 	AIO_LOCK(ki);
1677f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1678f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1679*005ce8e4SJohn Baldwin 	AIO_UNLOCK(ki);
1680f3215338SJohn Baldwin 	aio_cancel(job);
1681f3215338SJohn Baldwin }
1682f3215338SJohn Baldwin 
1683f3215338SJohn Baldwin int
1684f3215338SJohn Baldwin aio_queue_file(struct file *fp, struct kaiocb *job)
1685f3215338SJohn Baldwin {
1686f3215338SJohn Baldwin 	struct aioliojob *lj;
1687f3215338SJohn Baldwin 	struct kaioinfo *ki;
1688f3215338SJohn Baldwin 	struct kaiocb *job2;
16899fe297bbSKonstantin Belousov 	struct vnode *vp;
16909fe297bbSKonstantin Belousov 	struct mount *mp;
1691f3215338SJohn Baldwin 	int error, opcode;
16929fe297bbSKonstantin Belousov 	bool safe;
1693f3215338SJohn Baldwin 
1694f3215338SJohn Baldwin 	lj = job->lio;
1695f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1696f3215338SJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
169799eee864SDavid Xu 	if (opcode == LIO_SYNC)
169899eee864SDavid Xu 		goto queueit;
169999eee864SDavid Xu 
1700f3215338SJohn Baldwin 	if ((error = aio_qphysio(job->userproc, job)) == 0)
1701279d7226SMatthew Dillon 		goto done;
17021ce91824SDavid Xu #if 0
1703f3215338SJohn Baldwin 	/*
1704f3215338SJohn Baldwin 	 * XXX: This means qphysio() failed with EFAULT.  The current
1705f3215338SJohn Baldwin 	 * behavior is to retry the operation via fo_read/fo_write.
1706f3215338SJohn Baldwin 	 * Wouldn't it be better to just complete the request with an
1707f3215338SJohn Baldwin 	 * error here?
1708f3215338SJohn Baldwin 	 */
1709f3215338SJohn Baldwin 	if (error > 0)
1710279d7226SMatthew Dillon 		goto done;
17111ce91824SDavid Xu #endif
171299eee864SDavid Xu queueit:
17139fe297bbSKonstantin Belousov 	safe = false;
17149fe297bbSKonstantin Belousov 	if (fp->f_type == DTYPE_VNODE) {
17159fe297bbSKonstantin Belousov 		vp = fp->f_vnode;
17169fe297bbSKonstantin Belousov 		if (vp->v_type == VREG || vp->v_type == VDIR) {
17179fe297bbSKonstantin Belousov 			mp = fp->f_vnode->v_mount;
17189fe297bbSKonstantin Belousov 			if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
17199fe297bbSKonstantin Belousov 				safe = true;
17209fe297bbSKonstantin Belousov 		}
17219fe297bbSKonstantin Belousov 	}
17229c20dc99SJohn Baldwin 	if (!(safe || enable_aio_unsafe)) {
17239c20dc99SJohn Baldwin 		counted_warning(&unsafe_warningcnt,
17249c20dc99SJohn Baldwin 		    "is attempting to use unsafe AIO requests");
1725f3215338SJohn Baldwin 		return (EOPNOTSUPP);
17269c20dc99SJohn Baldwin 	}
172784af4da6SJohn Dyson 
172899eee864SDavid Xu 	if (opcode == LIO_SYNC) {
1729f3215338SJohn Baldwin 		AIO_LOCK(ki);
17305652770dSJohn Baldwin 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
17315652770dSJohn Baldwin 			if (job2->fd_file == job->fd_file &&
17325652770dSJohn Baldwin 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
17335652770dSJohn Baldwin 			    job2->seqno < job->seqno) {
17345652770dSJohn Baldwin 				job2->jobflags |= KAIOCB_CHECKSYNC;
17355652770dSJohn Baldwin 				job->pending++;
1736dbbccfe9SDavid Xu 			}
1737dbbccfe9SDavid Xu 		}
17385652770dSJohn Baldwin 		if (job->pending != 0) {
1739*005ce8e4SJohn Baldwin 			if (!aio_set_cancel_function_locked(job,
1740*005ce8e4SJohn Baldwin 				aio_cancel_sync)) {
1741f3215338SJohn Baldwin 				AIO_UNLOCK(ki);
1742f3215338SJohn Baldwin 				aio_cancel(job);
1743f3215338SJohn Baldwin 				return (0);
1744f3215338SJohn Baldwin 			}
17455652770dSJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1746759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1747f3215338SJohn Baldwin 			return (0);
1748dbbccfe9SDavid Xu 		}
1749759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1750f3215338SJohn Baldwin 	}
1751f3215338SJohn Baldwin 
1752f3215338SJohn Baldwin 	switch (opcode) {
1753f3215338SJohn Baldwin 	case LIO_READ:
1754f3215338SJohn Baldwin 	case LIO_WRITE:
1755f3215338SJohn Baldwin 		aio_schedule(job, aio_process_rw);
17561ce91824SDavid Xu 		error = 0;
1757f3215338SJohn Baldwin 		break;
1758f3215338SJohn Baldwin 	case LIO_SYNC:
1759f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
1760f3215338SJohn Baldwin 		error = 0;
1761f3215338SJohn Baldwin 		break;
1762f3215338SJohn Baldwin 	default:
1763f3215338SJohn Baldwin 		error = EINVAL;
1764f3215338SJohn Baldwin 	}
176599eee864SDavid Xu done:
176699eee864SDavid Xu 	return (error);
176799eee864SDavid Xu }
176899eee864SDavid Xu 
176999eee864SDavid Xu static void
177099eee864SDavid Xu aio_kick_nowait(struct proc *userp)
177199eee864SDavid Xu {
177299eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
177339314b7dSJohn Baldwin 	struct aioproc *aiop;
177499eee864SDavid Xu 
177599eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
177699eee864SDavid Xu 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
177799eee864SDavid Xu 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
177839314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
177939314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17800dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
17810dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
17820dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1783c85650caSJohn Baldwin 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
178499eee864SDavid Xu 	}
178599eee864SDavid Xu }
178699eee864SDavid Xu 
1787dbbccfe9SDavid Xu static int
178899eee864SDavid Xu aio_kick(struct proc *userp)
178999eee864SDavid Xu {
179099eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
179139314b7dSJohn Baldwin 	struct aioproc *aiop;
1792dbbccfe9SDavid Xu 	int error, ret = 0;
179399eee864SDavid Xu 
179499eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
179599eee864SDavid Xu retryproc:
1796d254af07SMatthew Dillon 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
17972244ea07SJohn Dyson 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
179839314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
179939314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
18000dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
18010dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
18020dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1803fd3bf775SJohn Dyson 		num_aio_resv_start++;
18041ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
18051ce91824SDavid Xu 		error = aio_newproc(&num_aio_resv_start);
18061ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
18071ce91824SDavid Xu 		if (error) {
180884af4da6SJohn Dyson 			num_aio_resv_start--;
18092244ea07SJohn Dyson 			goto retryproc;
1810fd3bf775SJohn Dyson 		}
1811dbbccfe9SDavid Xu 	} else {
1812dbbccfe9SDavid Xu 		ret = -1;
18131ce91824SDavid Xu 	}
1814dbbccfe9SDavid Xu 	return (ret);
181599eee864SDavid Xu }
18161ce91824SDavid Xu 
181799eee864SDavid Xu static void
181899eee864SDavid Xu aio_kick_helper(void *context, int pending)
181999eee864SDavid Xu {
182099eee864SDavid Xu 	struct proc *userp = context;
182199eee864SDavid Xu 
182299eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
1823dbbccfe9SDavid Xu 	while (--pending >= 0) {
1824dbbccfe9SDavid Xu 		if (aio_kick(userp))
1825dbbccfe9SDavid Xu 			break;
1826dbbccfe9SDavid Xu 	}
182799eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
18282244ea07SJohn Dyson }
18292244ea07SJohn Dyson 
1830fd3bf775SJohn Dyson /*
1831bfbbc4aaSJason Evans  * Support the aio_return system call, as a side-effect, kernel resources are
1832bfbbc4aaSJason Evans  * released.
18332244ea07SJohn Dyson  */
18343858a1f4SJohn Baldwin static int
18355652770dSJohn Baldwin kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1836fd3bf775SJohn Dyson {
1837b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18385652770dSJohn Baldwin 	struct kaiocb *job;
18392244ea07SJohn Dyson 	struct kaioinfo *ki;
1840bb430bc7SJohn Baldwin 	long status, error;
18412244ea07SJohn Dyson 
1842c0bf5caaSAlan Cox 	ki = p->p_aioinfo;
1843c0bf5caaSAlan Cox 	if (ki == NULL)
1844ac41f2efSAlfred Perlstein 		return (EINVAL);
1845759ccccaSDavid Xu 	AIO_LOCK(ki);
18465652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
18475652770dSJohn Baldwin 		if (job->ujob == ujob)
1848c0bf5caaSAlan Cox 			break;
1849c0bf5caaSAlan Cox 	}
18505652770dSJohn Baldwin 	if (job != NULL) {
1851f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
18525652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
18535652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
18541ce91824SDavid Xu 		td->td_retval[0] = status;
1855b1012d80SJohn Baldwin 		td->td_ru.ru_oublock += job->outblock;
1856b1012d80SJohn Baldwin 		td->td_ru.ru_inblock += job->inblock;
1857b1012d80SJohn Baldwin 		td->td_ru.ru_msgsnd += job->msgsnd;
1858b1012d80SJohn Baldwin 		td->td_ru.ru_msgrcv += job->msgrcv;
18595652770dSJohn Baldwin 		aio_free_entry(job);
1860759ccccaSDavid Xu 		AIO_UNLOCK(ki);
18615652770dSJohn Baldwin 		ops->store_error(ujob, error);
18625652770dSJohn Baldwin 		ops->store_status(ujob, status);
186355a122bfSDavid Xu 	} else {
18641ce91824SDavid Xu 		error = EINVAL;
1865759ccccaSDavid Xu 		AIO_UNLOCK(ki);
186655a122bfSDavid Xu 	}
18671ce91824SDavid Xu 	return (error);
18682244ea07SJohn Dyson }
18692244ea07SJohn Dyson 
18703858a1f4SJohn Baldwin int
18718451d0ddSKip Macy sys_aio_return(struct thread *td, struct aio_return_args *uap)
18723858a1f4SJohn Baldwin {
18733858a1f4SJohn Baldwin 
18743858a1f4SJohn Baldwin 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
18753858a1f4SJohn Baldwin }
18763858a1f4SJohn Baldwin 
18772244ea07SJohn Dyson /*
1878bfbbc4aaSJason Evans  * Allow a process to wakeup when any of the I/O requests are completed.
18792244ea07SJohn Dyson  */
18803858a1f4SJohn Baldwin static int
18813858a1f4SJohn Baldwin kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
18823858a1f4SJohn Baldwin     struct timespec *ts)
1883fd3bf775SJohn Dyson {
1884b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18854a11ca4eSPoul-Henning Kamp 	struct timeval atv;
18862244ea07SJohn Dyson 	struct kaioinfo *ki;
18875652770dSJohn Baldwin 	struct kaiocb *firstjob, *job;
18883858a1f4SJohn Baldwin 	int error, i, timo;
18892244ea07SJohn Dyson 
18902244ea07SJohn Dyson 	timo = 0;
18913858a1f4SJohn Baldwin 	if (ts) {
18923858a1f4SJohn Baldwin 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
18932244ea07SJohn Dyson 			return (EINVAL);
18942244ea07SJohn Dyson 
18953858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
18962244ea07SJohn Dyson 		if (itimerfix(&atv))
18972244ea07SJohn Dyson 			return (EINVAL);
1898227ee8a1SPoul-Henning Kamp 		timo = tvtohz(&atv);
18992244ea07SJohn Dyson 	}
19002244ea07SJohn Dyson 
19012244ea07SJohn Dyson 	ki = p->p_aioinfo;
19022244ea07SJohn Dyson 	if (ki == NULL)
1903ac41f2efSAlfred Perlstein 		return (EAGAIN);
19042244ea07SJohn Dyson 
19053858a1f4SJohn Baldwin 	if (njoblist == 0)
1906ac41f2efSAlfred Perlstein 		return (0);
19072244ea07SJohn Dyson 
1908759ccccaSDavid Xu 	AIO_LOCK(ki);
19091ce91824SDavid Xu 	for (;;) {
19105652770dSJohn Baldwin 		firstjob = NULL;
19111ce91824SDavid Xu 		error = 0;
19125652770dSJohn Baldwin 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
191384af4da6SJohn Dyson 			for (i = 0; i < njoblist; i++) {
19145652770dSJohn Baldwin 				if (job->ujob == ujoblist[i]) {
19155652770dSJohn Baldwin 					if (firstjob == NULL)
19165652770dSJohn Baldwin 						firstjob = job;
1917f3215338SJohn Baldwin 					if (job->jobflags & KAIOCB_FINISHED)
19181ce91824SDavid Xu 						goto RETURN;
191984af4da6SJohn Dyson 				}
192084af4da6SJohn Dyson 			}
192184af4da6SJohn Dyson 		}
19221ce91824SDavid Xu 		/* All tasks were finished. */
19235652770dSJohn Baldwin 		if (firstjob == NULL)
19241ce91824SDavid Xu 			break;
19252244ea07SJohn Dyson 
1926fd3bf775SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
1927759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
19281ce91824SDavid Xu 		    "aiospn", timo);
19291ce91824SDavid Xu 		if (error == ERESTART)
19301ce91824SDavid Xu 			error = EINTR;
19311ce91824SDavid Xu 		if (error)
19321ce91824SDavid Xu 			break;
19332244ea07SJohn Dyson 	}
19341ce91824SDavid Xu RETURN:
1935759ccccaSDavid Xu 	AIO_UNLOCK(ki);
19363858a1f4SJohn Baldwin 	return (error);
19373858a1f4SJohn Baldwin }
19383858a1f4SJohn Baldwin 
19393858a1f4SJohn Baldwin int
19408451d0ddSKip Macy sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
19413858a1f4SJohn Baldwin {
19423858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
19433858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
19443858a1f4SJohn Baldwin 	int error;
19453858a1f4SJohn Baldwin 
19463858a1f4SJohn Baldwin 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
19473858a1f4SJohn Baldwin 		return (EINVAL);
19483858a1f4SJohn Baldwin 
19493858a1f4SJohn Baldwin 	if (uap->timeout) {
19503858a1f4SJohn Baldwin 		/* Get timespec struct. */
19513858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
19523858a1f4SJohn Baldwin 			return (error);
19533858a1f4SJohn Baldwin 		tsp = &ts;
19543858a1f4SJohn Baldwin 	} else
19553858a1f4SJohn Baldwin 		tsp = NULL;
19563858a1f4SJohn Baldwin 
19573858a1f4SJohn Baldwin 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
19583858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
19593858a1f4SJohn Baldwin 	if (error == 0)
19603858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
19611ce91824SDavid Xu 	uma_zfree(aiol_zone, ujoblist);
19621ce91824SDavid Xu 	return (error);
19632244ea07SJohn Dyson }
1964ee877a35SJohn Dyson 
1965ee877a35SJohn Dyson /*
1966dd85920aSJason Evans  * aio_cancel cancels any non-physio aio operations not currently in
1967dd85920aSJason Evans  * progress.
1968ee877a35SJohn Dyson  */
1969ee877a35SJohn Dyson int
19708451d0ddSKip Macy sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1971fd3bf775SJohn Dyson {
1972b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
1973dd85920aSJason Evans 	struct kaioinfo *ki;
19745652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
1975dd85920aSJason Evans 	struct file *fp;
1976f131759fSMateusz Guzik 	cap_rights_t rights;
19771ce91824SDavid Xu 	int error;
1978dd85920aSJason Evans 	int cancelled = 0;
1979dd85920aSJason Evans 	int notcancelled = 0;
1980dd85920aSJason Evans 	struct vnode *vp;
1981dd85920aSJason Evans 
19822a522eb9SJohn Baldwin 	/* Lookup file object. */
1983f131759fSMateusz Guzik 	error = fget(td, uap->fd, cap_rights_init(&rights), &fp);
19842a522eb9SJohn Baldwin 	if (error)
19852a522eb9SJohn Baldwin 		return (error);
1986dd85920aSJason Evans 
19871ce91824SDavid Xu 	ki = p->p_aioinfo;
19881ce91824SDavid Xu 	if (ki == NULL)
19891ce91824SDavid Xu 		goto done;
19901ce91824SDavid Xu 
1991dd85920aSJason Evans 	if (fp->f_type == DTYPE_VNODE) {
19923b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
1993dd85920aSJason Evans 		if (vn_isdisk(vp, &error)) {
19942a522eb9SJohn Baldwin 			fdrop(fp, td);
1995b40ce416SJulian Elischer 			td->td_retval[0] = AIO_NOTCANCELED;
1996ac41f2efSAlfred Perlstein 			return (0);
1997dd85920aSJason Evans 		}
1998dd85920aSJason Evans 	}
1999dd85920aSJason Evans 
2000759ccccaSDavid Xu 	AIO_LOCK(ki);
20015652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
20025652770dSJohn Baldwin 		if ((uap->fd == job->uaiocb.aio_fildes) &&
2003dd85920aSJason Evans 		    ((uap->aiocbp == NULL) ||
20045652770dSJohn Baldwin 		     (uap->aiocbp == job->ujob))) {
2005f3215338SJohn Baldwin 			if (aio_cancel_job(p, ki, job)) {
20061ce91824SDavid Xu 				cancelled++;
2007dd85920aSJason Evans 			} else {
2008dd85920aSJason Evans 				notcancelled++;
2009dd85920aSJason Evans 			}
20101aa4c324SDavid Xu 			if (uap->aiocbp != NULL)
20111aa4c324SDavid Xu 				break;
2012dd85920aSJason Evans 		}
2013dd85920aSJason Evans 	}
2014759ccccaSDavid Xu 	AIO_UNLOCK(ki);
20151ce91824SDavid Xu 
2016ad49abc0SAlan Cox done:
20172a522eb9SJohn Baldwin 	fdrop(fp, td);
20181aa4c324SDavid Xu 
20191aa4c324SDavid Xu 	if (uap->aiocbp != NULL) {
2020dd85920aSJason Evans 		if (cancelled) {
2021b40ce416SJulian Elischer 			td->td_retval[0] = AIO_CANCELED;
2022ac41f2efSAlfred Perlstein 			return (0);
2023dd85920aSJason Evans 		}
20241aa4c324SDavid Xu 	}
20251aa4c324SDavid Xu 
20261aa4c324SDavid Xu 	if (notcancelled) {
20271aa4c324SDavid Xu 		td->td_retval[0] = AIO_NOTCANCELED;
20281aa4c324SDavid Xu 		return (0);
20291aa4c324SDavid Xu 	}
20301aa4c324SDavid Xu 
20311aa4c324SDavid Xu 	if (cancelled) {
20321aa4c324SDavid Xu 		td->td_retval[0] = AIO_CANCELED;
20331aa4c324SDavid Xu 		return (0);
20341aa4c324SDavid Xu 	}
20351aa4c324SDavid Xu 
2036b40ce416SJulian Elischer 	td->td_retval[0] = AIO_ALLDONE;
2037dd85920aSJason Evans 
2038ac41f2efSAlfred Perlstein 	return (0);
2039ee877a35SJohn Dyson }
2040ee877a35SJohn Dyson 
2041ee877a35SJohn Dyson /*
2042873fbcd7SRobert Watson  * aio_error is implemented in the kernel level for compatibility purposes
2043873fbcd7SRobert Watson  * only.  For a user mode async implementation, it would be best to do it in
2044873fbcd7SRobert Watson  * a userland subroutine.
2045ee877a35SJohn Dyson  */
20463858a1f4SJohn Baldwin static int
20475652770dSJohn Baldwin kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2048fd3bf775SJohn Dyson {
2049b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
20505652770dSJohn Baldwin 	struct kaiocb *job;
20512244ea07SJohn Dyson 	struct kaioinfo *ki;
20521ce91824SDavid Xu 	int status;
2053ee877a35SJohn Dyson 
20542244ea07SJohn Dyson 	ki = p->p_aioinfo;
20551ce91824SDavid Xu 	if (ki == NULL) {
20561ce91824SDavid Xu 		td->td_retval[0] = EINVAL;
20571ce91824SDavid Xu 		return (0);
20581ce91824SDavid Xu 	}
2059ee877a35SJohn Dyson 
2060759ccccaSDavid Xu 	AIO_LOCK(ki);
20615652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
20625652770dSJohn Baldwin 		if (job->ujob == ujob) {
2063f3215338SJohn Baldwin 			if (job->jobflags & KAIOCB_FINISHED)
20641ce91824SDavid Xu 				td->td_retval[0] =
20655652770dSJohn Baldwin 					job->uaiocb._aiocb_private.error;
20661ce91824SDavid Xu 			else
2067b40ce416SJulian Elischer 				td->td_retval[0] = EINPROGRESS;
2068759ccccaSDavid Xu 			AIO_UNLOCK(ki);
2069ac41f2efSAlfred Perlstein 			return (0);
20702244ea07SJohn Dyson 		}
20712244ea07SJohn Dyson 	}
2072759ccccaSDavid Xu 	AIO_UNLOCK(ki);
207384af4da6SJohn Dyson 
20742244ea07SJohn Dyson 	/*
2075a9bf5e37SDavid Xu 	 * Hack for failure of aio_aqueue.
20762244ea07SJohn Dyson 	 */
20775652770dSJohn Baldwin 	status = ops->fetch_status(ujob);
20781ce91824SDavid Xu 	if (status == -1) {
20795652770dSJohn Baldwin 		td->td_retval[0] = ops->fetch_error(ujob);
20801ce91824SDavid Xu 		return (0);
20811ce91824SDavid Xu 	}
20821ce91824SDavid Xu 
20831ce91824SDavid Xu 	td->td_retval[0] = EINVAL;
20841ce91824SDavid Xu 	return (0);
2085ee877a35SJohn Dyson }
2086ee877a35SJohn Dyson 
20873858a1f4SJohn Baldwin int
20888451d0ddSKip Macy sys_aio_error(struct thread *td, struct aio_error_args *uap)
20893858a1f4SJohn Baldwin {
20903858a1f4SJohn Baldwin 
20913858a1f4SJohn Baldwin 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
20923858a1f4SJohn Baldwin }
20933858a1f4SJohn Baldwin 
2094eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */
2095399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2096ee877a35SJohn Dyson int
2097399e8c17SJohn Baldwin freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
20980972628aSDavid Xu {
20990972628aSDavid Xu 
21003858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
21013858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21020972628aSDavid Xu }
2103399e8c17SJohn Baldwin #endif
21040972628aSDavid Xu 
21050972628aSDavid Xu int
21068451d0ddSKip Macy sys_aio_read(struct thread *td, struct aio_read_args *uap)
2107fd3bf775SJohn Dyson {
210821d56e9cSAlfred Perlstein 
21093858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2110ee877a35SJohn Dyson }
2111ee877a35SJohn Dyson 
2112eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */
2113399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2114ee877a35SJohn Dyson int
2115399e8c17SJohn Baldwin freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
21160972628aSDavid Xu {
21170972628aSDavid Xu 
21183858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
21193858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21200972628aSDavid Xu }
2121399e8c17SJohn Baldwin #endif
21220972628aSDavid Xu 
21230972628aSDavid Xu int
21248451d0ddSKip Macy sys_aio_write(struct thread *td, struct aio_write_args *uap)
2125fd3bf775SJohn Dyson {
212621d56e9cSAlfred Perlstein 
21273858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
21280972628aSDavid Xu }
21290972628aSDavid Xu 
21306160e12cSGleb Smirnoff int
21316160e12cSGleb Smirnoff sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
21326160e12cSGleb Smirnoff {
21336160e12cSGleb Smirnoff 
21346160e12cSGleb Smirnoff 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
21356160e12cSGleb Smirnoff }
21366160e12cSGleb Smirnoff 
21370972628aSDavid Xu static int
21383858a1f4SJohn Baldwin kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
21393858a1f4SJohn Baldwin     struct aiocb **acb_list, int nent, struct sigevent *sig,
21403858a1f4SJohn Baldwin     struct aiocb_ops *ops)
21410972628aSDavid Xu {
2142b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
21435652770dSJohn Baldwin 	struct aiocb *job;
21442244ea07SJohn Dyson 	struct kaioinfo *ki;
21451ce91824SDavid Xu 	struct aioliojob *lj;
214669cd28daSDoug Ambrisko 	struct kevent kev;
21471ce91824SDavid Xu 	int error;
2148fd3bf775SJohn Dyson 	int nerror;
2149ee877a35SJohn Dyson 	int i;
2150ee877a35SJohn Dyson 
21513858a1f4SJohn Baldwin 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2152ac41f2efSAlfred Perlstein 		return (EINVAL);
21532244ea07SJohn Dyson 
2154ae3b195fSTim J. Robbins 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2155ac41f2efSAlfred Perlstein 		return (EINVAL);
21562244ea07SJohn Dyson 
2157bfbbc4aaSJason Evans 	if (p->p_aioinfo == NULL)
21582244ea07SJohn Dyson 		aio_init_aioinfo(p);
21592244ea07SJohn Dyson 
21602244ea07SJohn Dyson 	ki = p->p_aioinfo;
21612244ea07SJohn Dyson 
2162a163d034SWarner Losh 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
216384af4da6SJohn Dyson 	lj->lioj_flags = 0;
21641ce91824SDavid Xu 	lj->lioj_count = 0;
21651ce91824SDavid Xu 	lj->lioj_finished_count = 0;
2166d8b0556cSKonstantin Belousov 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
21674c0fb2cfSDavid Xu 	ksiginfo_init(&lj->lioj_ksi);
216869cd28daSDoug Ambrisko 
216984af4da6SJohn Dyson 	/*
2170bfbbc4aaSJason Evans 	 * Setup signal.
217184af4da6SJohn Dyson 	 */
21723858a1f4SJohn Baldwin 	if (sig && (mode == LIO_NOWAIT)) {
21733858a1f4SJohn Baldwin 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
217469cd28daSDoug Ambrisko 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
217569cd28daSDoug Ambrisko 			/* Assume only new style KEVENT */
217669cd28daSDoug Ambrisko 			kev.filter = EVFILT_LIO;
217769cd28daSDoug Ambrisko 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
21783858a1f4SJohn Baldwin 			kev.ident = (uintptr_t)uacb_list; /* something unique */
217969cd28daSDoug Ambrisko 			kev.data = (intptr_t)lj;
21801ce91824SDavid Xu 			/* pass user defined sigval data */
21811ce91824SDavid Xu 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
21824db71d27SJohn-Mark Gurney 			error = kqfd_register(
21834db71d27SJohn-Mark Gurney 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
218469cd28daSDoug Ambrisko 			if (error) {
218569cd28daSDoug Ambrisko 				uma_zfree(aiolio_zone, lj);
218669cd28daSDoug Ambrisko 				return (error);
218769cd28daSDoug Ambrisko 			}
21881ce91824SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
21891ce91824SDavid Xu 			;
219068d71118SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
219168d71118SDavid Xu 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
219268d71118SDavid Xu 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
219369cd28daSDoug Ambrisko 					uma_zfree(aiolio_zone, lj);
219469cd28daSDoug Ambrisko 					return EINVAL;
219568d71118SDavid Xu 				}
219684af4da6SJohn Dyson 				lj->lioj_flags |= LIOJ_SIGNAL;
219768d71118SDavid Xu 		} else {
219868d71118SDavid Xu 			uma_zfree(aiolio_zone, lj);
219968d71118SDavid Xu 			return EINVAL;
22004d752b01SAlan Cox 		}
22011ce91824SDavid Xu 	}
220269cd28daSDoug Ambrisko 
2203759ccccaSDavid Xu 	AIO_LOCK(ki);
22042f3cf918SAlfred Perlstein 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
22052244ea07SJohn Dyson 	/*
22061ce91824SDavid Xu 	 * Add extra aiocb count to avoid the lio to be freed
22071ce91824SDavid Xu 	 * by other threads doing aio_waitcomplete or aio_return,
22081ce91824SDavid Xu 	 * and prevent event from being sent until we have queued
22091ce91824SDavid Xu 	 * all tasks.
22101ce91824SDavid Xu 	 */
22111ce91824SDavid Xu 	lj->lioj_count = 1;
2212759ccccaSDavid Xu 	AIO_UNLOCK(ki);
22131ce91824SDavid Xu 
22141ce91824SDavid Xu 	/*
2215bfbbc4aaSJason Evans 	 * Get pointers to the list of I/O requests.
22162244ea07SJohn Dyson 	 */
2217fd3bf775SJohn Dyson 	nerror = 0;
22183858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++) {
22195652770dSJohn Baldwin 		job = acb_list[i];
22205652770dSJohn Baldwin 		if (job != NULL) {
22215652770dSJohn Baldwin 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
22221ce91824SDavid Xu 			if (error != 0)
2223fd3bf775SJohn Dyson 				nerror++;
2224fd3bf775SJohn Dyson 		}
2225fd3bf775SJohn Dyson 	}
22262244ea07SJohn Dyson 
22271ce91824SDavid Xu 	error = 0;
2228759ccccaSDavid Xu 	AIO_LOCK(ki);
22293858a1f4SJohn Baldwin 	if (mode == LIO_WAIT) {
22301ce91824SDavid Xu 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2231fd3bf775SJohn Dyson 			ki->kaio_flags |= KAIO_WAKEUP;
2232759ccccaSDavid Xu 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
22331ce91824SDavid Xu 			    PRIBIO | PCATCH, "aiospn", 0);
22341ce91824SDavid Xu 			if (error == ERESTART)
22351ce91824SDavid Xu 				error = EINTR;
22361ce91824SDavid Xu 			if (error)
22371ce91824SDavid Xu 				break;
22381ce91824SDavid Xu 		}
22391ce91824SDavid Xu 	} else {
22401ce91824SDavid Xu 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
22411ce91824SDavid Xu 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
22421ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
22431ce91824SDavid Xu 				KNOTE_LOCKED(&lj->klist, 1);
22441ce91824SDavid Xu 			}
22451ce91824SDavid Xu 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
22461ce91824SDavid Xu 			    == LIOJ_SIGNAL
22471ce91824SDavid Xu 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
22481ce91824SDavid Xu 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
22491ce91824SDavid Xu 				aio_sendsig(p, &lj->lioj_signal,
22501ce91824SDavid Xu 					    &lj->lioj_ksi);
22511ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
22522244ea07SJohn Dyson 			}
22532244ea07SJohn Dyson 		}
22541ce91824SDavid Xu 	}
22551ce91824SDavid Xu 	lj->lioj_count--;
22561ce91824SDavid Xu 	if (lj->lioj_count == 0) {
22571ce91824SDavid Xu 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
22581ce91824SDavid Xu 		knlist_delete(&lj->klist, curthread, 1);
2259759ccccaSDavid Xu 		PROC_LOCK(p);
22601ce91824SDavid Xu 		sigqueue_take(&lj->lioj_ksi);
22611ce91824SDavid Xu 		PROC_UNLOCK(p);
2262759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22631ce91824SDavid Xu 		uma_zfree(aiolio_zone, lj);
22641ce91824SDavid Xu 	} else
2265759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22662244ea07SJohn Dyson 
22671ce91824SDavid Xu 	if (nerror)
22681ce91824SDavid Xu 		return (EIO);
22691ce91824SDavid Xu 	return (error);
2270ee877a35SJohn Dyson }
2271fd3bf775SJohn Dyson 
22723858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
2273399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
22743858a1f4SJohn Baldwin int
2275399e8c17SJohn Baldwin freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
22763858a1f4SJohn Baldwin {
22773858a1f4SJohn Baldwin 	struct aiocb **acb_list;
22783858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
22793858a1f4SJohn Baldwin 	struct osigevent osig;
22803858a1f4SJohn Baldwin 	int error, nent;
22813858a1f4SJohn Baldwin 
22823858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
22833858a1f4SJohn Baldwin 		return (EINVAL);
22843858a1f4SJohn Baldwin 
22853858a1f4SJohn Baldwin 	nent = uap->nent;
22863858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
22873858a1f4SJohn Baldwin 		return (EINVAL);
22883858a1f4SJohn Baldwin 
22893858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
22903858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
22913858a1f4SJohn Baldwin 		if (error)
22923858a1f4SJohn Baldwin 			return (error);
22933858a1f4SJohn Baldwin 		error = convert_old_sigevent(&osig, &sig);
22943858a1f4SJohn Baldwin 		if (error)
22953858a1f4SJohn Baldwin 			return (error);
22963858a1f4SJohn Baldwin 		sigp = &sig;
22973858a1f4SJohn Baldwin 	} else
22983858a1f4SJohn Baldwin 		sigp = NULL;
22993858a1f4SJohn Baldwin 
23003858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
23013858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
23023858a1f4SJohn Baldwin 	if (error == 0)
23033858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode,
23043858a1f4SJohn Baldwin 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
23053858a1f4SJohn Baldwin 		    &aiocb_ops_osigevent);
23063858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23073858a1f4SJohn Baldwin 	return (error);
23083858a1f4SJohn Baldwin }
2309399e8c17SJohn Baldwin #endif
23103858a1f4SJohn Baldwin 
23113858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
23123858a1f4SJohn Baldwin int
23138451d0ddSKip Macy sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
23143858a1f4SJohn Baldwin {
23153858a1f4SJohn Baldwin 	struct aiocb **acb_list;
23163858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
23173858a1f4SJohn Baldwin 	int error, nent;
23183858a1f4SJohn Baldwin 
23193858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
23203858a1f4SJohn Baldwin 		return (EINVAL);
23213858a1f4SJohn Baldwin 
23223858a1f4SJohn Baldwin 	nent = uap->nent;
23233858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
23243858a1f4SJohn Baldwin 		return (EINVAL);
23253858a1f4SJohn Baldwin 
23263858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
23273858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig, sizeof(sig));
23283858a1f4SJohn Baldwin 		if (error)
23293858a1f4SJohn Baldwin 			return (error);
23303858a1f4SJohn Baldwin 		sigp = &sig;
23313858a1f4SJohn Baldwin 	} else
23323858a1f4SJohn Baldwin 		sigp = NULL;
23333858a1f4SJohn Baldwin 
23343858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
23353858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
23363858a1f4SJohn Baldwin 	if (error == 0)
23373858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
23383858a1f4SJohn Baldwin 		    nent, sigp, &aiocb_ops);
23393858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23403858a1f4SJohn Baldwin 	return (error);
23413858a1f4SJohn Baldwin }
23423858a1f4SJohn Baldwin 
2343fd3bf775SJohn Dyson static void
2344f743d981SAlexander Motin aio_physwakeup(struct bio *bp)
2345fd3bf775SJohn Dyson {
23465652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
23471ce91824SDavid Xu 	struct proc *userp;
234827b8220dSDavid Xu 	struct kaioinfo *ki;
2349f3215338SJohn Baldwin 	size_t nbytes;
2350f3215338SJohn Baldwin 	int error, nblks;
23511ce91824SDavid Xu 
2352f743d981SAlexander Motin 	/* Release mapping into kernel space. */
2353f3215338SJohn Baldwin 	userp = job->userproc;
2354f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
23555652770dSJohn Baldwin 	if (job->pbuf) {
23565652770dSJohn Baldwin 		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
23575652770dSJohn Baldwin 		relpbuf(job->pbuf, NULL);
23585652770dSJohn Baldwin 		job->pbuf = NULL;
2359f743d981SAlexander Motin 		atomic_subtract_int(&num_buf_aio, 1);
2360f3215338SJohn Baldwin 		AIO_LOCK(ki);
2361f3215338SJohn Baldwin 		ki->kaio_buffer_count--;
2362f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
2363f743d981SAlexander Motin 	}
23645652770dSJohn Baldwin 	vm_page_unhold_pages(job->pages, job->npages);
2365f743d981SAlexander Motin 
23665652770dSJohn Baldwin 	bp = job->bp;
23675652770dSJohn Baldwin 	job->bp = NULL;
2368f3215338SJohn Baldwin 	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
2369f3215338SJohn Baldwin 	error = 0;
2370f743d981SAlexander Motin 	if (bp->bio_flags & BIO_ERROR)
2371f3215338SJohn Baldwin 		error = bp->bio_error;
2372f3215338SJohn Baldwin 	nblks = btodb(nbytes);
23735652770dSJohn Baldwin 	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
2374b1012d80SJohn Baldwin 		job->outblock += nblks;
23751ce91824SDavid Xu 	else
2376b1012d80SJohn Baldwin 		job->inblock += nblks;
2377f3215338SJohn Baldwin 
2378f0ec1740SJohn Baldwin 	if (error)
2379f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
2380f0ec1740SJohn Baldwin 	else
2381f0ec1740SJohn Baldwin 		aio_complete(job, nbytes, 0);
23821ce91824SDavid Xu 
2383f743d981SAlexander Motin 	g_destroy_bio(bp);
238484af4da6SJohn Dyson }
2385bfbbc4aaSJason Evans 
2386eb8e6d52SEivind Eklund /* syscall - wait for the next completion of an aio request */
23873858a1f4SJohn Baldwin static int
23885652770dSJohn Baldwin kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
23893858a1f4SJohn Baldwin     struct timespec *ts, struct aiocb_ops *ops)
2390bfbbc4aaSJason Evans {
2391b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
2392bfbbc4aaSJason Evans 	struct timeval atv;
2393bfbbc4aaSJason Evans 	struct kaioinfo *ki;
23945652770dSJohn Baldwin 	struct kaiocb *job;
23955652770dSJohn Baldwin 	struct aiocb *ujob;
2396bb430bc7SJohn Baldwin 	long error, status;
2397bb430bc7SJohn Baldwin 	int timo;
2398bfbbc4aaSJason Evans 
23995652770dSJohn Baldwin 	ops->store_aiocb(ujobp, NULL);
2400dd85920aSJason Evans 
240138d68e2dSPawel Jakub Dawidek 	if (ts == NULL) {
2402bfbbc4aaSJason Evans 		timo = 0;
240338d68e2dSPawel Jakub Dawidek 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
240438d68e2dSPawel Jakub Dawidek 		timo = -1;
240538d68e2dSPawel Jakub Dawidek 	} else {
24063858a1f4SJohn Baldwin 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2407bfbbc4aaSJason Evans 			return (EINVAL);
2408bfbbc4aaSJason Evans 
24093858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2410bfbbc4aaSJason Evans 		if (itimerfix(&atv))
2411bfbbc4aaSJason Evans 			return (EINVAL);
2412bfbbc4aaSJason Evans 		timo = tvtohz(&atv);
2413bfbbc4aaSJason Evans 	}
2414bfbbc4aaSJason Evans 
24158213baf0SChristian S.J. Peron 	if (p->p_aioinfo == NULL)
2416323fe565SDavid Xu 		aio_init_aioinfo(p);
24178213baf0SChristian S.J. Peron 	ki = p->p_aioinfo;
2418bfbbc4aaSJason Evans 
24191ce91824SDavid Xu 	error = 0;
24205652770dSJohn Baldwin 	job = NULL;
2421759ccccaSDavid Xu 	AIO_LOCK(ki);
24225652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
242338d68e2dSPawel Jakub Dawidek 		if (timo == -1) {
242438d68e2dSPawel Jakub Dawidek 			error = EWOULDBLOCK;
242538d68e2dSPawel Jakub Dawidek 			break;
242638d68e2dSPawel Jakub Dawidek 		}
24271ce91824SDavid Xu 		ki->kaio_flags |= KAIO_WAKEUP;
2428759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
24291ce91824SDavid Xu 		    "aiowc", timo);
243027b8220dSDavid Xu 		if (timo && error == ERESTART)
24311ce91824SDavid Xu 			error = EINTR;
24321ce91824SDavid Xu 		if (error)
24331ce91824SDavid Xu 			break;
24341ce91824SDavid Xu 	}
24351ce91824SDavid Xu 
24365652770dSJohn Baldwin 	if (job != NULL) {
2437f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
24385652770dSJohn Baldwin 		ujob = job->ujob;
24395652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
24405652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
24411ce91824SDavid Xu 		td->td_retval[0] = status;
2442b1012d80SJohn Baldwin 		td->td_ru.ru_oublock += job->outblock;
2443b1012d80SJohn Baldwin 		td->td_ru.ru_inblock += job->inblock;
2444b1012d80SJohn Baldwin 		td->td_ru.ru_msgsnd += job->msgsnd;
2445b1012d80SJohn Baldwin 		td->td_ru.ru_msgrcv += job->msgrcv;
24465652770dSJohn Baldwin 		aio_free_entry(job);
2447759ccccaSDavid Xu 		AIO_UNLOCK(ki);
24485652770dSJohn Baldwin 		ops->store_aiocb(ujobp, ujob);
24495652770dSJohn Baldwin 		ops->store_error(ujob, error);
24505652770dSJohn Baldwin 		ops->store_status(ujob, status);
24511ce91824SDavid Xu 	} else
2452759ccccaSDavid Xu 		AIO_UNLOCK(ki);
2453bfbbc4aaSJason Evans 
2454ac41f2efSAlfred Perlstein 	return (error);
2455bfbbc4aaSJason Evans }
2456cb679c38SJonathan Lemon 
245799eee864SDavid Xu int
24588451d0ddSKip Macy sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
24593858a1f4SJohn Baldwin {
24603858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
24613858a1f4SJohn Baldwin 	int error;
24623858a1f4SJohn Baldwin 
24633858a1f4SJohn Baldwin 	if (uap->timeout) {
24643858a1f4SJohn Baldwin 		/* Get timespec struct. */
24653858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts, sizeof(ts));
24663858a1f4SJohn Baldwin 		if (error)
24673858a1f4SJohn Baldwin 			return (error);
24683858a1f4SJohn Baldwin 		tsp = &ts;
24693858a1f4SJohn Baldwin 	} else
24703858a1f4SJohn Baldwin 		tsp = NULL;
24713858a1f4SJohn Baldwin 
24723858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
24733858a1f4SJohn Baldwin }
24743858a1f4SJohn Baldwin 
24753858a1f4SJohn Baldwin static int
24765652770dSJohn Baldwin kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
24773858a1f4SJohn Baldwin     struct aiocb_ops *ops)
247899eee864SDavid Xu {
247999eee864SDavid Xu 	struct proc *p = td->td_proc;
248099eee864SDavid Xu 	struct kaioinfo *ki;
248199eee864SDavid Xu 
24823858a1f4SJohn Baldwin 	if (op != O_SYNC) /* XXX lack of O_DSYNC */
248399eee864SDavid Xu 		return (EINVAL);
248499eee864SDavid Xu 	ki = p->p_aioinfo;
248599eee864SDavid Xu 	if (ki == NULL)
248699eee864SDavid Xu 		aio_init_aioinfo(p);
24875652770dSJohn Baldwin 	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
24883858a1f4SJohn Baldwin }
24893858a1f4SJohn Baldwin 
24903858a1f4SJohn Baldwin int
24918451d0ddSKip Macy sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
24923858a1f4SJohn Baldwin {
24933858a1f4SJohn Baldwin 
24943858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
249599eee864SDavid Xu }
249699eee864SDavid Xu 
2497eb8e6d52SEivind Eklund /* kqueue attach function */
2498cb679c38SJonathan Lemon static int
2499cb679c38SJonathan Lemon filt_aioattach(struct knote *kn)
2500cb679c38SJonathan Lemon {
25015652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)kn->kn_sdata;
2502cb679c38SJonathan Lemon 
2503cb679c38SJonathan Lemon 	/*
25045652770dSJohn Baldwin 	 * The job pointer must be validated before using it, so
2505cb679c38SJonathan Lemon 	 * registration is restricted to the kernel; the user cannot
2506cb679c38SJonathan Lemon 	 * set EV_FLAG1.
2507cb679c38SJonathan Lemon 	 */
2508cb679c38SJonathan Lemon 	if ((kn->kn_flags & EV_FLAG1) == 0)
2509cb679c38SJonathan Lemon 		return (EPERM);
25105652770dSJohn Baldwin 	kn->kn_ptr.p_aio = job;
2511cb679c38SJonathan Lemon 	kn->kn_flags &= ~EV_FLAG1;
2512cb679c38SJonathan Lemon 
25135652770dSJohn Baldwin 	knlist_add(&job->klist, kn, 0);
2514cb679c38SJonathan Lemon 
2515cb679c38SJonathan Lemon 	return (0);
2516cb679c38SJonathan Lemon }
2517cb679c38SJonathan Lemon 
2518eb8e6d52SEivind Eklund /* kqueue detach function */
2519cb679c38SJonathan Lemon static void
2520cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn)
2521cb679c38SJonathan Lemon {
25228e9fc278SDoug Ambrisko 	struct knlist *knl;
2523cb679c38SJonathan Lemon 
25248e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_aio->klist;
25258e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25268e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25278e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25288e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
2529cb679c38SJonathan Lemon }
2530cb679c38SJonathan Lemon 
2531eb8e6d52SEivind Eklund /* kqueue filter function */
2532cb679c38SJonathan Lemon /*ARGSUSED*/
2533cb679c38SJonathan Lemon static int
2534cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint)
2535cb679c38SJonathan Lemon {
25365652770dSJohn Baldwin 	struct kaiocb *job = kn->kn_ptr.p_aio;
2537cb679c38SJonathan Lemon 
25385652770dSJohn Baldwin 	kn->kn_data = job->uaiocb._aiocb_private.error;
2539f3215338SJohn Baldwin 	if (!(job->jobflags & KAIOCB_FINISHED))
2540cb679c38SJonathan Lemon 		return (0);
2541cb679c38SJonathan Lemon 	kn->kn_flags |= EV_EOF;
2542cb679c38SJonathan Lemon 	return (1);
2543cb679c38SJonathan Lemon }
254469cd28daSDoug Ambrisko 
254569cd28daSDoug Ambrisko /* kqueue attach function */
254669cd28daSDoug Ambrisko static int
254769cd28daSDoug Ambrisko filt_lioattach(struct knote *kn)
254869cd28daSDoug Ambrisko {
25491ce91824SDavid Xu 	struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata;
255069cd28daSDoug Ambrisko 
255169cd28daSDoug Ambrisko 	/*
25521ce91824SDavid Xu 	 * The aioliojob pointer must be validated before using it, so
255369cd28daSDoug Ambrisko 	 * registration is restricted to the kernel; the user cannot
255469cd28daSDoug Ambrisko 	 * set EV_FLAG1.
255569cd28daSDoug Ambrisko 	 */
255669cd28daSDoug Ambrisko 	if ((kn->kn_flags & EV_FLAG1) == 0)
255769cd28daSDoug Ambrisko 		return (EPERM);
2558a8afa221SJean-Sébastien Pédron 	kn->kn_ptr.p_lio = lj;
255969cd28daSDoug Ambrisko 	kn->kn_flags &= ~EV_FLAG1;
256069cd28daSDoug Ambrisko 
256169cd28daSDoug Ambrisko 	knlist_add(&lj->klist, kn, 0);
256269cd28daSDoug Ambrisko 
256369cd28daSDoug Ambrisko 	return (0);
256469cd28daSDoug Ambrisko }
256569cd28daSDoug Ambrisko 
256669cd28daSDoug Ambrisko /* kqueue detach function */
256769cd28daSDoug Ambrisko static void
256869cd28daSDoug Ambrisko filt_liodetach(struct knote *kn)
256969cd28daSDoug Ambrisko {
25708e9fc278SDoug Ambrisko 	struct knlist *knl;
257169cd28daSDoug Ambrisko 
25728e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_lio->klist;
25738e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25748e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25758e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25768e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
257769cd28daSDoug Ambrisko }
257869cd28daSDoug Ambrisko 
257969cd28daSDoug Ambrisko /* kqueue filter function */
258069cd28daSDoug Ambrisko /*ARGSUSED*/
258169cd28daSDoug Ambrisko static int
258269cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint)
258369cd28daSDoug Ambrisko {
2584a8afa221SJean-Sébastien Pédron 	struct aioliojob * lj = kn->kn_ptr.p_lio;
25851ce91824SDavid Xu 
258669cd28daSDoug Ambrisko 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
258769cd28daSDoug Ambrisko }
25883858a1f4SJohn Baldwin 
2589841c0c7eSNathan Whitehorn #ifdef COMPAT_FREEBSD32
2590399e8c17SJohn Baldwin #include <sys/mount.h>
2591399e8c17SJohn Baldwin #include <sys/socket.h>
2592399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32.h>
2593399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_proto.h>
2594399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h>
2595399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_syscall.h>
2596399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_util.h>
25973858a1f4SJohn Baldwin 
25983858a1f4SJohn Baldwin struct __aiocb_private32 {
25993858a1f4SJohn Baldwin 	int32_t	status;
26003858a1f4SJohn Baldwin 	int32_t	error;
26013858a1f4SJohn Baldwin 	uint32_t kernelinfo;
26023858a1f4SJohn Baldwin };
26033858a1f4SJohn Baldwin 
2604399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
26053858a1f4SJohn Baldwin typedef struct oaiocb32 {
26063858a1f4SJohn Baldwin 	int	aio_fildes;		/* File descriptor */
26073858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
26083858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
26093858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26103858a1f4SJohn Baldwin 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
26113858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26123858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26133858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26143858a1f4SJohn Baldwin } oaiocb32_t;
2615399e8c17SJohn Baldwin #endif
26163858a1f4SJohn Baldwin 
26173858a1f4SJohn Baldwin typedef struct aiocb32 {
26183858a1f4SJohn Baldwin 	int32_t	aio_fildes;		/* File descriptor */
26193858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
26203858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
26213858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26223858a1f4SJohn Baldwin 	int	__spare__[2];
26233858a1f4SJohn Baldwin 	uint32_t __spare2__;
26243858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26253858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26263858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26273858a1f4SJohn Baldwin 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
26283858a1f4SJohn Baldwin } aiocb32_t;
26293858a1f4SJohn Baldwin 
2630399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
26313858a1f4SJohn Baldwin static int
26323858a1f4SJohn Baldwin convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
26333858a1f4SJohn Baldwin {
26343858a1f4SJohn Baldwin 
26353858a1f4SJohn Baldwin 	/*
26363858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
26373858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
26383858a1f4SJohn Baldwin 	 */
26393858a1f4SJohn Baldwin 	CP(*osig, *nsig, sigev_notify);
26403858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
26413858a1f4SJohn Baldwin 	case SIGEV_NONE:
26423858a1f4SJohn Baldwin 		break;
26433858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
26443858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
26453858a1f4SJohn Baldwin 		break;
26463858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
26473858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
26483858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
26493858a1f4SJohn Baldwin 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
26503858a1f4SJohn Baldwin 		break;
26513858a1f4SJohn Baldwin 	default:
26523858a1f4SJohn Baldwin 		return (EINVAL);
26533858a1f4SJohn Baldwin 	}
26543858a1f4SJohn Baldwin 	return (0);
26553858a1f4SJohn Baldwin }
26563858a1f4SJohn Baldwin 
26573858a1f4SJohn Baldwin static int
26583858a1f4SJohn Baldwin aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
26593858a1f4SJohn Baldwin {
26603858a1f4SJohn Baldwin 	struct oaiocb32 job32;
26613858a1f4SJohn Baldwin 	int error;
26623858a1f4SJohn Baldwin 
26633858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
26643858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26653858a1f4SJohn Baldwin 	if (error)
26663858a1f4SJohn Baldwin 		return (error);
26673858a1f4SJohn Baldwin 
26683858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26693858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26703858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26713858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26723858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26733858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26743858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26753858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26763858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26773858a1f4SJohn Baldwin 	return (convert_old_sigevent32(&job32.aio_sigevent,
26783858a1f4SJohn Baldwin 	    &kjob->aio_sigevent));
26793858a1f4SJohn Baldwin }
2680399e8c17SJohn Baldwin #endif
26813858a1f4SJohn Baldwin 
26823858a1f4SJohn Baldwin static int
26833858a1f4SJohn Baldwin aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
26843858a1f4SJohn Baldwin {
26853858a1f4SJohn Baldwin 	struct aiocb32 job32;
26863858a1f4SJohn Baldwin 	int error;
26873858a1f4SJohn Baldwin 
26883858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26893858a1f4SJohn Baldwin 	if (error)
26903858a1f4SJohn Baldwin 		return (error);
26913858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26923858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26933858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26943858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26953858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26963858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26973858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26983858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26993858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
27003858a1f4SJohn Baldwin 	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
27013858a1f4SJohn Baldwin }
27023858a1f4SJohn Baldwin 
27033858a1f4SJohn Baldwin static long
27043858a1f4SJohn Baldwin aiocb32_fetch_status(struct aiocb *ujob)
27053858a1f4SJohn Baldwin {
27063858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27073858a1f4SJohn Baldwin 
27083858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27093858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.status));
27103858a1f4SJohn Baldwin }
27113858a1f4SJohn Baldwin 
27123858a1f4SJohn Baldwin static long
27133858a1f4SJohn Baldwin aiocb32_fetch_error(struct aiocb *ujob)
27143858a1f4SJohn Baldwin {
27153858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27163858a1f4SJohn Baldwin 
27173858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27183858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.error));
27193858a1f4SJohn Baldwin }
27203858a1f4SJohn Baldwin 
27213858a1f4SJohn Baldwin static int
27223858a1f4SJohn Baldwin aiocb32_store_status(struct aiocb *ujob, long status)
27233858a1f4SJohn Baldwin {
27243858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27253858a1f4SJohn Baldwin 
27263858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27273858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.status, status));
27283858a1f4SJohn Baldwin }
27293858a1f4SJohn Baldwin 
27303858a1f4SJohn Baldwin static int
27313858a1f4SJohn Baldwin aiocb32_store_error(struct aiocb *ujob, long error)
27323858a1f4SJohn Baldwin {
27333858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27343858a1f4SJohn Baldwin 
27353858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27363858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.error, error));
27373858a1f4SJohn Baldwin }
27383858a1f4SJohn Baldwin 
27393858a1f4SJohn Baldwin static int
27403858a1f4SJohn Baldwin aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
27413858a1f4SJohn Baldwin {
27423858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27433858a1f4SJohn Baldwin 
27443858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27453858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
27463858a1f4SJohn Baldwin }
27473858a1f4SJohn Baldwin 
27483858a1f4SJohn Baldwin static int
27493858a1f4SJohn Baldwin aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
27503858a1f4SJohn Baldwin {
27513858a1f4SJohn Baldwin 
27523858a1f4SJohn Baldwin 	return (suword32(ujobp, (long)ujob));
27533858a1f4SJohn Baldwin }
27543858a1f4SJohn Baldwin 
27553858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops = {
27563858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin,
27573858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27583858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27593858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27603858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27613858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27623858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27633858a1f4SJohn Baldwin };
27643858a1f4SJohn Baldwin 
2765399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
27663858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops_osigevent = {
27673858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin_old_sigevent,
27683858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27693858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27703858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27713858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27723858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27733858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27743858a1f4SJohn Baldwin };
2775399e8c17SJohn Baldwin #endif
27763858a1f4SJohn Baldwin 
27773858a1f4SJohn Baldwin int
27783858a1f4SJohn Baldwin freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
27793858a1f4SJohn Baldwin {
27803858a1f4SJohn Baldwin 
27813858a1f4SJohn Baldwin 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
27823858a1f4SJohn Baldwin }
27833858a1f4SJohn Baldwin 
27843858a1f4SJohn Baldwin int
27853858a1f4SJohn Baldwin freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
27863858a1f4SJohn Baldwin {
27873858a1f4SJohn Baldwin 	struct timespec32 ts32;
27883858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
27893858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
27903858a1f4SJohn Baldwin 	uint32_t *ujoblist32;
27913858a1f4SJohn Baldwin 	int error, i;
27923858a1f4SJohn Baldwin 
27933858a1f4SJohn Baldwin 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
27943858a1f4SJohn Baldwin 		return (EINVAL);
27953858a1f4SJohn Baldwin 
27963858a1f4SJohn Baldwin 	if (uap->timeout) {
27973858a1f4SJohn Baldwin 		/* Get timespec struct. */
27983858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
27993858a1f4SJohn Baldwin 			return (error);
28003858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28013858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28023858a1f4SJohn Baldwin 		tsp = &ts;
28033858a1f4SJohn Baldwin 	} else
28043858a1f4SJohn Baldwin 		tsp = NULL;
28053858a1f4SJohn Baldwin 
28063858a1f4SJohn Baldwin 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
28073858a1f4SJohn Baldwin 	ujoblist32 = (uint32_t *)ujoblist;
28083858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
28093858a1f4SJohn Baldwin 	    sizeof(ujoblist32[0]));
28103858a1f4SJohn Baldwin 	if (error == 0) {
28113858a1f4SJohn Baldwin 		for (i = uap->nent; i > 0; i--)
28123858a1f4SJohn Baldwin 			ujoblist[i] = PTRIN(ujoblist32[i]);
28133858a1f4SJohn Baldwin 
28143858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
28153858a1f4SJohn Baldwin 	}
28163858a1f4SJohn Baldwin 	uma_zfree(aiol_zone, ujoblist);
28173858a1f4SJohn Baldwin 	return (error);
28183858a1f4SJohn Baldwin }
28193858a1f4SJohn Baldwin 
28203858a1f4SJohn Baldwin int
28213858a1f4SJohn Baldwin freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
28223858a1f4SJohn Baldwin {
28233858a1f4SJohn Baldwin 
28243858a1f4SJohn Baldwin 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
28253858a1f4SJohn Baldwin }
28263858a1f4SJohn Baldwin 
2827399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28283858a1f4SJohn Baldwin int
2829399e8c17SJohn Baldwin freebsd6_freebsd32_aio_read(struct thread *td,
2830399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_read_args *uap)
28313858a1f4SJohn Baldwin {
28323858a1f4SJohn Baldwin 
28333858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28343858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28353858a1f4SJohn Baldwin }
2836399e8c17SJohn Baldwin #endif
28373858a1f4SJohn Baldwin 
28383858a1f4SJohn Baldwin int
28393858a1f4SJohn Baldwin freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
28403858a1f4SJohn Baldwin {
28413858a1f4SJohn Baldwin 
28423858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28433858a1f4SJohn Baldwin 	    &aiocb32_ops));
28443858a1f4SJohn Baldwin }
28453858a1f4SJohn Baldwin 
2846399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28473858a1f4SJohn Baldwin int
2848399e8c17SJohn Baldwin freebsd6_freebsd32_aio_write(struct thread *td,
2849399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_write_args *uap)
28503858a1f4SJohn Baldwin {
28513858a1f4SJohn Baldwin 
28523858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28533858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28543858a1f4SJohn Baldwin }
2855399e8c17SJohn Baldwin #endif
28563858a1f4SJohn Baldwin 
28573858a1f4SJohn Baldwin int
28583858a1f4SJohn Baldwin freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
28593858a1f4SJohn Baldwin {
28603858a1f4SJohn Baldwin 
28613858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28623858a1f4SJohn Baldwin 	    &aiocb32_ops));
28633858a1f4SJohn Baldwin }
28643858a1f4SJohn Baldwin 
28653858a1f4SJohn Baldwin int
28666160e12cSGleb Smirnoff freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
28676160e12cSGleb Smirnoff {
28686160e12cSGleb Smirnoff 
28696160e12cSGleb Smirnoff 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
28706160e12cSGleb Smirnoff 	    &aiocb32_ops));
28716160e12cSGleb Smirnoff }
28726160e12cSGleb Smirnoff 
28736160e12cSGleb Smirnoff int
28743858a1f4SJohn Baldwin freebsd32_aio_waitcomplete(struct thread *td,
28753858a1f4SJohn Baldwin     struct freebsd32_aio_waitcomplete_args *uap)
28763858a1f4SJohn Baldwin {
2877e588eeb1SJohn Baldwin 	struct timespec32 ts32;
28783858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
28793858a1f4SJohn Baldwin 	int error;
28803858a1f4SJohn Baldwin 
28813858a1f4SJohn Baldwin 	if (uap->timeout) {
28823858a1f4SJohn Baldwin 		/* Get timespec struct. */
28833858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
28843858a1f4SJohn Baldwin 		if (error)
28853858a1f4SJohn Baldwin 			return (error);
28863858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28873858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28883858a1f4SJohn Baldwin 		tsp = &ts;
28893858a1f4SJohn Baldwin 	} else
28903858a1f4SJohn Baldwin 		tsp = NULL;
28913858a1f4SJohn Baldwin 
28923858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
28933858a1f4SJohn Baldwin 	    &aiocb32_ops));
28943858a1f4SJohn Baldwin }
28953858a1f4SJohn Baldwin 
28963858a1f4SJohn Baldwin int
28973858a1f4SJohn Baldwin freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
28983858a1f4SJohn Baldwin {
28993858a1f4SJohn Baldwin 
29003858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
29013858a1f4SJohn Baldwin 	    &aiocb32_ops));
29023858a1f4SJohn Baldwin }
29033858a1f4SJohn Baldwin 
2904399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
29053858a1f4SJohn Baldwin int
2906399e8c17SJohn Baldwin freebsd6_freebsd32_lio_listio(struct thread *td,
2907399e8c17SJohn Baldwin     struct freebsd6_freebsd32_lio_listio_args *uap)
29083858a1f4SJohn Baldwin {
29093858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29103858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29113858a1f4SJohn Baldwin 	struct osigevent32 osig;
29123858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29133858a1f4SJohn Baldwin 	int error, i, nent;
29143858a1f4SJohn Baldwin 
29153858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29163858a1f4SJohn Baldwin 		return (EINVAL);
29173858a1f4SJohn Baldwin 
29183858a1f4SJohn Baldwin 	nent = uap->nent;
29193858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
29203858a1f4SJohn Baldwin 		return (EINVAL);
29213858a1f4SJohn Baldwin 
29223858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29233858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
29243858a1f4SJohn Baldwin 		if (error)
29253858a1f4SJohn Baldwin 			return (error);
29263858a1f4SJohn Baldwin 		error = convert_old_sigevent32(&osig, &sig);
29273858a1f4SJohn Baldwin 		if (error)
29283858a1f4SJohn Baldwin 			return (error);
29293858a1f4SJohn Baldwin 		sigp = &sig;
29303858a1f4SJohn Baldwin 	} else
29313858a1f4SJohn Baldwin 		sigp = NULL;
29323858a1f4SJohn Baldwin 
29333858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29343858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29353858a1f4SJohn Baldwin 	if (error) {
29363858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29373858a1f4SJohn Baldwin 		return (error);
29383858a1f4SJohn Baldwin 	}
29393858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29403858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29413858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29423858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29433858a1f4SJohn Baldwin 
29443858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29453858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29463858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent);
29473858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29483858a1f4SJohn Baldwin 	return (error);
29493858a1f4SJohn Baldwin }
2950399e8c17SJohn Baldwin #endif
29513858a1f4SJohn Baldwin 
29523858a1f4SJohn Baldwin int
29533858a1f4SJohn Baldwin freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
29543858a1f4SJohn Baldwin {
29553858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29563858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29573858a1f4SJohn Baldwin 	struct sigevent32 sig32;
29583858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29593858a1f4SJohn Baldwin 	int error, i, nent;
29603858a1f4SJohn Baldwin 
29613858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29623858a1f4SJohn Baldwin 		return (EINVAL);
29633858a1f4SJohn Baldwin 
29643858a1f4SJohn Baldwin 	nent = uap->nent;
29653858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
29663858a1f4SJohn Baldwin 		return (EINVAL);
29673858a1f4SJohn Baldwin 
29683858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29693858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig32, sizeof(sig32));
29703858a1f4SJohn Baldwin 		if (error)
29713858a1f4SJohn Baldwin 			return (error);
29723858a1f4SJohn Baldwin 		error = convert_sigevent32(&sig32, &sig);
29733858a1f4SJohn Baldwin 		if (error)
29743858a1f4SJohn Baldwin 			return (error);
29753858a1f4SJohn Baldwin 		sigp = &sig;
29763858a1f4SJohn Baldwin 	} else
29773858a1f4SJohn Baldwin 		sigp = NULL;
29783858a1f4SJohn Baldwin 
29793858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29803858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29813858a1f4SJohn Baldwin 	if (error) {
29823858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29833858a1f4SJohn Baldwin 		return (error);
29843858a1f4SJohn Baldwin 	}
29853858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29863858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29873858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29883858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29893858a1f4SJohn Baldwin 
29903858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29913858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29923858a1f4SJohn Baldwin 	    &aiocb32_ops);
29933858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29943858a1f4SJohn Baldwin 	return (error);
29953858a1f4SJohn Baldwin }
29963858a1f4SJohn Baldwin 
29973858a1f4SJohn Baldwin #endif
2998