xref: /freebsd/sys/kern/vfs_aio.c (revision f3215338ef82c7798bebca17a7d502cc5ef8bc18)
19454b2d8SWarner Losh /*-
2ee877a35SJohn Dyson  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
3ee877a35SJohn Dyson  *
4ee877a35SJohn Dyson  * Redistribution and use in source and binary forms, with or without
5ee877a35SJohn Dyson  * modification, are permitted provided that the following conditions
6ee877a35SJohn Dyson  * are met:
7ee877a35SJohn Dyson  * 1. Redistributions of source code must retain the above copyright
8ee877a35SJohn Dyson  *    notice, this list of conditions and the following disclaimer.
9ee877a35SJohn Dyson  * 2. John S. Dyson's name may not be used to endorse or promote products
10ee877a35SJohn Dyson  *    derived from this software without specific prior written permission.
11ee877a35SJohn Dyson  *
12ee877a35SJohn Dyson  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
13ee877a35SJohn Dyson  * bad that happens because of using this software isn't the responsibility
14ee877a35SJohn Dyson  * of the author.  This software is distributed AS-IS.
15ee877a35SJohn Dyson  */
16ee877a35SJohn Dyson 
17ee877a35SJohn Dyson /*
188a6472b7SPeter Dufault  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19ee877a35SJohn Dyson  */
20ee877a35SJohn Dyson 
21677b542eSDavid E. O'Brien #include <sys/cdefs.h>
22677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
23677b542eSDavid E. O'Brien 
243858a1f4SJohn Baldwin #include "opt_compat.h"
253858a1f4SJohn Baldwin 
26ee877a35SJohn Dyson #include <sys/param.h>
27ee877a35SJohn Dyson #include <sys/systm.h>
28f591779bSSeigo Tanimura #include <sys/malloc.h>
299626b608SPoul-Henning Kamp #include <sys/bio.h>
30a5c9bce7SBruce Evans #include <sys/buf.h>
314a144410SRobert Watson #include <sys/capsicum.h>
3275b8b3b2SJohn Baldwin #include <sys/eventhandler.h>
33ee877a35SJohn Dyson #include <sys/sysproto.h>
34ee877a35SJohn Dyson #include <sys/filedesc.h>
35ee877a35SJohn Dyson #include <sys/kernel.h>
3677409fe1SPoul-Henning Kamp #include <sys/module.h>
37c9a970a7SAlan Cox #include <sys/kthread.h>
38ee877a35SJohn Dyson #include <sys/fcntl.h>
39ee877a35SJohn Dyson #include <sys/file.h>
40104a9b7eSAlexander Kabaev #include <sys/limits.h>
41fdebd4f0SBruce Evans #include <sys/lock.h>
4235e0e5b3SJohn Baldwin #include <sys/mutex.h>
43ee877a35SJohn Dyson #include <sys/unistd.h>
446aeb05d7STom Rhodes #include <sys/posix4.h>
45ee877a35SJohn Dyson #include <sys/proc.h>
462d2f8ae7SBruce Evans #include <sys/resourcevar.h>
47ee877a35SJohn Dyson #include <sys/signalvar.h>
48bfbbc4aaSJason Evans #include <sys/protosw.h>
4989f6b863SAttilio Rao #include <sys/rwlock.h>
501ce91824SDavid Xu #include <sys/sema.h>
511ce91824SDavid Xu #include <sys/socket.h>
52bfbbc4aaSJason Evans #include <sys/socketvar.h>
5321d56e9cSAlfred Perlstein #include <sys/syscall.h>
5421d56e9cSAlfred Perlstein #include <sys/sysent.h>
55a624e84fSJohn Dyson #include <sys/sysctl.h>
56ee99e978SBruce Evans #include <sys/sx.h>
571ce91824SDavid Xu #include <sys/taskqueue.h>
58fd3bf775SJohn Dyson #include <sys/vnode.h>
59fd3bf775SJohn Dyson #include <sys/conf.h>
60cb679c38SJonathan Lemon #include <sys/event.h>
6199eee864SDavid Xu #include <sys/mount.h>
62f743d981SAlexander Motin #include <geom/geom.h>
63ee877a35SJohn Dyson 
641ce91824SDavid Xu #include <machine/atomic.h>
651ce91824SDavid Xu 
66ee877a35SJohn Dyson #include <vm/vm.h>
67f743d981SAlexander Motin #include <vm/vm_page.h>
68ee877a35SJohn Dyson #include <vm/vm_extern.h>
692244ea07SJohn Dyson #include <vm/pmap.h>
702244ea07SJohn Dyson #include <vm/vm_map.h>
7199eee864SDavid Xu #include <vm/vm_object.h>
72c897b813SJeff Roberson #include <vm/uma.h>
73ee877a35SJohn Dyson #include <sys/aio.h>
745aaef07cSJohn Dyson 
75eb8e6d52SEivind Eklund /*
76eb8e6d52SEivind Eklund  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
7799eee864SDavid Xu  * overflow. (XXX will be removed soon.)
78eb8e6d52SEivind Eklund  */
7999eee864SDavid Xu static u_long jobrefid;
802244ea07SJohn Dyson 
8199eee864SDavid Xu /*
8299eee864SDavid Xu  * Counter for aio_fsync.
8399eee864SDavid Xu  */
8499eee864SDavid Xu static uint64_t jobseqno;
8599eee864SDavid Xu 
8684af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC
872244ea07SJohn Dyson #define MAX_AIO_PER_PROC	32
8884af4da6SJohn Dyson #endif
8984af4da6SJohn Dyson 
9084af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC
912244ea07SJohn Dyson #define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
9284af4da6SJohn Dyson #endif
9384af4da6SJohn Dyson 
9484af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE
952244ea07SJohn Dyson #define	MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
9684af4da6SJohn Dyson #endif
9784af4da6SJohn Dyson 
9884af4da6SJohn Dyson #ifndef MAX_BUF_AIO
9984af4da6SJohn Dyson #define MAX_BUF_AIO		16
10084af4da6SJohn Dyson #endif
10184af4da6SJohn Dyson 
102e603be7aSRobert Watson FEATURE(aio, "Asynchronous I/O");
103e603be7aSRobert Watson 
1043858a1f4SJohn Baldwin static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
1053858a1f4SJohn Baldwin 
1060dd6c035SJohn Baldwin static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,
1070dd6c035SJohn Baldwin     "Async IO management");
108eb8e6d52SEivind Eklund 
109*f3215338SJohn Baldwin static int enable_aio_unsafe = 0;
110*f3215338SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
111*f3215338SJohn Baldwin     "Permit asynchronous IO on all file types, not just known-safe types");
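/*
 * Example (illustrative): unsafe AIO can be permitted at runtime with
 * "sysctl vfs.aio.enable_unsafe=1".
 */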
112*f3215338SJohn Baldwin 
113303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS;
1140dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
11539314b7dSJohn Baldwin     "Maximum number of kernel processes to use for handling async IO");
116a624e84fSJohn Dyson 
117eb8e6d52SEivind Eklund static int num_aio_procs = 0;
1180dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
11939314b7dSJohn Baldwin     "Number of presently active kernel processes for async IO");
120a624e84fSJohn Dyson 
121eb8e6d52SEivind Eklund /*
122eb8e6d52SEivind Eklund  * The code will adjust the actual number of AIO processes towards this
123eb8e6d52SEivind Eklund  * number when it gets a chance.
124eb8e6d52SEivind Eklund  */
125eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
126eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
1270dd6c035SJohn Baldwin     0,
1280dd6c035SJohn Baldwin     "Preferred number of ready kernel processes for async IO");
129a624e84fSJohn Dyson 
130eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
131eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
132eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
133a624e84fSJohn Dyson 
134eb8e6d52SEivind Eklund static int num_queue_count = 0;
135eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
136eb8e6d52SEivind Eklund     "Number of queued aio requests");
137a624e84fSJohn Dyson 
138eb8e6d52SEivind Eklund static int num_buf_aio = 0;
139eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
140eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
141fd3bf775SJohn Dyson 
14239314b7dSJohn Baldwin /* Number of async I/O processes in the process of being started */
143a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
144eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
145fd3bf775SJohn Dyson 
146eb8e6d52SEivind Eklund static int aiod_lifetime;
147eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
148eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
14984af4da6SJohn Dyson 
150eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
151eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
1520dd6c035SJohn Baldwin     0,
1530dd6c035SJohn Baldwin     "Maximum active aio requests per process (stored in the process)");
154eb8e6d52SEivind Eklund 
155eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
156eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
157eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
158eb8e6d52SEivind Eklund     "Maximum queued aio requests per process (stored in the process)");
159eb8e6d52SEivind Eklund 
160eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
161eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
162eb8e6d52SEivind Eklund     "Maximum buf aio requests per process (stored in the process)");
163eb8e6d52SEivind Eklund 
1640972628aSDavid Xu typedef struct oaiocb {
1650972628aSDavid Xu 	int	aio_fildes;		/* File descriptor */
1660972628aSDavid Xu 	off_t	aio_offset;		/* File offset for I/O */
1670972628aSDavid Xu 	volatile void *aio_buf;         /* I/O buffer in process space */
1680972628aSDavid Xu 	size_t	aio_nbytes;		/* Number of bytes for I/O */
1690972628aSDavid Xu 	struct	osigevent aio_sigevent;	/* Signal to deliver */
1700972628aSDavid Xu 	int	aio_lio_opcode;		/* LIO opcode */
1710972628aSDavid Xu 	int	aio_reqprio;		/* Request priority -- ignored */
1720972628aSDavid Xu 	struct	__aiocb_private	_aiocb_private;
1730972628aSDavid Xu } oaiocb_t;
1740972628aSDavid Xu 
1751aa4c324SDavid Xu /*
1765652770dSJohn Baldwin  * Below is a key of the locks used to protect each member of struct kaiocb,
1771aa4c324SDavid Xu  * aioliojob, and kaioinfo, and any backends.
1781aa4c324SDavid Xu  *
1791aa4c324SDavid Xu  * * - need not be protected
180759ccccaSDavid Xu  * a - locked by the kaioinfo lock
1811aa4c324SDavid Xu  * b - locked by the backend lock; the backend lock can be NULL in some
1821aa4c324SDavid Xu  *     cases (for example, for BIO requests), in which case the proc
1831aa4c324SDavid Xu  *     lock is reused.
1841aa4c324SDavid Xu  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
1851aa4c324SDavid Xu  */
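/*
 * For example, kaio_count below is tagged "(a)" and so must only be
 * accessed with AIO_LOCK(ki) held, while kaio_active_count is tagged
 * "(c)" and is protected by aio_job_mtx.
 */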
1861aa4c324SDavid Xu 
1871aa4c324SDavid Xu /*
188*f3215338SJohn Baldwin  * If the routine that services an AIO request blocks while running in an
189*f3215338SJohn Baldwin  * AIO kernel process it can starve other I/O requests.  BIO requests
190*f3215338SJohn Baldwin  * queued via aio_qphysio() complete in GEOM and do not use AIO kernel
191*f3215338SJohn Baldwin  * processes at all.  Socket I/O requests use a separate pool of
192*f3215338SJohn Baldwin  * kprocs and also force non-blocking I/O.  Other file I/O requests
193*f3215338SJohn Baldwin  * use the generic fo_read/fo_write operations which can block.  The
194*f3215338SJohn Baldwin  * fsync and mlock operations can also block while executing.  Ideally
195*f3215338SJohn Baldwin  * none of these requests would block while executing.
196*f3215338SJohn Baldwin  *
197*f3215338SJohn Baldwin  * Note that the service routines cannot toggle O_NONBLOCK in the file
198*f3215338SJohn Baldwin  * structure directly while handling a request due to races with
199*f3215338SJohn Baldwin  * userland threads.
2001aa4c324SDavid Xu  */
2011aa4c324SDavid Xu 
20248dac059SAlan Cox /* jobflags */
203*f3215338SJohn Baldwin #define	KAIOCB_QUEUEING		0x01
204*f3215338SJohn Baldwin #define	KAIOCB_CANCELLED	0x02
205*f3215338SJohn Baldwin #define	KAIOCB_CANCELLING	0x04
2065652770dSJohn Baldwin #define	KAIOCB_CHECKSYNC	0x08
207*f3215338SJohn Baldwin #define	KAIOCB_CLEARED		0x10
208*f3215338SJohn Baldwin #define	KAIOCB_FINISHED		0x20
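/*
 * KAIOCB_QUEUEING defers completion notification while a request is
 * still being queued; KAIOCB_CANCELLED and KAIOCB_CANCELLING are
 * managed by aio_cancel_job(); KAIOCB_CLEARED records that
 * aio_clear_cancel_function() ran while a cancel was in progress;
 * KAIOCB_CHECKSYNC makes completion check kaio_syncqueue; and
 * KAIOCB_FINISHED is set by aio_complete().
 */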
20948dac059SAlan Cox 
2102244ea07SJohn Dyson /*
2112244ea07SJohn Dyson  * AIO process info
2122244ea07SJohn Dyson  */
21384af4da6SJohn Dyson #define AIOP_FREE	0x1			/* proc on free queue */
21484af4da6SJohn Dyson 
21539314b7dSJohn Baldwin struct aioproc {
21639314b7dSJohn Baldwin 	int	aioprocflags;			/* (c) AIO proc flags */
21739314b7dSJohn Baldwin 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
21839314b7dSJohn Baldwin 	struct	proc *aioproc;			/* (*) the AIO proc */
2192244ea07SJohn Dyson };
2202244ea07SJohn Dyson 
22184af4da6SJohn Dyson /*
22284af4da6SJohn Dyson  * data-structure for lio signal management
22384af4da6SJohn Dyson  */
2241ce91824SDavid Xu struct aioliojob {
2251aa4c324SDavid Xu 	int	lioj_flags;			/* (a) listio flags */
2261aa4c324SDavid Xu 	int	lioj_count;			/* (a) count of jobs in this lio */
2271aa4c324SDavid Xu 	int	lioj_finished_count;		/* (a) count of finished jobs */
2281aa4c324SDavid Xu 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
2291aa4c324SDavid Xu 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
2301aa4c324SDavid Xu 	struct	knlist klist;			/* (a) list of knotes */
2311aa4c324SDavid Xu 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
23284af4da6SJohn Dyson };
2331ce91824SDavid Xu 
23484af4da6SJohn Dyson #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
23584af4da6SJohn Dyson #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
23669cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
23784af4da6SJohn Dyson 
23884af4da6SJohn Dyson /*
23984af4da6SJohn Dyson  * per process aio data structure
24084af4da6SJohn Dyson  */
2412244ea07SJohn Dyson struct kaioinfo {
242759ccccaSDavid Xu 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
2431aa4c324SDavid Xu 	int	kaio_flags;		/* (a) per process kaio flags */
2441aa4c324SDavid Xu 	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
2451aa4c324SDavid Xu 	int	kaio_active_count;	/* (c) number of currently used AIOs */
2461aa4c324SDavid Xu 	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
2471aa4c324SDavid Xu 	int	kaio_count;		/* (a) size of AIO queue */
2481aa4c324SDavid Xu 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
2491aa4c324SDavid Xu 	int	kaio_buffer_count;	/* (a) number of physio buffers */
2505652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
2515652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
2521aa4c324SDavid Xu 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
2535652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
2545652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
255*f3215338SJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
25639314b7dSJohn Baldwin 	struct	task kaio_task;		/* (*) task to kick aio processes */
257*f3215338SJohn Baldwin 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
2582244ea07SJohn Dyson };
2592244ea07SJohn Dyson 
260759ccccaSDavid Xu #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
261759ccccaSDavid Xu #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
262759ccccaSDavid Xu #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
263759ccccaSDavid Xu #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
264759ccccaSDavid Xu 
26584af4da6SJohn Dyson #define KAIO_RUNDOWN	0x1	/* process is being run down */
2660dd6c035SJohn Baldwin #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
267fd3bf775SJohn Dyson 
2683858a1f4SJohn Baldwin /*
2693858a1f4SJohn Baldwin  * Operations used to interact with userland aio control blocks.
2703858a1f4SJohn Baldwin  * Different ABIs provide their own operations.
2713858a1f4SJohn Baldwin  */
2723858a1f4SJohn Baldwin struct aiocb_ops {
2733858a1f4SJohn Baldwin 	int	(*copyin)(struct aiocb *ujob, struct aiocb *kjob);
2743858a1f4SJohn Baldwin 	long	(*fetch_status)(struct aiocb *ujob);
2753858a1f4SJohn Baldwin 	long	(*fetch_error)(struct aiocb *ujob);
2763858a1f4SJohn Baldwin 	int	(*store_status)(struct aiocb *ujob, long status);
2773858a1f4SJohn Baldwin 	int	(*store_error)(struct aiocb *ujob, long error);
2783858a1f4SJohn Baldwin 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
2793858a1f4SJohn Baldwin 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
2803858a1f4SJohn Baldwin };
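/*
 * Sketch (assumed shape, with hypothetical function names) of how an
 * ABI supplies an instance of these operations:
 *
 *	static struct aiocb_ops aiocb_ops = {
 *		.copyin = aiocb_copyin,
 *		.fetch_status = aiocb_fetch_status,
 *		.fetch_error = aiocb_fetch_error,
 *		.store_status = aiocb_store_status,
 *		.store_error = aiocb_store_error,
 *		.store_kernelinfo = aiocb_store_kernelinfo,
 *		.store_aiocb = aiocb_store_aiocb,
 *	};
 */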
2813858a1f4SJohn Baldwin 
28239314b7dSJohn Baldwin static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
2831ce91824SDavid Xu static struct sema aio_newproc_sem;
2841ce91824SDavid Xu static struct mtx aio_job_mtx;
2855652770dSJohn Baldwin static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
2861ce91824SDavid Xu static struct unrhdr *aiod_unr;
2872244ea07SJohn Dyson 
2886a1162d4SAlexander Leidinger void		aio_init_aioinfo(struct proc *p);
289723d37c0SKonstantin Belousov static int	aio_onceonly(void);
2905652770dSJohn Baldwin static int	aio_free_entry(struct kaiocb *job);
2915652770dSJohn Baldwin static void	aio_process_rw(struct kaiocb *job);
2925652770dSJohn Baldwin static void	aio_process_sync(struct kaiocb *job);
2935652770dSJohn Baldwin static void	aio_process_mlock(struct kaiocb *job);
294*f3215338SJohn Baldwin static void	aio_schedule_fsync(void *context, int pending);
2951ce91824SDavid Xu static int	aio_newproc(int *);
2965652770dSJohn Baldwin int		aio_aqueue(struct thread *td, struct aiocb *ujob,
2973858a1f4SJohn Baldwin 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
298*f3215338SJohn Baldwin static int	aio_queue_file(struct file *fp, struct kaiocb *job);
299f743d981SAlexander Motin static void	aio_physwakeup(struct bio *bp);
30075b8b3b2SJohn Baldwin static void	aio_proc_rundown(void *arg, struct proc *p);
3010dd6c035SJohn Baldwin static void	aio_proc_rundown_exec(void *arg, struct proc *p,
3020dd6c035SJohn Baldwin 		    struct image_params *imgp);
3035652770dSJohn Baldwin static int	aio_qphysio(struct proc *p, struct kaiocb *job);
3041ce91824SDavid Xu static void	aio_daemon(void *param);
305*f3215338SJohn Baldwin static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
306dbbccfe9SDavid Xu static int	aio_kick(struct proc *userp);
30799eee864SDavid Xu static void	aio_kick_nowait(struct proc *userp);
30899eee864SDavid Xu static void	aio_kick_helper(void *context, int pending);
30921d56e9cSAlfred Perlstein static int	filt_aioattach(struct knote *kn);
31021d56e9cSAlfred Perlstein static void	filt_aiodetach(struct knote *kn);
31121d56e9cSAlfred Perlstein static int	filt_aio(struct knote *kn, long hint);
31269cd28daSDoug Ambrisko static int	filt_lioattach(struct knote *kn);
31369cd28daSDoug Ambrisko static void	filt_liodetach(struct knote *kn);
31469cd28daSDoug Ambrisko static int	filt_lio(struct knote *kn, long hint);
3152244ea07SJohn Dyson 
316eb8e6d52SEivind Eklund /*
317eb8e6d52SEivind Eklund  * Zones for:
318eb8e6d52SEivind Eklund  * 	kaio	Per process async io info
31939314b7dSJohn Baldwin  *	aiop	async io process data
320eb8e6d52SEivind Eklund  *	aiocb	async io jobs
321eb8e6d52SEivind Eklund  *	aiol	list io job pointer - internal to aio_suspend XXX
322eb8e6d52SEivind Eklund  *	aiolio	list io jobs
323eb8e6d52SEivind Eklund  */
324c897b813SJeff Roberson static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
325fd3bf775SJohn Dyson 
326eb8e6d52SEivind Eklund /* kqueue filters for aio */
327e76d823bSRobert Watson static struct filterops aio_filtops = {
328e76d823bSRobert Watson 	.f_isfd = 0,
329e76d823bSRobert Watson 	.f_attach = filt_aioattach,
330e76d823bSRobert Watson 	.f_detach = filt_aiodetach,
331e76d823bSRobert Watson 	.f_event = filt_aio,
332e76d823bSRobert Watson };
333e76d823bSRobert Watson static struct filterops lio_filtops = {
334e76d823bSRobert Watson 	.f_isfd = 0,
335e76d823bSRobert Watson 	.f_attach = filt_lioattach,
336e76d823bSRobert Watson 	.f_detach = filt_liodetach,
337e76d823bSRobert Watson 	.f_event = filt_lio
338e76d823bSRobert Watson };
33921d56e9cSAlfred Perlstein 
34075b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag;
34175b8b3b2SJohn Baldwin 
342c85650caSJohn Baldwin TASKQUEUE_DEFINE_THREAD(aiod_kick);
3431ce91824SDavid Xu 
344eb8e6d52SEivind Eklund /*
345eb8e6d52SEivind Eklund  * Main operations function for use as a kernel module.
346eb8e6d52SEivind Eklund  */
34721d56e9cSAlfred Perlstein static int
34821d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg)
34921d56e9cSAlfred Perlstein {
35021d56e9cSAlfred Perlstein 	int error = 0;
35121d56e9cSAlfred Perlstein 
35221d56e9cSAlfred Perlstein 	switch (cmd) {
35321d56e9cSAlfred Perlstein 	case MOD_LOAD:
35421d56e9cSAlfred Perlstein 		aio_onceonly();
35521d56e9cSAlfred Perlstein 		break;
35621d56e9cSAlfred Perlstein 	case MOD_SHUTDOWN:
35721d56e9cSAlfred Perlstein 		break;
35821d56e9cSAlfred Perlstein 	default:
359*f3215338SJohn Baldwin 		error = EOPNOTSUPP;
36021d56e9cSAlfred Perlstein 		break;
36121d56e9cSAlfred Perlstein 	}
36221d56e9cSAlfred Perlstein 	return (error);
36321d56e9cSAlfred Perlstein }
36421d56e9cSAlfred Perlstein 
36521d56e9cSAlfred Perlstein static moduledata_t aio_mod = {
36621d56e9cSAlfred Perlstein 	"aio",
36721d56e9cSAlfred Perlstein 	&aio_modload,
36821d56e9cSAlfred Perlstein 	NULL
36921d56e9cSAlfred Perlstein };
37021d56e9cSAlfred Perlstein 
371723d37c0SKonstantin Belousov static struct syscall_helper_data aio_syscalls[] = {
372723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_cancel),
373723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_error),
374723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_fsync),
3756160e12cSGleb Smirnoff 	SYSCALL_INIT_HELPER(aio_mlock),
376723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_read),
377723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_return),
378723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_suspend),
379723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_waitcomplete),
380723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(aio_write),
381723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(lio_listio),
382723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(oaio_read),
383723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(oaio_write),
384723d37c0SKonstantin Belousov 	SYSCALL_INIT_HELPER(olio_listio),
385723d37c0SKonstantin Belousov 	SYSCALL_INIT_LAST
386723d37c0SKonstantin Belousov };
387723d37c0SKonstantin Belousov 
388723d37c0SKonstantin Belousov #ifdef COMPAT_FREEBSD32
389723d37c0SKonstantin Belousov #include <sys/mount.h>
390723d37c0SKonstantin Belousov #include <sys/socket.h>
391723d37c0SKonstantin Belousov #include <compat/freebsd32/freebsd32.h>
392723d37c0SKonstantin Belousov #include <compat/freebsd32/freebsd32_proto.h>
393723d37c0SKonstantin Belousov #include <compat/freebsd32/freebsd32_signal.h>
394723d37c0SKonstantin Belousov #include <compat/freebsd32/freebsd32_syscall.h>
395723d37c0SKonstantin Belousov #include <compat/freebsd32/freebsd32_util.h>
396723d37c0SKonstantin Belousov 
397723d37c0SKonstantin Belousov static struct syscall_helper_data aio32_syscalls[] = {
398723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_return),
399723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_suspend),
400723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_cancel),
401723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_error),
402723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_fsync),
4036160e12cSGleb Smirnoff 	SYSCALL32_INIT_HELPER(freebsd32_aio_mlock),
404723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_read),
405723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_write),
406723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_aio_waitcomplete),
407723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_lio_listio),
408723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_oaio_read),
409723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_oaio_write),
410723d37c0SKonstantin Belousov 	SYSCALL32_INIT_HELPER(freebsd32_olio_listio),
411723d37c0SKonstantin Belousov 	SYSCALL_INIT_LAST
412723d37c0SKonstantin Belousov };
413723d37c0SKonstantin Belousov #endif
41421d56e9cSAlfred Perlstein 
41521d56e9cSAlfred Perlstein DECLARE_MODULE(aio, aio_mod,
41621d56e9cSAlfred Perlstein 	SI_SUB_VFS, SI_ORDER_ANY);
41721d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1);
41821d56e9cSAlfred Perlstein 
419fd3bf775SJohn Dyson /*
4202244ea07SJohn Dyson  * Startup initialization
4212244ea07SJohn Dyson  */
422723d37c0SKonstantin Belousov static int
42321d56e9cSAlfred Perlstein aio_onceonly(void)
424fd3bf775SJohn Dyson {
425723d37c0SKonstantin Belousov 	int error;
42621d56e9cSAlfred Perlstein 
42775b8b3b2SJohn Baldwin 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
42875b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
4290dd6c035SJohn Baldwin 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
4300dd6c035SJohn Baldwin 	    NULL, EVENTHANDLER_PRI_ANY);
43121d56e9cSAlfred Perlstein 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
43269cd28daSDoug Ambrisko 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
4332244ea07SJohn Dyson 	TAILQ_INIT(&aio_freeproc);
4341ce91824SDavid Xu 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
4351ce91824SDavid Xu 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
4362244ea07SJohn Dyson 	TAILQ_INIT(&aio_jobs);
4371ce91824SDavid Xu 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
438c897b813SJeff Roberson 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
439c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
44039314b7dSJohn Baldwin 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
441c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4425652770dSJohn Baldwin 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
443c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
444c897b813SJeff Roberson 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
445c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4461ce91824SDavid Xu 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
447c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
44884af4da6SJohn Dyson 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
449fd3bf775SJohn Dyson 	jobrefid = 1;
450c7047e52SGarrett Wollman 	async_io_version = _POSIX_VERSION;
451c844abc9SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
45286d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
45386d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
454723d37c0SKonstantin Belousov 
455e015b1abSMateusz Guzik 	error = syscall_helper_register(aio_syscalls, SY_THR_STATIC_KLD);
456723d37c0SKonstantin Belousov 	if (error)
457723d37c0SKonstantin Belousov 		return (error);
458723d37c0SKonstantin Belousov #ifdef COMPAT_FREEBSD32
459e015b1abSMateusz Guzik 	error = syscall32_helper_register(aio32_syscalls, SY_THR_STATIC_KLD);
460723d37c0SKonstantin Belousov 	if (error)
461723d37c0SKonstantin Belousov 		return (error);
462723d37c0SKonstantin Belousov #endif
463723d37c0SKonstantin Belousov 	return (0);
4642244ea07SJohn Dyson }
4652244ea07SJohn Dyson 
466eb8e6d52SEivind Eklund /*
467bfbbc4aaSJason Evans  * Init the per-process aioinfo structure.  The aioinfo limits are set
468bfbbc4aaSJason Evans  * per-process for user limit (resource) management.
4692244ea07SJohn Dyson  */
4706a1162d4SAlexander Leidinger void
471fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p)
472fd3bf775SJohn Dyson {
4732244ea07SJohn Dyson 	struct kaioinfo *ki;
474ac41f2efSAlfred Perlstein 
475a163d034SWarner Losh 	ki = uma_zalloc(kaio_zone, M_WAITOK);
4769889bbacSKonstantin Belousov 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
47784af4da6SJohn Dyson 	ki->kaio_flags = 0;
478a624e84fSJohn Dyson 	ki->kaio_maxactive_count = max_aio_per_proc;
4792244ea07SJohn Dyson 	ki->kaio_active_count = 0;
480a624e84fSJohn Dyson 	ki->kaio_qallowed_count = max_aio_queue_per_proc;
4811ce91824SDavid Xu 	ki->kaio_count = 0;
48284af4da6SJohn Dyson 	ki->kaio_ballowed_count = max_buf_aio;
483fd3bf775SJohn Dyson 	ki->kaio_buffer_count = 0;
4841ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_all);
4851ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_done);
4862244ea07SJohn Dyson 	TAILQ_INIT(&ki->kaio_jobqueue);
48784af4da6SJohn Dyson 	TAILQ_INIT(&ki->kaio_liojoblist);
48899eee864SDavid Xu 	TAILQ_INIT(&ki->kaio_syncqueue);
489*f3215338SJohn Baldwin 	TAILQ_INIT(&ki->kaio_syncready);
49099eee864SDavid Xu 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
491*f3215338SJohn Baldwin 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
4923999ebe3SAlan Cox 	PROC_LOCK(p);
4933999ebe3SAlan Cox 	if (p->p_aioinfo == NULL) {
4943999ebe3SAlan Cox 		p->p_aioinfo = ki;
4953999ebe3SAlan Cox 		PROC_UNLOCK(p);
4963999ebe3SAlan Cox 	} else {
4973999ebe3SAlan Cox 		PROC_UNLOCK(p);
498759ccccaSDavid Xu 		mtx_destroy(&ki->kaio_mtx);
4993999ebe3SAlan Cox 		uma_zfree(kaio_zone, ki);
5002244ea07SJohn Dyson 	}
501bfbbc4aaSJason Evans 
50222035f47SOleksandr Tymoshenko 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
5031ce91824SDavid Xu 		aio_newproc(NULL);
5042244ea07SJohn Dyson }
5052244ea07SJohn Dyson 
5064c0fb2cfSDavid Xu static int
5074c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
5084c0fb2cfSDavid Xu {
509cf7d9a8cSDavid Xu 	struct thread *td;
510cf7d9a8cSDavid Xu 	int error;
511759ccccaSDavid Xu 
512cf7d9a8cSDavid Xu 	error = sigev_findtd(p, sigev, &td);
513cf7d9a8cSDavid Xu 	if (error)
514cf7d9a8cSDavid Xu 		return (error);
5154c0fb2cfSDavid Xu 	if (!KSI_ONQ(ksi)) {
516cf7d9a8cSDavid Xu 		ksiginfo_set_sigev(ksi, sigev);
5174c0fb2cfSDavid Xu 		ksi->ksi_code = SI_ASYNCIO;
5184c0fb2cfSDavid Xu 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
519cf7d9a8cSDavid Xu 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
5204c0fb2cfSDavid Xu 	}
521759ccccaSDavid Xu 	PROC_UNLOCK(p);
522cf7d9a8cSDavid Xu 	return (error);
5234c0fb2cfSDavid Xu }
5244c0fb2cfSDavid Xu 
5252244ea07SJohn Dyson /*
526bfbbc4aaSJason Evans  * Free a job entry.  Wait for completion if it is currently active, but don't
527bfbbc4aaSJason Evans  * delay forever.  If we delay, we return a flag that says that we have to
528bfbbc4aaSJason Evans  * restart the queue scan.
5292244ea07SJohn Dyson  */
53088ed460eSAlan Cox static int
5315652770dSJohn Baldwin aio_free_entry(struct kaiocb *job)
532fd3bf775SJohn Dyson {
5332244ea07SJohn Dyson 	struct kaioinfo *ki;
5341ce91824SDavid Xu 	struct aioliojob *lj;
5352244ea07SJohn Dyson 	struct proc *p;
5362244ea07SJohn Dyson 
5375652770dSJohn Baldwin 	p = job->userproc;
5381ce91824SDavid Xu 	MPASS(curproc == p);
5392244ea07SJohn Dyson 	ki = p->p_aioinfo;
5401ce91824SDavid Xu 	MPASS(ki != NULL);
5411ce91824SDavid Xu 
542759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
543*f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
544759ccccaSDavid Xu 
5451ce91824SDavid Xu 	atomic_subtract_int(&num_queue_count, 1);
5461ce91824SDavid Xu 
5471ce91824SDavid Xu 	ki->kaio_count--;
5481ce91824SDavid Xu 	MPASS(ki->kaio_count >= 0);
5491ce91824SDavid Xu 
5505652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
5515652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
55227b8220dSDavid Xu 
5535652770dSJohn Baldwin 	lj = job->lio;
55484af4da6SJohn Dyson 	if (lj) {
5551ce91824SDavid Xu 		lj->lioj_count--;
5561ce91824SDavid Xu 		lj->lioj_finished_count--;
5571ce91824SDavid Xu 
558a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
5591ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
5601ce91824SDavid Xu 			/* lio is going away, we need to destroy any knotes */
5611ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
562759ccccaSDavid Xu 			PROC_LOCK(p);
5631ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
564759ccccaSDavid Xu 			PROC_UNLOCK(p);
5651ce91824SDavid Xu 			uma_zfree(aiolio_zone, lj);
56684af4da6SJohn Dyson 		}
56784af4da6SJohn Dyson 	}
5681ce91824SDavid Xu 
5695652770dSJohn Baldwin 	/* job is going away, we need to destroy any knotes */
5705652770dSJohn Baldwin 	knlist_delete(&job->klist, curthread, 1);
571759ccccaSDavid Xu 	PROC_LOCK(p);
5725652770dSJohn Baldwin 	sigqueue_take(&job->ksi);
573759ccccaSDavid Xu 	PROC_UNLOCK(p);
5741ce91824SDavid Xu 
5755652770dSJohn Baldwin 	MPASS(job->bp == NULL);
576759ccccaSDavid Xu 	AIO_UNLOCK(ki);
5772a522eb9SJohn Baldwin 
5782a522eb9SJohn Baldwin 	/*
5792a522eb9SJohn Baldwin 	 * The thread argument here is used to find the owning process
5802a522eb9SJohn Baldwin 	 * and is also passed to fo_close() which may pass it to various
5812a522eb9SJohn Baldwin 	 * places such as devsw close() routines.  Because of that, we
5822a522eb9SJohn Baldwin 	 * need a thread pointer from the process owning the job that is
5832a522eb9SJohn Baldwin 	 * persistent and won't disappear out from under us or move to
5842a522eb9SJohn Baldwin 	 * another process.
5852a522eb9SJohn Baldwin 	 *
5862a522eb9SJohn Baldwin 	 * Currently, all the callers of this function call it to remove
5875652770dSJohn Baldwin 	 * a kaiocb from the current process' job list either via a
5882a522eb9SJohn Baldwin 	 * syscall or due to the current process calling exit() or
5892a522eb9SJohn Baldwin 	 * execve().  Thus, we know that p == curproc.  We also know that
5902a522eb9SJohn Baldwin 	 * curthread can't exit since we are curthread.
5912a522eb9SJohn Baldwin 	 *
5922a522eb9SJohn Baldwin 	 * Therefore, we use curthread as the thread to pass to
5932a522eb9SJohn Baldwin 	 * knlist_delete().  This does mean that it is possible for the
5942a522eb9SJohn Baldwin 	 * thread pointer at close time to differ from the thread pointer
5952a522eb9SJohn Baldwin 	 * at open time, but this is already true of file descriptors in
5962a522eb9SJohn Baldwin 	 * a multithreaded process.
597b40ce416SJulian Elischer 	 */
5985652770dSJohn Baldwin 	if (job->fd_file)
5995652770dSJohn Baldwin 		fdrop(job->fd_file, curthread);
6005652770dSJohn Baldwin 	crfree(job->cred);
6015652770dSJohn Baldwin 	uma_zfree(aiocb_zone, job);
602759ccccaSDavid Xu 	AIO_LOCK(ki);
6031ce91824SDavid Xu 
604ac41f2efSAlfred Perlstein 	return (0);
6052244ea07SJohn Dyson }
6062244ea07SJohn Dyson 
607993182e5SAlexander Leidinger static void
6080dd6c035SJohn Baldwin aio_proc_rundown_exec(void *arg, struct proc *p,
6090dd6c035SJohn Baldwin     struct image_params *imgp __unused)
610993182e5SAlexander Leidinger {
611993182e5SAlexander Leidinger    	aio_proc_rundown(arg, p);
612993182e5SAlexander Leidinger }
613993182e5SAlexander Leidinger 
614*f3215338SJohn Baldwin static int
615*f3215338SJohn Baldwin aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
616*f3215338SJohn Baldwin {
617*f3215338SJohn Baldwin 	aio_cancel_fn_t *func;
618*f3215338SJohn Baldwin 	int cancelled;
619*f3215338SJohn Baldwin 
620*f3215338SJohn Baldwin 	AIO_LOCK_ASSERT(ki, MA_OWNED);
621*f3215338SJohn Baldwin 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
622*f3215338SJohn Baldwin 		return (0);
623*f3215338SJohn Baldwin 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
624*f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLED;
625*f3215338SJohn Baldwin 
626*f3215338SJohn Baldwin 	func = job->cancel_fn;
627*f3215338SJohn Baldwin 
628*f3215338SJohn Baldwin 	/*
629*f3215338SJohn Baldwin 	 * If there is no cancel routine, just leave the job marked as
630*f3215338SJohn Baldwin 	 * cancelled.  The job should be in active use by a caller who
631*f3215338SJohn Baldwin 	 * should complete it normally or when it fails to install a
632*f3215338SJohn Baldwin 	 * cancel routine.
633*f3215338SJohn Baldwin 	 */
634*f3215338SJohn Baldwin 	if (func == NULL)
635*f3215338SJohn Baldwin 		return (0);
636*f3215338SJohn Baldwin 
637*f3215338SJohn Baldwin 	/*
638*f3215338SJohn Baldwin 	 * Set the CANCELLING flag so that aio_complete() will defer
639*f3215338SJohn Baldwin 	 * completions of this job.  This prevents the job from being
640*f3215338SJohn Baldwin 	 * freed out from under the cancel callback.  After the
641*f3215338SJohn Baldwin 	 * callback any deferred completion (whether from the callback
642*f3215338SJohn Baldwin 	 * or any other source) will be completed.
643*f3215338SJohn Baldwin 	 */
644*f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLING;
645*f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
646*f3215338SJohn Baldwin 	func(job);
647*f3215338SJohn Baldwin 	AIO_LOCK(ki);
648*f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_CANCELLING;
649*f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
650*f3215338SJohn Baldwin 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
651*f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
652*f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
653*f3215338SJohn Baldwin 	} else {
654*f3215338SJohn Baldwin 		/*
655*f3215338SJohn Baldwin 		 * The cancel callback might have scheduled an
656*f3215338SJohn Baldwin 		 * operation to cancel this request, but it is
657*f3215338SJohn Baldwin 		 * only counted as cancelled if the request is
658*f3215338SJohn Baldwin 		 * cancelled when the callback returns.
659*f3215338SJohn Baldwin 		 */
660*f3215338SJohn Baldwin 		cancelled = 0;
661*f3215338SJohn Baldwin 	}
662*f3215338SJohn Baldwin 	return (cancelled);
663*f3215338SJohn Baldwin }
664*f3215338SJohn Baldwin 
6652244ea07SJohn Dyson /*
6662244ea07SJohn Dyson  * Rundown the jobs for a given process.
6672244ea07SJohn Dyson  */
66821d56e9cSAlfred Perlstein static void
66975b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p)
670fd3bf775SJohn Dyson {
6712244ea07SJohn Dyson 	struct kaioinfo *ki;
6721ce91824SDavid Xu 	struct aioliojob *lj;
6735652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
6742244ea07SJohn Dyson 
6752a522eb9SJohn Baldwin 	KASSERT(curthread->td_proc == p,
6762a522eb9SJohn Baldwin 	    ("%s: called on non-curproc", __func__));
6772244ea07SJohn Dyson 	ki = p->p_aioinfo;
6782244ea07SJohn Dyson 	if (ki == NULL)
6792244ea07SJohn Dyson 		return;
6802244ea07SJohn Dyson 
681759ccccaSDavid Xu 	AIO_LOCK(ki);
68227b8220dSDavid Xu 	ki->kaio_flags |= KAIO_RUNDOWN;
6831ce91824SDavid Xu 
6841ce91824SDavid Xu restart:
685a624e84fSJohn Dyson 
686bfbbc4aaSJason Evans 	/*
6871ce91824SDavid Xu 	 * Try to cancel all pending requests. This code simulates
6881ce91824SDavid Xu 	 * aio_cancel on all pending I/O requests.
689bfbbc4aaSJason Evans 	 */
6905652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
691*f3215338SJohn Baldwin 		aio_cancel_job(p, ki, job);
6922244ea07SJohn Dyson 	}
69384af4da6SJohn Dyson 
6941ce91824SDavid Xu 	/* Wait for all running I/O to be finished */
695*f3215338SJohn Baldwin 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
69684af4da6SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
697759ccccaSDavid Xu 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
6981ce91824SDavid Xu 		goto restart;
69984af4da6SJohn Dyson 	}
70084af4da6SJohn Dyson 
7011ce91824SDavid Xu 	/* Free all completed I/O requests. */
7025652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
7035652770dSJohn Baldwin 		aio_free_entry(job);
70484af4da6SJohn Dyson 
7051ce91824SDavid Xu 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
706a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
70784af4da6SJohn Dyson 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
7081ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
709759ccccaSDavid Xu 			PROC_LOCK(p);
7101ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
711759ccccaSDavid Xu 			PROC_UNLOCK(p);
712c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
713f4f0ecefSJohn Dyson 		} else {
714a9bf5e37SDavid Xu 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
715a9bf5e37SDavid Xu 			    lj->lioj_count, lj->lioj_finished_count);
71684af4da6SJohn Dyson 		}
717f4f0ecefSJohn Dyson 	}
718759ccccaSDavid Xu 	AIO_UNLOCK(ki);
719c85650caSJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
720*f3215338SJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
7215114048bSKonstantin Belousov 	mtx_destroy(&ki->kaio_mtx);
722c897b813SJeff Roberson 	uma_zfree(kaio_zone, ki);
723a624e84fSJohn Dyson 	p->p_aioinfo = NULL;
7242244ea07SJohn Dyson }
7252244ea07SJohn Dyson 
7262244ea07SJohn Dyson /*
727bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
7282244ea07SJohn Dyson  */
7295652770dSJohn Baldwin static struct kaiocb *
73039314b7dSJohn Baldwin aio_selectjob(struct aioproc *aiop)
731fd3bf775SJohn Dyson {
7325652770dSJohn Baldwin 	struct kaiocb *job;
733bfbbc4aaSJason Evans 	struct kaioinfo *ki;
734bfbbc4aaSJason Evans 	struct proc *userp;
7352244ea07SJohn Dyson 
7361ce91824SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
737*f3215338SJohn Baldwin restart:
7385652770dSJohn Baldwin 	TAILQ_FOREACH(job, &aio_jobs, list) {
7395652770dSJohn Baldwin 		userp = job->userproc;
7402244ea07SJohn Dyson 		ki = userp->p_aioinfo;
7412244ea07SJohn Dyson 
7422244ea07SJohn Dyson 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
7435652770dSJohn Baldwin 			TAILQ_REMOVE(&aio_jobs, job, list);
744*f3215338SJohn Baldwin 			if (!aio_clear_cancel_function(job))
745*f3215338SJohn Baldwin 				goto restart;
746*f3215338SJohn Baldwin 
7471ce91824SDavid Xu 			/* Account for currently active jobs. */
7481ce91824SDavid Xu 			ki->kaio_active_count++;
7491ce91824SDavid Xu 			break;
7501ce91824SDavid Xu 		}
7511ce91824SDavid Xu 	}
7525652770dSJohn Baldwin 	return (job);
7532244ea07SJohn Dyson }
7542244ea07SJohn Dyson 
7552244ea07SJohn Dyson /*
7560dd6c035SJohn Baldwin  * Move all data to a permanent storage device.  This code
7570dd6c035SJohn Baldwin  * simulates the fsync syscall.
75899eee864SDavid Xu  */
75999eee864SDavid Xu static int
76099eee864SDavid Xu aio_fsync_vnode(struct thread *td, struct vnode *vp)
76199eee864SDavid Xu {
76299eee864SDavid Xu 	struct mount *mp;
76399eee864SDavid Xu 	int error;
76499eee864SDavid Xu 
76599eee864SDavid Xu 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
76699eee864SDavid Xu 		goto drop;
767cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
76899eee864SDavid Xu 	if (vp->v_object != NULL) {
76989f6b863SAttilio Rao 		VM_OBJECT_WLOCK(vp->v_object);
77099eee864SDavid Xu 		vm_object_page_clean(vp->v_object, 0, 0, 0);
77189f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(vp->v_object);
77299eee864SDavid Xu 	}
77399eee864SDavid Xu 	error = VOP_FSYNC(vp, MNT_WAIT, td);
77499eee864SDavid Xu 
77522db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
77699eee864SDavid Xu 	vn_finished_write(mp);
77799eee864SDavid Xu drop:
77899eee864SDavid Xu 	return (error);
77999eee864SDavid Xu }
78099eee864SDavid Xu 
78199eee864SDavid Xu /*
782f95c13dbSGleb Smirnoff  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
783f95c13dbSGleb Smirnoff  * does the I/O request for the non-physio version of the operations.  The
784f95c13dbSGleb Smirnoff  * normal vn operations are used, and this code should work in all instances
785f95c13dbSGleb Smirnoff  * for every type of file, including pipes, sockets, fifos, and regular files.
7861ce91824SDavid Xu  *
7871aa4c324SDavid Xu  * XXX I don't think it works well for sockets, pipes, and FIFOs.
7882244ea07SJohn Dyson  */
78988ed460eSAlan Cox static void
7905652770dSJohn Baldwin aio_process_rw(struct kaiocb *job)
791fd3bf775SJohn Dyson {
792f8f750c5SRobert Watson 	struct ucred *td_savedcred;
793b40ce416SJulian Elischer 	struct thread *td;
7942244ea07SJohn Dyson 	struct aiocb *cb;
7952244ea07SJohn Dyson 	struct file *fp;
7962244ea07SJohn Dyson 	struct uio auio;
7972244ea07SJohn Dyson 	struct iovec aiov;
7982244ea07SJohn Dyson 	int cnt;
7992244ea07SJohn Dyson 	int error;
800fd3bf775SJohn Dyson 	int oublock_st, oublock_end;
801fd3bf775SJohn Dyson 	int inblock_st, inblock_end;
8022244ea07SJohn Dyson 
8035652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
8045652770dSJohn Baldwin 	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
8055652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
806f95c13dbSGleb Smirnoff 
807*f3215338SJohn Baldwin 	aio_switch_vmspace(job);
808b40ce416SJulian Elischer 	td = curthread;
809f8f750c5SRobert Watson 	td_savedcred = td->td_ucred;
8105652770dSJohn Baldwin 	td->td_ucred = job->cred;
8115652770dSJohn Baldwin 	cb = &job->uaiocb;
8125652770dSJohn Baldwin 	fp = job->fd_file;
813bfbbc4aaSJason Evans 
81491369fc7SAlan Cox 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
8152244ea07SJohn Dyson 	aiov.iov_len = cb->aio_nbytes;
8162244ea07SJohn Dyson 
8172244ea07SJohn Dyson 	auio.uio_iov = &aiov;
8182244ea07SJohn Dyson 	auio.uio_iovcnt = 1;
8199b16adc1SAlan Cox 	auio.uio_offset = cb->aio_offset;
8202244ea07SJohn Dyson 	auio.uio_resid = cb->aio_nbytes;
8212244ea07SJohn Dyson 	cnt = cb->aio_nbytes;
8222244ea07SJohn Dyson 	auio.uio_segflg = UIO_USERSPACE;
823b40ce416SJulian Elischer 	auio.uio_td = td;
8242244ea07SJohn Dyson 
8251c4bcd05SJeff Roberson 	inblock_st = td->td_ru.ru_inblock;
8261c4bcd05SJeff Roberson 	oublock_st = td->td_ru.ru_oublock;
827279d7226SMatthew Dillon 	/*
828a9bf5e37SDavid Xu 	 * aio_aqueue() acquires a reference to the file that is
8299b16adc1SAlan Cox 	 * released in aio_free_entry().
830279d7226SMatthew Dillon 	 */
8312244ea07SJohn Dyson 	if (cb->aio_lio_opcode == LIO_READ) {
8322244ea07SJohn Dyson 		auio.uio_rw = UIO_READ;
8335114048bSKonstantin Belousov 		if (auio.uio_resid == 0)
8345114048bSKonstantin Belousov 			error = 0;
8355114048bSKonstantin Belousov 		else
836b40ce416SJulian Elischer 			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8372244ea07SJohn Dyson 	} else {
8386d53aa62SDavid Xu 		if (fp->f_type == DTYPE_VNODE)
8396d53aa62SDavid Xu 			bwillwrite();
8402244ea07SJohn Dyson 		auio.uio_rw = UIO_WRITE;
841b40ce416SJulian Elischer 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8422244ea07SJohn Dyson 	}
8431c4bcd05SJeff Roberson 	inblock_end = td->td_ru.ru_inblock;
8441c4bcd05SJeff Roberson 	oublock_end = td->td_ru.ru_oublock;
845fd3bf775SJohn Dyson 
8465652770dSJohn Baldwin 	job->inputcharge = inblock_end - inblock_st;
8475652770dSJohn Baldwin 	job->outputcharge = oublock_end - oublock_st;
8482244ea07SJohn Dyson 
849bfbbc4aaSJason Evans 	if ((error) && (auio.uio_resid != cnt)) {
8502244ea07SJohn Dyson 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
8512244ea07SJohn Dyson 			error = 0;
85219eb87d2SJohn Baldwin 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
8535652770dSJohn Baldwin 			PROC_LOCK(job->userproc);
8545652770dSJohn Baldwin 			kern_psignal(job->userproc, SIGPIPE);
8555652770dSJohn Baldwin 			PROC_UNLOCK(job->userproc);
85619eb87d2SJohn Baldwin 		}
8572244ea07SJohn Dyson 	}
8582244ea07SJohn Dyson 
8592244ea07SJohn Dyson 	cnt -= auio.uio_resid;
860f8f750c5SRobert Watson 	td->td_ucred = td_savedcred;
861*f3215338SJohn Baldwin 	aio_complete(job, cnt, error);
8622244ea07SJohn Dyson }
8632244ea07SJohn Dyson 
86469cd28daSDoug Ambrisko static void
8655652770dSJohn Baldwin aio_process_sync(struct kaiocb *job)
866f95c13dbSGleb Smirnoff {
867f95c13dbSGleb Smirnoff 	struct thread *td = curthread;
868f95c13dbSGleb Smirnoff 	struct ucred *td_savedcred = td->td_ucred;
8695652770dSJohn Baldwin 	struct file *fp = job->fd_file;
870f95c13dbSGleb Smirnoff 	int error = 0;
871f95c13dbSGleb Smirnoff 
8725652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
8735652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
874f95c13dbSGleb Smirnoff 
8755652770dSJohn Baldwin 	td->td_ucred = job->cred;
876f95c13dbSGleb Smirnoff 	if (fp->f_vnode != NULL)
877f95c13dbSGleb Smirnoff 		error = aio_fsync_vnode(td, fp->f_vnode);
878f95c13dbSGleb Smirnoff 	td->td_ucred = td_savedcred;
879*f3215338SJohn Baldwin 	aio_complete(job, 0, error);
880f95c13dbSGleb Smirnoff }
881f95c13dbSGleb Smirnoff 
882f95c13dbSGleb Smirnoff static void
8835652770dSJohn Baldwin aio_process_mlock(struct kaiocb *job)
8846160e12cSGleb Smirnoff {
8855652770dSJohn Baldwin 	struct aiocb *cb = &job->uaiocb;
8866160e12cSGleb Smirnoff 	int error;
8876160e12cSGleb Smirnoff 
8885652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
8895652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
8906160e12cSGleb Smirnoff 
891*f3215338SJohn Baldwin 	aio_switch_vmspace(job);
8925652770dSJohn Baldwin 	error = vm_mlock(job->userproc, job->cred,
8936160e12cSGleb Smirnoff 	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
894*f3215338SJohn Baldwin 	aio_complete(job, 0, error);
8956160e12cSGleb Smirnoff }
8966160e12cSGleb Smirnoff 
8976160e12cSGleb Smirnoff static void
898*f3215338SJohn Baldwin aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
8991ce91824SDavid Xu {
9001ce91824SDavid Xu 	struct aioliojob *lj;
90169cd28daSDoug Ambrisko 	struct kaioinfo *ki;
9025652770dSJohn Baldwin 	struct kaiocb *sjob, *sjobn;
9031ce91824SDavid Xu 	int lj_done;
904*f3215338SJohn Baldwin 	bool schedule_fsync;
90569cd28daSDoug Ambrisko 
90669cd28daSDoug Ambrisko 	ki = userp->p_aioinfo;
907759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
9085652770dSJohn Baldwin 	lj = job->lio;
90969cd28daSDoug Ambrisko 	lj_done = 0;
91069cd28daSDoug Ambrisko 	if (lj) {
9111ce91824SDavid Xu 		lj->lioj_finished_count++;
9121ce91824SDavid Xu 		if (lj->lioj_count == lj->lioj_finished_count)
91369cd28daSDoug Ambrisko 			lj_done = 1;
91469cd28daSDoug Ambrisko 	}
9155652770dSJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
916*f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
91727b8220dSDavid Xu 
91827b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_RUNDOWN)
91927b8220dSDavid Xu 		goto notification_done;
92027b8220dSDavid Xu 
9215652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
9225652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
9235652770dSJohn Baldwin 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
9241ce91824SDavid Xu 
9255652770dSJohn Baldwin 	KNOTE_LOCKED(&job->klist, 1);
9261ce91824SDavid Xu 
92769cd28daSDoug Ambrisko 	if (lj_done) {
9281ce91824SDavid Xu 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
92969cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
9301ce91824SDavid Xu 			KNOTE_LOCKED(&lj->klist, 1);
93169cd28daSDoug Ambrisko 		}
9321ce91824SDavid Xu 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
93369cd28daSDoug Ambrisko 		    == LIOJ_SIGNAL
9344c0fb2cfSDavid Xu 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
9354c0fb2cfSDavid Xu 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
9364c0fb2cfSDavid Xu 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
93769cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
93869cd28daSDoug Ambrisko 		}
93969cd28daSDoug Ambrisko 	}
94027b8220dSDavid Xu 
94127b8220dSDavid Xu notification_done:
9425652770dSJohn Baldwin 	if (job->jobflags & KAIOCB_CHECKSYNC) {
943*f3215338SJohn Baldwin 		schedule_fsync = false;
9445652770dSJohn Baldwin 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
9455652770dSJohn Baldwin 			if (job->fd_file == sjob->fd_file &&
9465652770dSJohn Baldwin 			    job->seqno < sjob->seqno) {
9475652770dSJohn Baldwin 				if (--sjob->pending == 0) {
9485652770dSJohn Baldwin 					TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
9490dd6c035SJohn Baldwin 					    list);
950*f3215338SJohn Baldwin 					if (!aio_clear_cancel_function(sjob))
951*f3215338SJohn Baldwin 						continue;
952*f3215338SJohn Baldwin 					TAILQ_INSERT_TAIL(&ki->kaio_syncready,
953*f3215338SJohn Baldwin 					    sjob, list);
954*f3215338SJohn Baldwin 					schedule_fsync = true;
95599eee864SDavid Xu 				}
95699eee864SDavid Xu 			}
95799eee864SDavid Xu 		}
958*f3215338SJohn Baldwin 		if (schedule_fsync)
959*f3215338SJohn Baldwin 			taskqueue_enqueue(taskqueue_aiod_kick,
960*f3215338SJohn Baldwin 			    &ki->kaio_sync_task);
96199eee864SDavid Xu 	}
96227b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_WAKEUP) {
96369cd28daSDoug Ambrisko 		ki->kaio_flags &= ~KAIO_WAKEUP;
9641ce91824SDavid Xu 		wakeup(&userp->p_aioinfo);
96569cd28daSDoug Ambrisko 	}
96669cd28daSDoug Ambrisko }
96769cd28daSDoug Ambrisko 
9688a4dc40fSJohn Baldwin static void
969*f3215338SJohn Baldwin aio_schedule_fsync(void *context, int pending)
970*f3215338SJohn Baldwin {
971*f3215338SJohn Baldwin 	struct kaioinfo *ki;
972*f3215338SJohn Baldwin 	struct kaiocb *job;
973*f3215338SJohn Baldwin 
974*f3215338SJohn Baldwin 	ki = context;
975*f3215338SJohn Baldwin 	AIO_LOCK(ki);
976*f3215338SJohn Baldwin 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
977*f3215338SJohn Baldwin 		job = TAILQ_FIRST(&ki->kaio_syncready);
978*f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
979*f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
980*f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
981*f3215338SJohn Baldwin 		AIO_LOCK(ki);
982*f3215338SJohn Baldwin 	}
983*f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
984*f3215338SJohn Baldwin }
985*f3215338SJohn Baldwin 
986*f3215338SJohn Baldwin bool
987*f3215338SJohn Baldwin aio_cancel_cleared(struct kaiocb *job)
988*f3215338SJohn Baldwin {
989*f3215338SJohn Baldwin 	struct kaioinfo *ki;
990*f3215338SJohn Baldwin 
991*f3215338SJohn Baldwin 	/*
992*f3215338SJohn Baldwin 	 * The caller should hold the same queue lock held when
993*f3215338SJohn Baldwin 	 * aio_clear_cancel_function() was called and set this flag
994*f3215338SJohn Baldwin 	 * ensuring this check sees an up-to-date value.  However,
995*f3215338SJohn Baldwin 	 * there is no way to assert that.
996*f3215338SJohn Baldwin 	 */
997*f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
998*f3215338SJohn Baldwin 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
999*f3215338SJohn Baldwin }
1000*f3215338SJohn Baldwin 
1001*f3215338SJohn Baldwin bool
1002*f3215338SJohn Baldwin aio_clear_cancel_function(struct kaiocb *job)
1003*f3215338SJohn Baldwin {
1004*f3215338SJohn Baldwin 	struct kaioinfo *ki;
1005*f3215338SJohn Baldwin 
1006*f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1007*f3215338SJohn Baldwin 	AIO_LOCK(ki);
1008*f3215338SJohn Baldwin 	MPASS(job->cancel_fn != NULL);
1009*f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLING) {
1010*f3215338SJohn Baldwin 		job->jobflags |= KAIOCB_CLEARED;
1011*f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
1012*f3215338SJohn Baldwin 		return (false);
1013*f3215338SJohn Baldwin 	}
1014*f3215338SJohn Baldwin 	job->cancel_fn = NULL;
1015*f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1016*f3215338SJohn Baldwin 	return (true);
1017*f3215338SJohn Baldwin }
1018*f3215338SJohn Baldwin 
1019*f3215338SJohn Baldwin bool
1020*f3215338SJohn Baldwin aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1021*f3215338SJohn Baldwin {
1022*f3215338SJohn Baldwin 	struct kaioinfo *ki;
1023*f3215338SJohn Baldwin 
1024*f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1025*f3215338SJohn Baldwin 	AIO_LOCK(ki);
1026*f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLED) {
1027*f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
1028*f3215338SJohn Baldwin 		return (false);
1029*f3215338SJohn Baldwin 	}
1030*f3215338SJohn Baldwin 	job->cancel_fn = func;
1031*f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1032*f3215338SJohn Baldwin 	return (true);
1033*f3215338SJohn Baldwin }
1034*f3215338SJohn Baldwin 
1035*f3215338SJohn Baldwin void
1036*f3215338SJohn Baldwin aio_complete(struct kaiocb *job, long status, int error)
1037*f3215338SJohn Baldwin {
1038*f3215338SJohn Baldwin 	struct kaioinfo *ki;
1039*f3215338SJohn Baldwin 	struct proc *userp;
1040*f3215338SJohn Baldwin 
1041*f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.error = error;
1042*f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.status = status;
1043*f3215338SJohn Baldwin 
1044*f3215338SJohn Baldwin 	userp = job->userproc;
1045*f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
1046*f3215338SJohn Baldwin 
1047*f3215338SJohn Baldwin 	AIO_LOCK(ki);
1048*f3215338SJohn Baldwin 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1049*f3215338SJohn Baldwin 	    ("duplicate aio_complete"));
1050*f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_FINISHED;
1051*f3215338SJohn Baldwin 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1052*f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1053*f3215338SJohn Baldwin 		aio_bio_done_notify(userp, job);
1054*f3215338SJohn Baldwin 	}
1055*f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1056*f3215338SJohn Baldwin }
1057*f3215338SJohn Baldwin 
1058*f3215338SJohn Baldwin void
1059*f3215338SJohn Baldwin aio_cancel(struct kaiocb *job)
1060*f3215338SJohn Baldwin {
1061*f3215338SJohn Baldwin 
1062*f3215338SJohn Baldwin 	aio_complete(job, -1, ECANCELED);
1063*f3215338SJohn Baldwin }
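/*
 * Sketch of the expected backend usage of this interface (my_cancel_fn
 * is a hypothetical aio_cancel_fn_t):
 *
 *	if (!aio_set_cancel_function(job, my_cancel_fn)) {
 *		aio_cancel(job);	// cancelled before it started
 *		return;
 *	}
 *	... queue or start the request ...
 *	aio_complete(job, nbytes, error);	// when the I/O finishes
 */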
1064*f3215338SJohn Baldwin 
1065*f3215338SJohn Baldwin void
10665652770dSJohn Baldwin aio_switch_vmspace(struct kaiocb *job)
10678a4dc40fSJohn Baldwin {
10688a4dc40fSJohn Baldwin 
10695652770dSJohn Baldwin 	vmspace_switch_aio(job->userproc->p_vmspace);
10708a4dc40fSJohn Baldwin }
10718a4dc40fSJohn Baldwin 
10722244ea07SJohn Dyson /*
1073f95c13dbSGleb Smirnoff  * The AIO daemon.  Most of the actual work is done in aio_process_*,
107484af4da6SJohn Dyson  * but the setup (and address space management) is done in this routine.
10752244ea07SJohn Dyson  */
10762244ea07SJohn Dyson static void
10771ce91824SDavid Xu aio_daemon(void *_id)
10782244ea07SJohn Dyson {
10795652770dSJohn Baldwin 	struct kaiocb *job;
108039314b7dSJohn Baldwin 	struct aioproc *aiop;
1081bfbbc4aaSJason Evans 	struct kaioinfo *ki;
1082*f3215338SJohn Baldwin 	struct proc *p;
10838a4dc40fSJohn Baldwin 	struct vmspace *myvm;
1084b40ce416SJulian Elischer 	struct thread *td = curthread;
10851ce91824SDavid Xu 	int id = (intptr_t)_id;
10862244ea07SJohn Dyson 
10872244ea07SJohn Dyson 	/*
10888a4dc40fSJohn Baldwin 	 * Grab an extra reference on the daemon's vmspace so that it
10898a4dc40fSJohn Baldwin 	 * doesn't get freed by jobs that switch to a different
10908a4dc40fSJohn Baldwin 	 * vmspace.
10912244ea07SJohn Dyson 	 */
10928a4dc40fSJohn Baldwin 	p = td->td_proc;
10938a4dc40fSJohn Baldwin 	myvm = vmspace_acquire_ref(p);
1094fd3bf775SJohn Dyson 
10958a4dc40fSJohn Baldwin 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1096fd3bf775SJohn Dyson 
1097fd3bf775SJohn Dyson 	/*
1098bfbbc4aaSJason Evans 	 * Allocate and ready the aio control info.  There is one aiop structure
1099bfbbc4aaSJason Evans 	 * per daemon.
1100fd3bf775SJohn Dyson 	 */
1101a163d034SWarner Losh 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
110239314b7dSJohn Baldwin 	aiop->aioproc = p;
110339314b7dSJohn Baldwin 	aiop->aioprocflags = 0;
1104bfbbc4aaSJason Evans 
1105fd3bf775SJohn Dyson 	/*
1106fd3bf775SJohn Dyson 	 * Wake up the parent process.  (The parent sleeps to keep from
1107b40ce416SJulian Elischer 	 * blasting away and creating too many daemons.)
1108fd3bf775SJohn Dyson 	 */
11091ce91824SDavid Xu 	sema_post(&aio_newproc_sem);
11102244ea07SJohn Dyson 
11111ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
1112bfbbc4aaSJason Evans 	for (;;) {
1113fd3bf775SJohn Dyson 		/*
1114fd3bf775SJohn Dyson 		 * Take the daemon off the free queue.
1115fd3bf775SJohn Dyson 		 */
111639314b7dSJohn Baldwin 		if (aiop->aioprocflags & AIOP_FREE) {
11172244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
111839314b7dSJohn Baldwin 			aiop->aioprocflags &= ~AIOP_FREE;
11192244ea07SJohn Dyson 		}
11202244ea07SJohn Dyson 
1121fd3bf775SJohn Dyson 		/*
1122bfbbc4aaSJason Evans 		 * Check for jobs.
1123fd3bf775SJohn Dyson 		 */
11245652770dSJohn Baldwin 		while ((job = aio_selectjob(aiop)) != NULL) {
11251ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11262244ea07SJohn Dyson 
1127*f3215338SJohn Baldwin 			ki = job->userproc->p_aioinfo;
1128*f3215338SJohn Baldwin 			job->handle_fn(job);
112984af4da6SJohn Dyson 
11309b84335cSDavid Xu 			mtx_lock(&aio_job_mtx);
11319b84335cSDavid Xu 			/* Decrement the active job count. */
11329b84335cSDavid Xu 			ki->kaio_active_count--;
11332244ea07SJohn Dyson 		}
11342244ea07SJohn Dyson 
1135fd3bf775SJohn Dyson 		/*
1136bfbbc4aaSJason Evans 		 * Disconnect from user address space.
1137fd3bf775SJohn Dyson 		 */
11388a4dc40fSJohn Baldwin 		if (p->p_vmspace != myvm) {
11391ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11408a4dc40fSJohn Baldwin 			vmspace_switch_aio(myvm);
11411ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
11421ce91824SDavid Xu 			/*
11431ce91824SDavid Xu 			 * We have to restart to avoid a race; we only sleep if
11448a4dc40fSJohn Baldwin 			 * no job can be selected.
11451ce91824SDavid Xu 			 */
11461ce91824SDavid Xu 			continue;
1147fd3bf775SJohn Dyson 		}
1148fd3bf775SJohn Dyson 
11491ce91824SDavid Xu 		mtx_assert(&aio_job_mtx, MA_OWNED);
11501ce91824SDavid Xu 
1151fd3bf775SJohn Dyson 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
115239314b7dSJohn Baldwin 		aiop->aioprocflags |= AIOP_FREE;
1153fd3bf775SJohn Dyson 
1154fd3bf775SJohn Dyson 		/*
1155bfbbc4aaSJason Evans 		 * If the daemon is inactive for a long time, allow it to exit,
1156bfbbc4aaSJason Evans 		 * thereby freeing resources.
1157fd3bf775SJohn Dyson 		 */
115839314b7dSJohn Baldwin 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
11598a4dc40fSJohn Baldwin 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
116039314b7dSJohn Baldwin 		    (aiop->aioprocflags & AIOP_FREE) &&
11618a4dc40fSJohn Baldwin 		    num_aio_procs > target_aio_procs)
11628a4dc40fSJohn Baldwin 			break;
11638a4dc40fSJohn Baldwin 	}
1164fd3bf775SJohn Dyson 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
116584af4da6SJohn Dyson 	num_aio_procs--;
11661ce91824SDavid Xu 	mtx_unlock(&aio_job_mtx);
11671ce91824SDavid Xu 	uma_zfree(aiop_zone, aiop);
11681ce91824SDavid Xu 	free_unr(aiod_unr, id);
11698a4dc40fSJohn Baldwin 	vmspace_free(myvm);
11708a4dc40fSJohn Baldwin 
11718a4dc40fSJohn Baldwin 	KASSERT(p->p_vmspace == myvm,
11728a4dc40fSJohn Baldwin 	    ("AIOD: bad vmspace for exiting daemon"));
11738a4dc40fSJohn Baldwin 	KASSERT(myvm->vm_refcnt > 1,
11748a4dc40fSJohn Baldwin 	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
11753745c395SJulian Elischer 	kproc_exit(0);
1176fd3bf775SJohn Dyson }
11772244ea07SJohn Dyson 
11782244ea07SJohn Dyson /*
1179bfbbc4aaSJason Evans  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1180bfbbc4aaSJason Evans  * AIO daemon modifies its environment itself.
11812244ea07SJohn Dyson  */
11822244ea07SJohn Dyson static int
11831ce91824SDavid Xu aio_newproc(int *start)
1184fd3bf775SJohn Dyson {
11852244ea07SJohn Dyson 	int error;
1186c9a970a7SAlan Cox 	struct proc *p;
11871ce91824SDavid Xu 	int id;
11882244ea07SJohn Dyson 
11891ce91824SDavid Xu 	id = alloc_unr(aiod_unr);
11903745c395SJulian Elischer 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
11911ce91824SDavid Xu 		RFNOWAIT, 0, "aiod%d", id);
11921ce91824SDavid Xu 	if (error == 0) {
1193fd3bf775SJohn Dyson 		/*
11941ce91824SDavid Xu 		 * Wait until daemon is started.
1195fd3bf775SJohn Dyson 		 */
11961ce91824SDavid Xu 		sema_wait(&aio_newproc_sem);
11971ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
119884af4da6SJohn Dyson 		num_aio_procs++;
11991ce91824SDavid Xu 		if (start != NULL)
12007f34b521SDavid Xu 			(*start)--;
12011ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
12021ce91824SDavid Xu 	} else {
12031ce91824SDavid Xu 		free_unr(aiod_unr, id);
12041ce91824SDavid Xu 	}
1205ac41f2efSAlfred Perlstein 	return (error);
12062244ea07SJohn Dyson }
12072244ea07SJohn Dyson 
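/*
 * Tuning sketch (userland, not part of this file): the daemon pool that
 * aio_newproc()/aio_daemon() maintain is governed by the vfs.aio sysctls
 * declared elsewhere in this file (max_aio_procs, target_aio_procs,
 * aiod_lifetime, ...).  A minimal reader, assuming those sysctl names:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int maxprocs, curprocs;
 *		size_t len;
 *
 *		len = sizeof(maxprocs);
 *		sysctlbyname("vfs.aio.max_aio_procs", &maxprocs, &len, NULL, 0);
 *		len = sizeof(curprocs);
 *		sysctlbyname("vfs.aio.num_aio_procs", &curprocs, &len, NULL, 0);
 *		printf("%d of %d aio daemons running\n", curprocs, maxprocs);
 *		return (0);
 *	}
 */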
12082244ea07SJohn Dyson /*
120988ed460eSAlan Cox  * Try the high-performance, low-overhead physio method for eligible
121088ed460eSAlan Cox  * VCHR devices.  This method doesn't use an aio helper thread, and
121188ed460eSAlan Cox  * thus has very low overhead.
121288ed460eSAlan Cox  *
1213a9bf5e37SDavid Xu  * Assumes that the caller, aio_aqueue(), has incremented the file
121488ed460eSAlan Cox  * structure's reference count, preventing its deallocation for the
121588ed460eSAlan Cox  * duration of this call.
1216fd3bf775SJohn Dyson  */
121788ed460eSAlan Cox static int
12185652770dSJohn Baldwin aio_qphysio(struct proc *p, struct kaiocb *job)
1219fd3bf775SJohn Dyson {
1220fd3bf775SJohn Dyson 	struct aiocb *cb;
1221fd3bf775SJohn Dyson 	struct file *fp;
1222f743d981SAlexander Motin 	struct bio *bp;
1223f743d981SAlexander Motin 	struct buf *pbuf;
1224fd3bf775SJohn Dyson 	struct vnode *vp;
1225f3215a60SKonstantin Belousov 	struct cdevsw *csw;
1226f3215a60SKonstantin Belousov 	struct cdev *dev;
1227fd3bf775SJohn Dyson 	struct kaioinfo *ki;
1228f743d981SAlexander Motin 	int error, ref, unmap, poff;
1229f743d981SAlexander Motin 	vm_prot_t prot;
1230fd3bf775SJohn Dyson 
12315652770dSJohn Baldwin 	cb = &job->uaiocb;
12325652770dSJohn Baldwin 	fp = job->fd_file;
1233fd3bf775SJohn Dyson 
12346160e12cSGleb Smirnoff 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1235008626c3SPoul-Henning Kamp 		return (-1);
1236fd3bf775SJohn Dyson 
12373b6d9652SPoul-Henning Kamp 	vp = fp->f_vnode;
1238f743d981SAlexander Motin 	if (vp->v_type != VCHR)
1239f582ac06SBrian Feldman 		return (-1);
1240ad8de0f2SDavid Xu 	if (vp->v_bufobj.bo_bsize == 0)
1241ad8de0f2SDavid Xu 		return (-1);
12425d9d81e7SPoul-Henning Kamp 	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1243008626c3SPoul-Henning Kamp 		return (-1);
1244fd3bf775SJohn Dyson 
1245f3215a60SKonstantin Belousov 	ref = 0;
1246f3215a60SKonstantin Belousov 	csw = devvn_refthread(vp, &dev, &ref);
1247f3215a60SKonstantin Belousov 	if (csw == NULL)
1248f3215a60SKonstantin Belousov 		return (ENXIO);
1249f743d981SAlexander Motin 
1250f743d981SAlexander Motin 	if ((csw->d_flags & D_DISK) == 0) {
1251f743d981SAlexander Motin 		error = -1;
1252f743d981SAlexander Motin 		goto unref;
1253f743d981SAlexander Motin 	}
1254f3215a60SKonstantin Belousov 	if (cb->aio_nbytes > dev->si_iosize_max) {
1255f3215a60SKonstantin Belousov 		error = -1;
1256f3215a60SKonstantin Belousov 		goto unref;
1257f3215a60SKonstantin Belousov 	}
1258f3215a60SKonstantin Belousov 
1259f743d981SAlexander Motin 	ki = p->p_aioinfo;
1260f743d981SAlexander Motin 	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
1261f743d981SAlexander Motin 	unmap = ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed);
1262f743d981SAlexander Motin 	if (unmap) {
1263f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS) {
1264f743d981SAlexander Motin 			error = -1;
1265f743d981SAlexander Motin 			goto unref;
1266f743d981SAlexander Motin 		}
1267f743d981SAlexander Motin 	} else {
1268f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS - poff) {
1269f743d981SAlexander Motin 			error = -1;
1270f743d981SAlexander Motin 			goto unref;
1271f743d981SAlexander Motin 		}
1272f743d981SAlexander Motin 		if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
1273f743d981SAlexander Motin 			error = -1;
1274f743d981SAlexander Motin 			goto unref;
1275f743d981SAlexander Motin 		}
1276f743d981SAlexander Motin 	}
12775652770dSJohn Baldwin 	job->bp = bp = g_alloc_bio();
1278f743d981SAlexander Motin 	if (!unmap) {
12795652770dSJohn Baldwin 		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
1280f743d981SAlexander Motin 		BUF_KERNPROC(pbuf);
1281f743d981SAlexander Motin 	}
1282fd3bf775SJohn Dyson 
1283759ccccaSDavid Xu 	AIO_LOCK(ki);
1284f743d981SAlexander Motin 	if (!unmap)
12851ce91824SDavid Xu 		ki->kaio_buffer_count++;
1286759ccccaSDavid Xu 	AIO_UNLOCK(ki);
12871ce91824SDavid Xu 
1288f743d981SAlexander Motin 	bp->bio_length = cb->aio_nbytes;
1289f743d981SAlexander Motin 	bp->bio_bcount = cb->aio_nbytes;
1290f743d981SAlexander Motin 	bp->bio_done = aio_physwakeup;
1291f743d981SAlexander Motin 	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
1292f743d981SAlexander Motin 	bp->bio_offset = cb->aio_offset;
1293f743d981SAlexander Motin 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1294f743d981SAlexander Motin 	bp->bio_dev = dev;
12955652770dSJohn Baldwin 	bp->bio_caller1 = (void *)job;
1296f743d981SAlexander Motin 
1297f743d981SAlexander Motin 	prot = VM_PROT_READ;
1298f743d981SAlexander Motin 	if (cb->aio_lio_opcode == LIO_READ)
1299f743d981SAlexander Motin 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
13005652770dSJohn Baldwin 	if ((job->npages = vm_fault_quick_hold_pages(
1301f743d981SAlexander Motin 	    &curproc->p_vmspace->vm_map,
13025652770dSJohn Baldwin 	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
13035652770dSJohn Baldwin 	    sizeof(job->pages)/sizeof(job->pages[0]))) < 0) {
1304f743d981SAlexander Motin 		error = EFAULT;
1305f743d981SAlexander Motin 		goto doerror;
1306f743d981SAlexander Motin 	}
1307f743d981SAlexander Motin 	if (!unmap) {
1308f743d981SAlexander Motin 		pmap_qenter((vm_offset_t)pbuf->b_data,
13095652770dSJohn Baldwin 		    job->pages, job->npages);
1310f743d981SAlexander Motin 		bp->bio_data = pbuf->b_data + poff;
1311f743d981SAlexander Motin 	} else {
13125652770dSJohn Baldwin 		bp->bio_ma = job->pages;
13135652770dSJohn Baldwin 		bp->bio_ma_n = job->npages;
1314f743d981SAlexander Motin 		bp->bio_ma_offset = poff;
1315f743d981SAlexander Motin 		bp->bio_data = unmapped_buf;
1316f743d981SAlexander Motin 		bp->bio_flags |= BIO_UNMAPPED;
1317f743d981SAlexander Motin 	}
1318f743d981SAlexander Motin 
1319f743d981SAlexander Motin 	if (!unmap)
13201ce91824SDavid Xu 		atomic_add_int(&num_buf_aio, 1);
13211ce91824SDavid Xu 
1322bfbbc4aaSJason Evans 	/* Perform transfer. */
1323f743d981SAlexander Motin 	csw->d_strategy(bp);
1324f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1325ac41f2efSAlfred Perlstein 	return (0);
1326fd3bf775SJohn Dyson 
1327fd3bf775SJohn Dyson doerror:
1328759ccccaSDavid Xu 	AIO_LOCK(ki);
1329f743d981SAlexander Motin 	if (!unmap)
1330fd3bf775SJohn Dyson 		ki->kaio_buffer_count--;
1331759ccccaSDavid Xu 	AIO_UNLOCK(ki);
1332f743d981SAlexander Motin 	if (pbuf) {
1333f743d981SAlexander Motin 		relpbuf(pbuf, NULL);
13345652770dSJohn Baldwin 		job->pbuf = NULL;
1335f743d981SAlexander Motin 	}
1336f743d981SAlexander Motin 	g_destroy_bio(bp);
13375652770dSJohn Baldwin 	job->bp = NULL;
1338f3215a60SKonstantin Belousov unref:
1339f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1340fd3bf775SJohn Dyson 	return (error);
1341fd3bf775SJohn Dyson }
1342fd3bf775SJohn Dyson 
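/*
 * Usage sketch (userland, not part of this file): the physio fast path above
 * is only taken for character (disk) devices when the transfer length is a
 * multiple of the device block size and fits the other limits checked above;
 * otherwise the request falls back to the helper-thread path.  The device
 * path and sizes below are hypothetical examples:
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *	char *buf;
 *	int fd;
 *
 *	fd = open("/dev/ada0", O_RDONLY);	// hypothetical disk device
 *	buf = aligned_alloc(512, 65536);
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = 65536;			// multiple of the sector size
 *	cb.aio_offset = 0;
 *	aio_read(&cb);
 */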
13433858a1f4SJohn Baldwin static int
13443858a1f4SJohn Baldwin convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
13453858a1f4SJohn Baldwin {
13463858a1f4SJohn Baldwin 
13473858a1f4SJohn Baldwin 	/*
13483858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
13493858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
13503858a1f4SJohn Baldwin 	 */
13513858a1f4SJohn Baldwin 	nsig->sigev_notify = osig->sigev_notify;
13523858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
13533858a1f4SJohn Baldwin 	case SIGEV_NONE:
13543858a1f4SJohn Baldwin 		break;
13553858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
13563858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
13573858a1f4SJohn Baldwin 		break;
13583858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
13593858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
13603858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
13613858a1f4SJohn Baldwin 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
13623858a1f4SJohn Baldwin 		break;
13633858a1f4SJohn Baldwin 	default:
13643858a1f4SJohn Baldwin 		return (EINVAL);
13653858a1f4SJohn Baldwin 	}
13663858a1f4SJohn Baldwin 	return (0);
13673858a1f4SJohn Baldwin }
13683858a1f4SJohn Baldwin 
13693858a1f4SJohn Baldwin static int
13703858a1f4SJohn Baldwin aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
13713858a1f4SJohn Baldwin {
13723858a1f4SJohn Baldwin 	struct oaiocb *ojob;
13733858a1f4SJohn Baldwin 	int error;
13743858a1f4SJohn Baldwin 
13753858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
13763858a1f4SJohn Baldwin 	error = copyin(ujob, kjob, sizeof(struct oaiocb));
13773858a1f4SJohn Baldwin 	if (error)
13783858a1f4SJohn Baldwin 		return (error);
13793858a1f4SJohn Baldwin 	ojob = (struct oaiocb *)kjob;
13803858a1f4SJohn Baldwin 	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
13813858a1f4SJohn Baldwin }
13823858a1f4SJohn Baldwin 
13833858a1f4SJohn Baldwin static int
13843858a1f4SJohn Baldwin aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
13853858a1f4SJohn Baldwin {
13863858a1f4SJohn Baldwin 
13873858a1f4SJohn Baldwin 	return (copyin(ujob, kjob, sizeof(struct aiocb)));
13883858a1f4SJohn Baldwin }
13893858a1f4SJohn Baldwin 
13903858a1f4SJohn Baldwin static long
13913858a1f4SJohn Baldwin aiocb_fetch_status(struct aiocb *ujob)
13923858a1f4SJohn Baldwin {
13933858a1f4SJohn Baldwin 
13943858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.status));
13953858a1f4SJohn Baldwin }
13963858a1f4SJohn Baldwin 
13973858a1f4SJohn Baldwin static long
13983858a1f4SJohn Baldwin aiocb_fetch_error(struct aiocb *ujob)
13993858a1f4SJohn Baldwin {
14003858a1f4SJohn Baldwin 
14013858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.error));
14023858a1f4SJohn Baldwin }
14033858a1f4SJohn Baldwin 
14043858a1f4SJohn Baldwin static int
14053858a1f4SJohn Baldwin aiocb_store_status(struct aiocb *ujob, long status)
14063858a1f4SJohn Baldwin {
14073858a1f4SJohn Baldwin 
14083858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.status, status));
14093858a1f4SJohn Baldwin }
14103858a1f4SJohn Baldwin 
14113858a1f4SJohn Baldwin static int
14123858a1f4SJohn Baldwin aiocb_store_error(struct aiocb *ujob, long error)
14133858a1f4SJohn Baldwin {
14143858a1f4SJohn Baldwin 
14153858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.error, error));
14163858a1f4SJohn Baldwin }
14173858a1f4SJohn Baldwin 
14183858a1f4SJohn Baldwin static int
14193858a1f4SJohn Baldwin aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
14203858a1f4SJohn Baldwin {
14213858a1f4SJohn Baldwin 
14223858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
14233858a1f4SJohn Baldwin }
14243858a1f4SJohn Baldwin 
14253858a1f4SJohn Baldwin static int
14263858a1f4SJohn Baldwin aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
14273858a1f4SJohn Baldwin {
14283858a1f4SJohn Baldwin 
14293858a1f4SJohn Baldwin 	return (suword(ujobp, (long)ujob));
14303858a1f4SJohn Baldwin }
14313858a1f4SJohn Baldwin 
14323858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops = {
14333858a1f4SJohn Baldwin 	.copyin = aiocb_copyin,
14343858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14353858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14363858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14373858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14383858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14393858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14403858a1f4SJohn Baldwin };
14413858a1f4SJohn Baldwin 
14423858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops_osigevent = {
14433858a1f4SJohn Baldwin 	.copyin = aiocb_copyin_old_sigevent,
14443858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14453858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14463858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14473858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14483858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14493858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14503858a1f4SJohn Baldwin };
14513858a1f4SJohn Baldwin 
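/*
 * The two ops vectors above decouple the generic queueing code from the
 * user-visible aiocb layout: the kern_* helpers touch user memory only
 * through these hooks, so a compatibility layer can supply its own table.
 * A minimal sketch, assuming a hypothetical foo_aiocb_copyin() that
 * translates an alternate aiocb layout:
 *
 *	static struct aiocb_ops aiocb_ops_foo = {
 *		.copyin = foo_aiocb_copyin,
 *		.fetch_status = aiocb_fetch_status,
 *		.fetch_error = aiocb_fetch_error,
 *		.store_status = aiocb_store_status,
 *		.store_error = aiocb_store_error,
 *		.store_kernelinfo = aiocb_store_kernelinfo,
 *		.store_aiocb = aiocb_store_aiocb,
 *	};
 */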
1452bfbbc4aaSJason Evans /*
1453bfbbc4aaSJason Evans  * Queue a new AIO request.  The choice between the threaded and the direct
1454bfbbc4aaSJason Evans  * physio (VCHR) technique is made in this code.
14552244ea07SJohn Dyson  */
14566a1162d4SAlexander Leidinger int
14575652770dSJohn Baldwin aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
14583858a1f4SJohn Baldwin 	int type, struct aiocb_ops *ops)
1459fd3bf775SJohn Dyson {
1460b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
14617008be5bSPawel Jakub Dawidek 	cap_rights_t rights;
14622244ea07SJohn Dyson 	struct file *fp;
1463*f3215338SJohn Baldwin 	struct kaiocb *job;
14642244ea07SJohn Dyson 	struct kaioinfo *ki;
1465c6fa9f78SAlan Cox 	struct kevent kev;
14661ce91824SDavid Xu 	int opcode;
14671ce91824SDavid Xu 	int error;
14684db71d27SJohn-Mark Gurney 	int fd, kqfd;
14691ce91824SDavid Xu 	int jid;
1470fde80935SDavid Xu 	u_short evflags;
14712244ea07SJohn Dyson 
1472a9bf5e37SDavid Xu 	if (p->p_aioinfo == NULL)
1473a9bf5e37SDavid Xu 		aio_init_aioinfo(p);
1474a9bf5e37SDavid Xu 
14751ce91824SDavid Xu 	ki = p->p_aioinfo;
14761ce91824SDavid Xu 
14775652770dSJohn Baldwin 	ops->store_status(ujob, -1);
14785652770dSJohn Baldwin 	ops->store_error(ujob, 0);
14795652770dSJohn Baldwin 	ops->store_kernelinfo(ujob, -1);
1480a9bf5e37SDavid Xu 
1481a9bf5e37SDavid Xu 	if (num_queue_count >= max_queue_count ||
1482a9bf5e37SDavid Xu 	    ki->kaio_count >= ki->kaio_qallowed_count) {
14835652770dSJohn Baldwin 		ops->store_error(ujob, EAGAIN);
1484a9bf5e37SDavid Xu 		return (EAGAIN);
1485a9bf5e37SDavid Xu 	}
1486a9bf5e37SDavid Xu 
14875652770dSJohn Baldwin 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
14885652770dSJohn Baldwin 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1489fd3bf775SJohn Dyson 
14905652770dSJohn Baldwin 	error = ops->copyin(ujob, &job->uaiocb);
14912244ea07SJohn Dyson 	if (error) {
14925652770dSJohn Baldwin 		ops->store_error(ujob, error);
14935652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1494ac41f2efSAlfred Perlstein 		return (error);
14952244ea07SJohn Dyson 	}
149668d71118SDavid Xu 
1497434ea137SGleb Smirnoff 	/* XXX: aio_nbytes is later cast to signed types. */
14985652770dSJohn Baldwin 	if (job->uaiocb.aio_nbytes > INT_MAX) {
14995652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1500434ea137SGleb Smirnoff 		return (EINVAL);
1501434ea137SGleb Smirnoff 	}
1502434ea137SGleb Smirnoff 
15035652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
15045652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
15055652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
15065652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
15075652770dSJohn Baldwin 		ops->store_error(ujob, EINVAL);
15085652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
150968d71118SDavid Xu 		return (EINVAL);
151068d71118SDavid Xu 	}
151168d71118SDavid Xu 
15125652770dSJohn Baldwin 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
15135652770dSJohn Baldwin 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
15145652770dSJohn Baldwin 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
15155652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1516ac41f2efSAlfred Perlstein 		return (EINVAL);
15172f3cf918SAlfred Perlstein 	}
15182244ea07SJohn Dyson 
15195652770dSJohn Baldwin 	ksiginfo_init(&job->ksi);
15204c0fb2cfSDavid Xu 
1521bfbbc4aaSJason Evans 	/* Save userspace address of the job info. */
15225652770dSJohn Baldwin 	job->ujob = ujob;
152311783b14SJohn Dyson 
1524bfbbc4aaSJason Evans 	/* Get the opcode. */
1525bfbbc4aaSJason Evans 	if (type != LIO_NOP)
15265652770dSJohn Baldwin 		job->uaiocb.aio_lio_opcode = type;
15275652770dSJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
15282244ea07SJohn Dyson 
1529a9d2f8d8SRobert Watson 	/*
1530a9d2f8d8SRobert Watson 	 * Validate the opcode and fetch the file object for the specified
1531a9d2f8d8SRobert Watson 	 * file descriptor.
1532a9d2f8d8SRobert Watson 	 *
1533a9d2f8d8SRobert Watson 	 * XXXRW: Moved the opcode validation up here so that we don't
1534a9d2f8d8SRobert Watson 	 * retrieve a file descriptor without knowing what the capability
1535a9d2f8d8SRobert Watson 	 * should be.
1536a9d2f8d8SRobert Watson 	 */
15375652770dSJohn Baldwin 	fd = job->uaiocb.aio_fildes;
15382a522eb9SJohn Baldwin 	switch (opcode) {
15392a522eb9SJohn Baldwin 	case LIO_WRITE:
15407008be5bSPawel Jakub Dawidek 		error = fget_write(td, fd,
15417008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PWRITE), &fp);
15422a522eb9SJohn Baldwin 		break;
15432a522eb9SJohn Baldwin 	case LIO_READ:
15447008be5bSPawel Jakub Dawidek 		error = fget_read(td, fd,
15457008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PREAD), &fp);
1546a9d2f8d8SRobert Watson 		break;
1547a9d2f8d8SRobert Watson 	case LIO_SYNC:
15487008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp);
1549a9d2f8d8SRobert Watson 		break;
15506160e12cSGleb Smirnoff 	case LIO_MLOCK:
15516160e12cSGleb Smirnoff 		fp = NULL;
15526160e12cSGleb Smirnoff 		break;
1553a9d2f8d8SRobert Watson 	case LIO_NOP:
15547008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights), &fp);
15552a522eb9SJohn Baldwin 		break;
15562a522eb9SJohn Baldwin 	default:
1557a9d2f8d8SRobert Watson 		error = EINVAL;
15582a522eb9SJohn Baldwin 	}
15592a522eb9SJohn Baldwin 	if (error) {
15605652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
15615652770dSJohn Baldwin 		ops->store_error(ujob, error);
1562af56abaaSJohn Baldwin 		return (error);
15632244ea07SJohn Dyson 	}
156499eee864SDavid Xu 
156599eee864SDavid Xu 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
156699eee864SDavid Xu 		error = EINVAL;
156799eee864SDavid Xu 		goto aqueue_fail;
156899eee864SDavid Xu 	}
15692244ea07SJohn Dyson 
15705652770dSJohn Baldwin 	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
1571ae124fc4SAlan Cox 		error = EINVAL;
1572ae124fc4SAlan Cox 		goto aqueue_fail;
15732244ea07SJohn Dyson 	}
15741ce91824SDavid Xu 
15755652770dSJohn Baldwin 	job->fd_file = fp;
15761ce91824SDavid Xu 
157799eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
157899eee864SDavid Xu 	jid = jobrefid++;
15795652770dSJohn Baldwin 	job->seqno = jobseqno++;
158099eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
15815652770dSJohn Baldwin 	error = ops->store_kernelinfo(ujob, jid);
15821ce91824SDavid Xu 	if (error) {
15831ce91824SDavid Xu 		error = EINVAL;
15841ce91824SDavid Xu 		goto aqueue_fail;
15851ce91824SDavid Xu 	}
15865652770dSJohn Baldwin 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
15872244ea07SJohn Dyson 
15882244ea07SJohn Dyson 	if (opcode == LIO_NOP) {
1589a5c0b1c0SAlan Cox 		fdrop(fp, td);
15905652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1591ac41f2efSAlfred Perlstein 		return (0);
15922244ea07SJohn Dyson 	}
15932244ea07SJohn Dyson 
15945652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1595cb679c38SJonathan Lemon 		goto no_kqueue;
15965652770dSJohn Baldwin 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1597fde80935SDavid Xu 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1598fde80935SDavid Xu 		error = EINVAL;
1599fde80935SDavid Xu 		goto aqueue_fail;
1600fde80935SDavid Xu 	}
16015652770dSJohn Baldwin 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
16025652770dSJohn Baldwin 	kev.ident = (uintptr_t)job->ujob;
1603cb679c38SJonathan Lemon 	kev.filter = EVFILT_AIO;
1604fde80935SDavid Xu 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
16055652770dSJohn Baldwin 	kev.data = (intptr_t)job;
16065652770dSJohn Baldwin 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
16074db71d27SJohn-Mark Gurney 	error = kqfd_register(kqfd, &kev, td, 1);
1608*f3215338SJohn Baldwin 	if (error)
1609*f3215338SJohn Baldwin 		goto aqueue_fail;
1610*f3215338SJohn Baldwin 
1611cb679c38SJonathan Lemon no_kqueue:
1612cb679c38SJonathan Lemon 
16135652770dSJohn Baldwin 	ops->store_error(ujob, EINPROGRESS);
16145652770dSJohn Baldwin 	job->uaiocb._aiocb_private.error = EINPROGRESS;
16155652770dSJohn Baldwin 	job->userproc = p;
16165652770dSJohn Baldwin 	job->cred = crhold(td->td_ucred);
1617*f3215338SJohn Baldwin 	job->jobflags = KAIOCB_QUEUEING;
16185652770dSJohn Baldwin 	job->lio = lj;
16192244ea07SJohn Dyson 
1620*f3215338SJohn Baldwin 	if (opcode == LIO_MLOCK) {
1621*f3215338SJohn Baldwin 		aio_schedule(job, aio_process_mlock);
1622*f3215338SJohn Baldwin 		error = 0;
1623*f3215338SJohn Baldwin 	} else if (fp->f_ops->fo_aio_queue == NULL)
1624*f3215338SJohn Baldwin 		error = aio_queue_file(fp, job);
1625*f3215338SJohn Baldwin 	else
1626*f3215338SJohn Baldwin 		error = fo_aio_queue(fp, job);
1627*f3215338SJohn Baldwin 	if (error)
1628*f3215338SJohn Baldwin 		goto aqueue_fail;
1629*f3215338SJohn Baldwin 
1630*f3215338SJohn Baldwin 	AIO_LOCK(ki);
1631*f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_QUEUEING;
1632*f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1633*f3215338SJohn Baldwin 	ki->kaio_count++;
1634*f3215338SJohn Baldwin 	if (lj)
1635*f3215338SJohn Baldwin 		lj->lioj_count++;
1636*f3215338SJohn Baldwin 	atomic_add_int(&num_queue_count, 1);
1637*f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
1638*f3215338SJohn Baldwin 		/*
1639*f3215338SJohn Baldwin 		 * The queue callback completed the request synchronously.
1640*f3215338SJohn Baldwin 		 * The bulk of the completion is deferred in that case
1641*f3215338SJohn Baldwin 		 * until this point.
1642*f3215338SJohn Baldwin 		 */
1643*f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
1644*f3215338SJohn Baldwin 	} else
1645*f3215338SJohn Baldwin 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1646*f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1647*f3215338SJohn Baldwin 	return (0);
1648*f3215338SJohn Baldwin 
1649*f3215338SJohn Baldwin aqueue_fail:
1650*f3215338SJohn Baldwin 	knlist_delete(&job->klist, curthread, 0);
1651*f3215338SJohn Baldwin 	if (fp)
1652*f3215338SJohn Baldwin 		fdrop(fp, td);
1653*f3215338SJohn Baldwin 	uma_zfree(aiocb_zone, job);
1654*f3215338SJohn Baldwin 	ops->store_error(ujob, error);
1655*f3215338SJohn Baldwin 	return (error);
1656*f3215338SJohn Baldwin }
1657*f3215338SJohn Baldwin 
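/*
 * Usage sketch (userland, not part of this file): aio_aqueue() accepts
 * SIGEV_KEVENT notification and registers the request with a kqueue as done
 * just above; the kevent ident is the userspace aiocb pointer.  The file
 * name below is hypothetical:
 *
 *	#include <sys/event.h>
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *	struct kevent ev;
 *	char buf[4096];
 *	int fd, kq;
 *
 *	fd = open("data.bin", O_RDONLY);	// hypothetical file
 *	kq = kqueue();
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_read(&cb);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// wait for the EVFILT_AIO event
 *	aio_return((struct aiocb *)ev.ident);
 */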
1658*f3215338SJohn Baldwin static void
1659*f3215338SJohn Baldwin aio_cancel_daemon_job(struct kaiocb *job)
1660*f3215338SJohn Baldwin {
1661*f3215338SJohn Baldwin 
1662*f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1663*f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1664*f3215338SJohn Baldwin 		TAILQ_REMOVE(&aio_jobs, job, list);
1665*f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1666*f3215338SJohn Baldwin 	aio_cancel(job);
1667*f3215338SJohn Baldwin }
1668*f3215338SJohn Baldwin 
1669*f3215338SJohn Baldwin void
1670*f3215338SJohn Baldwin aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1671*f3215338SJohn Baldwin {
1672*f3215338SJohn Baldwin 
1673*f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1674*f3215338SJohn Baldwin 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1675*f3215338SJohn Baldwin 		mtx_unlock(&aio_job_mtx);
1676*f3215338SJohn Baldwin 		aio_cancel(job);
1677*f3215338SJohn Baldwin 		return;
1678*f3215338SJohn Baldwin 	}
1679*f3215338SJohn Baldwin 	job->handle_fn = func;
1680*f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1681*f3215338SJohn Baldwin 	aio_kick_nowait(job->userproc);
1682*f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1683*f3215338SJohn Baldwin }
1684*f3215338SJohn Baldwin 
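/*
 * Illustrative sketch (not part of this file): a file type without its own
 * completion path can hand a job to the daemon pool with aio_schedule().
 * The handler later runs in an aio daemon, switches to the user's vmspace
 * with aio_switch_vmspace() if it needs the user buffer, and must finish the
 * job with aio_complete().  The foo_* names are hypothetical.
 *
 *	static void
 *	foo_handle(struct kaiocb *job)
 *	{
 *		long nbytes;
 *		int error;
 *
 *		aio_switch_vmspace(job);
 *		error = foo_do_io(job, &nbytes);	// hypothetical I/O helper
 *		aio_complete(job, error != 0 ? -1 : nbytes, error);
 *	}
 *
 *	static int
 *	foo_aio_queue(struct file *fp, struct kaiocb *job)
 *	{
 *
 *		aio_schedule(job, foo_handle);
 *		return (0);
 *	}
 */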
1685*f3215338SJohn Baldwin static void
1686*f3215338SJohn Baldwin aio_cancel_sync(struct kaiocb *job)
1687*f3215338SJohn Baldwin {
1688*f3215338SJohn Baldwin 	struct kaioinfo *ki;
1689*f3215338SJohn Baldwin 
1690*f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1691*f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1692*f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1693*f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1694*f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1695*f3215338SJohn Baldwin 	aio_cancel(job);
1696*f3215338SJohn Baldwin }
1697*f3215338SJohn Baldwin 
1698*f3215338SJohn Baldwin int
1699*f3215338SJohn Baldwin aio_queue_file(struct file *fp, struct kaiocb *job)
1700*f3215338SJohn Baldwin {
1701*f3215338SJohn Baldwin 	struct aioliojob *lj;
1702*f3215338SJohn Baldwin 	struct kaioinfo *ki;
1703*f3215338SJohn Baldwin 	struct kaiocb *job2;
1704*f3215338SJohn Baldwin 	int error, opcode;
1705*f3215338SJohn Baldwin 
1706*f3215338SJohn Baldwin 	lj = job->lio;
1707*f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1708*f3215338SJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
170999eee864SDavid Xu 	if (opcode == LIO_SYNC)
171099eee864SDavid Xu 		goto queueit;
171199eee864SDavid Xu 
1712*f3215338SJohn Baldwin 	if ((error = aio_qphysio(job->userproc, job)) == 0)
1713279d7226SMatthew Dillon 		goto done;
17141ce91824SDavid Xu #if 0
1715*f3215338SJohn Baldwin 	/*
1716*f3215338SJohn Baldwin 	 * XXX: This means qphysio() failed with EFAULT.  The current
1717*f3215338SJohn Baldwin 	 * behavior is to retry the operation via fo_read/fo_write.
1718*f3215338SJohn Baldwin 	 * Wouldn't it be better to just complete the request with an
1719*f3215338SJohn Baldwin 	 * error here?
1720*f3215338SJohn Baldwin 	 */
1721*f3215338SJohn Baldwin 	if (error > 0)
1722279d7226SMatthew Dillon 		goto done;
17231ce91824SDavid Xu #endif
172499eee864SDavid Xu queueit:
1725*f3215338SJohn Baldwin 	if (!enable_aio_unsafe)
1726*f3215338SJohn Baldwin 		return (EOPNOTSUPP);
172784af4da6SJohn Dyson 
172899eee864SDavid Xu 	if (opcode == LIO_SYNC) {
1729*f3215338SJohn Baldwin 		AIO_LOCK(ki);
17305652770dSJohn Baldwin 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
17315652770dSJohn Baldwin 			if (job2->fd_file == job->fd_file &&
17325652770dSJohn Baldwin 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
17335652770dSJohn Baldwin 			    job2->seqno < job->seqno) {
17345652770dSJohn Baldwin 				job2->jobflags |= KAIOCB_CHECKSYNC;
17355652770dSJohn Baldwin 				job->pending++;
1736dbbccfe9SDavid Xu 			}
1737dbbccfe9SDavid Xu 		}
17385652770dSJohn Baldwin 		if (job->pending != 0) {
1739*f3215338SJohn Baldwin 			if (!aio_set_cancel_function(job, aio_cancel_sync)) {
1740*f3215338SJohn Baldwin 				AIO_UNLOCK(ki);
1741*f3215338SJohn Baldwin 				aio_cancel(job);
1742*f3215338SJohn Baldwin 				return (0);
1743*f3215338SJohn Baldwin 			}
17445652770dSJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1745759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1746*f3215338SJohn Baldwin 			return (0);
1747dbbccfe9SDavid Xu 		}
1748759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1749*f3215338SJohn Baldwin 	}
1750*f3215338SJohn Baldwin 
1751*f3215338SJohn Baldwin 	switch (opcode) {
1752*f3215338SJohn Baldwin 	case LIO_READ:
1753*f3215338SJohn Baldwin 	case LIO_WRITE:
1754*f3215338SJohn Baldwin 		aio_schedule(job, aio_process_rw);
17551ce91824SDavid Xu 		error = 0;
1756*f3215338SJohn Baldwin 		break;
1757*f3215338SJohn Baldwin 	case LIO_SYNC:
1758*f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
1759*f3215338SJohn Baldwin 		error = 0;
1760*f3215338SJohn Baldwin 		break;
1761*f3215338SJohn Baldwin 	default:
1762*f3215338SJohn Baldwin 		error = EINVAL;
1763*f3215338SJohn Baldwin 	}
176499eee864SDavid Xu done:
176599eee864SDavid Xu 	return (error);
176699eee864SDavid Xu }
176799eee864SDavid Xu 
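/*
 * Usage sketch (userland, not part of this file): an LIO_SYNC job is produced
 * by aio_fsync(2) and is held on kaio_syncqueue above until earlier writes to
 * the same file have drained.  Note that requests which cannot use a fast
 * path are rejected with EOPNOTSUPP unless the enable_aio_unsafe knob is set
 * (exposed as a vfs.aio sysctl elsewhere in this file; name assumed here).
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	struct aiocb sc;
 *
 *	memset(&sc, 0, sizeof(sc));
 *	sc.aio_fildes = fd;		// an open file descriptor (hypothetical)
 *	aio_fsync(O_SYNC, &sc);		// queues an LIO_SYNC job
 */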
176899eee864SDavid Xu static void
176999eee864SDavid Xu aio_kick_nowait(struct proc *userp)
177099eee864SDavid Xu {
177199eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
177239314b7dSJohn Baldwin 	struct aioproc *aiop;
177399eee864SDavid Xu 
177499eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
177599eee864SDavid Xu 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
177699eee864SDavid Xu 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
177739314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
177839314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17790dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
17800dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
17810dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1782c85650caSJohn Baldwin 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
178399eee864SDavid Xu 	}
178499eee864SDavid Xu }
178599eee864SDavid Xu 
1786dbbccfe9SDavid Xu static int
178799eee864SDavid Xu aio_kick(struct proc *userp)
178899eee864SDavid Xu {
178999eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
179039314b7dSJohn Baldwin 	struct aioproc *aiop;
1791dbbccfe9SDavid Xu 	int error, ret = 0;
179299eee864SDavid Xu 
179399eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
179499eee864SDavid Xu retryproc:
1795d254af07SMatthew Dillon 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
17962244ea07SJohn Dyson 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
179739314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
179839314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17990dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
18000dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
18010dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1802fd3bf775SJohn Dyson 		num_aio_resv_start++;
18031ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
18041ce91824SDavid Xu 		error = aio_newproc(&num_aio_resv_start);
18051ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
18061ce91824SDavid Xu 		if (error) {
180784af4da6SJohn Dyson 			num_aio_resv_start--;
18082244ea07SJohn Dyson 			goto retryproc;
1809fd3bf775SJohn Dyson 		}
1810dbbccfe9SDavid Xu 	} else {
1811dbbccfe9SDavid Xu 		ret = -1;
18121ce91824SDavid Xu 	}
1813dbbccfe9SDavid Xu 	return (ret);
181499eee864SDavid Xu }
18151ce91824SDavid Xu 
181699eee864SDavid Xu static void
181799eee864SDavid Xu aio_kick_helper(void *context, int pending)
181899eee864SDavid Xu {
181999eee864SDavid Xu 	struct proc *userp = context;
182099eee864SDavid Xu 
182199eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
1822dbbccfe9SDavid Xu 	while (--pending >= 0) {
1823dbbccfe9SDavid Xu 		if (aio_kick(userp))
1824dbbccfe9SDavid Xu 			break;
1825dbbccfe9SDavid Xu 	}
182699eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
18272244ea07SJohn Dyson }
18282244ea07SJohn Dyson 
1829fd3bf775SJohn Dyson /*
1830bfbbc4aaSJason Evans  * Support the aio_return system call; as a side effect, kernel resources are
1831bfbbc4aaSJason Evans  * released.
18322244ea07SJohn Dyson  */
18333858a1f4SJohn Baldwin static int
18345652770dSJohn Baldwin kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1835fd3bf775SJohn Dyson {
1836b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18375652770dSJohn Baldwin 	struct kaiocb *job;
18382244ea07SJohn Dyson 	struct kaioinfo *ki;
18391ce91824SDavid Xu 	int status, error;
18402244ea07SJohn Dyson 
1841c0bf5caaSAlan Cox 	ki = p->p_aioinfo;
1842c0bf5caaSAlan Cox 	if (ki == NULL)
1843ac41f2efSAlfred Perlstein 		return (EINVAL);
1844759ccccaSDavid Xu 	AIO_LOCK(ki);
18455652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
18465652770dSJohn Baldwin 		if (job->ujob == ujob)
1847c0bf5caaSAlan Cox 			break;
1848c0bf5caaSAlan Cox 	}
18495652770dSJohn Baldwin 	if (job != NULL) {
1850*f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
18515652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
18525652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
18531ce91824SDavid Xu 		td->td_retval[0] = status;
18545652770dSJohn Baldwin 		if (job->uaiocb.aio_lio_opcode == LIO_WRITE) {
18555652770dSJohn Baldwin 			td->td_ru.ru_oublock += job->outputcharge;
18565652770dSJohn Baldwin 			job->outputcharge = 0;
18575652770dSJohn Baldwin 		} else if (job->uaiocb.aio_lio_opcode == LIO_READ) {
18585652770dSJohn Baldwin 			td->td_ru.ru_inblock += job->inputcharge;
18595652770dSJohn Baldwin 			job->inputcharge = 0;
186069cd28daSDoug Ambrisko 		}
18615652770dSJohn Baldwin 		aio_free_entry(job);
1862759ccccaSDavid Xu 		AIO_UNLOCK(ki);
18635652770dSJohn Baldwin 		ops->store_error(ujob, error);
18645652770dSJohn Baldwin 		ops->store_status(ujob, status);
186555a122bfSDavid Xu 	} else {
18661ce91824SDavid Xu 		error = EINVAL;
1867759ccccaSDavid Xu 		AIO_UNLOCK(ki);
186855a122bfSDavid Xu 	}
18691ce91824SDavid Xu 	return (error);
18702244ea07SJohn Dyson }
18712244ea07SJohn Dyson 
18723858a1f4SJohn Baldwin int
18738451d0ddSKip Macy sys_aio_return(struct thread *td, struct aio_return_args *uap)
18743858a1f4SJohn Baldwin {
18753858a1f4SJohn Baldwin 
18763858a1f4SJohn Baldwin 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
18773858a1f4SJohn Baldwin }
18783858a1f4SJohn Baldwin 
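/*
 * Usage sketch (userland, not part of this file): each request must be reaped
 * exactly once with aio_return(), which releases the kernel job; completion
 * can be polled with aio_error() until it stops returning EINPROGRESS.  The
 * aiocb "cb" is assumed to have been submitted earlier.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	ssize_t n;
 *
 *	while (aio_error(&cb) == EINPROGRESS)
 *		usleep(1000);
 *	n = aio_return(&cb);		// valid exactly once per request
 */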
18792244ea07SJohn Dyson /*
1880bfbbc4aaSJason Evans  * Allow a process to wake up when any of the I/O requests has completed.
18812244ea07SJohn Dyson  */
18823858a1f4SJohn Baldwin static int
18833858a1f4SJohn Baldwin kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
18843858a1f4SJohn Baldwin     struct timespec *ts)
1885fd3bf775SJohn Dyson {
1886b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18874a11ca4eSPoul-Henning Kamp 	struct timeval atv;
18882244ea07SJohn Dyson 	struct kaioinfo *ki;
18895652770dSJohn Baldwin 	struct kaiocb *firstjob, *job;
18903858a1f4SJohn Baldwin 	int error, i, timo;
18912244ea07SJohn Dyson 
18922244ea07SJohn Dyson 	timo = 0;
18933858a1f4SJohn Baldwin 	if (ts) {
18943858a1f4SJohn Baldwin 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
18952244ea07SJohn Dyson 			return (EINVAL);
18962244ea07SJohn Dyson 
18973858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
18982244ea07SJohn Dyson 		if (itimerfix(&atv))
18992244ea07SJohn Dyson 			return (EINVAL);
1900227ee8a1SPoul-Henning Kamp 		timo = tvtohz(&atv);
19012244ea07SJohn Dyson 	}
19022244ea07SJohn Dyson 
19032244ea07SJohn Dyson 	ki = p->p_aioinfo;
19042244ea07SJohn Dyson 	if (ki == NULL)
1905ac41f2efSAlfred Perlstein 		return (EAGAIN);
19062244ea07SJohn Dyson 
19073858a1f4SJohn Baldwin 	if (njoblist == 0)
1908ac41f2efSAlfred Perlstein 		return (0);
19092244ea07SJohn Dyson 
1910759ccccaSDavid Xu 	AIO_LOCK(ki);
19111ce91824SDavid Xu 	for (;;) {
19125652770dSJohn Baldwin 		firstjob = NULL;
19131ce91824SDavid Xu 		error = 0;
19145652770dSJohn Baldwin 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
191584af4da6SJohn Dyson 			for (i = 0; i < njoblist; i++) {
19165652770dSJohn Baldwin 				if (job->ujob == ujoblist[i]) {
19175652770dSJohn Baldwin 					if (firstjob == NULL)
19185652770dSJohn Baldwin 						firstjob = job;
1919*f3215338SJohn Baldwin 					if (job->jobflags & KAIOCB_FINISHED)
19201ce91824SDavid Xu 						goto RETURN;
192184af4da6SJohn Dyson 				}
192284af4da6SJohn Dyson 			}
192384af4da6SJohn Dyson 		}
19241ce91824SDavid Xu 		/* All tasks were finished. */
19255652770dSJohn Baldwin 		if (firstjob == NULL)
19261ce91824SDavid Xu 			break;
19272244ea07SJohn Dyson 
1928fd3bf775SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
1929759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
19301ce91824SDavid Xu 		    "aiospn", timo);
19311ce91824SDavid Xu 		if (error == ERESTART)
19321ce91824SDavid Xu 			error = EINTR;
19331ce91824SDavid Xu 		if (error)
19341ce91824SDavid Xu 			break;
19352244ea07SJohn Dyson 	}
19361ce91824SDavid Xu RETURN:
1937759ccccaSDavid Xu 	AIO_UNLOCK(ki);
19383858a1f4SJohn Baldwin 	return (error);
19393858a1f4SJohn Baldwin }
19403858a1f4SJohn Baldwin 
19413858a1f4SJohn Baldwin int
19428451d0ddSKip Macy sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
19433858a1f4SJohn Baldwin {
19443858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
19453858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
19463858a1f4SJohn Baldwin 	int error;
19473858a1f4SJohn Baldwin 
19483858a1f4SJohn Baldwin 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
19493858a1f4SJohn Baldwin 		return (EINVAL);
19503858a1f4SJohn Baldwin 
19513858a1f4SJohn Baldwin 	if (uap->timeout) {
19523858a1f4SJohn Baldwin 		/* Get timespec struct. */
19533858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
19543858a1f4SJohn Baldwin 			return (error);
19553858a1f4SJohn Baldwin 		tsp = &ts;
19563858a1f4SJohn Baldwin 	} else
19573858a1f4SJohn Baldwin 		tsp = NULL;
19583858a1f4SJohn Baldwin 
19593858a1f4SJohn Baldwin 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
19603858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
19613858a1f4SJohn Baldwin 	if (error == 0)
19623858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
19631ce91824SDavid Xu 	uma_zfree(aiol_zone, ujoblist);
19641ce91824SDavid Xu 	return (error);
19652244ea07SJohn Dyson }
1966ee877a35SJohn Dyson 
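/*
 * Usage sketch (userland, not part of this file): aio_suspend() blocks until
 * one of the listed requests finishes or the timeout expires.  The aiocbs
 * cb1 and cb2 are assumed to have been submitted earlier.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	const struct aiocb *const list[2] = { &cb1, &cb2 };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (aio_suspend(list, 2, &ts) == -1 && errno == EAGAIN)
 *		handle_timeout();	// hypothetical: nothing finished yet
 */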
1967ee877a35SJohn Dyson /*
1968dd85920aSJason Evans  * aio_cancel cancels any non-physio AIO operations that are not currently in
1969dd85920aSJason Evans  * progress.
1970ee877a35SJohn Dyson  */
1971ee877a35SJohn Dyson int
19728451d0ddSKip Macy sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1973fd3bf775SJohn Dyson {
1974b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
1975dd85920aSJason Evans 	struct kaioinfo *ki;
19765652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
1977dd85920aSJason Evans 	struct file *fp;
1978f131759fSMateusz Guzik 	cap_rights_t rights;
19791ce91824SDavid Xu 	int error;
1980dd85920aSJason Evans 	int cancelled = 0;
1981dd85920aSJason Evans 	int notcancelled = 0;
1982dd85920aSJason Evans 	struct vnode *vp;
1983dd85920aSJason Evans 
19842a522eb9SJohn Baldwin 	/* Lookup file object. */
1985f131759fSMateusz Guzik 	error = fget(td, uap->fd, cap_rights_init(&rights), &fp);
19862a522eb9SJohn Baldwin 	if (error)
19872a522eb9SJohn Baldwin 		return (error);
1988dd85920aSJason Evans 
19891ce91824SDavid Xu 	ki = p->p_aioinfo;
19901ce91824SDavid Xu 	if (ki == NULL)
19911ce91824SDavid Xu 		goto done;
19921ce91824SDavid Xu 
1993dd85920aSJason Evans 	if (fp->f_type == DTYPE_VNODE) {
19943b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
1995dd85920aSJason Evans 		if (vn_isdisk(vp, &error)) {
19962a522eb9SJohn Baldwin 			fdrop(fp, td);
1997b40ce416SJulian Elischer 			td->td_retval[0] = AIO_NOTCANCELED;
1998ac41f2efSAlfred Perlstein 			return (0);
1999dd85920aSJason Evans 		}
2000dd85920aSJason Evans 	}
2001dd85920aSJason Evans 
2002759ccccaSDavid Xu 	AIO_LOCK(ki);
20035652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
20045652770dSJohn Baldwin 		if ((uap->fd == job->uaiocb.aio_fildes) &&
2005dd85920aSJason Evans 		    ((uap->aiocbp == NULL) ||
20065652770dSJohn Baldwin 		     (uap->aiocbp == job->ujob))) {
2007*f3215338SJohn Baldwin 			if (aio_cancel_job(p, ki, job)) {
20081ce91824SDavid Xu 				cancelled++;
2009dd85920aSJason Evans 			} else {
2010dd85920aSJason Evans 				notcancelled++;
2011dd85920aSJason Evans 			}
20121aa4c324SDavid Xu 			if (uap->aiocbp != NULL)
20131aa4c324SDavid Xu 				break;
2014dd85920aSJason Evans 		}
2015dd85920aSJason Evans 	}
2016759ccccaSDavid Xu 	AIO_UNLOCK(ki);
20171ce91824SDavid Xu 
2018ad49abc0SAlan Cox done:
20192a522eb9SJohn Baldwin 	fdrop(fp, td);
20201aa4c324SDavid Xu 
20211aa4c324SDavid Xu 	if (uap->aiocbp != NULL) {
2022dd85920aSJason Evans 		if (cancelled) {
2023b40ce416SJulian Elischer 			td->td_retval[0] = AIO_CANCELED;
2024ac41f2efSAlfred Perlstein 			return (0);
2025dd85920aSJason Evans 		}
20261aa4c324SDavid Xu 	}
20271aa4c324SDavid Xu 
20281aa4c324SDavid Xu 	if (notcancelled) {
20291aa4c324SDavid Xu 		td->td_retval[0] = AIO_NOTCANCELED;
20301aa4c324SDavid Xu 		return (0);
20311aa4c324SDavid Xu 	}
20321aa4c324SDavid Xu 
20331aa4c324SDavid Xu 	if (cancelled) {
20341aa4c324SDavid Xu 		td->td_retval[0] = AIO_CANCELED;
20351aa4c324SDavid Xu 		return (0);
20361aa4c324SDavid Xu 	}
20371aa4c324SDavid Xu 
2038b40ce416SJulian Elischer 	td->td_retval[0] = AIO_ALLDONE;
2039dd85920aSJason Evans 
2040ac41f2efSAlfred Perlstein 	return (0);
2041ee877a35SJohn Dyson }
2042ee877a35SJohn Dyson 
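/*
 * Usage sketch (userland, not part of this file): cancelling all outstanding
 * requests on a descriptor and interpreting the result codes set by the code
 * above.
 *
 *	#include <aio.h>
 *
 *	switch (aio_cancel(fd, NULL)) {
 *	case AIO_CANCELED:	// every matching request was cancelled
 *		break;
 *	case AIO_NOTCANCELED:	// some requests were already in progress
 *		break;		//   (e.g. physio on a disk vnode)
 *	case AIO_ALLDONE:	// nothing was pending on this descriptor
 *		break;
 *	}
 */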
2043ee877a35SJohn Dyson /*
2044873fbcd7SRobert Watson  * aio_error is implemented at the kernel level for compatibility purposes
2045873fbcd7SRobert Watson  * only.  For a user mode async implementation, it would be best to do it in
2046873fbcd7SRobert Watson  * a userland subroutine.
2047ee877a35SJohn Dyson  */
20483858a1f4SJohn Baldwin static int
20495652770dSJohn Baldwin kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2050fd3bf775SJohn Dyson {
2051b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
20525652770dSJohn Baldwin 	struct kaiocb *job;
20532244ea07SJohn Dyson 	struct kaioinfo *ki;
20541ce91824SDavid Xu 	int status;
2055ee877a35SJohn Dyson 
20562244ea07SJohn Dyson 	ki = p->p_aioinfo;
20571ce91824SDavid Xu 	if (ki == NULL) {
20581ce91824SDavid Xu 		td->td_retval[0] = EINVAL;
20591ce91824SDavid Xu 		return (0);
20601ce91824SDavid Xu 	}
2061ee877a35SJohn Dyson 
2062759ccccaSDavid Xu 	AIO_LOCK(ki);
20635652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
20645652770dSJohn Baldwin 		if (job->ujob == ujob) {
2065*f3215338SJohn Baldwin 			if (job->jobflags & KAIOCB_FINISHED)
20661ce91824SDavid Xu 				td->td_retval[0] =
20675652770dSJohn Baldwin 					job->uaiocb._aiocb_private.error;
20681ce91824SDavid Xu 			else
2069b40ce416SJulian Elischer 				td->td_retval[0] = EINPROGRESS;
2070759ccccaSDavid Xu 			AIO_UNLOCK(ki);
2071ac41f2efSAlfred Perlstein 			return (0);
20722244ea07SJohn Dyson 		}
20732244ea07SJohn Dyson 	}
2074759ccccaSDavid Xu 	AIO_UNLOCK(ki);
207584af4da6SJohn Dyson 
20762244ea07SJohn Dyson 	/*
2077a9bf5e37SDavid Xu 	 * Hack for failure of aio_aqueue.
20782244ea07SJohn Dyson 	 */
20795652770dSJohn Baldwin 	status = ops->fetch_status(ujob);
20801ce91824SDavid Xu 	if (status == -1) {
20815652770dSJohn Baldwin 		td->td_retval[0] = ops->fetch_error(ujob);
20821ce91824SDavid Xu 		return (0);
20831ce91824SDavid Xu 	}
20841ce91824SDavid Xu 
20851ce91824SDavid Xu 	td->td_retval[0] = EINVAL;
20861ce91824SDavid Xu 	return (0);
2087ee877a35SJohn Dyson }
2088ee877a35SJohn Dyson 
20893858a1f4SJohn Baldwin int
20908451d0ddSKip Macy sys_aio_error(struct thread *td, struct aio_error_args *uap)
20913858a1f4SJohn Baldwin {
20923858a1f4SJohn Baldwin 
20933858a1f4SJohn Baldwin 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
20943858a1f4SJohn Baldwin }
20953858a1f4SJohn Baldwin 
2096eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */
2097ee877a35SJohn Dyson int
20988451d0ddSKip Macy sys_oaio_read(struct thread *td, struct oaio_read_args *uap)
20990972628aSDavid Xu {
21000972628aSDavid Xu 
21013858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
21023858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21030972628aSDavid Xu }
21040972628aSDavid Xu 
21050972628aSDavid Xu int
21068451d0ddSKip Macy sys_aio_read(struct thread *td, struct aio_read_args *uap)
2107fd3bf775SJohn Dyson {
210821d56e9cSAlfred Perlstein 
21093858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2110ee877a35SJohn Dyson }
2111ee877a35SJohn Dyson 
2112eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */
2113ee877a35SJohn Dyson int
21148451d0ddSKip Macy sys_oaio_write(struct thread *td, struct oaio_write_args *uap)
21150972628aSDavid Xu {
21160972628aSDavid Xu 
21173858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
21183858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21190972628aSDavid Xu }
21200972628aSDavid Xu 
21210972628aSDavid Xu int
21228451d0ddSKip Macy sys_aio_write(struct thread *td, struct aio_write_args *uap)
2123fd3bf775SJohn Dyson {
212421d56e9cSAlfred Perlstein 
21253858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
21260972628aSDavid Xu }
21270972628aSDavid Xu 
21286160e12cSGleb Smirnoff int
21296160e12cSGleb Smirnoff sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
21306160e12cSGleb Smirnoff {
21316160e12cSGleb Smirnoff 
21326160e12cSGleb Smirnoff 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
21336160e12cSGleb Smirnoff }
21346160e12cSGleb Smirnoff 
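/*
 * Usage sketch (userland, not part of this file): aio_mlock() wires the
 * region described by aio_buf/aio_nbytes asynchronously; only those fields
 * (plus the notification) are used.  "buf" and "len" are hypothetical.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_buf = buf;		// region to wire
 *	cb.aio_nbytes = len;
 *	aio_mlock(&cb);
 */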
21350972628aSDavid Xu static int
21363858a1f4SJohn Baldwin kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
21373858a1f4SJohn Baldwin     struct aiocb **acb_list, int nent, struct sigevent *sig,
21383858a1f4SJohn Baldwin     struct aiocb_ops *ops)
21390972628aSDavid Xu {
2140b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
21415652770dSJohn Baldwin 	struct aiocb *job;
21422244ea07SJohn Dyson 	struct kaioinfo *ki;
21431ce91824SDavid Xu 	struct aioliojob *lj;
214469cd28daSDoug Ambrisko 	struct kevent kev;
21451ce91824SDavid Xu 	int error;
2146fd3bf775SJohn Dyson 	int nerror;
2147ee877a35SJohn Dyson 	int i;
2148ee877a35SJohn Dyson 
21493858a1f4SJohn Baldwin 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2150ac41f2efSAlfred Perlstein 		return (EINVAL);
21512244ea07SJohn Dyson 
2152ae3b195fSTim J. Robbins 	if (nent < 0 || nent > AIO_LISTIO_MAX)
2153ac41f2efSAlfred Perlstein 		return (EINVAL);
21542244ea07SJohn Dyson 
2155bfbbc4aaSJason Evans 	if (p->p_aioinfo == NULL)
21562244ea07SJohn Dyson 		aio_init_aioinfo(p);
21572244ea07SJohn Dyson 
21582244ea07SJohn Dyson 	ki = p->p_aioinfo;
21592244ea07SJohn Dyson 
2160a163d034SWarner Losh 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
216184af4da6SJohn Dyson 	lj->lioj_flags = 0;
21621ce91824SDavid Xu 	lj->lioj_count = 0;
21631ce91824SDavid Xu 	lj->lioj_finished_count = 0;
2164d8b0556cSKonstantin Belousov 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
21654c0fb2cfSDavid Xu 	ksiginfo_init(&lj->lioj_ksi);
216669cd28daSDoug Ambrisko 
216784af4da6SJohn Dyson 	/*
2168bfbbc4aaSJason Evans 	 * Set up the signal.
216984af4da6SJohn Dyson 	 */
21703858a1f4SJohn Baldwin 	if (sig && (mode == LIO_NOWAIT)) {
21713858a1f4SJohn Baldwin 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
217269cd28daSDoug Ambrisko 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
217369cd28daSDoug Ambrisko 			/* Assume only new style KEVENT */
217469cd28daSDoug Ambrisko 			kev.filter = EVFILT_LIO;
217569cd28daSDoug Ambrisko 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
21763858a1f4SJohn Baldwin 			kev.ident = (uintptr_t)uacb_list; /* something unique */
217769cd28daSDoug Ambrisko 			kev.data = (intptr_t)lj;
21781ce91824SDavid Xu 			/* pass user defined sigval data */
21791ce91824SDavid Xu 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
21804db71d27SJohn-Mark Gurney 			error = kqfd_register(
21814db71d27SJohn-Mark Gurney 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
218269cd28daSDoug Ambrisko 			if (error) {
218369cd28daSDoug Ambrisko 				uma_zfree(aiolio_zone, lj);
218469cd28daSDoug Ambrisko 				return (error);
218569cd28daSDoug Ambrisko 			}
21861ce91824SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
21871ce91824SDavid Xu 			;
218868d71118SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
218968d71118SDavid Xu 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
219068d71118SDavid Xu 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
219169cd28daSDoug Ambrisko 					uma_zfree(aiolio_zone, lj);
219269cd28daSDoug Ambrisko 					return (EINVAL);
219368d71118SDavid Xu 				}
219484af4da6SJohn Dyson 				lj->lioj_flags |= LIOJ_SIGNAL;
219568d71118SDavid Xu 		} else {
219668d71118SDavid Xu 			uma_zfree(aiolio_zone, lj);
219768d71118SDavid Xu 			return (EINVAL);
21984d752b01SAlan Cox 		}
21991ce91824SDavid Xu 	}
220069cd28daSDoug Ambrisko 
2201759ccccaSDavid Xu 	AIO_LOCK(ki);
22022f3cf918SAlfred Perlstein 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
22032244ea07SJohn Dyson 	/*
22041ce91824SDavid Xu 	 * Add an extra aiocb count to prevent the lio from being freed
22051ce91824SDavid Xu 	 * by other threads doing aio_waitcomplete or aio_return,
22061ce91824SDavid Xu 	 * and to prevent the event from being sent until we have queued
22071ce91824SDavid Xu 	 * all tasks.
22081ce91824SDavid Xu 	 */
22091ce91824SDavid Xu 	lj->lioj_count = 1;
2210759ccccaSDavid Xu 	AIO_UNLOCK(ki);
22111ce91824SDavid Xu 
22121ce91824SDavid Xu 	/*
2213bfbbc4aaSJason Evans 	 * Get pointers to the list of I/O requests.
22142244ea07SJohn Dyson 	 */
2215fd3bf775SJohn Dyson 	nerror = 0;
22163858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++) {
22175652770dSJohn Baldwin 		job = acb_list[i];
22185652770dSJohn Baldwin 		if (job != NULL) {
22195652770dSJohn Baldwin 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
22201ce91824SDavid Xu 			if (error != 0)
2221fd3bf775SJohn Dyson 				nerror++;
2222fd3bf775SJohn Dyson 		}
2223fd3bf775SJohn Dyson 	}
22242244ea07SJohn Dyson 
22251ce91824SDavid Xu 	error = 0;
2226759ccccaSDavid Xu 	AIO_LOCK(ki);
22273858a1f4SJohn Baldwin 	if (mode == LIO_WAIT) {
22281ce91824SDavid Xu 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2229fd3bf775SJohn Dyson 			ki->kaio_flags |= KAIO_WAKEUP;
2230759ccccaSDavid Xu 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
22311ce91824SDavid Xu 			    PRIBIO | PCATCH, "aiospn", 0);
22321ce91824SDavid Xu 			if (error == ERESTART)
22331ce91824SDavid Xu 				error = EINTR;
22341ce91824SDavid Xu 			if (error)
22351ce91824SDavid Xu 				break;
22361ce91824SDavid Xu 		}
22371ce91824SDavid Xu 	} else {
22381ce91824SDavid Xu 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
22391ce91824SDavid Xu 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
22401ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
22411ce91824SDavid Xu 				KNOTE_LOCKED(&lj->klist, 1);
22421ce91824SDavid Xu 			}
22431ce91824SDavid Xu 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
22441ce91824SDavid Xu 			    == LIOJ_SIGNAL
22451ce91824SDavid Xu 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
22461ce91824SDavid Xu 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
22471ce91824SDavid Xu 				aio_sendsig(p, &lj->lioj_signal,
22481ce91824SDavid Xu 					    &lj->lioj_ksi);
22491ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
22502244ea07SJohn Dyson 			}
22512244ea07SJohn Dyson 		}
22521ce91824SDavid Xu 	}
22531ce91824SDavid Xu 	lj->lioj_count--;
22541ce91824SDavid Xu 	if (lj->lioj_count == 0) {
22551ce91824SDavid Xu 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
22561ce91824SDavid Xu 		knlist_delete(&lj->klist, curthread, 1);
2257759ccccaSDavid Xu 		PROC_LOCK(p);
22581ce91824SDavid Xu 		sigqueue_take(&lj->lioj_ksi);
22591ce91824SDavid Xu 		PROC_UNLOCK(p);
2260759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22611ce91824SDavid Xu 		uma_zfree(aiolio_zone, lj);
22621ce91824SDavid Xu 	} else
2263759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22642244ea07SJohn Dyson 
22651ce91824SDavid Xu 	if (nerror)
22661ce91824SDavid Xu 		return (EIO);
22671ce91824SDavid Xu 	return (error);
2268ee877a35SJohn Dyson }
2269fd3bf775SJohn Dyson 
22703858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
22713858a1f4SJohn Baldwin int
22728451d0ddSKip Macy sys_olio_listio(struct thread *td, struct olio_listio_args *uap)
22733858a1f4SJohn Baldwin {
22743858a1f4SJohn Baldwin 	struct aiocb **acb_list;
22753858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
22763858a1f4SJohn Baldwin 	struct osigevent osig;
22773858a1f4SJohn Baldwin 	int error, nent;
22783858a1f4SJohn Baldwin 
22793858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
22803858a1f4SJohn Baldwin 		return (EINVAL);
22813858a1f4SJohn Baldwin 
22823858a1f4SJohn Baldwin 	nent = uap->nent;
22833858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
22843858a1f4SJohn Baldwin 		return (EINVAL);
22853858a1f4SJohn Baldwin 
22863858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
22873858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
22883858a1f4SJohn Baldwin 		if (error)
22893858a1f4SJohn Baldwin 			return (error);
22903858a1f4SJohn Baldwin 		error = convert_old_sigevent(&osig, &sig);
22913858a1f4SJohn Baldwin 		if (error)
22923858a1f4SJohn Baldwin 			return (error);
22933858a1f4SJohn Baldwin 		sigp = &sig;
22943858a1f4SJohn Baldwin 	} else
22953858a1f4SJohn Baldwin 		sigp = NULL;
22963858a1f4SJohn Baldwin 
22973858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
22983858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
22993858a1f4SJohn Baldwin 	if (error == 0)
23003858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode,
23013858a1f4SJohn Baldwin 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
23023858a1f4SJohn Baldwin 		    &aiocb_ops_osigevent);
23033858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23043858a1f4SJohn Baldwin 	return (error);
23053858a1f4SJohn Baldwin }
23063858a1f4SJohn Baldwin 
23073858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
23083858a1f4SJohn Baldwin int
23098451d0ddSKip Macy sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
23103858a1f4SJohn Baldwin {
23113858a1f4SJohn Baldwin 	struct aiocb **acb_list;
23123858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
23133858a1f4SJohn Baldwin 	int error, nent;
23143858a1f4SJohn Baldwin 
23153858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
23163858a1f4SJohn Baldwin 		return (EINVAL);
23173858a1f4SJohn Baldwin 
23183858a1f4SJohn Baldwin 	nent = uap->nent;
23193858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
23203858a1f4SJohn Baldwin 		return (EINVAL);
23213858a1f4SJohn Baldwin 
23223858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
23233858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig, sizeof(sig));
23243858a1f4SJohn Baldwin 		if (error)
23253858a1f4SJohn Baldwin 			return (error);
23263858a1f4SJohn Baldwin 		sigp = &sig;
23273858a1f4SJohn Baldwin 	} else
23283858a1f4SJohn Baldwin 		sigp = NULL;
23293858a1f4SJohn Baldwin 
23303858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
23313858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
23323858a1f4SJohn Baldwin 	if (error == 0)
23333858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
23343858a1f4SJohn Baldwin 		    nent, sigp, &aiocb_ops);
23353858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23363858a1f4SJohn Baldwin 	return (error);
23373858a1f4SJohn Baldwin }
23383858a1f4SJohn Baldwin 
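/*
 * Completion handler for raw-device (physio) AIO requests: release the
 * kernel mapping and pbuf, charge the transfer to the process, and mark
 * the job as complete.
 */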
2339fd3bf775SJohn Dyson static void
2340f743d981SAlexander Motin aio_physwakeup(struct bio *bp)
2341fd3bf775SJohn Dyson {
23425652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
23431ce91824SDavid Xu 	struct proc *userp;
234427b8220dSDavid Xu 	struct kaioinfo *ki;
2345*f3215338SJohn Baldwin 	size_t nbytes;
2346*f3215338SJohn Baldwin 	int error, nblks;
23471ce91824SDavid Xu 
2348f743d981SAlexander Motin 	/* Release mapping into kernel space. */
2349*f3215338SJohn Baldwin 	userp = job->userproc;
2350*f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
23515652770dSJohn Baldwin 	if (job->pbuf) {
23525652770dSJohn Baldwin 		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
23535652770dSJohn Baldwin 		relpbuf(job->pbuf, NULL);
23545652770dSJohn Baldwin 		job->pbuf = NULL;
2355f743d981SAlexander Motin 		atomic_subtract_int(&num_buf_aio, 1);
2356*f3215338SJohn Baldwin 		AIO_LOCK(ki);
2357*f3215338SJohn Baldwin 		ki->kaio_buffer_count--;
2358*f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
2359f743d981SAlexander Motin 	}
23605652770dSJohn Baldwin 	vm_page_unhold_pages(job->pages, job->npages);
2361f743d981SAlexander Motin 
23625652770dSJohn Baldwin 	bp = job->bp;
23635652770dSJohn Baldwin 	job->bp = NULL;
2364*f3215338SJohn Baldwin 	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
2365*f3215338SJohn Baldwin 	error = 0;
2366f743d981SAlexander Motin 	if (bp->bio_flags & BIO_ERROR)
2367*f3215338SJohn Baldwin 		error = bp->bio_error;
2368*f3215338SJohn Baldwin 	nblks = btodb(nbytes);
23695652770dSJohn Baldwin 	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
23705652770dSJohn Baldwin 		job->outputcharge += nblks;
23711ce91824SDavid Xu 	else
23725652770dSJohn Baldwin 		job->inputcharge += nblks;
2373*f3215338SJohn Baldwin 
2374*f3215338SJohn Baldwin 	aio_complete(job, nbytes, error);
23751ce91824SDavid Xu 
2376f743d981SAlexander Motin 	g_destroy_bio(bp);
237784af4da6SJohn Dyson }
2378bfbbc4aaSJason Evans 
2379eb8e6d52SEivind Eklund /* Wait for the next completion of an aio request (common code for aio_waitcomplete(2)) */
23803858a1f4SJohn Baldwin static int
23815652770dSJohn Baldwin kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
23823858a1f4SJohn Baldwin     struct timespec *ts, struct aiocb_ops *ops)
2383bfbbc4aaSJason Evans {
2384b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
2385bfbbc4aaSJason Evans 	struct timeval atv;
2386bfbbc4aaSJason Evans 	struct kaioinfo *ki;
23875652770dSJohn Baldwin 	struct kaiocb *job;
23885652770dSJohn Baldwin 	struct aiocb *ujob;
23891ce91824SDavid Xu 	int error, status, timo;
2390bfbbc4aaSJason Evans 
23915652770dSJohn Baldwin 	ops->store_aiocb(ujobp, NULL);
2392dd85920aSJason Evans 
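	/*
	 * A NULL timeout means sleep until a job completes; a zero timeout
	 * means poll, failing with EWOULDBLOCK if nothing has completed yet.
	 */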
239338d68e2dSPawel Jakub Dawidek 	if (ts == NULL) {
2394bfbbc4aaSJason Evans 		timo = 0;
239538d68e2dSPawel Jakub Dawidek 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
239638d68e2dSPawel Jakub Dawidek 		timo = -1;
239738d68e2dSPawel Jakub Dawidek 	} else {
23983858a1f4SJohn Baldwin 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2399bfbbc4aaSJason Evans 			return (EINVAL);
2400bfbbc4aaSJason Evans 
24013858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2402bfbbc4aaSJason Evans 		if (itimerfix(&atv))
2403bfbbc4aaSJason Evans 			return (EINVAL);
2404bfbbc4aaSJason Evans 		timo = tvtohz(&atv);
2405bfbbc4aaSJason Evans 	}
2406bfbbc4aaSJason Evans 
24078213baf0SChristian S.J. Peron 	if (p->p_aioinfo == NULL)
2408323fe565SDavid Xu 		aio_init_aioinfo(p);
24098213baf0SChristian S.J. Peron 	ki = p->p_aioinfo;
2410bfbbc4aaSJason Evans 
24111ce91824SDavid Xu 	error = 0;
24125652770dSJohn Baldwin 	job = NULL;
2413759ccccaSDavid Xu 	AIO_LOCK(ki);
24145652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
241538d68e2dSPawel Jakub Dawidek 		if (timo == -1) {
241638d68e2dSPawel Jakub Dawidek 			error = EWOULDBLOCK;
241738d68e2dSPawel Jakub Dawidek 			break;
241838d68e2dSPawel Jakub Dawidek 		}
24191ce91824SDavid Xu 		ki->kaio_flags |= KAIO_WAKEUP;
2420759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
24211ce91824SDavid Xu 		    "aiowc", timo);
242227b8220dSDavid Xu 		if (timo && error == ERESTART)
24231ce91824SDavid Xu 			error = EINTR;
24241ce91824SDavid Xu 		if (error)
24251ce91824SDavid Xu 			break;
24261ce91824SDavid Xu 	}
24271ce91824SDavid Xu 
24285652770dSJohn Baldwin 	if (job != NULL) {
2429*f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
24305652770dSJohn Baldwin 		ujob = job->ujob;
24315652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
24325652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
24331ce91824SDavid Xu 		td->td_retval[0] = status;
24345652770dSJohn Baldwin 		if (job->uaiocb.aio_lio_opcode == LIO_WRITE) {
24355652770dSJohn Baldwin 			td->td_ru.ru_oublock += job->outputcharge;
24365652770dSJohn Baldwin 			job->outputcharge = 0;
24375652770dSJohn Baldwin 		} else if (job->uaiocb.aio_lio_opcode == LIO_READ) {
24385652770dSJohn Baldwin 			td->td_ru.ru_inblock += job->inputcharge;
24395652770dSJohn Baldwin 			job->inputcharge = 0;
2440bfbbc4aaSJason Evans 		}
24415652770dSJohn Baldwin 		aio_free_entry(job);
2442759ccccaSDavid Xu 		AIO_UNLOCK(ki);
24435652770dSJohn Baldwin 		ops->store_aiocb(ujobp, ujob);
24445652770dSJohn Baldwin 		ops->store_error(ujob, error);
24455652770dSJohn Baldwin 		ops->store_status(ujob, status);
24461ce91824SDavid Xu 	} else
2447759ccccaSDavid Xu 		AIO_UNLOCK(ki);
2448bfbbc4aaSJason Evans 
2449ac41f2efSAlfred Perlstein 	return (error);
2450bfbbc4aaSJason Evans }
2451cb679c38SJonathan Lemon 
245299eee864SDavid Xu int
24538451d0ddSKip Macy sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
24543858a1f4SJohn Baldwin {
24553858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
24563858a1f4SJohn Baldwin 	int error;
24573858a1f4SJohn Baldwin 
24583858a1f4SJohn Baldwin 	if (uap->timeout) {
24593858a1f4SJohn Baldwin 		/* Get timespec struct. */
24603858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts, sizeof(ts));
24613858a1f4SJohn Baldwin 		if (error)
24623858a1f4SJohn Baldwin 			return (error);
24633858a1f4SJohn Baldwin 		tsp = &ts;
24643858a1f4SJohn Baldwin 	} else
24653858a1f4SJohn Baldwin 		tsp = NULL;
24663858a1f4SJohn Baldwin 
24673858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
24683858a1f4SJohn Baldwin }
24693858a1f4SJohn Baldwin 
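/* Queue an LIO_SYNC request for the file referenced by the aiocb. */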
24703858a1f4SJohn Baldwin static int
24715652770dSJohn Baldwin kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
24723858a1f4SJohn Baldwin     struct aiocb_ops *ops)
247399eee864SDavid Xu {
247499eee864SDavid Xu 	struct proc *p = td->td_proc;
247599eee864SDavid Xu 	struct kaioinfo *ki;
247699eee864SDavid Xu 
24773858a1f4SJohn Baldwin 	if (op != O_SYNC) /* XXX lack of O_DSYNC */
247899eee864SDavid Xu 		return (EINVAL);
247999eee864SDavid Xu 	ki = p->p_aioinfo;
248099eee864SDavid Xu 	if (ki == NULL)
248199eee864SDavid Xu 		aio_init_aioinfo(p);
24825652770dSJohn Baldwin 	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
24833858a1f4SJohn Baldwin }
24843858a1f4SJohn Baldwin 
24853858a1f4SJohn Baldwin int
24868451d0ddSKip Macy sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
24873858a1f4SJohn Baldwin {
24883858a1f4SJohn Baldwin 
24893858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
249099eee864SDavid Xu }
249199eee864SDavid Xu 
2492eb8e6d52SEivind Eklund /* kqueue attach function */
2493cb679c38SJonathan Lemon static int
2494cb679c38SJonathan Lemon filt_aioattach(struct knote *kn)
2495cb679c38SJonathan Lemon {
24965652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)kn->kn_sdata;
2497cb679c38SJonathan Lemon 
2498cb679c38SJonathan Lemon 	/*
24995652770dSJohn Baldwin 	 * The job pointer must be validated before using it, so
2500cb679c38SJonathan Lemon 	 * registration is restricted to the kernel; the user cannot
2501cb679c38SJonathan Lemon 	 * set EV_FLAG1.
2502cb679c38SJonathan Lemon 	 */
2503cb679c38SJonathan Lemon 	if ((kn->kn_flags & EV_FLAG1) == 0)
2504cb679c38SJonathan Lemon 		return (EPERM);
25055652770dSJohn Baldwin 	kn->kn_ptr.p_aio = job;
2506cb679c38SJonathan Lemon 	kn->kn_flags &= ~EV_FLAG1;
2507cb679c38SJonathan Lemon 
25085652770dSJohn Baldwin 	knlist_add(&job->klist, kn, 0);
2509cb679c38SJonathan Lemon 
2510cb679c38SJonathan Lemon 	return (0);
2511cb679c38SJonathan Lemon }
2512cb679c38SJonathan Lemon 
2513eb8e6d52SEivind Eklund /* kqueue detach function */
2514cb679c38SJonathan Lemon static void
2515cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn)
2516cb679c38SJonathan Lemon {
25178e9fc278SDoug Ambrisko 	struct knlist *knl;
2518cb679c38SJonathan Lemon 
25198e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_aio->klist;
25208e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25218e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25228e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25238e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
2524cb679c38SJonathan Lemon }
2525cb679c38SJonathan Lemon 
2526eb8e6d52SEivind Eklund /* kqueue filter function */
2527cb679c38SJonathan Lemon /*ARGSUSED*/
2528cb679c38SJonathan Lemon static int
2529cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint)
2530cb679c38SJonathan Lemon {
25315652770dSJohn Baldwin 	struct kaiocb *job = kn->kn_ptr.p_aio;
2532cb679c38SJonathan Lemon 
25335652770dSJohn Baldwin 	kn->kn_data = job->uaiocb._aiocb_private.error;
2534*f3215338SJohn Baldwin 	if (!(job->jobflags & KAIOCB_FINISHED))
2535cb679c38SJonathan Lemon 		return (0);
2536cb679c38SJonathan Lemon 	kn->kn_flags |= EV_EOF;
2537cb679c38SJonathan Lemon 	return (1);
2538cb679c38SJonathan Lemon }
253969cd28daSDoug Ambrisko 
254069cd28daSDoug Ambrisko /* kqueue attach function */
254169cd28daSDoug Ambrisko static int
254269cd28daSDoug Ambrisko filt_lioattach(struct knote *kn)
254369cd28daSDoug Ambrisko {
25441ce91824SDavid Xu 	struct aioliojob *lj = (struct aioliojob *)kn->kn_sdata;
254569cd28daSDoug Ambrisko 
254669cd28daSDoug Ambrisko 	/*
25471ce91824SDavid Xu 	 * The aioliojob pointer must be validated before using it, so
254869cd28daSDoug Ambrisko 	 * registration is restricted to the kernel; the user cannot
254969cd28daSDoug Ambrisko 	 * set EV_FLAG1.
255069cd28daSDoug Ambrisko 	 */
255169cd28daSDoug Ambrisko 	if ((kn->kn_flags & EV_FLAG1) == 0)
255269cd28daSDoug Ambrisko 		return (EPERM);
2553a8afa221SJean-Sébastien Pédron 	kn->kn_ptr.p_lio = lj;
255469cd28daSDoug Ambrisko 	kn->kn_flags &= ~EV_FLAG1;
255569cd28daSDoug Ambrisko 
255669cd28daSDoug Ambrisko 	knlist_add(&lj->klist, kn, 0);
255769cd28daSDoug Ambrisko 
255869cd28daSDoug Ambrisko 	return (0);
255969cd28daSDoug Ambrisko }
256069cd28daSDoug Ambrisko 
256169cd28daSDoug Ambrisko /* kqueue detach function */
256269cd28daSDoug Ambrisko static void
256369cd28daSDoug Ambrisko filt_liodetach(struct knote *kn)
256469cd28daSDoug Ambrisko {
25658e9fc278SDoug Ambrisko 	struct knlist *knl;
256669cd28daSDoug Ambrisko 
25678e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_lio->klist;
25688e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25698e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25708e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25718e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
257269cd28daSDoug Ambrisko }
257369cd28daSDoug Ambrisko 
257469cd28daSDoug Ambrisko /* kqueue filter function */
257569cd28daSDoug Ambrisko /*ARGSUSED*/
257669cd28daSDoug Ambrisko static int
257769cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint)
257869cd28daSDoug Ambrisko {
2579a8afa221SJean-Sébastien Pédron 	struct aioliojob *lj = kn->kn_ptr.p_lio;
25801ce91824SDavid Xu 
258169cd28daSDoug Ambrisko 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
258269cd28daSDoug Ambrisko }
25833858a1f4SJohn Baldwin 
2584841c0c7eSNathan Whitehorn #ifdef COMPAT_FREEBSD32
25853858a1f4SJohn Baldwin 
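/*
 * 32-bit ABI versions of the private aiocb bookkeeping fields and of the
 * old- and new-style aiocb structures, as laid out by 32-bit processes.
 */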
25863858a1f4SJohn Baldwin struct __aiocb_private32 {
25873858a1f4SJohn Baldwin 	int32_t	status;
25883858a1f4SJohn Baldwin 	int32_t	error;
25893858a1f4SJohn Baldwin 	uint32_t kernelinfo;
25903858a1f4SJohn Baldwin };
25913858a1f4SJohn Baldwin 
25923858a1f4SJohn Baldwin typedef struct oaiocb32 {
25933858a1f4SJohn Baldwin 	int	aio_fildes;		/* File descriptor */
25943858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
25953858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
25963858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
25973858a1f4SJohn Baldwin 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
25983858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
25993858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26003858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26013858a1f4SJohn Baldwin } oaiocb32_t;
26023858a1f4SJohn Baldwin 
26033858a1f4SJohn Baldwin typedef struct aiocb32 {
26043858a1f4SJohn Baldwin 	int32_t	aio_fildes;		/* File descriptor */
26053858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
26063858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
26073858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26083858a1f4SJohn Baldwin 	int	__spare__[2];
26093858a1f4SJohn Baldwin 	uint32_t __spare2__;
26103858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26113858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26123858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26133858a1f4SJohn Baldwin 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
26143858a1f4SJohn Baldwin } aiocb32_t;
26153858a1f4SJohn Baldwin 
26163858a1f4SJohn Baldwin static int
26173858a1f4SJohn Baldwin convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
26183858a1f4SJohn Baldwin {
26193858a1f4SJohn Baldwin 
26203858a1f4SJohn Baldwin 	/*
26213858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
26223858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
26233858a1f4SJohn Baldwin 	 */
26243858a1f4SJohn Baldwin 	CP(*osig, *nsig, sigev_notify);
26253858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
26263858a1f4SJohn Baldwin 	case SIGEV_NONE:
26273858a1f4SJohn Baldwin 		break;
26283858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
26293858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
26303858a1f4SJohn Baldwin 		break;
26313858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
26323858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
26333858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
26343858a1f4SJohn Baldwin 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
26353858a1f4SJohn Baldwin 		break;
26363858a1f4SJohn Baldwin 	default:
26373858a1f4SJohn Baldwin 		return (EINVAL);
26383858a1f4SJohn Baldwin 	}
26393858a1f4SJohn Baldwin 	return (0);
26403858a1f4SJohn Baldwin }
26413858a1f4SJohn Baldwin 
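/*
 * Copy in a 32-bit aiocb that carries the old osigevent layout and convert
 * it to a native struct aiocb.
 */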
26423858a1f4SJohn Baldwin static int
26433858a1f4SJohn Baldwin aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
26443858a1f4SJohn Baldwin {
26453858a1f4SJohn Baldwin 	struct oaiocb32 job32;
26463858a1f4SJohn Baldwin 	int error;
26473858a1f4SJohn Baldwin 
26483858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
26493858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26503858a1f4SJohn Baldwin 	if (error)
26513858a1f4SJohn Baldwin 		return (error);
26523858a1f4SJohn Baldwin 
26533858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26543858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26553858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26563858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26573858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26583858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26593858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26603858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26613858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26623858a1f4SJohn Baldwin 	return (convert_old_sigevent32(&job32.aio_sigevent,
26633858a1f4SJohn Baldwin 	    &kjob->aio_sigevent));
26643858a1f4SJohn Baldwin }
26653858a1f4SJohn Baldwin 
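/* Copy in a 32-bit aiocb and convert it to a native struct aiocb. */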
26663858a1f4SJohn Baldwin static int
26673858a1f4SJohn Baldwin aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
26683858a1f4SJohn Baldwin {
26693858a1f4SJohn Baldwin 	struct aiocb32 job32;
26703858a1f4SJohn Baldwin 	int error;
26713858a1f4SJohn Baldwin 
26723858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26733858a1f4SJohn Baldwin 	if (error)
26743858a1f4SJohn Baldwin 		return (error);
26753858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26763858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26773858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26783858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26793858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26803858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26813858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26823858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26833858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26843858a1f4SJohn Baldwin 	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
26853858a1f4SJohn Baldwin }
26863858a1f4SJohn Baldwin 
26873858a1f4SJohn Baldwin static long
26883858a1f4SJohn Baldwin aiocb32_fetch_status(struct aiocb *ujob)
26893858a1f4SJohn Baldwin {
26903858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26913858a1f4SJohn Baldwin 
26923858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26933858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.status));
26943858a1f4SJohn Baldwin }
26953858a1f4SJohn Baldwin 
26963858a1f4SJohn Baldwin static long
26973858a1f4SJohn Baldwin aiocb32_fetch_error(struct aiocb *ujob)
26983858a1f4SJohn Baldwin {
26993858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27003858a1f4SJohn Baldwin 
27013858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27023858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.error));
27033858a1f4SJohn Baldwin }
27043858a1f4SJohn Baldwin 
27053858a1f4SJohn Baldwin static int
27063858a1f4SJohn Baldwin aiocb32_store_status(struct aiocb *ujob, long status)
27073858a1f4SJohn Baldwin {
27083858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27093858a1f4SJohn Baldwin 
27103858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27113858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.status, status));
27123858a1f4SJohn Baldwin }
27133858a1f4SJohn Baldwin 
27143858a1f4SJohn Baldwin static int
27153858a1f4SJohn Baldwin aiocb32_store_error(struct aiocb *ujob, long error)
27163858a1f4SJohn Baldwin {
27173858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27183858a1f4SJohn Baldwin 
27193858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27203858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.error, error));
27213858a1f4SJohn Baldwin }
27223858a1f4SJohn Baldwin 
27233858a1f4SJohn Baldwin static int
27243858a1f4SJohn Baldwin aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
27253858a1f4SJohn Baldwin {
27263858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27273858a1f4SJohn Baldwin 
27283858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27293858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
27303858a1f4SJohn Baldwin }
27313858a1f4SJohn Baldwin 
27323858a1f4SJohn Baldwin static int
27333858a1f4SJohn Baldwin aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
27343858a1f4SJohn Baldwin {
27353858a1f4SJohn Baldwin 
27363858a1f4SJohn Baldwin 	return (suword32(ujobp, (long)ujob));
27373858a1f4SJohn Baldwin }
27383858a1f4SJohn Baldwin 
27393858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops = {
27403858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin,
27413858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27423858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27433858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27443858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27453858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27463858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27473858a1f4SJohn Baldwin };
27483858a1f4SJohn Baldwin 
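/*
 * Operations vectors used by the common AIO code when acting on behalf of
 * 32-bit processes.
 */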
27493858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops_osigevent = {
27503858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin_old_sigevent,
27513858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27523858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27533858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27543858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27553858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27563858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27573858a1f4SJohn Baldwin };
27583858a1f4SJohn Baldwin 
27593858a1f4SJohn Baldwin int
27603858a1f4SJohn Baldwin freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
27613858a1f4SJohn Baldwin {
27623858a1f4SJohn Baldwin 
27633858a1f4SJohn Baldwin 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
27643858a1f4SJohn Baldwin }
27653858a1f4SJohn Baldwin 
27663858a1f4SJohn Baldwin int
27673858a1f4SJohn Baldwin freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
27683858a1f4SJohn Baldwin {
27693858a1f4SJohn Baldwin 	struct timespec32 ts32;
27703858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
27713858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
27723858a1f4SJohn Baldwin 	uint32_t *ujoblist32;
27733858a1f4SJohn Baldwin 	int error, i;
27743858a1f4SJohn Baldwin 
27753858a1f4SJohn Baldwin 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
27763858a1f4SJohn Baldwin 		return (EINVAL);
27773858a1f4SJohn Baldwin 
27783858a1f4SJohn Baldwin 	if (uap->timeout) {
27793858a1f4SJohn Baldwin 		/* Get timespec struct. */
27803858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
27813858a1f4SJohn Baldwin 			return (error);
27823858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
27833858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
27843858a1f4SJohn Baldwin 		tsp = &ts;
27853858a1f4SJohn Baldwin 	} else
27863858a1f4SJohn Baldwin 		tsp = NULL;
27873858a1f4SJohn Baldwin 
27883858a1f4SJohn Baldwin 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
27893858a1f4SJohn Baldwin 	ujoblist32 = (uint32_t *)ujoblist;
27903858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
27913858a1f4SJohn Baldwin 	    sizeof(ujoblist32[0]));
27923858a1f4SJohn Baldwin 	if (error == 0) {
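		/*
		 * ujoblist32 aliases the same buffer as ujoblist, so widen
		 * the packed 32-bit pointers in place from the last entry
		 * down to avoid overwriting entries not yet converted.
		 */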
27933858a1f4SJohn Baldwin 		for (i = uap->nent - 1; i >= 0; i--)
27943858a1f4SJohn Baldwin 			ujoblist[i] = PTRIN(ujoblist32[i]);
27953858a1f4SJohn Baldwin 
27963858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
27973858a1f4SJohn Baldwin 	}
27983858a1f4SJohn Baldwin 	uma_zfree(aiol_zone, ujoblist);
27993858a1f4SJohn Baldwin 	return (error);
28003858a1f4SJohn Baldwin }
28013858a1f4SJohn Baldwin 
28023858a1f4SJohn Baldwin int
28033858a1f4SJohn Baldwin freebsd32_aio_cancel(struct thread *td, struct freebsd32_aio_cancel_args *uap)
28043858a1f4SJohn Baldwin {
28053858a1f4SJohn Baldwin 
28068451d0ddSKip Macy 	return (sys_aio_cancel(td, (struct aio_cancel_args *)uap));
28073858a1f4SJohn Baldwin }
28083858a1f4SJohn Baldwin 
28093858a1f4SJohn Baldwin int
28103858a1f4SJohn Baldwin freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
28113858a1f4SJohn Baldwin {
28123858a1f4SJohn Baldwin 
28133858a1f4SJohn Baldwin 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
28143858a1f4SJohn Baldwin }
28153858a1f4SJohn Baldwin 
28163858a1f4SJohn Baldwin int
28173858a1f4SJohn Baldwin freebsd32_oaio_read(struct thread *td, struct freebsd32_oaio_read_args *uap)
28183858a1f4SJohn Baldwin {
28193858a1f4SJohn Baldwin 
28203858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28213858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28223858a1f4SJohn Baldwin }
28233858a1f4SJohn Baldwin 
28243858a1f4SJohn Baldwin int
28253858a1f4SJohn Baldwin freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
28263858a1f4SJohn Baldwin {
28273858a1f4SJohn Baldwin 
28283858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28293858a1f4SJohn Baldwin 	    &aiocb32_ops));
28303858a1f4SJohn Baldwin }
28313858a1f4SJohn Baldwin 
28323858a1f4SJohn Baldwin int
28333858a1f4SJohn Baldwin freebsd32_oaio_write(struct thread *td, struct freebsd32_oaio_write_args *uap)
28343858a1f4SJohn Baldwin {
28353858a1f4SJohn Baldwin 
28363858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28373858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28383858a1f4SJohn Baldwin }
28393858a1f4SJohn Baldwin 
28403858a1f4SJohn Baldwin int
28413858a1f4SJohn Baldwin freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
28423858a1f4SJohn Baldwin {
28433858a1f4SJohn Baldwin 
28443858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28453858a1f4SJohn Baldwin 	    &aiocb32_ops));
28463858a1f4SJohn Baldwin }
28473858a1f4SJohn Baldwin 
28483858a1f4SJohn Baldwin int
28496160e12cSGleb Smirnoff freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
28506160e12cSGleb Smirnoff {
28516160e12cSGleb Smirnoff 
28526160e12cSGleb Smirnoff 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
28536160e12cSGleb Smirnoff 	    &aiocb32_ops));
28546160e12cSGleb Smirnoff }
28556160e12cSGleb Smirnoff 
28566160e12cSGleb Smirnoff int
28573858a1f4SJohn Baldwin freebsd32_aio_waitcomplete(struct thread *td,
28583858a1f4SJohn Baldwin     struct freebsd32_aio_waitcomplete_args *uap)
28593858a1f4SJohn Baldwin {
2860e588eeb1SJohn Baldwin 	struct timespec32 ts32;
28613858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
28623858a1f4SJohn Baldwin 	int error;
28633858a1f4SJohn Baldwin 
28643858a1f4SJohn Baldwin 	if (uap->timeout) {
28653858a1f4SJohn Baldwin 		/* Get timespec struct. */
28663858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
28673858a1f4SJohn Baldwin 		if (error)
28683858a1f4SJohn Baldwin 			return (error);
28693858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28703858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28713858a1f4SJohn Baldwin 		tsp = &ts;
28723858a1f4SJohn Baldwin 	} else
28733858a1f4SJohn Baldwin 		tsp = NULL;
28743858a1f4SJohn Baldwin 
28753858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
28763858a1f4SJohn Baldwin 	    &aiocb32_ops));
28773858a1f4SJohn Baldwin }
28783858a1f4SJohn Baldwin 
28793858a1f4SJohn Baldwin int
28803858a1f4SJohn Baldwin freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
28813858a1f4SJohn Baldwin {
28823858a1f4SJohn Baldwin 
28833858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
28843858a1f4SJohn Baldwin 	    &aiocb32_ops));
28853858a1f4SJohn Baldwin }
28863858a1f4SJohn Baldwin 
28873858a1f4SJohn Baldwin int
28883858a1f4SJohn Baldwin freebsd32_olio_listio(struct thread *td, struct freebsd32_olio_listio_args *uap)
28893858a1f4SJohn Baldwin {
28903858a1f4SJohn Baldwin 	struct aiocb **acb_list;
28913858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
28923858a1f4SJohn Baldwin 	struct osigevent32 osig;
28933858a1f4SJohn Baldwin 	uint32_t *acb_list32;
28943858a1f4SJohn Baldwin 	int error, i, nent;
28953858a1f4SJohn Baldwin 
28963858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
28973858a1f4SJohn Baldwin 		return (EINVAL);
28983858a1f4SJohn Baldwin 
28993858a1f4SJohn Baldwin 	nent = uap->nent;
29003858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
29013858a1f4SJohn Baldwin 		return (EINVAL);
29023858a1f4SJohn Baldwin 
29033858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29043858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
29053858a1f4SJohn Baldwin 		if (error)
29063858a1f4SJohn Baldwin 			return (error);
29073858a1f4SJohn Baldwin 		error = convert_old_sigevent32(&osig, &sig);
29083858a1f4SJohn Baldwin 		if (error)
29093858a1f4SJohn Baldwin 			return (error);
29103858a1f4SJohn Baldwin 		sigp = &sig;
29113858a1f4SJohn Baldwin 	} else
29123858a1f4SJohn Baldwin 		sigp = NULL;
29133858a1f4SJohn Baldwin 
29143858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29153858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29163858a1f4SJohn Baldwin 	if (error) {
29173858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29183858a1f4SJohn Baldwin 		return (error);
29193858a1f4SJohn Baldwin 	}
29203858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29213858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29223858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29233858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29243858a1f4SJohn Baldwin 
29253858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29263858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29273858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent);
29283858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29293858a1f4SJohn Baldwin 	return (error);
29303858a1f4SJohn Baldwin }
29313858a1f4SJohn Baldwin 
29323858a1f4SJohn Baldwin int
29333858a1f4SJohn Baldwin freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
29343858a1f4SJohn Baldwin {
29353858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29363858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29373858a1f4SJohn Baldwin 	struct sigevent32 sig32;
29383858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29393858a1f4SJohn Baldwin 	int error, i, nent;
29403858a1f4SJohn Baldwin 
29413858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29423858a1f4SJohn Baldwin 		return (EINVAL);
29433858a1f4SJohn Baldwin 
29443858a1f4SJohn Baldwin 	nent = uap->nent;
29453858a1f4SJohn Baldwin 	if (nent < 0 || nent > AIO_LISTIO_MAX)
29463858a1f4SJohn Baldwin 		return (EINVAL);
29473858a1f4SJohn Baldwin 
29483858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29493858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig32, sizeof(sig32));
29503858a1f4SJohn Baldwin 		if (error)
29513858a1f4SJohn Baldwin 			return (error);
29523858a1f4SJohn Baldwin 		error = convert_sigevent32(&sig32, &sig);
29533858a1f4SJohn Baldwin 		if (error)
29543858a1f4SJohn Baldwin 			return (error);
29553858a1f4SJohn Baldwin 		sigp = &sig;
29563858a1f4SJohn Baldwin 	} else
29573858a1f4SJohn Baldwin 		sigp = NULL;
29583858a1f4SJohn Baldwin 
29593858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29603858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29613858a1f4SJohn Baldwin 	if (error) {
29623858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29633858a1f4SJohn Baldwin 		return (error);
29643858a1f4SJohn Baldwin 	}
29653858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29663858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29673858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29683858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29693858a1f4SJohn Baldwin 
29703858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29713858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29723858a1f4SJohn Baldwin 	    &aiocb32_ops);
29733858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29743858a1f4SJohn Baldwin 	return (error);
29753858a1f4SJohn Baldwin }
29763858a1f4SJohn Baldwin 
29773858a1f4SJohn Baldwin #endif
2978