xref: /freebsd/sys/kern/vfs_aio.c (revision 7029da5c36f2d3cf6bb6c81bf551229f416399e8)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4ee877a35SJohn Dyson  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
5ee877a35SJohn Dyson  *
6ee877a35SJohn Dyson  * Redistribution and use in source and binary forms, with or without
7ee877a35SJohn Dyson  * modification, are permitted provided that the following conditions
8ee877a35SJohn Dyson  * are met:
9ee877a35SJohn Dyson  * 1. Redistributions of source code must retain the above copyright
10ee877a35SJohn Dyson  *    notice, this list of conditions and the following disclaimer.
11ee877a35SJohn Dyson  * 2. John S. Dyson's name may not be used to endorse or promote products
12ee877a35SJohn Dyson  *    derived from this software without specific prior written permission.
13ee877a35SJohn Dyson  *
14ee877a35SJohn Dyson  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
15ee877a35SJohn Dyson  * bad that happens because of using this software isn't the responsibility
16ee877a35SJohn Dyson  * of the author.  This software is distributed AS-IS.
17ee877a35SJohn Dyson  */
18ee877a35SJohn Dyson 
19ee877a35SJohn Dyson /*
208a6472b7SPeter Dufault  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
21ee877a35SJohn Dyson  */
22ee877a35SJohn Dyson 
23677b542eSDavid E. O'Brien #include <sys/cdefs.h>
24677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
25677b542eSDavid E. O'Brien 
26ee877a35SJohn Dyson #include <sys/param.h>
27ee877a35SJohn Dyson #include <sys/systm.h>
28f591779bSSeigo Tanimura #include <sys/malloc.h>
299626b608SPoul-Henning Kamp #include <sys/bio.h>
30a5c9bce7SBruce Evans #include <sys/buf.h>
314a144410SRobert Watson #include <sys/capsicum.h>
3275b8b3b2SJohn Baldwin #include <sys/eventhandler.h>
33ee877a35SJohn Dyson #include <sys/sysproto.h>
34ee877a35SJohn Dyson #include <sys/filedesc.h>
35ee877a35SJohn Dyson #include <sys/kernel.h>
3677409fe1SPoul-Henning Kamp #include <sys/module.h>
37c9a970a7SAlan Cox #include <sys/kthread.h>
38ee877a35SJohn Dyson #include <sys/fcntl.h>
39ee877a35SJohn Dyson #include <sys/file.h>
40104a9b7eSAlexander Kabaev #include <sys/limits.h>
41fdebd4f0SBruce Evans #include <sys/lock.h>
4235e0e5b3SJohn Baldwin #include <sys/mutex.h>
43ee877a35SJohn Dyson #include <sys/unistd.h>
446aeb05d7STom Rhodes #include <sys/posix4.h>
45ee877a35SJohn Dyson #include <sys/proc.h>
462d2f8ae7SBruce Evans #include <sys/resourcevar.h>
47ee877a35SJohn Dyson #include <sys/signalvar.h>
48496ab053SKonstantin Belousov #include <sys/syscallsubr.h>
49bfbbc4aaSJason Evans #include <sys/protosw.h>
5089f6b863SAttilio Rao #include <sys/rwlock.h>
511ce91824SDavid Xu #include <sys/sema.h>
521ce91824SDavid Xu #include <sys/socket.h>
53bfbbc4aaSJason Evans #include <sys/socketvar.h>
5421d56e9cSAlfred Perlstein #include <sys/syscall.h>
5521d56e9cSAlfred Perlstein #include <sys/sysent.h>
56a624e84fSJohn Dyson #include <sys/sysctl.h>
579c20dc99SJohn Baldwin #include <sys/syslog.h>
58ee99e978SBruce Evans #include <sys/sx.h>
591ce91824SDavid Xu #include <sys/taskqueue.h>
60fd3bf775SJohn Dyson #include <sys/vnode.h>
61fd3bf775SJohn Dyson #include <sys/conf.h>
62cb679c38SJonathan Lemon #include <sys/event.h>
6399eee864SDavid Xu #include <sys/mount.h>
64f743d981SAlexander Motin #include <geom/geom.h>
65ee877a35SJohn Dyson 
661ce91824SDavid Xu #include <machine/atomic.h>
671ce91824SDavid Xu 
68ee877a35SJohn Dyson #include <vm/vm.h>
69f743d981SAlexander Motin #include <vm/vm_page.h>
70ee877a35SJohn Dyson #include <vm/vm_extern.h>
712244ea07SJohn Dyson #include <vm/pmap.h>
722244ea07SJohn Dyson #include <vm/vm_map.h>
7399eee864SDavid Xu #include <vm/vm_object.h>
74c897b813SJeff Roberson #include <vm/uma.h>
75ee877a35SJohn Dyson #include <sys/aio.h>
765aaef07cSJohn Dyson 
77eb8e6d52SEivind Eklund /*
78eb8e6d52SEivind Eklund  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
7999eee864SDavid Xu  * overflow. (XXX will be removed soon.)
80eb8e6d52SEivind Eklund  */
8199eee864SDavid Xu static u_long jobrefid;
822244ea07SJohn Dyson 
8399eee864SDavid Xu /*
8499eee864SDavid Xu  * Counter for aio_fsync.
8599eee864SDavid Xu  */
8699eee864SDavid Xu static uint64_t jobseqno;
8799eee864SDavid Xu 
8884af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC
892244ea07SJohn Dyson #define MAX_AIO_PER_PROC	32
9084af4da6SJohn Dyson #endif
9184af4da6SJohn Dyson 
9284af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC
93913b9329SAlan Somers #define MAX_AIO_QUEUE_PER_PROC	256
9484af4da6SJohn Dyson #endif
9584af4da6SJohn Dyson 
9684af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE
97913b9329SAlan Somers #define MAX_AIO_QUEUE		1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */
9884af4da6SJohn Dyson #endif
9984af4da6SJohn Dyson 
10084af4da6SJohn Dyson #ifndef MAX_BUF_AIO
10184af4da6SJohn Dyson #define MAX_BUF_AIO		16
10284af4da6SJohn Dyson #endif
10384af4da6SJohn Dyson 
104e603be7aSRobert Watson FEATURE(aio, "Asynchronous I/O");
105c45796d5SAlan Somers SYSCTL_DECL(_p1003_1b);
106e603be7aSRobert Watson 
1073858a1f4SJohn Baldwin static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
108913b9329SAlan Somers static MALLOC_DEFINE(M_AIOS, "aios", "aio_suspend aio control block list");
1093858a1f4SJohn Baldwin 
110*7029da5cSPawel Biernacki static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1110dd6c035SJohn Baldwin     "Async IO management");
112eb8e6d52SEivind Eklund 
113f3215338SJohn Baldwin static int enable_aio_unsafe = 0;
114f3215338SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
115f3215338SJohn Baldwin     "Permit asynchronous IO on all file types, not just known-safe types");
116f3215338SJohn Baldwin 
1179c20dc99SJohn Baldwin static unsigned int unsafe_warningcnt = 1;
1189c20dc99SJohn Baldwin SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
1199c20dc99SJohn Baldwin     &unsafe_warningcnt, 0,
1209c20dc99SJohn Baldwin     "Warnings that will be triggered upon failed IO requests on unsafe files");
1219c20dc99SJohn Baldwin 
122303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS;
1230dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
12439314b7dSJohn Baldwin     "Maximum number of kernel processes to use for handling async IO ");
125a624e84fSJohn Dyson 
126eb8e6d52SEivind Eklund static int num_aio_procs = 0;
1270dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
12839314b7dSJohn Baldwin     "Number of presently active kernel processes for async IO");
129a624e84fSJohn Dyson 
130eb8e6d52SEivind Eklund /*
131eb8e6d52SEivind Eklund  * The code will adjust the actual number of AIO processes towards this
132eb8e6d52SEivind Eklund  * number when it gets a chance.
133eb8e6d52SEivind Eklund  */
134eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
135eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
1360dd6c035SJohn Baldwin     0,
1370dd6c035SJohn Baldwin     "Preferred number of ready kernel processes for async IO");
138a624e84fSJohn Dyson 
139eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
140eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
141eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
142a624e84fSJohn Dyson 
143eb8e6d52SEivind Eklund static int num_queue_count = 0;
144eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
145eb8e6d52SEivind Eklund     "Number of queued aio requests");
146a624e84fSJohn Dyson 
147eb8e6d52SEivind Eklund static int num_buf_aio = 0;
148eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
149eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
150fd3bf775SJohn Dyson 
1518091e52bSJohn Baldwin static int num_unmapped_aio = 0;
1528091e52bSJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio,
1538091e52bSJohn Baldwin     0,
1548091e52bSJohn Baldwin     "Number of aio requests presently handled by unmapped I/O buffers");
1558091e52bSJohn Baldwin 
15639314b7dSJohn Baldwin /* Number of async I/O processes in the process of being started */
157a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
158eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
159fd3bf775SJohn Dyson 
160eb8e6d52SEivind Eklund static int aiod_lifetime;
161eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
162eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
16384af4da6SJohn Dyson 
164eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
165eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
1660dd6c035SJohn Baldwin     0,
16786bbef43SJohn Baldwin     "Maximum active aio requests per process");
168eb8e6d52SEivind Eklund 
169eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
170eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
171eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
17286bbef43SJohn Baldwin     "Maximum queued aio requests per process");
173eb8e6d52SEivind Eklund 
174eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
175eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
17686bbef43SJohn Baldwin     "Maximum buf aio requests per process");
177eb8e6d52SEivind Eklund 
178913b9329SAlan Somers /*
179913b9329SAlan Somers  * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires
180913b9329SAlan Somers  * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with
181913b9329SAlan Somers  * vfs.aio.aio_listio_max.
182913b9329SAlan Somers  */
183c45796d5SAlan Somers SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max,
184913b9329SAlan Somers     CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc,
185913b9329SAlan Somers     0, "Maximum aio requests for a single lio_listio call");
186c45796d5SAlan Somers 
187399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
1880972628aSDavid Xu typedef struct oaiocb {
1890972628aSDavid Xu 	int	aio_fildes;		/* File descriptor */
1900972628aSDavid Xu 	off_t	aio_offset;		/* File offset for I/O */
1910972628aSDavid Xu 	volatile void *aio_buf;         /* I/O buffer in process space */
1920972628aSDavid Xu 	size_t	aio_nbytes;		/* Number of bytes for I/O */
1930972628aSDavid Xu 	struct	osigevent aio_sigevent;	/* Signal to deliver */
1940972628aSDavid Xu 	int	aio_lio_opcode;		/* LIO opcode */
1950972628aSDavid Xu 	int	aio_reqprio;		/* Request priority -- ignored */
1960972628aSDavid Xu 	struct	__aiocb_private	_aiocb_private;
1970972628aSDavid Xu } oaiocb_t;
198399e8c17SJohn Baldwin #endif
1990972628aSDavid Xu 
2001aa4c324SDavid Xu /*
2015652770dSJohn Baldwin  * Below is a key of locks used to protect each member of struct kaiocb
2021aa4c324SDavid Xu  * aioliojob and kaioinfo and any backends.
2031aa4c324SDavid Xu  *
2041aa4c324SDavid Xu  * * - need not protected
205759ccccaSDavid Xu  * a - locked by kaioinfo lock
2061aa4c324SDavid Xu  * b - locked by backend lock, the backend lock can be null in some cases,
2071aa4c324SDavid Xu  *     for example, BIO belongs to this type, in this case, proc lock is
2081aa4c324SDavid Xu  *     reused.
2091aa4c324SDavid Xu  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
2101aa4c324SDavid Xu  */
2111aa4c324SDavid Xu 
2121aa4c324SDavid Xu /*
213f3215338SJohn Baldwin  * If the routine that services an AIO request blocks while running in an
214f3215338SJohn Baldwin  * AIO kernel process it can starve other I/O requests.  BIO requests
21572bce9ffSAlan Somers  * queued via aio_qbio() complete asynchronously and do not use AIO kernel
216f3215338SJohn Baldwin  * processes at all.  Socket I/O requests use a separate pool of
217f3215338SJohn Baldwin  * kprocs and also force non-blocking I/O.  Other file I/O requests
218f3215338SJohn Baldwin  * use the generic fo_read/fo_write operations which can block.  The
219f3215338SJohn Baldwin  * fsync and mlock operations can also block while executing.  Ideally
220f3215338SJohn Baldwin  * none of these requests would block while executing.
221f3215338SJohn Baldwin  *
222f3215338SJohn Baldwin  * Note that the service routines cannot toggle O_NONBLOCK in the file
223f3215338SJohn Baldwin  * structure directly while handling a request due to races with
224f3215338SJohn Baldwin  * userland threads.
2251aa4c324SDavid Xu  */
2261aa4c324SDavid Xu 
22748dac059SAlan Cox /* jobflags */
228f3215338SJohn Baldwin #define	KAIOCB_QUEUEING		0x01
229f3215338SJohn Baldwin #define	KAIOCB_CANCELLED	0x02
230f3215338SJohn Baldwin #define	KAIOCB_CANCELLING	0x04
2315652770dSJohn Baldwin #define	KAIOCB_CHECKSYNC	0x08
232f3215338SJohn Baldwin #define	KAIOCB_CLEARED		0x10
233f3215338SJohn Baldwin #define	KAIOCB_FINISHED		0x20
23448dac059SAlan Cox 
2352244ea07SJohn Dyson /*
2362244ea07SJohn Dyson  * AIO process info
2372244ea07SJohn Dyson  */
23884af4da6SJohn Dyson #define AIOP_FREE	0x1			/* proc on free queue */
23984af4da6SJohn Dyson 
24039314b7dSJohn Baldwin struct aioproc {
24139314b7dSJohn Baldwin 	int	aioprocflags;			/* (c) AIO proc flags */
24239314b7dSJohn Baldwin 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
24339314b7dSJohn Baldwin 	struct	proc *aioproc;			/* (*) the AIO proc */
2442244ea07SJohn Dyson };
2452244ea07SJohn Dyson 
24684af4da6SJohn Dyson /*
24784af4da6SJohn Dyson  * data-structure for lio signal management
24884af4da6SJohn Dyson  */
2491ce91824SDavid Xu struct aioliojob {
2501aa4c324SDavid Xu 	int	lioj_flags;			/* (a) listio flags */
2511aa4c324SDavid Xu 	int	lioj_count;			/* (a) listio flags */
2521aa4c324SDavid Xu 	int	lioj_finished_count;		/* (a) listio flags */
2531aa4c324SDavid Xu 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
2541aa4c324SDavid Xu 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
2551aa4c324SDavid Xu 	struct	knlist klist;			/* (a) list of knotes */
2561aa4c324SDavid Xu 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
25784af4da6SJohn Dyson };
2581ce91824SDavid Xu 
25984af4da6SJohn Dyson #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
26084af4da6SJohn Dyson #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
26169cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
26284af4da6SJohn Dyson 
26384af4da6SJohn Dyson /*
26484af4da6SJohn Dyson  * per process aio data structure
26584af4da6SJohn Dyson  */
2662244ea07SJohn Dyson struct kaioinfo {
267759ccccaSDavid Xu 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
2681aa4c324SDavid Xu 	int	kaio_flags;		/* (a) per process kaio flags */
2691aa4c324SDavid Xu 	int	kaio_active_count;	/* (c) number of currently used AIOs */
2701aa4c324SDavid Xu 	int	kaio_count;		/* (a) size of AIO queue */
27172bce9ffSAlan Somers 	int	kaio_buffer_count;	/* (a) number of bio buffers */
2725652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
2735652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
2741aa4c324SDavid Xu 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
2755652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
2765652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
277f3215338SJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
27839314b7dSJohn Baldwin 	struct	task kaio_task;		/* (*) task to kick aio processes */
279f3215338SJohn Baldwin 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
2802244ea07SJohn Dyson };
2812244ea07SJohn Dyson 
282759ccccaSDavid Xu #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
283759ccccaSDavid Xu #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
284759ccccaSDavid Xu #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
285759ccccaSDavid Xu #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
286759ccccaSDavid Xu 
28784af4da6SJohn Dyson #define KAIO_RUNDOWN	0x1	/* process is being run down */
2880dd6c035SJohn Baldwin #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
289fd3bf775SJohn Dyson 
2903858a1f4SJohn Baldwin /*
2913858a1f4SJohn Baldwin  * Operations used to interact with userland aio control blocks.
2923858a1f4SJohn Baldwin  * Different ABIs provide their own operations.
2933858a1f4SJohn Baldwin  */
2943858a1f4SJohn Baldwin struct aiocb_ops {
295849aef49SAndrew Turner 	int	(*aio_copyin)(struct aiocb *ujob, struct aiocb *kjob);
2963858a1f4SJohn Baldwin 	long	(*fetch_status)(struct aiocb *ujob);
2973858a1f4SJohn Baldwin 	long	(*fetch_error)(struct aiocb *ujob);
2983858a1f4SJohn Baldwin 	int	(*store_status)(struct aiocb *ujob, long status);
2993858a1f4SJohn Baldwin 	int	(*store_error)(struct aiocb *ujob, long error);
3003858a1f4SJohn Baldwin 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
3013858a1f4SJohn Baldwin 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
3023858a1f4SJohn Baldwin };
3033858a1f4SJohn Baldwin 
30439314b7dSJohn Baldwin static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
3051ce91824SDavid Xu static struct sema aio_newproc_sem;
3061ce91824SDavid Xu static struct mtx aio_job_mtx;
3075652770dSJohn Baldwin static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
3081ce91824SDavid Xu static struct unrhdr *aiod_unr;
3092244ea07SJohn Dyson 
3106a1162d4SAlexander Leidinger void		aio_init_aioinfo(struct proc *p);
311723d37c0SKonstantin Belousov static int	aio_onceonly(void);
3125652770dSJohn Baldwin static int	aio_free_entry(struct kaiocb *job);
3135652770dSJohn Baldwin static void	aio_process_rw(struct kaiocb *job);
3145652770dSJohn Baldwin static void	aio_process_sync(struct kaiocb *job);
3155652770dSJohn Baldwin static void	aio_process_mlock(struct kaiocb *job);
316f3215338SJohn Baldwin static void	aio_schedule_fsync(void *context, int pending);
3171ce91824SDavid Xu static int	aio_newproc(int *);
3185652770dSJohn Baldwin int		aio_aqueue(struct thread *td, struct aiocb *ujob,
3193858a1f4SJohn Baldwin 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
320f3215338SJohn Baldwin static int	aio_queue_file(struct file *fp, struct kaiocb *job);
32172bce9ffSAlan Somers static void	aio_biowakeup(struct bio *bp);
32275b8b3b2SJohn Baldwin static void	aio_proc_rundown(void *arg, struct proc *p);
3230dd6c035SJohn Baldwin static void	aio_proc_rundown_exec(void *arg, struct proc *p,
3240dd6c035SJohn Baldwin 		    struct image_params *imgp);
32572bce9ffSAlan Somers static int	aio_qbio(struct proc *p, struct kaiocb *job);
3261ce91824SDavid Xu static void	aio_daemon(void *param);
327f3215338SJohn Baldwin static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
328005ce8e4SJohn Baldwin static bool	aio_clear_cancel_function_locked(struct kaiocb *job);
329dbbccfe9SDavid Xu static int	aio_kick(struct proc *userp);
33099eee864SDavid Xu static void	aio_kick_nowait(struct proc *userp);
33199eee864SDavid Xu static void	aio_kick_helper(void *context, int pending);
33221d56e9cSAlfred Perlstein static int	filt_aioattach(struct knote *kn);
33321d56e9cSAlfred Perlstein static void	filt_aiodetach(struct knote *kn);
33421d56e9cSAlfred Perlstein static int	filt_aio(struct knote *kn, long hint);
33569cd28daSDoug Ambrisko static int	filt_lioattach(struct knote *kn);
33669cd28daSDoug Ambrisko static void	filt_liodetach(struct knote *kn);
33769cd28daSDoug Ambrisko static int	filt_lio(struct knote *kn, long hint);
3382244ea07SJohn Dyson 
339eb8e6d52SEivind Eklund /*
340eb8e6d52SEivind Eklund  * Zones for:
341eb8e6d52SEivind Eklund  * 	kaio	Per process async io info
34239314b7dSJohn Baldwin  *	aiop	async io process data
343eb8e6d52SEivind Eklund  *	aiocb	async io jobs
344eb8e6d52SEivind Eklund  *	aiolio	list io jobs
345eb8e6d52SEivind Eklund  */
346913b9329SAlan Somers static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiolio_zone;
347fd3bf775SJohn Dyson 
348eb8e6d52SEivind Eklund /* kqueue filters for aio */
349e76d823bSRobert Watson static struct filterops aio_filtops = {
350e76d823bSRobert Watson 	.f_isfd = 0,
351e76d823bSRobert Watson 	.f_attach = filt_aioattach,
352e76d823bSRobert Watson 	.f_detach = filt_aiodetach,
353e76d823bSRobert Watson 	.f_event = filt_aio,
354e76d823bSRobert Watson };
355e76d823bSRobert Watson static struct filterops lio_filtops = {
356e76d823bSRobert Watson 	.f_isfd = 0,
357e76d823bSRobert Watson 	.f_attach = filt_lioattach,
358e76d823bSRobert Watson 	.f_detach = filt_liodetach,
359e76d823bSRobert Watson 	.f_event = filt_lio
360e76d823bSRobert Watson };
36121d56e9cSAlfred Perlstein 
36275b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag;
36375b8b3b2SJohn Baldwin 
364c85650caSJohn Baldwin TASKQUEUE_DEFINE_THREAD(aiod_kick);
3651ce91824SDavid Xu 
366eb8e6d52SEivind Eklund /*
367eb8e6d52SEivind Eklund  * Main operations function for use as a kernel module.
368eb8e6d52SEivind Eklund  */
36921d56e9cSAlfred Perlstein static int
37021d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg)
37121d56e9cSAlfred Perlstein {
37221d56e9cSAlfred Perlstein 	int error = 0;
37321d56e9cSAlfred Perlstein 
37421d56e9cSAlfred Perlstein 	switch (cmd) {
37521d56e9cSAlfred Perlstein 	case MOD_LOAD:
37621d56e9cSAlfred Perlstein 		aio_onceonly();
37721d56e9cSAlfred Perlstein 		break;
37821d56e9cSAlfred Perlstein 	case MOD_SHUTDOWN:
37921d56e9cSAlfred Perlstein 		break;
38021d56e9cSAlfred Perlstein 	default:
381f3215338SJohn Baldwin 		error = EOPNOTSUPP;
38221d56e9cSAlfred Perlstein 		break;
38321d56e9cSAlfred Perlstein 	}
38421d56e9cSAlfred Perlstein 	return (error);
38521d56e9cSAlfred Perlstein }
38621d56e9cSAlfred Perlstein 
38721d56e9cSAlfred Perlstein static moduledata_t aio_mod = {
38821d56e9cSAlfred Perlstein 	"aio",
38921d56e9cSAlfred Perlstein 	&aio_modload,
39021d56e9cSAlfred Perlstein 	NULL
39121d56e9cSAlfred Perlstein };
39221d56e9cSAlfred Perlstein 
393399e8c17SJohn Baldwin DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
39421d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1);
39521d56e9cSAlfred Perlstein 
396fd3bf775SJohn Dyson /*
3972244ea07SJohn Dyson  * Startup initialization
3982244ea07SJohn Dyson  */
399723d37c0SKonstantin Belousov static int
40021d56e9cSAlfred Perlstein aio_onceonly(void)
401fd3bf775SJohn Dyson {
40221d56e9cSAlfred Perlstein 
40375b8b3b2SJohn Baldwin 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
40475b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
4050dd6c035SJohn Baldwin 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
4060dd6c035SJohn Baldwin 	    NULL, EVENTHANDLER_PRI_ANY);
40721d56e9cSAlfred Perlstein 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
40869cd28daSDoug Ambrisko 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
4092244ea07SJohn Dyson 	TAILQ_INIT(&aio_freeproc);
4101ce91824SDavid Xu 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
4111ce91824SDavid Xu 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
4122244ea07SJohn Dyson 	TAILQ_INIT(&aio_jobs);
4131ce91824SDavid Xu 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
414c897b813SJeff Roberson 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
415c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
41639314b7dSJohn Baldwin 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
417c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4185652770dSJohn Baldwin 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
419c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4201ce91824SDavid Xu 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
421c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
42284af4da6SJohn Dyson 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
423fd3bf775SJohn Dyson 	jobrefid = 1;
424399e8c17SJohn Baldwin 	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
42586d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
42686d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
427723d37c0SKonstantin Belousov 
428723d37c0SKonstantin Belousov 	return (0);
4292244ea07SJohn Dyson }
4302244ea07SJohn Dyson 
431eb8e6d52SEivind Eklund /*
432bfbbc4aaSJason Evans  * Init the per-process aioinfo structure.  The aioinfo limits are set
433bfbbc4aaSJason Evans  * per-process for user limit (resource) management.
4342244ea07SJohn Dyson  */
4356a1162d4SAlexander Leidinger void
436fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p)
437fd3bf775SJohn Dyson {
4382244ea07SJohn Dyson 	struct kaioinfo *ki;
439ac41f2efSAlfred Perlstein 
440a163d034SWarner Losh 	ki = uma_zalloc(kaio_zone, M_WAITOK);
4419889bbacSKonstantin Belousov 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
44284af4da6SJohn Dyson 	ki->kaio_flags = 0;
4432244ea07SJohn Dyson 	ki->kaio_active_count = 0;
4441ce91824SDavid Xu 	ki->kaio_count = 0;
445fd3bf775SJohn Dyson 	ki->kaio_buffer_count = 0;
4461ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_all);
4471ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_done);
4482244ea07SJohn Dyson 	TAILQ_INIT(&ki->kaio_jobqueue);
44984af4da6SJohn Dyson 	TAILQ_INIT(&ki->kaio_liojoblist);
45099eee864SDavid Xu 	TAILQ_INIT(&ki->kaio_syncqueue);
451f3215338SJohn Baldwin 	TAILQ_INIT(&ki->kaio_syncready);
45299eee864SDavid Xu 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
453f3215338SJohn Baldwin 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
4543999ebe3SAlan Cox 	PROC_LOCK(p);
4553999ebe3SAlan Cox 	if (p->p_aioinfo == NULL) {
4563999ebe3SAlan Cox 		p->p_aioinfo = ki;
4573999ebe3SAlan Cox 		PROC_UNLOCK(p);
4583999ebe3SAlan Cox 	} else {
4593999ebe3SAlan Cox 		PROC_UNLOCK(p);
460759ccccaSDavid Xu 		mtx_destroy(&ki->kaio_mtx);
4613999ebe3SAlan Cox 		uma_zfree(kaio_zone, ki);
4622244ea07SJohn Dyson 	}
463bfbbc4aaSJason Evans 
46422035f47SOleksandr Tymoshenko 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
4651ce91824SDavid Xu 		aio_newproc(NULL);
4662244ea07SJohn Dyson }
4672244ea07SJohn Dyson 
4684c0fb2cfSDavid Xu static int
4694c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
4704c0fb2cfSDavid Xu {
471cf7d9a8cSDavid Xu 	struct thread *td;
472cf7d9a8cSDavid Xu 	int error;
473759ccccaSDavid Xu 
474cf7d9a8cSDavid Xu 	error = sigev_findtd(p, sigev, &td);
475cf7d9a8cSDavid Xu 	if (error)
476cf7d9a8cSDavid Xu 		return (error);
4774c0fb2cfSDavid Xu 	if (!KSI_ONQ(ksi)) {
478cf7d9a8cSDavid Xu 		ksiginfo_set_sigev(ksi, sigev);
4794c0fb2cfSDavid Xu 		ksi->ksi_code = SI_ASYNCIO;
4804c0fb2cfSDavid Xu 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
481cf7d9a8cSDavid Xu 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
4824c0fb2cfSDavid Xu 	}
483759ccccaSDavid Xu 	PROC_UNLOCK(p);
484cf7d9a8cSDavid Xu 	return (error);
4854c0fb2cfSDavid Xu }
4864c0fb2cfSDavid Xu 
4872244ea07SJohn Dyson /*
488bfbbc4aaSJason Evans  * Free a job entry.  Wait for completion if it is currently active, but don't
489bfbbc4aaSJason Evans  * delay forever.  If we delay, we return a flag that says that we have to
490bfbbc4aaSJason Evans  * restart the queue scan.
4912244ea07SJohn Dyson  */
49288ed460eSAlan Cox static int
4935652770dSJohn Baldwin aio_free_entry(struct kaiocb *job)
494fd3bf775SJohn Dyson {
4952244ea07SJohn Dyson 	struct kaioinfo *ki;
4961ce91824SDavid Xu 	struct aioliojob *lj;
4972244ea07SJohn Dyson 	struct proc *p;
4982244ea07SJohn Dyson 
4995652770dSJohn Baldwin 	p = job->userproc;
5001ce91824SDavid Xu 	MPASS(curproc == p);
5012244ea07SJohn Dyson 	ki = p->p_aioinfo;
5021ce91824SDavid Xu 	MPASS(ki != NULL);
5031ce91824SDavid Xu 
504759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
505f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
506759ccccaSDavid Xu 
5071ce91824SDavid Xu 	atomic_subtract_int(&num_queue_count, 1);
5081ce91824SDavid Xu 
5091ce91824SDavid Xu 	ki->kaio_count--;
5101ce91824SDavid Xu 	MPASS(ki->kaio_count >= 0);
5111ce91824SDavid Xu 
5125652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
5135652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
51427b8220dSDavid Xu 
5155652770dSJohn Baldwin 	lj = job->lio;
51684af4da6SJohn Dyson 	if (lj) {
5171ce91824SDavid Xu 		lj->lioj_count--;
5181ce91824SDavid Xu 		lj->lioj_finished_count--;
5191ce91824SDavid Xu 
520a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
5211ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
5221ce91824SDavid Xu 			/* lio is going away, we need to destroy any knotes */
5231ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
524759ccccaSDavid Xu 			PROC_LOCK(p);
5251ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
526759ccccaSDavid Xu 			PROC_UNLOCK(p);
5271ce91824SDavid Xu 			uma_zfree(aiolio_zone, lj);
52884af4da6SJohn Dyson 		}
52984af4da6SJohn Dyson 	}
5301ce91824SDavid Xu 
5315652770dSJohn Baldwin 	/* job is going away, we need to destroy any knotes */
5325652770dSJohn Baldwin 	knlist_delete(&job->klist, curthread, 1);
533759ccccaSDavid Xu 	PROC_LOCK(p);
5345652770dSJohn Baldwin 	sigqueue_take(&job->ksi);
535759ccccaSDavid Xu 	PROC_UNLOCK(p);
5361ce91824SDavid Xu 
537759ccccaSDavid Xu 	AIO_UNLOCK(ki);
5382a522eb9SJohn Baldwin 
5392a522eb9SJohn Baldwin 	/*
5402a522eb9SJohn Baldwin 	 * The thread argument here is used to find the owning process
5412a522eb9SJohn Baldwin 	 * and is also passed to fo_close() which may pass it to various
5422a522eb9SJohn Baldwin 	 * places such as devsw close() routines.  Because of that, we
5432a522eb9SJohn Baldwin 	 * need a thread pointer from the process owning the job that is
5442a522eb9SJohn Baldwin 	 * persistent and won't disappear out from under us or move to
5452a522eb9SJohn Baldwin 	 * another process.
5462a522eb9SJohn Baldwin 	 *
5472a522eb9SJohn Baldwin 	 * Currently, all the callers of this function call it to remove
5485652770dSJohn Baldwin 	 * a kaiocb from the current process' job list either via a
5492a522eb9SJohn Baldwin 	 * syscall or due to the current process calling exit() or
5502a522eb9SJohn Baldwin 	 * execve().  Thus, we know that p == curproc.  We also know that
5512a522eb9SJohn Baldwin 	 * curthread can't exit since we are curthread.
5522a522eb9SJohn Baldwin 	 *
5532a522eb9SJohn Baldwin 	 * Therefore, we use curthread as the thread to pass to
5542a522eb9SJohn Baldwin 	 * knlist_delete().  This does mean that it is possible for the
5552a522eb9SJohn Baldwin 	 * thread pointer at close time to differ from the thread pointer
5562a522eb9SJohn Baldwin 	 * at open time, but this is already true of file descriptors in
5572a522eb9SJohn Baldwin 	 * a multithreaded process.
558b40ce416SJulian Elischer 	 */
5595652770dSJohn Baldwin 	if (job->fd_file)
5605652770dSJohn Baldwin 		fdrop(job->fd_file, curthread);
5615652770dSJohn Baldwin 	crfree(job->cred);
5625652770dSJohn Baldwin 	uma_zfree(aiocb_zone, job);
563759ccccaSDavid Xu 	AIO_LOCK(ki);
5641ce91824SDavid Xu 
565ac41f2efSAlfred Perlstein 	return (0);
5662244ea07SJohn Dyson }
5672244ea07SJohn Dyson 
568993182e5SAlexander Leidinger static void
5690dd6c035SJohn Baldwin aio_proc_rundown_exec(void *arg, struct proc *p,
5700dd6c035SJohn Baldwin     struct image_params *imgp __unused)
571993182e5SAlexander Leidinger {
572993182e5SAlexander Leidinger    	aio_proc_rundown(arg, p);
573993182e5SAlexander Leidinger }
574993182e5SAlexander Leidinger 
575f3215338SJohn Baldwin static int
576f3215338SJohn Baldwin aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
577f3215338SJohn Baldwin {
578f3215338SJohn Baldwin 	aio_cancel_fn_t *func;
579f3215338SJohn Baldwin 	int cancelled;
580f3215338SJohn Baldwin 
581f3215338SJohn Baldwin 	AIO_LOCK_ASSERT(ki, MA_OWNED);
582f3215338SJohn Baldwin 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
583f3215338SJohn Baldwin 		return (0);
584f3215338SJohn Baldwin 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
585f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLED;
586f3215338SJohn Baldwin 
587f3215338SJohn Baldwin 	func = job->cancel_fn;
588f3215338SJohn Baldwin 
589f3215338SJohn Baldwin 	/*
590f3215338SJohn Baldwin 	 * If there is no cancel routine, just leave the job marked as
591f3215338SJohn Baldwin 	 * cancelled.  The job should be in active use by a caller who
592f3215338SJohn Baldwin 	 * should complete it normally or when it fails to install a
593f3215338SJohn Baldwin 	 * cancel routine.
594f3215338SJohn Baldwin 	 */
595f3215338SJohn Baldwin 	if (func == NULL)
596f3215338SJohn Baldwin 		return (0);
597f3215338SJohn Baldwin 
598f3215338SJohn Baldwin 	/*
599f3215338SJohn Baldwin 	 * Set the CANCELLING flag so that aio_complete() will defer
600f3215338SJohn Baldwin 	 * completions of this job.  This prevents the job from being
601f3215338SJohn Baldwin 	 * freed out from under the cancel callback.  After the
602f3215338SJohn Baldwin 	 * callback any deferred completion (whether from the callback
603f3215338SJohn Baldwin 	 * or any other source) will be completed.
604f3215338SJohn Baldwin 	 */
605f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLING;
606f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
607f3215338SJohn Baldwin 	func(job);
608f3215338SJohn Baldwin 	AIO_LOCK(ki);
609f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_CANCELLING;
610f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
611f3215338SJohn Baldwin 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
612f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
613f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
614f3215338SJohn Baldwin 	} else {
615f3215338SJohn Baldwin 		/*
616f3215338SJohn Baldwin 		 * The cancel callback might have scheduled an
617f3215338SJohn Baldwin 		 * operation to cancel this request, but it is
618f3215338SJohn Baldwin 		 * only counted as cancelled if the request is
619f3215338SJohn Baldwin 		 * cancelled when the callback returns.
620f3215338SJohn Baldwin 		 */
621f3215338SJohn Baldwin 		cancelled = 0;
622f3215338SJohn Baldwin 	}
623f3215338SJohn Baldwin 	return (cancelled);
624f3215338SJohn Baldwin }
625f3215338SJohn Baldwin 
6262244ea07SJohn Dyson /*
6272244ea07SJohn Dyson  * Rundown the jobs for a given process.
6282244ea07SJohn Dyson  */
62921d56e9cSAlfred Perlstein static void
63075b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p)
631fd3bf775SJohn Dyson {
6322244ea07SJohn Dyson 	struct kaioinfo *ki;
6331ce91824SDavid Xu 	struct aioliojob *lj;
6345652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
6352244ea07SJohn Dyson 
6362a522eb9SJohn Baldwin 	KASSERT(curthread->td_proc == p,
6372a522eb9SJohn Baldwin 	    ("%s: called on non-curproc", __func__));
6382244ea07SJohn Dyson 	ki = p->p_aioinfo;
6392244ea07SJohn Dyson 	if (ki == NULL)
6402244ea07SJohn Dyson 		return;
6412244ea07SJohn Dyson 
642759ccccaSDavid Xu 	AIO_LOCK(ki);
64327b8220dSDavid Xu 	ki->kaio_flags |= KAIO_RUNDOWN;
6441ce91824SDavid Xu 
6451ce91824SDavid Xu restart:
646a624e84fSJohn Dyson 
647bfbbc4aaSJason Evans 	/*
6481ce91824SDavid Xu 	 * Try to cancel all pending requests. This code simulates
6491ce91824SDavid Xu 	 * aio_cancel on all pending I/O requests.
650bfbbc4aaSJason Evans 	 */
6515652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
652f3215338SJohn Baldwin 		aio_cancel_job(p, ki, job);
6532244ea07SJohn Dyson 	}
65484af4da6SJohn Dyson 
6551ce91824SDavid Xu 	/* Wait for all running I/O to be finished */
656f3215338SJohn Baldwin 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
65784af4da6SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
658759ccccaSDavid Xu 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
6591ce91824SDavid Xu 		goto restart;
66084af4da6SJohn Dyson 	}
66184af4da6SJohn Dyson 
6621ce91824SDavid Xu 	/* Free all completed I/O requests. */
6635652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
6645652770dSJohn Baldwin 		aio_free_entry(job);
66584af4da6SJohn Dyson 
6661ce91824SDavid Xu 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
667a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
66884af4da6SJohn Dyson 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
6691ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
670759ccccaSDavid Xu 			PROC_LOCK(p);
6711ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
672759ccccaSDavid Xu 			PROC_UNLOCK(p);
673c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
674f4f0ecefSJohn Dyson 		} else {
675a9bf5e37SDavid Xu 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
676a9bf5e37SDavid Xu 			    lj->lioj_count, lj->lioj_finished_count);
67784af4da6SJohn Dyson 		}
678f4f0ecefSJohn Dyson 	}
679759ccccaSDavid Xu 	AIO_UNLOCK(ki);
680c85650caSJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
681f3215338SJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
6825114048bSKonstantin Belousov 	mtx_destroy(&ki->kaio_mtx);
683c897b813SJeff Roberson 	uma_zfree(kaio_zone, ki);
684a624e84fSJohn Dyson 	p->p_aioinfo = NULL;
6852244ea07SJohn Dyson }
6862244ea07SJohn Dyson 
6872244ea07SJohn Dyson /*
688bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
6892244ea07SJohn Dyson  */
6905652770dSJohn Baldwin static struct kaiocb *
69139314b7dSJohn Baldwin aio_selectjob(struct aioproc *aiop)
692fd3bf775SJohn Dyson {
6935652770dSJohn Baldwin 	struct kaiocb *job;
694bfbbc4aaSJason Evans 	struct kaioinfo *ki;
695bfbbc4aaSJason Evans 	struct proc *userp;
6962244ea07SJohn Dyson 
6971ce91824SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
698f3215338SJohn Baldwin restart:
6995652770dSJohn Baldwin 	TAILQ_FOREACH(job, &aio_jobs, list) {
7005652770dSJohn Baldwin 		userp = job->userproc;
7012244ea07SJohn Dyson 		ki = userp->p_aioinfo;
7022244ea07SJohn Dyson 
70386bbef43SJohn Baldwin 		if (ki->kaio_active_count < max_aio_per_proc) {
7045652770dSJohn Baldwin 			TAILQ_REMOVE(&aio_jobs, job, list);
705f3215338SJohn Baldwin 			if (!aio_clear_cancel_function(job))
706f3215338SJohn Baldwin 				goto restart;
707f3215338SJohn Baldwin 
7081ce91824SDavid Xu 			/* Account for currently active jobs. */
7091ce91824SDavid Xu 			ki->kaio_active_count++;
7101ce91824SDavid Xu 			break;
7111ce91824SDavid Xu 		}
7121ce91824SDavid Xu 	}
7135652770dSJohn Baldwin 	return (job);
7142244ea07SJohn Dyson }
7152244ea07SJohn Dyson 
7162244ea07SJohn Dyson /*
7170dd6c035SJohn Baldwin  * Move all data to a permanent storage device.  This code
7180dd6c035SJohn Baldwin  * simulates the fsync syscall.
71999eee864SDavid Xu  */
72099eee864SDavid Xu static int
72199eee864SDavid Xu aio_fsync_vnode(struct thread *td, struct vnode *vp)
72299eee864SDavid Xu {
72399eee864SDavid Xu 	struct mount *mp;
72499eee864SDavid Xu 	int error;
72599eee864SDavid Xu 
72699eee864SDavid Xu 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
72799eee864SDavid Xu 		goto drop;
728cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
72999eee864SDavid Xu 	if (vp->v_object != NULL) {
73089f6b863SAttilio Rao 		VM_OBJECT_WLOCK(vp->v_object);
73199eee864SDavid Xu 		vm_object_page_clean(vp->v_object, 0, 0, 0);
73289f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(vp->v_object);
73399eee864SDavid Xu 	}
73499eee864SDavid Xu 	error = VOP_FSYNC(vp, MNT_WAIT, td);
73599eee864SDavid Xu 
736b249ce48SMateusz Guzik 	VOP_UNLOCK(vp);
73799eee864SDavid Xu 	vn_finished_write(mp);
73899eee864SDavid Xu drop:
73999eee864SDavid Xu 	return (error);
74099eee864SDavid Xu }
74199eee864SDavid Xu 
74299eee864SDavid Xu /*
743f95c13dbSGleb Smirnoff  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
74472bce9ffSAlan Somers  * does the I/O request for the non-bio version of the operations.  The normal
74572bce9ffSAlan Somers  * vn operations are used, and this code should work in all instances for every
74672bce9ffSAlan Somers  * type of file, including pipes, sockets, fifos, and regular files.
7471ce91824SDavid Xu  *
7481aa4c324SDavid Xu  * XXX I don't think it works well for socket, pipe, and fifo.
7492244ea07SJohn Dyson  */
75088ed460eSAlan Cox static void
7515652770dSJohn Baldwin aio_process_rw(struct kaiocb *job)
752fd3bf775SJohn Dyson {
753f8f750c5SRobert Watson 	struct ucred *td_savedcred;
754b40ce416SJulian Elischer 	struct thread *td;
7552244ea07SJohn Dyson 	struct aiocb *cb;
7562244ea07SJohn Dyson 	struct file *fp;
7572244ea07SJohn Dyson 	struct uio auio;
7582244ea07SJohn Dyson 	struct iovec aiov;
759bb430bc7SJohn Baldwin 	ssize_t cnt;
760b1012d80SJohn Baldwin 	long msgsnd_st, msgsnd_end;
761b1012d80SJohn Baldwin 	long msgrcv_st, msgrcv_end;
762b1012d80SJohn Baldwin 	long oublock_st, oublock_end;
763b1012d80SJohn Baldwin 	long inblock_st, inblock_end;
7642244ea07SJohn Dyson 	int error;
7652244ea07SJohn Dyson 
7665652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
7675652770dSJohn Baldwin 	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
7685652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
769f95c13dbSGleb Smirnoff 
770f3215338SJohn Baldwin 	aio_switch_vmspace(job);
771b40ce416SJulian Elischer 	td = curthread;
772f8f750c5SRobert Watson 	td_savedcred = td->td_ucred;
7735652770dSJohn Baldwin 	td->td_ucred = job->cred;
7745652770dSJohn Baldwin 	cb = &job->uaiocb;
7755652770dSJohn Baldwin 	fp = job->fd_file;
776bfbbc4aaSJason Evans 
77791369fc7SAlan Cox 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
7782244ea07SJohn Dyson 	aiov.iov_len = cb->aio_nbytes;
7792244ea07SJohn Dyson 
7802244ea07SJohn Dyson 	auio.uio_iov = &aiov;
7812244ea07SJohn Dyson 	auio.uio_iovcnt = 1;
7829b16adc1SAlan Cox 	auio.uio_offset = cb->aio_offset;
7832244ea07SJohn Dyson 	auio.uio_resid = cb->aio_nbytes;
7842244ea07SJohn Dyson 	cnt = cb->aio_nbytes;
7852244ea07SJohn Dyson 	auio.uio_segflg = UIO_USERSPACE;
786b40ce416SJulian Elischer 	auio.uio_td = td;
7872244ea07SJohn Dyson 
788b1012d80SJohn Baldwin 	msgrcv_st = td->td_ru.ru_msgrcv;
789b1012d80SJohn Baldwin 	msgsnd_st = td->td_ru.ru_msgsnd;
7901c4bcd05SJeff Roberson 	inblock_st = td->td_ru.ru_inblock;
7911c4bcd05SJeff Roberson 	oublock_st = td->td_ru.ru_oublock;
792b1012d80SJohn Baldwin 
793279d7226SMatthew Dillon 	/*
794a9bf5e37SDavid Xu 	 * aio_aqueue() acquires a reference to the file that is
7959b16adc1SAlan Cox 	 * released in aio_free_entry().
796279d7226SMatthew Dillon 	 */
7972244ea07SJohn Dyson 	if (cb->aio_lio_opcode == LIO_READ) {
7982244ea07SJohn Dyson 		auio.uio_rw = UIO_READ;
7995114048bSKonstantin Belousov 		if (auio.uio_resid == 0)
8005114048bSKonstantin Belousov 			error = 0;
8015114048bSKonstantin Belousov 		else
802b40ce416SJulian Elischer 			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8032244ea07SJohn Dyson 	} else {
8046d53aa62SDavid Xu 		if (fp->f_type == DTYPE_VNODE)
8056d53aa62SDavid Xu 			bwillwrite();
8062244ea07SJohn Dyson 		auio.uio_rw = UIO_WRITE;
807b40ce416SJulian Elischer 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8082244ea07SJohn Dyson 	}
809b1012d80SJohn Baldwin 	msgrcv_end = td->td_ru.ru_msgrcv;
810b1012d80SJohn Baldwin 	msgsnd_end = td->td_ru.ru_msgsnd;
8111c4bcd05SJeff Roberson 	inblock_end = td->td_ru.ru_inblock;
8121c4bcd05SJeff Roberson 	oublock_end = td->td_ru.ru_oublock;
813fd3bf775SJohn Dyson 
814b1012d80SJohn Baldwin 	job->msgrcv = msgrcv_end - msgrcv_st;
815b1012d80SJohn Baldwin 	job->msgsnd = msgsnd_end - msgsnd_st;
816b1012d80SJohn Baldwin 	job->inblock = inblock_end - inblock_st;
817b1012d80SJohn Baldwin 	job->outblock = oublock_end - oublock_st;
8182244ea07SJohn Dyson 
819bfbbc4aaSJason Evans 	if ((error) && (auio.uio_resid != cnt)) {
8202244ea07SJohn Dyson 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
8212244ea07SJohn Dyson 			error = 0;
82219eb87d2SJohn Baldwin 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
8235652770dSJohn Baldwin 			PROC_LOCK(job->userproc);
8245652770dSJohn Baldwin 			kern_psignal(job->userproc, SIGPIPE);
8255652770dSJohn Baldwin 			PROC_UNLOCK(job->userproc);
82619eb87d2SJohn Baldwin 		}
8272244ea07SJohn Dyson 	}
8282244ea07SJohn Dyson 
8292244ea07SJohn Dyson 	cnt -= auio.uio_resid;
830f8f750c5SRobert Watson 	td->td_ucred = td_savedcred;
831f0ec1740SJohn Baldwin 	if (error)
832f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
833f0ec1740SJohn Baldwin 	else
834f0ec1740SJohn Baldwin 		aio_complete(job, cnt, 0);
8352244ea07SJohn Dyson }
8362244ea07SJohn Dyson 
83769cd28daSDoug Ambrisko static void
8385652770dSJohn Baldwin aio_process_sync(struct kaiocb *job)
839f95c13dbSGleb Smirnoff {
840f95c13dbSGleb Smirnoff 	struct thread *td = curthread;
841f95c13dbSGleb Smirnoff 	struct ucred *td_savedcred = td->td_ucred;
8425652770dSJohn Baldwin 	struct file *fp = job->fd_file;
843f95c13dbSGleb Smirnoff 	int error = 0;
844f95c13dbSGleb Smirnoff 
8455652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
8465652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
847f95c13dbSGleb Smirnoff 
8485652770dSJohn Baldwin 	td->td_ucred = job->cred;
849f95c13dbSGleb Smirnoff 	if (fp->f_vnode != NULL)
850f95c13dbSGleb Smirnoff 		error = aio_fsync_vnode(td, fp->f_vnode);
851f95c13dbSGleb Smirnoff 	td->td_ucred = td_savedcred;
852f0ec1740SJohn Baldwin 	if (error)
853f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
854f0ec1740SJohn Baldwin 	else
855f0ec1740SJohn Baldwin 		aio_complete(job, 0, 0);
856f95c13dbSGleb Smirnoff }
857f95c13dbSGleb Smirnoff 
858f95c13dbSGleb Smirnoff static void
8595652770dSJohn Baldwin aio_process_mlock(struct kaiocb *job)
8606160e12cSGleb Smirnoff {
8615652770dSJohn Baldwin 	struct aiocb *cb = &job->uaiocb;
8626160e12cSGleb Smirnoff 	int error;
8636160e12cSGleb Smirnoff 
8645652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
8655652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
8666160e12cSGleb Smirnoff 
867f3215338SJohn Baldwin 	aio_switch_vmspace(job);
868496ab053SKonstantin Belousov 	error = kern_mlock(job->userproc, job->cred,
869496ab053SKonstantin Belousov 	    __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes);
870496ab053SKonstantin Belousov 	aio_complete(job, error != 0 ? -1 : 0, error);
8716160e12cSGleb Smirnoff }
8726160e12cSGleb Smirnoff 
8736160e12cSGleb Smirnoff static void
874f3215338SJohn Baldwin aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
8751ce91824SDavid Xu {
8761ce91824SDavid Xu 	struct aioliojob *lj;
87769cd28daSDoug Ambrisko 	struct kaioinfo *ki;
8785652770dSJohn Baldwin 	struct kaiocb *sjob, *sjobn;
8791ce91824SDavid Xu 	int lj_done;
880f3215338SJohn Baldwin 	bool schedule_fsync;
88169cd28daSDoug Ambrisko 
88269cd28daSDoug Ambrisko 	ki = userp->p_aioinfo;
883759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
8845652770dSJohn Baldwin 	lj = job->lio;
88569cd28daSDoug Ambrisko 	lj_done = 0;
88669cd28daSDoug Ambrisko 	if (lj) {
8871ce91824SDavid Xu 		lj->lioj_finished_count++;
8881ce91824SDavid Xu 		if (lj->lioj_count == lj->lioj_finished_count)
88969cd28daSDoug Ambrisko 			lj_done = 1;
89069cd28daSDoug Ambrisko 	}
8915652770dSJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
892f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
89327b8220dSDavid Xu 
89427b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_RUNDOWN)
89527b8220dSDavid Xu 		goto notification_done;
89627b8220dSDavid Xu 
8975652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
8985652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
8995652770dSJohn Baldwin 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
9001ce91824SDavid Xu 
9015652770dSJohn Baldwin 	KNOTE_LOCKED(&job->klist, 1);
9021ce91824SDavid Xu 
90369cd28daSDoug Ambrisko 	if (lj_done) {
9041ce91824SDavid Xu 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
90569cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
9061ce91824SDavid Xu 			KNOTE_LOCKED(&lj->klist, 1);
90769cd28daSDoug Ambrisko 		}
9081ce91824SDavid Xu 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
90969cd28daSDoug Ambrisko 		    == LIOJ_SIGNAL
9104c0fb2cfSDavid Xu 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
9114c0fb2cfSDavid Xu 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
9124c0fb2cfSDavid Xu 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
91369cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
91469cd28daSDoug Ambrisko 		}
91569cd28daSDoug Ambrisko 	}
91627b8220dSDavid Xu 
91727b8220dSDavid Xu notification_done:
9185652770dSJohn Baldwin 	if (job->jobflags & KAIOCB_CHECKSYNC) {
919f3215338SJohn Baldwin 		schedule_fsync = false;
9205652770dSJohn Baldwin 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
921b9a53e16SJohn Baldwin 			if (job->fd_file != sjob->fd_file ||
922b9a53e16SJohn Baldwin 			    job->seqno >= sjob->seqno)
923b9a53e16SJohn Baldwin 				continue;
924b9a53e16SJohn Baldwin 			if (--sjob->pending > 0)
925b9a53e16SJohn Baldwin 				continue;
926b9a53e16SJohn Baldwin 			TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
927005ce8e4SJohn Baldwin 			if (!aio_clear_cancel_function_locked(sjob))
928f3215338SJohn Baldwin 				continue;
929b9a53e16SJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
930f3215338SJohn Baldwin 			schedule_fsync = true;
93199eee864SDavid Xu 		}
932f3215338SJohn Baldwin 		if (schedule_fsync)
933f3215338SJohn Baldwin 			taskqueue_enqueue(taskqueue_aiod_kick,
934f3215338SJohn Baldwin 			    &ki->kaio_sync_task);
93599eee864SDavid Xu 	}
93627b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_WAKEUP) {
93769cd28daSDoug Ambrisko 		ki->kaio_flags &= ~KAIO_WAKEUP;
9381ce91824SDavid Xu 		wakeup(&userp->p_aioinfo);
93969cd28daSDoug Ambrisko 	}
94069cd28daSDoug Ambrisko }
94169cd28daSDoug Ambrisko 
9428a4dc40fSJohn Baldwin static void
943f3215338SJohn Baldwin aio_schedule_fsync(void *context, int pending)
944f3215338SJohn Baldwin {
945f3215338SJohn Baldwin 	struct kaioinfo *ki;
946f3215338SJohn Baldwin 	struct kaiocb *job;
947f3215338SJohn Baldwin 
948f3215338SJohn Baldwin 	ki = context;
949f3215338SJohn Baldwin 	AIO_LOCK(ki);
950f3215338SJohn Baldwin 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
951f3215338SJohn Baldwin 		job = TAILQ_FIRST(&ki->kaio_syncready);
952f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
953f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
954f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
955f3215338SJohn Baldwin 		AIO_LOCK(ki);
956f3215338SJohn Baldwin 	}
957f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
958f3215338SJohn Baldwin }
959f3215338SJohn Baldwin 
960f3215338SJohn Baldwin bool
961f3215338SJohn Baldwin aio_cancel_cleared(struct kaiocb *job)
962f3215338SJohn Baldwin {
963f3215338SJohn Baldwin 
964f3215338SJohn Baldwin 	/*
965f3215338SJohn Baldwin 	 * The caller should hold the same queue lock held when
966f3215338SJohn Baldwin 	 * aio_clear_cancel_function() was called and set this flag
967f3215338SJohn Baldwin 	 * ensuring this check sees an up-to-date value.  However,
968f3215338SJohn Baldwin 	 * there is no way to assert that.
969f3215338SJohn Baldwin 	 */
970f3215338SJohn Baldwin 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
971f3215338SJohn Baldwin }
972f3215338SJohn Baldwin 
973005ce8e4SJohn Baldwin static bool
974005ce8e4SJohn Baldwin aio_clear_cancel_function_locked(struct kaiocb *job)
975005ce8e4SJohn Baldwin {
976005ce8e4SJohn Baldwin 
977005ce8e4SJohn Baldwin 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
978005ce8e4SJohn Baldwin 	MPASS(job->cancel_fn != NULL);
979005ce8e4SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLING) {
980005ce8e4SJohn Baldwin 		job->jobflags |= KAIOCB_CLEARED;
981005ce8e4SJohn Baldwin 		return (false);
982005ce8e4SJohn Baldwin 	}
983005ce8e4SJohn Baldwin 	job->cancel_fn = NULL;
984005ce8e4SJohn Baldwin 	return (true);
985005ce8e4SJohn Baldwin }
986005ce8e4SJohn Baldwin 
987f3215338SJohn Baldwin bool
988f3215338SJohn Baldwin aio_clear_cancel_function(struct kaiocb *job)
989f3215338SJohn Baldwin {
990f3215338SJohn Baldwin 	struct kaioinfo *ki;
991005ce8e4SJohn Baldwin 	bool ret;
992f3215338SJohn Baldwin 
993f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
994f3215338SJohn Baldwin 	AIO_LOCK(ki);
995005ce8e4SJohn Baldwin 	ret = aio_clear_cancel_function_locked(job);
996f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
997005ce8e4SJohn Baldwin 	return (ret);
998f3215338SJohn Baldwin }
999005ce8e4SJohn Baldwin 
1000005ce8e4SJohn Baldwin static bool
1001005ce8e4SJohn Baldwin aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
1002005ce8e4SJohn Baldwin {
1003005ce8e4SJohn Baldwin 
1004005ce8e4SJohn Baldwin 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
1005005ce8e4SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLED)
1006005ce8e4SJohn Baldwin 		return (false);
1007005ce8e4SJohn Baldwin 	job->cancel_fn = func;
1008f3215338SJohn Baldwin 	return (true);
1009f3215338SJohn Baldwin }
1010f3215338SJohn Baldwin 
1011f3215338SJohn Baldwin bool
1012f3215338SJohn Baldwin aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1013f3215338SJohn Baldwin {
1014f3215338SJohn Baldwin 	struct kaioinfo *ki;
1015005ce8e4SJohn Baldwin 	bool ret;
1016f3215338SJohn Baldwin 
1017f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1018f3215338SJohn Baldwin 	AIO_LOCK(ki);
1019005ce8e4SJohn Baldwin 	ret = aio_set_cancel_function_locked(job, func);
1020f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1021005ce8e4SJohn Baldwin 	return (ret);
1022f3215338SJohn Baldwin }
1023f3215338SJohn Baldwin 
1024f3215338SJohn Baldwin void
1025f3215338SJohn Baldwin aio_complete(struct kaiocb *job, long status, int error)
1026f3215338SJohn Baldwin {
1027f3215338SJohn Baldwin 	struct kaioinfo *ki;
1028f3215338SJohn Baldwin 	struct proc *userp;
1029f3215338SJohn Baldwin 
1030f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.error = error;
1031f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.status = status;
1032f3215338SJohn Baldwin 
1033f3215338SJohn Baldwin 	userp = job->userproc;
1034f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
1035f3215338SJohn Baldwin 
1036f3215338SJohn Baldwin 	AIO_LOCK(ki);
1037f3215338SJohn Baldwin 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1038f3215338SJohn Baldwin 	    ("duplicate aio_complete"));
1039f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_FINISHED;
1040f3215338SJohn Baldwin 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1041f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1042f3215338SJohn Baldwin 		aio_bio_done_notify(userp, job);
1043f3215338SJohn Baldwin 	}
1044f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1045f3215338SJohn Baldwin }
1046f3215338SJohn Baldwin 
1047f3215338SJohn Baldwin void
1048f3215338SJohn Baldwin aio_cancel(struct kaiocb *job)
1049f3215338SJohn Baldwin {
1050f3215338SJohn Baldwin 
1051f3215338SJohn Baldwin 	aio_complete(job, -1, ECANCELED);
1052f3215338SJohn Baldwin }
1053f3215338SJohn Baldwin 
1054f3215338SJohn Baldwin void
10555652770dSJohn Baldwin aio_switch_vmspace(struct kaiocb *job)
10568a4dc40fSJohn Baldwin {
10578a4dc40fSJohn Baldwin 
10585652770dSJohn Baldwin 	vmspace_switch_aio(job->userproc->p_vmspace);
10598a4dc40fSJohn Baldwin }
10608a4dc40fSJohn Baldwin 
10612244ea07SJohn Dyson /*
1062f95c13dbSGleb Smirnoff  * The AIO daemon, most of the actual work is done in aio_process_*,
106384af4da6SJohn Dyson  * but the setup (and address space mgmt) is done in this routine.
10642244ea07SJohn Dyson  */
10652244ea07SJohn Dyson static void
10661ce91824SDavid Xu aio_daemon(void *_id)
10672244ea07SJohn Dyson {
10685652770dSJohn Baldwin 	struct kaiocb *job;
106939314b7dSJohn Baldwin 	struct aioproc *aiop;
1070bfbbc4aaSJason Evans 	struct kaioinfo *ki;
1071f3215338SJohn Baldwin 	struct proc *p;
10728a4dc40fSJohn Baldwin 	struct vmspace *myvm;
1073b40ce416SJulian Elischer 	struct thread *td = curthread;
10741ce91824SDavid Xu 	int id = (intptr_t)_id;
10752244ea07SJohn Dyson 
10762244ea07SJohn Dyson 	/*
10778a4dc40fSJohn Baldwin 	 * Grab an extra reference on the daemon's vmspace so that it
10788a4dc40fSJohn Baldwin 	 * doesn't get freed by jobs that switch to a different
10798a4dc40fSJohn Baldwin 	 * vmspace.
10802244ea07SJohn Dyson 	 */
10818a4dc40fSJohn Baldwin 	p = td->td_proc;
10828a4dc40fSJohn Baldwin 	myvm = vmspace_acquire_ref(p);
1083fd3bf775SJohn Dyson 
10848a4dc40fSJohn Baldwin 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1085fd3bf775SJohn Dyson 
1086fd3bf775SJohn Dyson 	/*
1087bfbbc4aaSJason Evans 	 * Allocate and ready the aio control info.  There is one aiop structure
1088bfbbc4aaSJason Evans 	 * per daemon.
1089fd3bf775SJohn Dyson 	 */
1090a163d034SWarner Losh 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
109139314b7dSJohn Baldwin 	aiop->aioproc = p;
109239314b7dSJohn Baldwin 	aiop->aioprocflags = 0;
1093bfbbc4aaSJason Evans 
1094fd3bf775SJohn Dyson 	/*
1095fd3bf775SJohn Dyson 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
1096b40ce416SJulian Elischer 	 * and creating too many daemons.)
1097fd3bf775SJohn Dyson 	 */
10981ce91824SDavid Xu 	sema_post(&aio_newproc_sem);
10992244ea07SJohn Dyson 
11001ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
1101bfbbc4aaSJason Evans 	for (;;) {
1102fd3bf775SJohn Dyson 		/*
1103fd3bf775SJohn Dyson 		 * Take daemon off of free queue
1104fd3bf775SJohn Dyson 		 */
110539314b7dSJohn Baldwin 		if (aiop->aioprocflags & AIOP_FREE) {
11062244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
110739314b7dSJohn Baldwin 			aiop->aioprocflags &= ~AIOP_FREE;
11082244ea07SJohn Dyson 		}
11092244ea07SJohn Dyson 
1110fd3bf775SJohn Dyson 		/*
1111bfbbc4aaSJason Evans 		 * Check for jobs.
1112fd3bf775SJohn Dyson 		 */
11135652770dSJohn Baldwin 		while ((job = aio_selectjob(aiop)) != NULL) {
11141ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11152244ea07SJohn Dyson 
1116f3215338SJohn Baldwin 			ki = job->userproc->p_aioinfo;
1117f3215338SJohn Baldwin 			job->handle_fn(job);
111884af4da6SJohn Dyson 
11199b84335cSDavid Xu 			mtx_lock(&aio_job_mtx);
11209b84335cSDavid Xu 			/* Decrement the active job count. */
11219b84335cSDavid Xu 			ki->kaio_active_count--;
11222244ea07SJohn Dyson 		}
11232244ea07SJohn Dyson 
1124fd3bf775SJohn Dyson 		/*
1125bfbbc4aaSJason Evans 		 * Disconnect from user address space.
1126fd3bf775SJohn Dyson 		 */
11278a4dc40fSJohn Baldwin 		if (p->p_vmspace != myvm) {
11281ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11298a4dc40fSJohn Baldwin 			vmspace_switch_aio(myvm);
11301ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
11311ce91824SDavid Xu 			/*
11321ce91824SDavid Xu 			 * We have to restart to avoid race, we only sleep if
11338a4dc40fSJohn Baldwin 			 * no job can be selected.
11341ce91824SDavid Xu 			 */
11351ce91824SDavid Xu 			continue;
1136fd3bf775SJohn Dyson 		}
1137fd3bf775SJohn Dyson 
11381ce91824SDavid Xu 		mtx_assert(&aio_job_mtx, MA_OWNED);
11391ce91824SDavid Xu 
1140fd3bf775SJohn Dyson 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
114139314b7dSJohn Baldwin 		aiop->aioprocflags |= AIOP_FREE;
1142fd3bf775SJohn Dyson 
1143fd3bf775SJohn Dyson 		/*
1144bfbbc4aaSJason Evans 		 * If daemon is inactive for a long time, allow it to exit,
1145bfbbc4aaSJason Evans 		 * thereby freeing resources.
1146fd3bf775SJohn Dyson 		 */
114739314b7dSJohn Baldwin 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
11488a4dc40fSJohn Baldwin 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
114939314b7dSJohn Baldwin 		    (aiop->aioprocflags & AIOP_FREE) &&
11508a4dc40fSJohn Baldwin 		    num_aio_procs > target_aio_procs)
11518a4dc40fSJohn Baldwin 			break;
11528a4dc40fSJohn Baldwin 	}
1153fd3bf775SJohn Dyson 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
115484af4da6SJohn Dyson 	num_aio_procs--;
11551ce91824SDavid Xu 	mtx_unlock(&aio_job_mtx);
11561ce91824SDavid Xu 	uma_zfree(aiop_zone, aiop);
11571ce91824SDavid Xu 	free_unr(aiod_unr, id);
11588a4dc40fSJohn Baldwin 	vmspace_free(myvm);
11598a4dc40fSJohn Baldwin 
11608a4dc40fSJohn Baldwin 	KASSERT(p->p_vmspace == myvm,
11618a4dc40fSJohn Baldwin 	    ("AIOD: bad vmspace for exiting daemon"));
11628a4dc40fSJohn Baldwin 	KASSERT(myvm->vm_refcnt > 1,
11638a4dc40fSJohn Baldwin 	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
11643745c395SJulian Elischer 	kproc_exit(0);
1165fd3bf775SJohn Dyson }
11662244ea07SJohn Dyson 
11672244ea07SJohn Dyson /*
1168bfbbc4aaSJason Evans  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1169bfbbc4aaSJason Evans  * AIO daemon modifies its environment itself.
11702244ea07SJohn Dyson  */
11712244ea07SJohn Dyson static int
11721ce91824SDavid Xu aio_newproc(int *start)
1173fd3bf775SJohn Dyson {
11742244ea07SJohn Dyson 	int error;
1175c9a970a7SAlan Cox 	struct proc *p;
11761ce91824SDavid Xu 	int id;
11772244ea07SJohn Dyson 
11781ce91824SDavid Xu 	id = alloc_unr(aiod_unr);
11793745c395SJulian Elischer 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
11801ce91824SDavid Xu 		RFNOWAIT, 0, "aiod%d", id);
11811ce91824SDavid Xu 	if (error == 0) {
1182fd3bf775SJohn Dyson 		/*
11831ce91824SDavid Xu 		 * Wait until daemon is started.
1184fd3bf775SJohn Dyson 		 */
11851ce91824SDavid Xu 		sema_wait(&aio_newproc_sem);
11861ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
118784af4da6SJohn Dyson 		num_aio_procs++;
11881ce91824SDavid Xu 		if (start != NULL)
11897f34b521SDavid Xu 			(*start)--;
11901ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
11911ce91824SDavid Xu 	} else {
11921ce91824SDavid Xu 		free_unr(aiod_unr, id);
11931ce91824SDavid Xu 	}
1194ac41f2efSAlfred Perlstein 	return (error);
11952244ea07SJohn Dyson }
11962244ea07SJohn Dyson 
11972244ea07SJohn Dyson /*
119872bce9ffSAlan Somers  * Try the high-performance, low-overhead bio method for eligible
119988ed460eSAlan Cox  * VCHR devices.  This method doesn't use an aio helper thread, and
120088ed460eSAlan Cox  * thus has very low overhead.
120188ed460eSAlan Cox  *
1202a9bf5e37SDavid Xu  * Assumes that the caller, aio_aqueue(), has incremented the file
120388ed460eSAlan Cox  * structure's reference count, preventing its deallocation for the
120488ed460eSAlan Cox  * duration of this call.
1205fd3bf775SJohn Dyson  */
120688ed460eSAlan Cox static int
120772bce9ffSAlan Somers aio_qbio(struct proc *p, struct kaiocb *job)
1208fd3bf775SJohn Dyson {
1209fd3bf775SJohn Dyson 	struct aiocb *cb;
1210fd3bf775SJohn Dyson 	struct file *fp;
1211f743d981SAlexander Motin 	struct bio *bp;
1212f743d981SAlexander Motin 	struct buf *pbuf;
1213fd3bf775SJohn Dyson 	struct vnode *vp;
1214f3215a60SKonstantin Belousov 	struct cdevsw *csw;
1215f3215a60SKonstantin Belousov 	struct cdev *dev;
1216fd3bf775SJohn Dyson 	struct kaioinfo *ki;
12174d805eacSJohn Baldwin 	int error, ref, poff;
1218f743d981SAlexander Motin 	vm_prot_t prot;
1219fd3bf775SJohn Dyson 
12205652770dSJohn Baldwin 	cb = &job->uaiocb;
12215652770dSJohn Baldwin 	fp = job->fd_file;
1222fd3bf775SJohn Dyson 
1223f54c5606SJohn Baldwin 	if (!(cb->aio_lio_opcode == LIO_WRITE ||
1224f54c5606SJohn Baldwin 	    cb->aio_lio_opcode == LIO_READ))
1225f54c5606SJohn Baldwin 		return (-1);
12266160e12cSGleb Smirnoff 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1227008626c3SPoul-Henning Kamp 		return (-1);
1228fd3bf775SJohn Dyson 
12293b6d9652SPoul-Henning Kamp 	vp = fp->f_vnode;
1230f743d981SAlexander Motin 	if (vp->v_type != VCHR)
1231f582ac06SBrian Feldman 		return (-1);
1232ad8de0f2SDavid Xu 	if (vp->v_bufobj.bo_bsize == 0)
1233ad8de0f2SDavid Xu 		return (-1);
12345d9d81e7SPoul-Henning Kamp 	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1235008626c3SPoul-Henning Kamp 		return (-1);
1236fd3bf775SJohn Dyson 
1237f3215a60SKonstantin Belousov 	ref = 0;
1238f3215a60SKonstantin Belousov 	csw = devvn_refthread(vp, &dev, &ref);
1239f3215a60SKonstantin Belousov 	if (csw == NULL)
1240f3215a60SKonstantin Belousov 		return (ENXIO);
1241f743d981SAlexander Motin 
1242f743d981SAlexander Motin 	if ((csw->d_flags & D_DISK) == 0) {
1243f743d981SAlexander Motin 		error = -1;
1244f743d981SAlexander Motin 		goto unref;
1245f743d981SAlexander Motin 	}
1246f3215a60SKonstantin Belousov 	if (cb->aio_nbytes > dev->si_iosize_max) {
1247f3215a60SKonstantin Belousov 		error = -1;
1248f3215a60SKonstantin Belousov 		goto unref;
1249f3215a60SKonstantin Belousov 	}
1250f3215a60SKonstantin Belousov 
1251f743d981SAlexander Motin 	ki = p->p_aioinfo;
1252f743d981SAlexander Motin 	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
12534d805eacSJohn Baldwin 	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
1254f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS) {
1255f743d981SAlexander Motin 			error = -1;
1256f743d981SAlexander Motin 			goto unref;
1257f743d981SAlexander Motin 		}
12584d805eacSJohn Baldwin 
12594d805eacSJohn Baldwin 		pbuf = NULL;
1260f743d981SAlexander Motin 	} else {
1261f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS - poff) {
1262f743d981SAlexander Motin 			error = -1;
1263f743d981SAlexander Motin 			goto unref;
1264f743d981SAlexander Motin 		}
126586bbef43SJohn Baldwin 		if (ki->kaio_buffer_count >= max_buf_aio) {
1266f54c5606SJohn Baldwin 			error = EAGAIN;
1267f743d981SAlexander Motin 			goto unref;
1268f743d981SAlexander Motin 		}
12694d805eacSJohn Baldwin 
1270756a5412SGleb Smirnoff 		job->pbuf = pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
1271f743d981SAlexander Motin 		BUF_KERNPROC(pbuf);
1272759ccccaSDavid Xu 		AIO_LOCK(ki);
12731ce91824SDavid Xu 		ki->kaio_buffer_count++;
1274759ccccaSDavid Xu 		AIO_UNLOCK(ki);
12754d805eacSJohn Baldwin 	}
12764d805eacSJohn Baldwin 	job->bp = bp = g_alloc_bio();
12771ce91824SDavid Xu 
1278f743d981SAlexander Motin 	bp->bio_length = cb->aio_nbytes;
1279f743d981SAlexander Motin 	bp->bio_bcount = cb->aio_nbytes;
128072bce9ffSAlan Somers 	bp->bio_done = aio_biowakeup;
1281f743d981SAlexander Motin 	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
1282f743d981SAlexander Motin 	bp->bio_offset = cb->aio_offset;
1283f743d981SAlexander Motin 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1284f743d981SAlexander Motin 	bp->bio_dev = dev;
12855652770dSJohn Baldwin 	bp->bio_caller1 = (void *)job;
1286f743d981SAlexander Motin 
1287f743d981SAlexander Motin 	prot = VM_PROT_READ;
1288f743d981SAlexander Motin 	if (cb->aio_lio_opcode == LIO_READ)
1289f743d981SAlexander Motin 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
12904d805eacSJohn Baldwin 	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
12915652770dSJohn Baldwin 	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
12924d805eacSJohn Baldwin 	    nitems(job->pages));
12934d805eacSJohn Baldwin 	if (job->npages < 0) {
1294f743d981SAlexander Motin 		error = EFAULT;
1295f743d981SAlexander Motin 		goto doerror;
1296f743d981SAlexander Motin 	}
12974d805eacSJohn Baldwin 	if (pbuf != NULL) {
1298f743d981SAlexander Motin 		pmap_qenter((vm_offset_t)pbuf->b_data,
12995652770dSJohn Baldwin 		    job->pages, job->npages);
1300f743d981SAlexander Motin 		bp->bio_data = pbuf->b_data + poff;
13014d805eacSJohn Baldwin 		atomic_add_int(&num_buf_aio, 1);
1302f743d981SAlexander Motin 	} else {
13035652770dSJohn Baldwin 		bp->bio_ma = job->pages;
13045652770dSJohn Baldwin 		bp->bio_ma_n = job->npages;
1305f743d981SAlexander Motin 		bp->bio_ma_offset = poff;
1306f743d981SAlexander Motin 		bp->bio_data = unmapped_buf;
1307f743d981SAlexander Motin 		bp->bio_flags |= BIO_UNMAPPED;
13088091e52bSJohn Baldwin 		atomic_add_int(&num_unmapped_aio, 1);
1309f743d981SAlexander Motin 	}
1310f743d981SAlexander Motin 
1311bfbbc4aaSJason Evans 	/* Perform transfer. */
1312f743d981SAlexander Motin 	csw->d_strategy(bp);
1313f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1314ac41f2efSAlfred Perlstein 	return (0);
1315fd3bf775SJohn Dyson 
1316fd3bf775SJohn Dyson doerror:
13174d805eacSJohn Baldwin 	if (pbuf != NULL) {
1318759ccccaSDavid Xu 		AIO_LOCK(ki);
1319fd3bf775SJohn Dyson 		ki->kaio_buffer_count--;
1320759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1321756a5412SGleb Smirnoff 		uma_zfree(pbuf_zone, pbuf);
13225652770dSJohn Baldwin 		job->pbuf = NULL;
1323f743d981SAlexander Motin 	}
1324f743d981SAlexander Motin 	g_destroy_bio(bp);
13255652770dSJohn Baldwin 	job->bp = NULL;
1326f3215a60SKonstantin Belousov unref:
1327f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1328fd3bf775SJohn Dyson 	return (error);
1329fd3bf775SJohn Dyson }
1330fd3bf775SJohn Dyson 
1331399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
13323858a1f4SJohn Baldwin static int
13333858a1f4SJohn Baldwin convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
13343858a1f4SJohn Baldwin {
13353858a1f4SJohn Baldwin 
13363858a1f4SJohn Baldwin 	/*
13373858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
13383858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
13393858a1f4SJohn Baldwin 	 */
13403858a1f4SJohn Baldwin 	nsig->sigev_notify = osig->sigev_notify;
13413858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
13423858a1f4SJohn Baldwin 	case SIGEV_NONE:
13433858a1f4SJohn Baldwin 		break;
13443858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
13453858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
13463858a1f4SJohn Baldwin 		break;
13473858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
13483858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
13493858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
13503858a1f4SJohn Baldwin 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
13513858a1f4SJohn Baldwin 		break;
13523858a1f4SJohn Baldwin 	default:
13533858a1f4SJohn Baldwin 		return (EINVAL);
13543858a1f4SJohn Baldwin 	}
13553858a1f4SJohn Baldwin 	return (0);
13563858a1f4SJohn Baldwin }
13573858a1f4SJohn Baldwin 
13583858a1f4SJohn Baldwin static int
13593858a1f4SJohn Baldwin aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
13603858a1f4SJohn Baldwin {
13613858a1f4SJohn Baldwin 	struct oaiocb *ojob;
13623858a1f4SJohn Baldwin 	int error;
13633858a1f4SJohn Baldwin 
13643858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
13653858a1f4SJohn Baldwin 	error = copyin(ujob, kjob, sizeof(struct oaiocb));
13663858a1f4SJohn Baldwin 	if (error)
13673858a1f4SJohn Baldwin 		return (error);
13683858a1f4SJohn Baldwin 	ojob = (struct oaiocb *)kjob;
13693858a1f4SJohn Baldwin 	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
13703858a1f4SJohn Baldwin }
1371399e8c17SJohn Baldwin #endif
13723858a1f4SJohn Baldwin 
13733858a1f4SJohn Baldwin static int
13743858a1f4SJohn Baldwin aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
13753858a1f4SJohn Baldwin {
13763858a1f4SJohn Baldwin 
13773858a1f4SJohn Baldwin 	return (copyin(ujob, kjob, sizeof(struct aiocb)));
13783858a1f4SJohn Baldwin }
13793858a1f4SJohn Baldwin 
13803858a1f4SJohn Baldwin static long
13813858a1f4SJohn Baldwin aiocb_fetch_status(struct aiocb *ujob)
13823858a1f4SJohn Baldwin {
13833858a1f4SJohn Baldwin 
13843858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.status));
13853858a1f4SJohn Baldwin }
13863858a1f4SJohn Baldwin 
13873858a1f4SJohn Baldwin static long
13883858a1f4SJohn Baldwin aiocb_fetch_error(struct aiocb *ujob)
13893858a1f4SJohn Baldwin {
13903858a1f4SJohn Baldwin 
13913858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.error));
13923858a1f4SJohn Baldwin }
13933858a1f4SJohn Baldwin 
13943858a1f4SJohn Baldwin static int
13953858a1f4SJohn Baldwin aiocb_store_status(struct aiocb *ujob, long status)
13963858a1f4SJohn Baldwin {
13973858a1f4SJohn Baldwin 
13983858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.status, status));
13993858a1f4SJohn Baldwin }
14003858a1f4SJohn Baldwin 
14013858a1f4SJohn Baldwin static int
14023858a1f4SJohn Baldwin aiocb_store_error(struct aiocb *ujob, long error)
14033858a1f4SJohn Baldwin {
14043858a1f4SJohn Baldwin 
14053858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.error, error));
14063858a1f4SJohn Baldwin }
14073858a1f4SJohn Baldwin 
14083858a1f4SJohn Baldwin static int
14093858a1f4SJohn Baldwin aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
14103858a1f4SJohn Baldwin {
14113858a1f4SJohn Baldwin 
14123858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
14133858a1f4SJohn Baldwin }
14143858a1f4SJohn Baldwin 
14153858a1f4SJohn Baldwin static int
14163858a1f4SJohn Baldwin aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
14173858a1f4SJohn Baldwin {
14183858a1f4SJohn Baldwin 
14193858a1f4SJohn Baldwin 	return (suword(ujobp, (long)ujob));
14203858a1f4SJohn Baldwin }
14213858a1f4SJohn Baldwin 
14223858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops = {
1423849aef49SAndrew Turner 	.aio_copyin = aiocb_copyin,
14243858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14253858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14263858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14273858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14283858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14293858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14303858a1f4SJohn Baldwin };
14313858a1f4SJohn Baldwin 
1432399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
14333858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops_osigevent = {
1434849aef49SAndrew Turner 	.aio_copyin = aiocb_copyin_old_sigevent,
14353858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14363858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14373858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14383858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14393858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14403858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14413858a1f4SJohn Baldwin };
1442399e8c17SJohn Baldwin #endif
14433858a1f4SJohn Baldwin 
1444bfbbc4aaSJason Evans /*
144572bce9ffSAlan Somers  * Queue a new AIO request.  Choosing either the threaded or direct bio VCHR
1446bfbbc4aaSJason Evans  * technique is done in this code.
14472244ea07SJohn Dyson  */
14486a1162d4SAlexander Leidinger int
14495652770dSJohn Baldwin aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
14503858a1f4SJohn Baldwin     int type, struct aiocb_ops *ops)
1451fd3bf775SJohn Dyson {
1452b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
14532244ea07SJohn Dyson 	struct file *fp;
1454f3215338SJohn Baldwin 	struct kaiocb *job;
14552244ea07SJohn Dyson 	struct kaioinfo *ki;
1456c6fa9f78SAlan Cox 	struct kevent kev;
14571ce91824SDavid Xu 	int opcode;
14581ce91824SDavid Xu 	int error;
14594db71d27SJohn-Mark Gurney 	int fd, kqfd;
14601ce91824SDavid Xu 	int jid;
1461fde80935SDavid Xu 	u_short evflags;
14622244ea07SJohn Dyson 
1463a9bf5e37SDavid Xu 	if (p->p_aioinfo == NULL)
1464a9bf5e37SDavid Xu 		aio_init_aioinfo(p);
1465a9bf5e37SDavid Xu 
14661ce91824SDavid Xu 	ki = p->p_aioinfo;
14671ce91824SDavid Xu 
14685652770dSJohn Baldwin 	ops->store_status(ujob, -1);
14695652770dSJohn Baldwin 	ops->store_error(ujob, 0);
14705652770dSJohn Baldwin 	ops->store_kernelinfo(ujob, -1);
1471a9bf5e37SDavid Xu 
1472a9bf5e37SDavid Xu 	if (num_queue_count >= max_queue_count ||
147386bbef43SJohn Baldwin 	    ki->kaio_count >= max_aio_queue_per_proc) {
14745652770dSJohn Baldwin 		ops->store_error(ujob, EAGAIN);
1475a9bf5e37SDavid Xu 		return (EAGAIN);
1476a9bf5e37SDavid Xu 	}
1477a9bf5e37SDavid Xu 
14785652770dSJohn Baldwin 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
14795652770dSJohn Baldwin 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1480fd3bf775SJohn Dyson 
1481849aef49SAndrew Turner 	error = ops->aio_copyin(ujob, &job->uaiocb);
14822244ea07SJohn Dyson 	if (error) {
14835652770dSJohn Baldwin 		ops->store_error(ujob, error);
14845652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1485ac41f2efSAlfred Perlstein 		return (error);
14862244ea07SJohn Dyson 	}
148768d71118SDavid Xu 
1488bb430bc7SJohn Baldwin 	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
14895652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1490434ea137SGleb Smirnoff 		return (EINVAL);
1491434ea137SGleb Smirnoff 	}
1492434ea137SGleb Smirnoff 
14935652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
14945652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
14955652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
14965652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
14975652770dSJohn Baldwin 		ops->store_error(ujob, EINVAL);
14985652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
149968d71118SDavid Xu 		return (EINVAL);
150068d71118SDavid Xu 	}
150168d71118SDavid Xu 
15025652770dSJohn Baldwin 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
15035652770dSJohn Baldwin 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
15045652770dSJohn Baldwin 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
15055652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1506ac41f2efSAlfred Perlstein 		return (EINVAL);
15072f3cf918SAlfred Perlstein 	}
15082244ea07SJohn Dyson 
15095652770dSJohn Baldwin 	ksiginfo_init(&job->ksi);
15104c0fb2cfSDavid Xu 
1511bfbbc4aaSJason Evans 	/* Save userspace address of the job info. */
15125652770dSJohn Baldwin 	job->ujob = ujob;
151311783b14SJohn Dyson 
1514bfbbc4aaSJason Evans 	/* Get the opcode. */
1515bfbbc4aaSJason Evans 	if (type != LIO_NOP)
15165652770dSJohn Baldwin 		job->uaiocb.aio_lio_opcode = type;
15175652770dSJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
15182244ea07SJohn Dyson 
1519a9d2f8d8SRobert Watson 	/*
1520a9d2f8d8SRobert Watson 	 * Validate the opcode and fetch the file object for the specified
1521a9d2f8d8SRobert Watson 	 * file descriptor.
1522a9d2f8d8SRobert Watson 	 *
1523a9d2f8d8SRobert Watson 	 * XXXRW: Moved the opcode validation up here so that we don't
1524a9d2f8d8SRobert Watson 	 * retrieve a file descriptor without knowing what the capabiltity
1525a9d2f8d8SRobert Watson 	 * should be.
1526a9d2f8d8SRobert Watson 	 */
15275652770dSJohn Baldwin 	fd = job->uaiocb.aio_fildes;
15282a522eb9SJohn Baldwin 	switch (opcode) {
15292a522eb9SJohn Baldwin 	case LIO_WRITE:
1530cbd92ce6SMatt Macy 		error = fget_write(td, fd, &cap_pwrite_rights, &fp);
15312a522eb9SJohn Baldwin 		break;
15322a522eb9SJohn Baldwin 	case LIO_READ:
1533cbd92ce6SMatt Macy 		error = fget_read(td, fd, &cap_pread_rights, &fp);
1534a9d2f8d8SRobert Watson 		break;
1535a9d2f8d8SRobert Watson 	case LIO_SYNC:
1536cbd92ce6SMatt Macy 		error = fget(td, fd, &cap_fsync_rights, &fp);
1537a9d2f8d8SRobert Watson 		break;
15386160e12cSGleb Smirnoff 	case LIO_MLOCK:
15396160e12cSGleb Smirnoff 		fp = NULL;
15406160e12cSGleb Smirnoff 		break;
1541a9d2f8d8SRobert Watson 	case LIO_NOP:
1542cbd92ce6SMatt Macy 		error = fget(td, fd, &cap_no_rights, &fp);
15432a522eb9SJohn Baldwin 		break;
15442a522eb9SJohn Baldwin 	default:
1545a9d2f8d8SRobert Watson 		error = EINVAL;
15462a522eb9SJohn Baldwin 	}
15472a522eb9SJohn Baldwin 	if (error) {
15485652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
15495652770dSJohn Baldwin 		ops->store_error(ujob, error);
1550af56abaaSJohn Baldwin 		return (error);
15512244ea07SJohn Dyson 	}
155299eee864SDavid Xu 
155399eee864SDavid Xu 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
155499eee864SDavid Xu 		error = EINVAL;
155599eee864SDavid Xu 		goto aqueue_fail;
155699eee864SDavid Xu 	}
15572244ea07SJohn Dyson 
1558711dba24SKonstantin Belousov 	if ((opcode == LIO_READ || opcode == LIO_WRITE) &&
1559711dba24SKonstantin Belousov 	    job->uaiocb.aio_offset < 0 &&
1560711dba24SKonstantin Belousov 	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) {
1561ae124fc4SAlan Cox 		error = EINVAL;
1562ae124fc4SAlan Cox 		goto aqueue_fail;
15632244ea07SJohn Dyson 	}
15641ce91824SDavid Xu 
15655652770dSJohn Baldwin 	job->fd_file = fp;
15661ce91824SDavid Xu 
156799eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
156899eee864SDavid Xu 	jid = jobrefid++;
15695652770dSJohn Baldwin 	job->seqno = jobseqno++;
157099eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
15715652770dSJohn Baldwin 	error = ops->store_kernelinfo(ujob, jid);
15721ce91824SDavid Xu 	if (error) {
15731ce91824SDavid Xu 		error = EINVAL;
15741ce91824SDavid Xu 		goto aqueue_fail;
15751ce91824SDavid Xu 	}
15765652770dSJohn Baldwin 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
15772244ea07SJohn Dyson 
15782244ea07SJohn Dyson 	if (opcode == LIO_NOP) {
1579a5c0b1c0SAlan Cox 		fdrop(fp, td);
15805652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1581ac41f2efSAlfred Perlstein 		return (0);
15822244ea07SJohn Dyson 	}
15832244ea07SJohn Dyson 
15845652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1585cb679c38SJonathan Lemon 		goto no_kqueue;
15865652770dSJohn Baldwin 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1587fde80935SDavid Xu 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1588fde80935SDavid Xu 		error = EINVAL;
1589fde80935SDavid Xu 		goto aqueue_fail;
1590fde80935SDavid Xu 	}
15915652770dSJohn Baldwin 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
159236c4960eSMark Johnston 	memset(&kev, 0, sizeof(kev));
15935652770dSJohn Baldwin 	kev.ident = (uintptr_t)job->ujob;
1594cb679c38SJonathan Lemon 	kev.filter = EVFILT_AIO;
1595fde80935SDavid Xu 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
15965652770dSJohn Baldwin 	kev.data = (intptr_t)job;
15975652770dSJohn Baldwin 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1598792843c3SMark Johnston 	error = kqfd_register(kqfd, &kev, td, M_WAITOK);
1599f3215338SJohn Baldwin 	if (error)
1600f3215338SJohn Baldwin 		goto aqueue_fail;
1601f3215338SJohn Baldwin 
1602cb679c38SJonathan Lemon no_kqueue:
1603cb679c38SJonathan Lemon 
16045652770dSJohn Baldwin 	ops->store_error(ujob, EINPROGRESS);
16055652770dSJohn Baldwin 	job->uaiocb._aiocb_private.error = EINPROGRESS;
16065652770dSJohn Baldwin 	job->userproc = p;
16075652770dSJohn Baldwin 	job->cred = crhold(td->td_ucred);
1608f3215338SJohn Baldwin 	job->jobflags = KAIOCB_QUEUEING;
16095652770dSJohn Baldwin 	job->lio = lj;
16102244ea07SJohn Dyson 
1611f3215338SJohn Baldwin 	if (opcode == LIO_MLOCK) {
1612f3215338SJohn Baldwin 		aio_schedule(job, aio_process_mlock);
1613f3215338SJohn Baldwin 		error = 0;
1614f3215338SJohn Baldwin 	} else if (fp->f_ops->fo_aio_queue == NULL)
1615f3215338SJohn Baldwin 		error = aio_queue_file(fp, job);
1616f3215338SJohn Baldwin 	else
1617f3215338SJohn Baldwin 		error = fo_aio_queue(fp, job);
1618f3215338SJohn Baldwin 	if (error)
1619f3215338SJohn Baldwin 		goto aqueue_fail;
1620f3215338SJohn Baldwin 
1621f3215338SJohn Baldwin 	AIO_LOCK(ki);
1622f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_QUEUEING;
1623f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1624f3215338SJohn Baldwin 	ki->kaio_count++;
1625f3215338SJohn Baldwin 	if (lj)
1626f3215338SJohn Baldwin 		lj->lioj_count++;
1627f3215338SJohn Baldwin 	atomic_add_int(&num_queue_count, 1);
1628f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
1629f3215338SJohn Baldwin 		/*
1630f3215338SJohn Baldwin 		 * The queue callback completed the request synchronously.
1631f3215338SJohn Baldwin 		 * The bulk of the completion is deferred in that case
1632f3215338SJohn Baldwin 		 * until this point.
1633f3215338SJohn Baldwin 		 */
1634f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
1635f3215338SJohn Baldwin 	} else
1636f3215338SJohn Baldwin 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1637f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1638f3215338SJohn Baldwin 	return (0);
1639f3215338SJohn Baldwin 
1640f3215338SJohn Baldwin aqueue_fail:
1641f3215338SJohn Baldwin 	knlist_delete(&job->klist, curthread, 0);
1642f3215338SJohn Baldwin 	if (fp)
1643f3215338SJohn Baldwin 		fdrop(fp, td);
1644f3215338SJohn Baldwin 	uma_zfree(aiocb_zone, job);
1645f3215338SJohn Baldwin 	ops->store_error(ujob, error);
1646f3215338SJohn Baldwin 	return (error);
1647f3215338SJohn Baldwin }
1648f3215338SJohn Baldwin 
1649f3215338SJohn Baldwin static void
1650f3215338SJohn Baldwin aio_cancel_daemon_job(struct kaiocb *job)
1651f3215338SJohn Baldwin {
1652f3215338SJohn Baldwin 
1653f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1654f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1655f3215338SJohn Baldwin 		TAILQ_REMOVE(&aio_jobs, job, list);
1656f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1657f3215338SJohn Baldwin 	aio_cancel(job);
1658f3215338SJohn Baldwin }
1659f3215338SJohn Baldwin 
1660f3215338SJohn Baldwin void
1661f3215338SJohn Baldwin aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1662f3215338SJohn Baldwin {
1663f3215338SJohn Baldwin 
1664f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1665f3215338SJohn Baldwin 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1666f3215338SJohn Baldwin 		mtx_unlock(&aio_job_mtx);
1667f3215338SJohn Baldwin 		aio_cancel(job);
1668f3215338SJohn Baldwin 		return;
1669f3215338SJohn Baldwin 	}
1670f3215338SJohn Baldwin 	job->handle_fn = func;
1671f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1672f3215338SJohn Baldwin 	aio_kick_nowait(job->userproc);
1673f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1674f3215338SJohn Baldwin }
1675f3215338SJohn Baldwin 
1676f3215338SJohn Baldwin static void
1677f3215338SJohn Baldwin aio_cancel_sync(struct kaiocb *job)
1678f3215338SJohn Baldwin {
1679f3215338SJohn Baldwin 	struct kaioinfo *ki;
1680f3215338SJohn Baldwin 
1681f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1682005ce8e4SJohn Baldwin 	AIO_LOCK(ki);
1683f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1684f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1685005ce8e4SJohn Baldwin 	AIO_UNLOCK(ki);
1686f3215338SJohn Baldwin 	aio_cancel(job);
1687f3215338SJohn Baldwin }
1688f3215338SJohn Baldwin 
1689f3215338SJohn Baldwin int
1690f3215338SJohn Baldwin aio_queue_file(struct file *fp, struct kaiocb *job)
1691f3215338SJohn Baldwin {
1692f3215338SJohn Baldwin 	struct kaioinfo *ki;
1693f3215338SJohn Baldwin 	struct kaiocb *job2;
16949fe297bbSKonstantin Belousov 	struct vnode *vp;
16959fe297bbSKonstantin Belousov 	struct mount *mp;
1696f54c5606SJohn Baldwin 	int error;
16979fe297bbSKonstantin Belousov 	bool safe;
1698f3215338SJohn Baldwin 
1699f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
170072bce9ffSAlan Somers 	error = aio_qbio(job->userproc, job);
1701f54c5606SJohn Baldwin 	if (error >= 0)
1702f54c5606SJohn Baldwin 		return (error);
17039fe297bbSKonstantin Belousov 	safe = false;
17049fe297bbSKonstantin Belousov 	if (fp->f_type == DTYPE_VNODE) {
17059fe297bbSKonstantin Belousov 		vp = fp->f_vnode;
17069fe297bbSKonstantin Belousov 		if (vp->v_type == VREG || vp->v_type == VDIR) {
17079fe297bbSKonstantin Belousov 			mp = fp->f_vnode->v_mount;
17089fe297bbSKonstantin Belousov 			if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
17099fe297bbSKonstantin Belousov 				safe = true;
17109fe297bbSKonstantin Belousov 		}
17119fe297bbSKonstantin Belousov 	}
17129c20dc99SJohn Baldwin 	if (!(safe || enable_aio_unsafe)) {
17139c20dc99SJohn Baldwin 		counted_warning(&unsafe_warningcnt,
17149c20dc99SJohn Baldwin 		    "is attempting to use unsafe AIO requests");
1715f3215338SJohn Baldwin 		return (EOPNOTSUPP);
17169c20dc99SJohn Baldwin 	}
171784af4da6SJohn Dyson 
17187e409184SJohn Baldwin 	switch (job->uaiocb.aio_lio_opcode) {
17197e409184SJohn Baldwin 	case LIO_READ:
17207e409184SJohn Baldwin 	case LIO_WRITE:
17217e409184SJohn Baldwin 		aio_schedule(job, aio_process_rw);
17227e409184SJohn Baldwin 		error = 0;
17237e409184SJohn Baldwin 		break;
17247e409184SJohn Baldwin 	case LIO_SYNC:
1725f3215338SJohn Baldwin 		AIO_LOCK(ki);
17265652770dSJohn Baldwin 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
17275652770dSJohn Baldwin 			if (job2->fd_file == job->fd_file &&
17285652770dSJohn Baldwin 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
17295652770dSJohn Baldwin 			    job2->seqno < job->seqno) {
17305652770dSJohn Baldwin 				job2->jobflags |= KAIOCB_CHECKSYNC;
17315652770dSJohn Baldwin 				job->pending++;
1732dbbccfe9SDavid Xu 			}
1733dbbccfe9SDavid Xu 		}
17345652770dSJohn Baldwin 		if (job->pending != 0) {
1735005ce8e4SJohn Baldwin 			if (!aio_set_cancel_function_locked(job,
1736005ce8e4SJohn Baldwin 				aio_cancel_sync)) {
1737f3215338SJohn Baldwin 				AIO_UNLOCK(ki);
1738f3215338SJohn Baldwin 				aio_cancel(job);
1739f3215338SJohn Baldwin 				return (0);
1740f3215338SJohn Baldwin 			}
17415652770dSJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1742759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1743f3215338SJohn Baldwin 			return (0);
1744dbbccfe9SDavid Xu 		}
1745759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1746f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
1747f3215338SJohn Baldwin 		error = 0;
1748f3215338SJohn Baldwin 		break;
1749f3215338SJohn Baldwin 	default:
1750f3215338SJohn Baldwin 		error = EINVAL;
1751f3215338SJohn Baldwin 	}
175299eee864SDavid Xu 	return (error);
175399eee864SDavid Xu }
175499eee864SDavid Xu 
175599eee864SDavid Xu static void
175699eee864SDavid Xu aio_kick_nowait(struct proc *userp)
175799eee864SDavid Xu {
175899eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
175939314b7dSJohn Baldwin 	struct aioproc *aiop;
176099eee864SDavid Xu 
176199eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
176299eee864SDavid Xu 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
176399eee864SDavid Xu 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
176439314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
176539314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17660dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
176786bbef43SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1768c85650caSJohn Baldwin 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
176999eee864SDavid Xu 	}
177099eee864SDavid Xu }
177199eee864SDavid Xu 
1772dbbccfe9SDavid Xu static int
177399eee864SDavid Xu aio_kick(struct proc *userp)
177499eee864SDavid Xu {
177599eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
177639314b7dSJohn Baldwin 	struct aioproc *aiop;
1777dbbccfe9SDavid Xu 	int error, ret = 0;
177899eee864SDavid Xu 
177999eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
178099eee864SDavid Xu retryproc:
1781d254af07SMatthew Dillon 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
17822244ea07SJohn Dyson 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
178339314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
178439314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17850dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
178686bbef43SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) {
1787fd3bf775SJohn Dyson 		num_aio_resv_start++;
17881ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
17891ce91824SDavid Xu 		error = aio_newproc(&num_aio_resv_start);
17901ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
17911ce91824SDavid Xu 		if (error) {
179284af4da6SJohn Dyson 			num_aio_resv_start--;
17932244ea07SJohn Dyson 			goto retryproc;
1794fd3bf775SJohn Dyson 		}
1795dbbccfe9SDavid Xu 	} else {
1796dbbccfe9SDavid Xu 		ret = -1;
17971ce91824SDavid Xu 	}
1798dbbccfe9SDavid Xu 	return (ret);
179999eee864SDavid Xu }
18001ce91824SDavid Xu 
180199eee864SDavid Xu static void
180299eee864SDavid Xu aio_kick_helper(void *context, int pending)
180399eee864SDavid Xu {
180499eee864SDavid Xu 	struct proc *userp = context;
180599eee864SDavid Xu 
180699eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
1807dbbccfe9SDavid Xu 	while (--pending >= 0) {
1808dbbccfe9SDavid Xu 		if (aio_kick(userp))
1809dbbccfe9SDavid Xu 			break;
1810dbbccfe9SDavid Xu 	}
181199eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
18122244ea07SJohn Dyson }
18132244ea07SJohn Dyson 
1814fd3bf775SJohn Dyson /*
1815bfbbc4aaSJason Evans  * Support the aio_return system call, as a side-effect, kernel resources are
1816bfbbc4aaSJason Evans  * released.
18172244ea07SJohn Dyson  */
18183858a1f4SJohn Baldwin static int
18195652770dSJohn Baldwin kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1820fd3bf775SJohn Dyson {
1821b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18225652770dSJohn Baldwin 	struct kaiocb *job;
18232244ea07SJohn Dyson 	struct kaioinfo *ki;
1824bb430bc7SJohn Baldwin 	long status, error;
18252244ea07SJohn Dyson 
1826c0bf5caaSAlan Cox 	ki = p->p_aioinfo;
1827c0bf5caaSAlan Cox 	if (ki == NULL)
1828ac41f2efSAlfred Perlstein 		return (EINVAL);
1829759ccccaSDavid Xu 	AIO_LOCK(ki);
18305652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
18315652770dSJohn Baldwin 		if (job->ujob == ujob)
1832c0bf5caaSAlan Cox 			break;
1833c0bf5caaSAlan Cox 	}
18345652770dSJohn Baldwin 	if (job != NULL) {
1835f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
18365652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
18375652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
18381ce91824SDavid Xu 		td->td_retval[0] = status;
1839b1012d80SJohn Baldwin 		td->td_ru.ru_oublock += job->outblock;
1840b1012d80SJohn Baldwin 		td->td_ru.ru_inblock += job->inblock;
1841b1012d80SJohn Baldwin 		td->td_ru.ru_msgsnd += job->msgsnd;
1842b1012d80SJohn Baldwin 		td->td_ru.ru_msgrcv += job->msgrcv;
18435652770dSJohn Baldwin 		aio_free_entry(job);
1844759ccccaSDavid Xu 		AIO_UNLOCK(ki);
18455652770dSJohn Baldwin 		ops->store_error(ujob, error);
18465652770dSJohn Baldwin 		ops->store_status(ujob, status);
184755a122bfSDavid Xu 	} else {
18481ce91824SDavid Xu 		error = EINVAL;
1849759ccccaSDavid Xu 		AIO_UNLOCK(ki);
185055a122bfSDavid Xu 	}
18511ce91824SDavid Xu 	return (error);
18522244ea07SJohn Dyson }
18532244ea07SJohn Dyson 
18543858a1f4SJohn Baldwin int
18558451d0ddSKip Macy sys_aio_return(struct thread *td, struct aio_return_args *uap)
18563858a1f4SJohn Baldwin {
18573858a1f4SJohn Baldwin 
18583858a1f4SJohn Baldwin 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
18593858a1f4SJohn Baldwin }
18603858a1f4SJohn Baldwin 
18612244ea07SJohn Dyson /*
1862bfbbc4aaSJason Evans  * Allow a process to wakeup when any of the I/O requests are completed.
18632244ea07SJohn Dyson  */
18643858a1f4SJohn Baldwin static int
18653858a1f4SJohn Baldwin kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
18663858a1f4SJohn Baldwin     struct timespec *ts)
1867fd3bf775SJohn Dyson {
1868b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18694a11ca4eSPoul-Henning Kamp 	struct timeval atv;
18702244ea07SJohn Dyson 	struct kaioinfo *ki;
18715652770dSJohn Baldwin 	struct kaiocb *firstjob, *job;
18723858a1f4SJohn Baldwin 	int error, i, timo;
18732244ea07SJohn Dyson 
18742244ea07SJohn Dyson 	timo = 0;
18753858a1f4SJohn Baldwin 	if (ts) {
18763858a1f4SJohn Baldwin 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
18772244ea07SJohn Dyson 			return (EINVAL);
18782244ea07SJohn Dyson 
18793858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
18802244ea07SJohn Dyson 		if (itimerfix(&atv))
18812244ea07SJohn Dyson 			return (EINVAL);
1882227ee8a1SPoul-Henning Kamp 		timo = tvtohz(&atv);
18832244ea07SJohn Dyson 	}
18842244ea07SJohn Dyson 
18852244ea07SJohn Dyson 	ki = p->p_aioinfo;
18862244ea07SJohn Dyson 	if (ki == NULL)
1887ac41f2efSAlfred Perlstein 		return (EAGAIN);
18882244ea07SJohn Dyson 
18893858a1f4SJohn Baldwin 	if (njoblist == 0)
1890ac41f2efSAlfred Perlstein 		return (0);
18912244ea07SJohn Dyson 
1892759ccccaSDavid Xu 	AIO_LOCK(ki);
18931ce91824SDavid Xu 	for (;;) {
18945652770dSJohn Baldwin 		firstjob = NULL;
18951ce91824SDavid Xu 		error = 0;
18965652770dSJohn Baldwin 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
189784af4da6SJohn Dyson 			for (i = 0; i < njoblist; i++) {
18985652770dSJohn Baldwin 				if (job->ujob == ujoblist[i]) {
18995652770dSJohn Baldwin 					if (firstjob == NULL)
19005652770dSJohn Baldwin 						firstjob = job;
1901f3215338SJohn Baldwin 					if (job->jobflags & KAIOCB_FINISHED)
19021ce91824SDavid Xu 						goto RETURN;
190384af4da6SJohn Dyson 				}
190484af4da6SJohn Dyson 			}
190584af4da6SJohn Dyson 		}
19061ce91824SDavid Xu 		/* All tasks were finished. */
19075652770dSJohn Baldwin 		if (firstjob == NULL)
19081ce91824SDavid Xu 			break;
19092244ea07SJohn Dyson 
1910fd3bf775SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
1911759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
19121ce91824SDavid Xu 		    "aiospn", timo);
19131ce91824SDavid Xu 		if (error == ERESTART)
19141ce91824SDavid Xu 			error = EINTR;
19151ce91824SDavid Xu 		if (error)
19161ce91824SDavid Xu 			break;
19172244ea07SJohn Dyson 	}
19181ce91824SDavid Xu RETURN:
1919759ccccaSDavid Xu 	AIO_UNLOCK(ki);
19203858a1f4SJohn Baldwin 	return (error);
19213858a1f4SJohn Baldwin }
19223858a1f4SJohn Baldwin 
19233858a1f4SJohn Baldwin int
19248451d0ddSKip Macy sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
19253858a1f4SJohn Baldwin {
19263858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
19273858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
19283858a1f4SJohn Baldwin 	int error;
19293858a1f4SJohn Baldwin 
1930913b9329SAlan Somers 	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
19313858a1f4SJohn Baldwin 		return (EINVAL);
19323858a1f4SJohn Baldwin 
19333858a1f4SJohn Baldwin 	if (uap->timeout) {
19343858a1f4SJohn Baldwin 		/* Get timespec struct. */
19353858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
19363858a1f4SJohn Baldwin 			return (error);
19373858a1f4SJohn Baldwin 		tsp = &ts;
19383858a1f4SJohn Baldwin 	} else
19393858a1f4SJohn Baldwin 		tsp = NULL;
19403858a1f4SJohn Baldwin 
1941913b9329SAlan Somers 	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
19423858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
19433858a1f4SJohn Baldwin 	if (error == 0)
19443858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
1945913b9329SAlan Somers 	free(ujoblist, M_AIOS);
19461ce91824SDavid Xu 	return (error);
19472244ea07SJohn Dyson }
1948ee877a35SJohn Dyson 
1949ee877a35SJohn Dyson /*
195072bce9ffSAlan Somers  * aio_cancel cancels any non-bio aio operations not currently in progress.
1951ee877a35SJohn Dyson  */
1952ee877a35SJohn Dyson int
19538451d0ddSKip Macy sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1954fd3bf775SJohn Dyson {
1955b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
1956dd85920aSJason Evans 	struct kaioinfo *ki;
19575652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
1958dd85920aSJason Evans 	struct file *fp;
19591ce91824SDavid Xu 	int error;
1960dd85920aSJason Evans 	int cancelled = 0;
1961dd85920aSJason Evans 	int notcancelled = 0;
1962dd85920aSJason Evans 	struct vnode *vp;
1963dd85920aSJason Evans 
19642a522eb9SJohn Baldwin 	/* Lookup file object. */
1965cbd92ce6SMatt Macy 	error = fget(td, uap->fd, &cap_no_rights, &fp);
19662a522eb9SJohn Baldwin 	if (error)
19672a522eb9SJohn Baldwin 		return (error);
1968dd85920aSJason Evans 
19691ce91824SDavid Xu 	ki = p->p_aioinfo;
19701ce91824SDavid Xu 	if (ki == NULL)
19711ce91824SDavid Xu 		goto done;
19721ce91824SDavid Xu 
1973dd85920aSJason Evans 	if (fp->f_type == DTYPE_VNODE) {
19743b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
1975dd85920aSJason Evans 		if (vn_isdisk(vp, &error)) {
19762a522eb9SJohn Baldwin 			fdrop(fp, td);
1977b40ce416SJulian Elischer 			td->td_retval[0] = AIO_NOTCANCELED;
1978ac41f2efSAlfred Perlstein 			return (0);
1979dd85920aSJason Evans 		}
1980dd85920aSJason Evans 	}
1981dd85920aSJason Evans 
1982759ccccaSDavid Xu 	AIO_LOCK(ki);
19835652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
19845652770dSJohn Baldwin 		if ((uap->fd == job->uaiocb.aio_fildes) &&
1985dd85920aSJason Evans 		    ((uap->aiocbp == NULL) ||
19865652770dSJohn Baldwin 		     (uap->aiocbp == job->ujob))) {
1987f3215338SJohn Baldwin 			if (aio_cancel_job(p, ki, job)) {
19881ce91824SDavid Xu 				cancelled++;
1989dd85920aSJason Evans 			} else {
1990dd85920aSJason Evans 				notcancelled++;
1991dd85920aSJason Evans 			}
19921aa4c324SDavid Xu 			if (uap->aiocbp != NULL)
19931aa4c324SDavid Xu 				break;
1994dd85920aSJason Evans 		}
1995dd85920aSJason Evans 	}
1996759ccccaSDavid Xu 	AIO_UNLOCK(ki);
19971ce91824SDavid Xu 
1998ad49abc0SAlan Cox done:
19992a522eb9SJohn Baldwin 	fdrop(fp, td);
20001aa4c324SDavid Xu 
20011aa4c324SDavid Xu 	if (uap->aiocbp != NULL) {
2002dd85920aSJason Evans 		if (cancelled) {
2003b40ce416SJulian Elischer 			td->td_retval[0] = AIO_CANCELED;
2004ac41f2efSAlfred Perlstein 			return (0);
2005dd85920aSJason Evans 		}
20061aa4c324SDavid Xu 	}
20071aa4c324SDavid Xu 
20081aa4c324SDavid Xu 	if (notcancelled) {
20091aa4c324SDavid Xu 		td->td_retval[0] = AIO_NOTCANCELED;
20101aa4c324SDavid Xu 		return (0);
20111aa4c324SDavid Xu 	}
20121aa4c324SDavid Xu 
20131aa4c324SDavid Xu 	if (cancelled) {
20141aa4c324SDavid Xu 		td->td_retval[0] = AIO_CANCELED;
20151aa4c324SDavid Xu 		return (0);
20161aa4c324SDavid Xu 	}
20171aa4c324SDavid Xu 
2018b40ce416SJulian Elischer 	td->td_retval[0] = AIO_ALLDONE;
2019dd85920aSJason Evans 
2020ac41f2efSAlfred Perlstein 	return (0);
2021ee877a35SJohn Dyson }
2022ee877a35SJohn Dyson 
2023ee877a35SJohn Dyson /*
2024873fbcd7SRobert Watson  * aio_error is implemented in the kernel level for compatibility purposes
2025873fbcd7SRobert Watson  * only.  For a user mode async implementation, it would be best to do it in
2026873fbcd7SRobert Watson  * a userland subroutine.
2027ee877a35SJohn Dyson  */
20283858a1f4SJohn Baldwin static int
20295652770dSJohn Baldwin kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2030fd3bf775SJohn Dyson {
2031b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
20325652770dSJohn Baldwin 	struct kaiocb *job;
20332244ea07SJohn Dyson 	struct kaioinfo *ki;
20341ce91824SDavid Xu 	int status;
2035ee877a35SJohn Dyson 
20362244ea07SJohn Dyson 	ki = p->p_aioinfo;
20371ce91824SDavid Xu 	if (ki == NULL) {
20381ce91824SDavid Xu 		td->td_retval[0] = EINVAL;
20391ce91824SDavid Xu 		return (0);
20401ce91824SDavid Xu 	}
2041ee877a35SJohn Dyson 
2042759ccccaSDavid Xu 	AIO_LOCK(ki);
20435652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
20445652770dSJohn Baldwin 		if (job->ujob == ujob) {
2045f3215338SJohn Baldwin 			if (job->jobflags & KAIOCB_FINISHED)
20461ce91824SDavid Xu 				td->td_retval[0] =
20475652770dSJohn Baldwin 					job->uaiocb._aiocb_private.error;
20481ce91824SDavid Xu 			else
2049b40ce416SJulian Elischer 				td->td_retval[0] = EINPROGRESS;
2050759ccccaSDavid Xu 			AIO_UNLOCK(ki);
2051ac41f2efSAlfred Perlstein 			return (0);
20522244ea07SJohn Dyson 		}
20532244ea07SJohn Dyson 	}
2054759ccccaSDavid Xu 	AIO_UNLOCK(ki);
205584af4da6SJohn Dyson 
20562244ea07SJohn Dyson 	/*
2057a9bf5e37SDavid Xu 	 * Hack for failure of aio_aqueue.
20582244ea07SJohn Dyson 	 */
20595652770dSJohn Baldwin 	status = ops->fetch_status(ujob);
20601ce91824SDavid Xu 	if (status == -1) {
20615652770dSJohn Baldwin 		td->td_retval[0] = ops->fetch_error(ujob);
20621ce91824SDavid Xu 		return (0);
20631ce91824SDavid Xu 	}
20641ce91824SDavid Xu 
20651ce91824SDavid Xu 	td->td_retval[0] = EINVAL;
20661ce91824SDavid Xu 	return (0);
2067ee877a35SJohn Dyson }
2068ee877a35SJohn Dyson 
20693858a1f4SJohn Baldwin int
20708451d0ddSKip Macy sys_aio_error(struct thread *td, struct aio_error_args *uap)
20713858a1f4SJohn Baldwin {
20723858a1f4SJohn Baldwin 
20733858a1f4SJohn Baldwin 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
20743858a1f4SJohn Baldwin }
20753858a1f4SJohn Baldwin 
2076eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */
2077399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2078ee877a35SJohn Dyson int
2079399e8c17SJohn Baldwin freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
20800972628aSDavid Xu {
20810972628aSDavid Xu 
20823858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
20833858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
20840972628aSDavid Xu }
2085399e8c17SJohn Baldwin #endif
20860972628aSDavid Xu 
20870972628aSDavid Xu int
20888451d0ddSKip Macy sys_aio_read(struct thread *td, struct aio_read_args *uap)
2089fd3bf775SJohn Dyson {
209021d56e9cSAlfred Perlstein 
20913858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2092ee877a35SJohn Dyson }
2093ee877a35SJohn Dyson 
2094eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */
2095399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2096ee877a35SJohn Dyson int
2097399e8c17SJohn Baldwin freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
20980972628aSDavid Xu {
20990972628aSDavid Xu 
21003858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
21013858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21020972628aSDavid Xu }
2103399e8c17SJohn Baldwin #endif
21040972628aSDavid Xu 
21050972628aSDavid Xu int
21068451d0ddSKip Macy sys_aio_write(struct thread *td, struct aio_write_args *uap)
2107fd3bf775SJohn Dyson {
210821d56e9cSAlfred Perlstein 
21093858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
21100972628aSDavid Xu }
21110972628aSDavid Xu 
21126160e12cSGleb Smirnoff int
21136160e12cSGleb Smirnoff sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
21146160e12cSGleb Smirnoff {
21156160e12cSGleb Smirnoff 
21166160e12cSGleb Smirnoff 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
21176160e12cSGleb Smirnoff }
21186160e12cSGleb Smirnoff 
21190972628aSDavid Xu static int
21203858a1f4SJohn Baldwin kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
21213858a1f4SJohn Baldwin     struct aiocb **acb_list, int nent, struct sigevent *sig,
21223858a1f4SJohn Baldwin     struct aiocb_ops *ops)
21230972628aSDavid Xu {
2124b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
21255652770dSJohn Baldwin 	struct aiocb *job;
21262244ea07SJohn Dyson 	struct kaioinfo *ki;
21271ce91824SDavid Xu 	struct aioliojob *lj;
212869cd28daSDoug Ambrisko 	struct kevent kev;
21291ce91824SDavid Xu 	int error;
213052c09831SAlan Somers 	int nagain, nerror;
2131ee877a35SJohn Dyson 	int i;
2132ee877a35SJohn Dyson 
21333858a1f4SJohn Baldwin 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2134ac41f2efSAlfred Perlstein 		return (EINVAL);
21352244ea07SJohn Dyson 
2136913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
2137ac41f2efSAlfred Perlstein 		return (EINVAL);
21382244ea07SJohn Dyson 
2139bfbbc4aaSJason Evans 	if (p->p_aioinfo == NULL)
21402244ea07SJohn Dyson 		aio_init_aioinfo(p);
21412244ea07SJohn Dyson 
21422244ea07SJohn Dyson 	ki = p->p_aioinfo;
21432244ea07SJohn Dyson 
2144a163d034SWarner Losh 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
214584af4da6SJohn Dyson 	lj->lioj_flags = 0;
21461ce91824SDavid Xu 	lj->lioj_count = 0;
21471ce91824SDavid Xu 	lj->lioj_finished_count = 0;
2148d8b0556cSKonstantin Belousov 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
21494c0fb2cfSDavid Xu 	ksiginfo_init(&lj->lioj_ksi);
215069cd28daSDoug Ambrisko 
215184af4da6SJohn Dyson 	/*
2152bfbbc4aaSJason Evans 	 * Setup signal.
215384af4da6SJohn Dyson 	 */
21543858a1f4SJohn Baldwin 	if (sig && (mode == LIO_NOWAIT)) {
21553858a1f4SJohn Baldwin 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
215669cd28daSDoug Ambrisko 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
215769cd28daSDoug Ambrisko 			/* Assume only new style KEVENT */
215836c4960eSMark Johnston 			memset(&kev, 0, sizeof(kev));
215969cd28daSDoug Ambrisko 			kev.filter = EVFILT_LIO;
216069cd28daSDoug Ambrisko 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
21613858a1f4SJohn Baldwin 			kev.ident = (uintptr_t)uacb_list; /* something unique */
216269cd28daSDoug Ambrisko 			kev.data = (intptr_t)lj;
21631ce91824SDavid Xu 			/* pass user defined sigval data */
21641ce91824SDavid Xu 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
21654db71d27SJohn-Mark Gurney 			error = kqfd_register(
2166792843c3SMark Johnston 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td,
2167792843c3SMark Johnston 			    M_WAITOK);
216869cd28daSDoug Ambrisko 			if (error) {
216969cd28daSDoug Ambrisko 				uma_zfree(aiolio_zone, lj);
217069cd28daSDoug Ambrisko 				return (error);
217169cd28daSDoug Ambrisko 			}
21721ce91824SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
21731ce91824SDavid Xu 			;
217468d71118SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
217568d71118SDavid Xu 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
217668d71118SDavid Xu 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
217769cd28daSDoug Ambrisko 					uma_zfree(aiolio_zone, lj);
217869cd28daSDoug Ambrisko 					return EINVAL;
217968d71118SDavid Xu 				}
218084af4da6SJohn Dyson 				lj->lioj_flags |= LIOJ_SIGNAL;
218168d71118SDavid Xu 		} else {
218268d71118SDavid Xu 			uma_zfree(aiolio_zone, lj);
218368d71118SDavid Xu 			return EINVAL;
21844d752b01SAlan Cox 		}
21851ce91824SDavid Xu 	}
218669cd28daSDoug Ambrisko 
2187759ccccaSDavid Xu 	AIO_LOCK(ki);
21882f3cf918SAlfred Perlstein 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
21892244ea07SJohn Dyson 	/*
21901ce91824SDavid Xu 	 * Add extra aiocb count to avoid the lio to be freed
21911ce91824SDavid Xu 	 * by other threads doing aio_waitcomplete or aio_return,
21921ce91824SDavid Xu 	 * and prevent event from being sent until we have queued
21931ce91824SDavid Xu 	 * all tasks.
21941ce91824SDavid Xu 	 */
21951ce91824SDavid Xu 	lj->lioj_count = 1;
2196759ccccaSDavid Xu 	AIO_UNLOCK(ki);
21971ce91824SDavid Xu 
21981ce91824SDavid Xu 	/*
2199bfbbc4aaSJason Evans 	 * Get pointers to the list of I/O requests.
22002244ea07SJohn Dyson 	 */
220152c09831SAlan Somers 	nagain = 0;
2202fd3bf775SJohn Dyson 	nerror = 0;
22033858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++) {
22045652770dSJohn Baldwin 		job = acb_list[i];
22055652770dSJohn Baldwin 		if (job != NULL) {
22065652770dSJohn Baldwin 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
220752c09831SAlan Somers 			if (error == EAGAIN)
220852c09831SAlan Somers 				nagain++;
220952c09831SAlan Somers 			else if (error != 0)
2210fd3bf775SJohn Dyson 				nerror++;
2211fd3bf775SJohn Dyson 		}
2212fd3bf775SJohn Dyson 	}
22132244ea07SJohn Dyson 
22141ce91824SDavid Xu 	error = 0;
2215759ccccaSDavid Xu 	AIO_LOCK(ki);
22163858a1f4SJohn Baldwin 	if (mode == LIO_WAIT) {
22171ce91824SDavid Xu 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2218fd3bf775SJohn Dyson 			ki->kaio_flags |= KAIO_WAKEUP;
2219759ccccaSDavid Xu 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
22201ce91824SDavid Xu 			    PRIBIO | PCATCH, "aiospn", 0);
22211ce91824SDavid Xu 			if (error == ERESTART)
22221ce91824SDavid Xu 				error = EINTR;
22231ce91824SDavid Xu 			if (error)
22241ce91824SDavid Xu 				break;
22251ce91824SDavid Xu 		}
22261ce91824SDavid Xu 	} else {
22271ce91824SDavid Xu 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
22281ce91824SDavid Xu 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
22291ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
22301ce91824SDavid Xu 				KNOTE_LOCKED(&lj->klist, 1);
22311ce91824SDavid Xu 			}
22321ce91824SDavid Xu 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
22331ce91824SDavid Xu 			    == LIOJ_SIGNAL
22341ce91824SDavid Xu 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
22351ce91824SDavid Xu 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
22361ce91824SDavid Xu 				aio_sendsig(p, &lj->lioj_signal,
22371ce91824SDavid Xu 					    &lj->lioj_ksi);
22381ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
22392244ea07SJohn Dyson 			}
22402244ea07SJohn Dyson 		}
22411ce91824SDavid Xu 	}
22421ce91824SDavid Xu 	lj->lioj_count--;
22431ce91824SDavid Xu 	if (lj->lioj_count == 0) {
22441ce91824SDavid Xu 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
22451ce91824SDavid Xu 		knlist_delete(&lj->klist, curthread, 1);
2246759ccccaSDavid Xu 		PROC_LOCK(p);
22471ce91824SDavid Xu 		sigqueue_take(&lj->lioj_ksi);
22481ce91824SDavid Xu 		PROC_UNLOCK(p);
2249759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22501ce91824SDavid Xu 		uma_zfree(aiolio_zone, lj);
22511ce91824SDavid Xu 	} else
2252759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22532244ea07SJohn Dyson 
22541ce91824SDavid Xu 	if (nerror)
22551ce91824SDavid Xu 		return (EIO);
225652c09831SAlan Somers 	else if (nagain)
225752c09831SAlan Somers 		return (EAGAIN);
225852c09831SAlan Somers 	else
22591ce91824SDavid Xu 		return (error);
2260ee877a35SJohn Dyson }
2261fd3bf775SJohn Dyson 
22623858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
2263399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
22643858a1f4SJohn Baldwin int
2265399e8c17SJohn Baldwin freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
22663858a1f4SJohn Baldwin {
22673858a1f4SJohn Baldwin 	struct aiocb **acb_list;
22683858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
22693858a1f4SJohn Baldwin 	struct osigevent osig;
22703858a1f4SJohn Baldwin 	int error, nent;
22713858a1f4SJohn Baldwin 
22723858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
22733858a1f4SJohn Baldwin 		return (EINVAL);
22743858a1f4SJohn Baldwin 
22753858a1f4SJohn Baldwin 	nent = uap->nent;
2276913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
22773858a1f4SJohn Baldwin 		return (EINVAL);
22783858a1f4SJohn Baldwin 
22793858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
22803858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
22813858a1f4SJohn Baldwin 		if (error)
22823858a1f4SJohn Baldwin 			return (error);
22833858a1f4SJohn Baldwin 		error = convert_old_sigevent(&osig, &sig);
22843858a1f4SJohn Baldwin 		if (error)
22853858a1f4SJohn Baldwin 			return (error);
22863858a1f4SJohn Baldwin 		sigp = &sig;
22873858a1f4SJohn Baldwin 	} else
22883858a1f4SJohn Baldwin 		sigp = NULL;
22893858a1f4SJohn Baldwin 
22903858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
22913858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
22923858a1f4SJohn Baldwin 	if (error == 0)
22933858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode,
22943858a1f4SJohn Baldwin 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
22953858a1f4SJohn Baldwin 		    &aiocb_ops_osigevent);
22963858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
22973858a1f4SJohn Baldwin 	return (error);
22983858a1f4SJohn Baldwin }
2299399e8c17SJohn Baldwin #endif
23003858a1f4SJohn Baldwin 
23013858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
23023858a1f4SJohn Baldwin int
23038451d0ddSKip Macy sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
23043858a1f4SJohn Baldwin {
23053858a1f4SJohn Baldwin 	struct aiocb **acb_list;
23063858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
23073858a1f4SJohn Baldwin 	int error, nent;
23083858a1f4SJohn Baldwin 
23093858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
23103858a1f4SJohn Baldwin 		return (EINVAL);
23113858a1f4SJohn Baldwin 
23123858a1f4SJohn Baldwin 	nent = uap->nent;
2313913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
23143858a1f4SJohn Baldwin 		return (EINVAL);
23153858a1f4SJohn Baldwin 
23163858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
23173858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig, sizeof(sig));
23183858a1f4SJohn Baldwin 		if (error)
23193858a1f4SJohn Baldwin 			return (error);
23203858a1f4SJohn Baldwin 		sigp = &sig;
23213858a1f4SJohn Baldwin 	} else
23223858a1f4SJohn Baldwin 		sigp = NULL;
23233858a1f4SJohn Baldwin 
23243858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
23253858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
23263858a1f4SJohn Baldwin 	if (error == 0)
23273858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
23283858a1f4SJohn Baldwin 		    nent, sigp, &aiocb_ops);
23293858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23303858a1f4SJohn Baldwin 	return (error);
23313858a1f4SJohn Baldwin }
23323858a1f4SJohn Baldwin 
2333fd3bf775SJohn Dyson static void
233472bce9ffSAlan Somers aio_biowakeup(struct bio *bp)
2335fd3bf775SJohn Dyson {
23365652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
23371ce91824SDavid Xu 	struct proc *userp;
233827b8220dSDavid Xu 	struct kaioinfo *ki;
2339f3215338SJohn Baldwin 	size_t nbytes;
2340f3215338SJohn Baldwin 	int error, nblks;
23411ce91824SDavid Xu 
2342f743d981SAlexander Motin 	/* Release mapping into kernel space. */
2343f3215338SJohn Baldwin 	userp = job->userproc;
2344f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
23455652770dSJohn Baldwin 	if (job->pbuf) {
23465652770dSJohn Baldwin 		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
2347756a5412SGleb Smirnoff 		uma_zfree(pbuf_zone, job->pbuf);
23485652770dSJohn Baldwin 		job->pbuf = NULL;
2349f743d981SAlexander Motin 		atomic_subtract_int(&num_buf_aio, 1);
2350f3215338SJohn Baldwin 		AIO_LOCK(ki);
2351f3215338SJohn Baldwin 		ki->kaio_buffer_count--;
2352f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
23538091e52bSJohn Baldwin 	} else
23548091e52bSJohn Baldwin 		atomic_subtract_int(&num_unmapped_aio, 1);
23555652770dSJohn Baldwin 	vm_page_unhold_pages(job->pages, job->npages);
2356f743d981SAlexander Motin 
23575652770dSJohn Baldwin 	bp = job->bp;
23585652770dSJohn Baldwin 	job->bp = NULL;
2359f3215338SJohn Baldwin 	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
2360f3215338SJohn Baldwin 	error = 0;
2361f743d981SAlexander Motin 	if (bp->bio_flags & BIO_ERROR)
2362f3215338SJohn Baldwin 		error = bp->bio_error;
2363f3215338SJohn Baldwin 	nblks = btodb(nbytes);
23645652770dSJohn Baldwin 	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
2365b1012d80SJohn Baldwin 		job->outblock += nblks;
23661ce91824SDavid Xu 	else
2367b1012d80SJohn Baldwin 		job->inblock += nblks;
2368f3215338SJohn Baldwin 
2369f0ec1740SJohn Baldwin 	if (error)
2370f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
2371f0ec1740SJohn Baldwin 	else
2372f0ec1740SJohn Baldwin 		aio_complete(job, nbytes, 0);
23731ce91824SDavid Xu 
2374f743d981SAlexander Motin 	g_destroy_bio(bp);
237584af4da6SJohn Dyson }
2376bfbbc4aaSJason Evans 
2377eb8e6d52SEivind Eklund /* syscall - wait for the next completion of an aio request */
23783858a1f4SJohn Baldwin static int
23795652770dSJohn Baldwin kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
23803858a1f4SJohn Baldwin     struct timespec *ts, struct aiocb_ops *ops)
2381bfbbc4aaSJason Evans {
2382b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
2383bfbbc4aaSJason Evans 	struct timeval atv;
2384bfbbc4aaSJason Evans 	struct kaioinfo *ki;
23855652770dSJohn Baldwin 	struct kaiocb *job;
23865652770dSJohn Baldwin 	struct aiocb *ujob;
2387bb430bc7SJohn Baldwin 	long error, status;
2388bb430bc7SJohn Baldwin 	int timo;
2389bfbbc4aaSJason Evans 
23905652770dSJohn Baldwin 	ops->store_aiocb(ujobp, NULL);
2391dd85920aSJason Evans 
239238d68e2dSPawel Jakub Dawidek 	if (ts == NULL) {
2393bfbbc4aaSJason Evans 		timo = 0;
239438d68e2dSPawel Jakub Dawidek 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
239538d68e2dSPawel Jakub Dawidek 		timo = -1;
239638d68e2dSPawel Jakub Dawidek 	} else {
23973858a1f4SJohn Baldwin 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2398bfbbc4aaSJason Evans 			return (EINVAL);
2399bfbbc4aaSJason Evans 
24003858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2401bfbbc4aaSJason Evans 		if (itimerfix(&atv))
2402bfbbc4aaSJason Evans 			return (EINVAL);
2403bfbbc4aaSJason Evans 		timo = tvtohz(&atv);
2404bfbbc4aaSJason Evans 	}
2405bfbbc4aaSJason Evans 
24068213baf0SChristian S.J. Peron 	if (p->p_aioinfo == NULL)
2407323fe565SDavid Xu 		aio_init_aioinfo(p);
24088213baf0SChristian S.J. Peron 	ki = p->p_aioinfo;
2409bfbbc4aaSJason Evans 
24101ce91824SDavid Xu 	error = 0;
24115652770dSJohn Baldwin 	job = NULL;
2412759ccccaSDavid Xu 	AIO_LOCK(ki);
24135652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
241438d68e2dSPawel Jakub Dawidek 		if (timo == -1) {
241538d68e2dSPawel Jakub Dawidek 			error = EWOULDBLOCK;
241638d68e2dSPawel Jakub Dawidek 			break;
241738d68e2dSPawel Jakub Dawidek 		}
24181ce91824SDavid Xu 		ki->kaio_flags |= KAIO_WAKEUP;
2419759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
24201ce91824SDavid Xu 		    "aiowc", timo);
242127b8220dSDavid Xu 		if (timo && error == ERESTART)
24221ce91824SDavid Xu 			error = EINTR;
24231ce91824SDavid Xu 		if (error)
24241ce91824SDavid Xu 			break;
24251ce91824SDavid Xu 	}
24261ce91824SDavid Xu 
24275652770dSJohn Baldwin 	if (job != NULL) {
2428f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
24295652770dSJohn Baldwin 		ujob = job->ujob;
24305652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
24315652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
24321ce91824SDavid Xu 		td->td_retval[0] = status;
2433b1012d80SJohn Baldwin 		td->td_ru.ru_oublock += job->outblock;
2434b1012d80SJohn Baldwin 		td->td_ru.ru_inblock += job->inblock;
2435b1012d80SJohn Baldwin 		td->td_ru.ru_msgsnd += job->msgsnd;
2436b1012d80SJohn Baldwin 		td->td_ru.ru_msgrcv += job->msgrcv;
24375652770dSJohn Baldwin 		aio_free_entry(job);
2438759ccccaSDavid Xu 		AIO_UNLOCK(ki);
24395652770dSJohn Baldwin 		ops->store_aiocb(ujobp, ujob);
24405652770dSJohn Baldwin 		ops->store_error(ujob, error);
24415652770dSJohn Baldwin 		ops->store_status(ujob, status);
24421ce91824SDavid Xu 	} else
2443759ccccaSDavid Xu 		AIO_UNLOCK(ki);
2444bfbbc4aaSJason Evans 
2445ac41f2efSAlfred Perlstein 	return (error);
2446bfbbc4aaSJason Evans }
2447cb679c38SJonathan Lemon 
244899eee864SDavid Xu int
24498451d0ddSKip Macy sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
24503858a1f4SJohn Baldwin {
24513858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
24523858a1f4SJohn Baldwin 	int error;
24533858a1f4SJohn Baldwin 
24543858a1f4SJohn Baldwin 	if (uap->timeout) {
24553858a1f4SJohn Baldwin 		/* Get timespec struct. */
24563858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts, sizeof(ts));
24573858a1f4SJohn Baldwin 		if (error)
24583858a1f4SJohn Baldwin 			return (error);
24593858a1f4SJohn Baldwin 		tsp = &ts;
24603858a1f4SJohn Baldwin 	} else
24613858a1f4SJohn Baldwin 		tsp = NULL;
24623858a1f4SJohn Baldwin 
24633858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
24643858a1f4SJohn Baldwin }
24653858a1f4SJohn Baldwin 
24663858a1f4SJohn Baldwin static int
24675652770dSJohn Baldwin kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
24683858a1f4SJohn Baldwin     struct aiocb_ops *ops)
246999eee864SDavid Xu {
247099eee864SDavid Xu 
24713858a1f4SJohn Baldwin 	if (op != O_SYNC) /* XXX lack of O_DSYNC */
247299eee864SDavid Xu 		return (EINVAL);
24735652770dSJohn Baldwin 	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
24743858a1f4SJohn Baldwin }
24753858a1f4SJohn Baldwin 
24763858a1f4SJohn Baldwin int
24778451d0ddSKip Macy sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
24783858a1f4SJohn Baldwin {
24793858a1f4SJohn Baldwin 
24803858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
248199eee864SDavid Xu }
248299eee864SDavid Xu 
2483eb8e6d52SEivind Eklund /* kqueue attach function */
2484cb679c38SJonathan Lemon static int
2485cb679c38SJonathan Lemon filt_aioattach(struct knote *kn)
2486cb679c38SJonathan Lemon {
24872b34e843SKonstantin Belousov 	struct kaiocb *job;
24882b34e843SKonstantin Belousov 
24892b34e843SKonstantin Belousov 	job = (struct kaiocb *)(uintptr_t)kn->kn_sdata;
2490cb679c38SJonathan Lemon 
2491cb679c38SJonathan Lemon 	/*
24925652770dSJohn Baldwin 	 * The job pointer must be validated before using it, so
2493cb679c38SJonathan Lemon 	 * registration is restricted to the kernel; the user cannot
2494cb679c38SJonathan Lemon 	 * set EV_FLAG1.
2495cb679c38SJonathan Lemon 	 */
2496cb679c38SJonathan Lemon 	if ((kn->kn_flags & EV_FLAG1) == 0)
2497cb679c38SJonathan Lemon 		return (EPERM);
24985652770dSJohn Baldwin 	kn->kn_ptr.p_aio = job;
2499cb679c38SJonathan Lemon 	kn->kn_flags &= ~EV_FLAG1;
2500cb679c38SJonathan Lemon 
25015652770dSJohn Baldwin 	knlist_add(&job->klist, kn, 0);
2502cb679c38SJonathan Lemon 
2503cb679c38SJonathan Lemon 	return (0);
2504cb679c38SJonathan Lemon }
2505cb679c38SJonathan Lemon 
2506eb8e6d52SEivind Eklund /* kqueue detach function */
2507cb679c38SJonathan Lemon static void
2508cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn)
2509cb679c38SJonathan Lemon {
25108e9fc278SDoug Ambrisko 	struct knlist *knl;
2511cb679c38SJonathan Lemon 
25128e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_aio->klist;
25138e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25148e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25158e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25168e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
2517cb679c38SJonathan Lemon }
2518cb679c38SJonathan Lemon 
2519eb8e6d52SEivind Eklund /* kqueue filter function */
2520cb679c38SJonathan Lemon /*ARGSUSED*/
2521cb679c38SJonathan Lemon static int
2522cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint)
2523cb679c38SJonathan Lemon {
25245652770dSJohn Baldwin 	struct kaiocb *job = kn->kn_ptr.p_aio;
2525cb679c38SJonathan Lemon 
25265652770dSJohn Baldwin 	kn->kn_data = job->uaiocb._aiocb_private.error;
2527f3215338SJohn Baldwin 	if (!(job->jobflags & KAIOCB_FINISHED))
2528cb679c38SJonathan Lemon 		return (0);
2529cb679c38SJonathan Lemon 	kn->kn_flags |= EV_EOF;
2530cb679c38SJonathan Lemon 	return (1);
2531cb679c38SJonathan Lemon }
253269cd28daSDoug Ambrisko 
253369cd28daSDoug Ambrisko /* kqueue attach function */
253469cd28daSDoug Ambrisko static int
253569cd28daSDoug Ambrisko filt_lioattach(struct knote *kn)
253669cd28daSDoug Ambrisko {
25372b34e843SKonstantin Belousov 	struct aioliojob *lj;
25382b34e843SKonstantin Belousov 
25392b34e843SKonstantin Belousov 	lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata;
254069cd28daSDoug Ambrisko 
254169cd28daSDoug Ambrisko 	/*
25421ce91824SDavid Xu 	 * The aioliojob pointer must be validated before using it, so
254369cd28daSDoug Ambrisko 	 * registration is restricted to the kernel; the user cannot
254469cd28daSDoug Ambrisko 	 * set EV_FLAG1.
254569cd28daSDoug Ambrisko 	 */
254669cd28daSDoug Ambrisko 	if ((kn->kn_flags & EV_FLAG1) == 0)
254769cd28daSDoug Ambrisko 		return (EPERM);
2548a8afa221SJean-Sébastien Pédron 	kn->kn_ptr.p_lio = lj;
254969cd28daSDoug Ambrisko 	kn->kn_flags &= ~EV_FLAG1;
255069cd28daSDoug Ambrisko 
255169cd28daSDoug Ambrisko 	knlist_add(&lj->klist, kn, 0);
255269cd28daSDoug Ambrisko 
255369cd28daSDoug Ambrisko 	return (0);
255469cd28daSDoug Ambrisko }
255569cd28daSDoug Ambrisko 
255669cd28daSDoug Ambrisko /* kqueue detach function */
255769cd28daSDoug Ambrisko static void
255869cd28daSDoug Ambrisko filt_liodetach(struct knote *kn)
255969cd28daSDoug Ambrisko {
25608e9fc278SDoug Ambrisko 	struct knlist *knl;
256169cd28daSDoug Ambrisko 
25628e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_lio->klist;
25638e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25648e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25658e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25668e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
256769cd28daSDoug Ambrisko }
256869cd28daSDoug Ambrisko 
256969cd28daSDoug Ambrisko /* kqueue filter function */
257069cd28daSDoug Ambrisko /*ARGSUSED*/
257169cd28daSDoug Ambrisko static int
257269cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint)
257369cd28daSDoug Ambrisko {
2574a8afa221SJean-Sébastien Pédron 	struct aioliojob * lj = kn->kn_ptr.p_lio;
25751ce91824SDavid Xu 
257669cd28daSDoug Ambrisko 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
257769cd28daSDoug Ambrisko }
25783858a1f4SJohn Baldwin 
2579841c0c7eSNathan Whitehorn #ifdef COMPAT_FREEBSD32
2580399e8c17SJohn Baldwin #include <sys/mount.h>
2581399e8c17SJohn Baldwin #include <sys/socket.h>
2582399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32.h>
2583399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_proto.h>
2584399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h>
2585399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_syscall.h>
2586399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_util.h>
25873858a1f4SJohn Baldwin 
25883858a1f4SJohn Baldwin struct __aiocb_private32 {
25893858a1f4SJohn Baldwin 	int32_t	status;
25903858a1f4SJohn Baldwin 	int32_t	error;
25913858a1f4SJohn Baldwin 	uint32_t kernelinfo;
25923858a1f4SJohn Baldwin };
25933858a1f4SJohn Baldwin 
2594399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
25953858a1f4SJohn Baldwin typedef struct oaiocb32 {
25963858a1f4SJohn Baldwin 	int	aio_fildes;		/* File descriptor */
25973858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
25983858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
25993858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26003858a1f4SJohn Baldwin 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
26013858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26023858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26033858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26043858a1f4SJohn Baldwin } oaiocb32_t;
2605399e8c17SJohn Baldwin #endif
26063858a1f4SJohn Baldwin 
26073858a1f4SJohn Baldwin typedef struct aiocb32 {
26083858a1f4SJohn Baldwin 	int32_t	aio_fildes;		/* File descriptor */
26093858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
26103858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
26113858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26123858a1f4SJohn Baldwin 	int	__spare__[2];
26133858a1f4SJohn Baldwin 	uint32_t __spare2__;
26143858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26153858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26163858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26173858a1f4SJohn Baldwin 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
26183858a1f4SJohn Baldwin } aiocb32_t;
26193858a1f4SJohn Baldwin 
2620399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
26213858a1f4SJohn Baldwin static int
26223858a1f4SJohn Baldwin convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
26233858a1f4SJohn Baldwin {
26243858a1f4SJohn Baldwin 
26253858a1f4SJohn Baldwin 	/*
26263858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
26273858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
26283858a1f4SJohn Baldwin 	 */
26293858a1f4SJohn Baldwin 	CP(*osig, *nsig, sigev_notify);
26303858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
26313858a1f4SJohn Baldwin 	case SIGEV_NONE:
26323858a1f4SJohn Baldwin 		break;
26333858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
26343858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
26353858a1f4SJohn Baldwin 		break;
26363858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
26373858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
26383858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
26393858a1f4SJohn Baldwin 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
26403858a1f4SJohn Baldwin 		break;
26413858a1f4SJohn Baldwin 	default:
26423858a1f4SJohn Baldwin 		return (EINVAL);
26433858a1f4SJohn Baldwin 	}
26443858a1f4SJohn Baldwin 	return (0);
26453858a1f4SJohn Baldwin }
26463858a1f4SJohn Baldwin 
26473858a1f4SJohn Baldwin static int
26483858a1f4SJohn Baldwin aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
26493858a1f4SJohn Baldwin {
26503858a1f4SJohn Baldwin 	struct oaiocb32 job32;
26513858a1f4SJohn Baldwin 	int error;
26523858a1f4SJohn Baldwin 
26533858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
26543858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26553858a1f4SJohn Baldwin 	if (error)
26563858a1f4SJohn Baldwin 		return (error);
26573858a1f4SJohn Baldwin 
26583858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26593858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26603858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26613858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26623858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26633858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26643858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26653858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26663858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26673858a1f4SJohn Baldwin 	return (convert_old_sigevent32(&job32.aio_sigevent,
26683858a1f4SJohn Baldwin 	    &kjob->aio_sigevent));
26693858a1f4SJohn Baldwin }
2670399e8c17SJohn Baldwin #endif
26713858a1f4SJohn Baldwin 
26723858a1f4SJohn Baldwin static int
26733858a1f4SJohn Baldwin aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
26743858a1f4SJohn Baldwin {
26753858a1f4SJohn Baldwin 	struct aiocb32 job32;
26763858a1f4SJohn Baldwin 	int error;
26773858a1f4SJohn Baldwin 
26783858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26793858a1f4SJohn Baldwin 	if (error)
26803858a1f4SJohn Baldwin 		return (error);
26813858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26823858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26833858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26843858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26853858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26863858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26873858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26883858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26893858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26903858a1f4SJohn Baldwin 	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
26913858a1f4SJohn Baldwin }
26923858a1f4SJohn Baldwin 
26933858a1f4SJohn Baldwin static long
26943858a1f4SJohn Baldwin aiocb32_fetch_status(struct aiocb *ujob)
26953858a1f4SJohn Baldwin {
26963858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
26973858a1f4SJohn Baldwin 
26983858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
26993858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.status));
27003858a1f4SJohn Baldwin }
27013858a1f4SJohn Baldwin 
27023858a1f4SJohn Baldwin static long
27033858a1f4SJohn Baldwin aiocb32_fetch_error(struct aiocb *ujob)
27043858a1f4SJohn Baldwin {
27053858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27063858a1f4SJohn Baldwin 
27073858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27083858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.error));
27093858a1f4SJohn Baldwin }
27103858a1f4SJohn Baldwin 
27113858a1f4SJohn Baldwin static int
27123858a1f4SJohn Baldwin aiocb32_store_status(struct aiocb *ujob, long status)
27133858a1f4SJohn Baldwin {
27143858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27153858a1f4SJohn Baldwin 
27163858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27173858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.status, status));
27183858a1f4SJohn Baldwin }
27193858a1f4SJohn Baldwin 
27203858a1f4SJohn Baldwin static int
27213858a1f4SJohn Baldwin aiocb32_store_error(struct aiocb *ujob, long error)
27223858a1f4SJohn Baldwin {
27233858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27243858a1f4SJohn Baldwin 
27253858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27263858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.error, error));
27273858a1f4SJohn Baldwin }
27283858a1f4SJohn Baldwin 
27293858a1f4SJohn Baldwin static int
27303858a1f4SJohn Baldwin aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
27313858a1f4SJohn Baldwin {
27323858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27333858a1f4SJohn Baldwin 
27343858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27353858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
27363858a1f4SJohn Baldwin }
27373858a1f4SJohn Baldwin 
27383858a1f4SJohn Baldwin static int
27393858a1f4SJohn Baldwin aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
27403858a1f4SJohn Baldwin {
27413858a1f4SJohn Baldwin 
27423858a1f4SJohn Baldwin 	return (suword32(ujobp, (long)ujob));
27433858a1f4SJohn Baldwin }
27443858a1f4SJohn Baldwin 
27453858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops = {
2746849aef49SAndrew Turner 	.aio_copyin = aiocb32_copyin,
27473858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27483858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27493858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27503858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27513858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27523858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27533858a1f4SJohn Baldwin };
27543858a1f4SJohn Baldwin 
2755399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
27563858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops_osigevent = {
2757849aef49SAndrew Turner 	.aio_copyin = aiocb32_copyin_old_sigevent,
27583858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27593858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27603858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27613858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27623858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27633858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27643858a1f4SJohn Baldwin };
2765399e8c17SJohn Baldwin #endif
27663858a1f4SJohn Baldwin 
27673858a1f4SJohn Baldwin int
27683858a1f4SJohn Baldwin freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
27693858a1f4SJohn Baldwin {
27703858a1f4SJohn Baldwin 
27713858a1f4SJohn Baldwin 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
27723858a1f4SJohn Baldwin }
27733858a1f4SJohn Baldwin 
27743858a1f4SJohn Baldwin int
27753858a1f4SJohn Baldwin freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
27763858a1f4SJohn Baldwin {
27773858a1f4SJohn Baldwin 	struct timespec32 ts32;
27783858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
27793858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
27803858a1f4SJohn Baldwin 	uint32_t *ujoblist32;
27813858a1f4SJohn Baldwin 	int error, i;
27823858a1f4SJohn Baldwin 
2783913b9329SAlan Somers 	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
27843858a1f4SJohn Baldwin 		return (EINVAL);
27853858a1f4SJohn Baldwin 
27863858a1f4SJohn Baldwin 	if (uap->timeout) {
27873858a1f4SJohn Baldwin 		/* Get timespec struct. */
27883858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
27893858a1f4SJohn Baldwin 			return (error);
27903858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
27913858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
27923858a1f4SJohn Baldwin 		tsp = &ts;
27933858a1f4SJohn Baldwin 	} else
27943858a1f4SJohn Baldwin 		tsp = NULL;
27953858a1f4SJohn Baldwin 
2796913b9329SAlan Somers 	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
27973858a1f4SJohn Baldwin 	ujoblist32 = (uint32_t *)ujoblist;
27983858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
27993858a1f4SJohn Baldwin 	    sizeof(ujoblist32[0]));
28003858a1f4SJohn Baldwin 	if (error == 0) {
2801df485bdbSAlan Somers 		for (i = uap->nent - 1; i >= 0; i--)
28023858a1f4SJohn Baldwin 			ujoblist[i] = PTRIN(ujoblist32[i]);
28033858a1f4SJohn Baldwin 
28043858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
28053858a1f4SJohn Baldwin 	}
2806913b9329SAlan Somers 	free(ujoblist, M_AIOS);
28073858a1f4SJohn Baldwin 	return (error);
28083858a1f4SJohn Baldwin }
28093858a1f4SJohn Baldwin 
28103858a1f4SJohn Baldwin int
28113858a1f4SJohn Baldwin freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
28123858a1f4SJohn Baldwin {
28133858a1f4SJohn Baldwin 
28143858a1f4SJohn Baldwin 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
28153858a1f4SJohn Baldwin }
28163858a1f4SJohn Baldwin 
2817399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28183858a1f4SJohn Baldwin int
2819399e8c17SJohn Baldwin freebsd6_freebsd32_aio_read(struct thread *td,
2820399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_read_args *uap)
28213858a1f4SJohn Baldwin {
28223858a1f4SJohn Baldwin 
28233858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28243858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28253858a1f4SJohn Baldwin }
2826399e8c17SJohn Baldwin #endif
28273858a1f4SJohn Baldwin 
28283858a1f4SJohn Baldwin int
28293858a1f4SJohn Baldwin freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
28303858a1f4SJohn Baldwin {
28313858a1f4SJohn Baldwin 
28323858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28333858a1f4SJohn Baldwin 	    &aiocb32_ops));
28343858a1f4SJohn Baldwin }
28353858a1f4SJohn Baldwin 
2836399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28373858a1f4SJohn Baldwin int
2838399e8c17SJohn Baldwin freebsd6_freebsd32_aio_write(struct thread *td,
2839399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_write_args *uap)
28403858a1f4SJohn Baldwin {
28413858a1f4SJohn Baldwin 
28423858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28433858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28443858a1f4SJohn Baldwin }
2845399e8c17SJohn Baldwin #endif
28463858a1f4SJohn Baldwin 
28473858a1f4SJohn Baldwin int
28483858a1f4SJohn Baldwin freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
28493858a1f4SJohn Baldwin {
28503858a1f4SJohn Baldwin 
28513858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28523858a1f4SJohn Baldwin 	    &aiocb32_ops));
28533858a1f4SJohn Baldwin }
28543858a1f4SJohn Baldwin 
28553858a1f4SJohn Baldwin int
28566160e12cSGleb Smirnoff freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
28576160e12cSGleb Smirnoff {
28586160e12cSGleb Smirnoff 
28596160e12cSGleb Smirnoff 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
28606160e12cSGleb Smirnoff 	    &aiocb32_ops));
28616160e12cSGleb Smirnoff }
28626160e12cSGleb Smirnoff 
28636160e12cSGleb Smirnoff int
28643858a1f4SJohn Baldwin freebsd32_aio_waitcomplete(struct thread *td,
28653858a1f4SJohn Baldwin     struct freebsd32_aio_waitcomplete_args *uap)
28663858a1f4SJohn Baldwin {
2867e588eeb1SJohn Baldwin 	struct timespec32 ts32;
28683858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
28693858a1f4SJohn Baldwin 	int error;
28703858a1f4SJohn Baldwin 
28713858a1f4SJohn Baldwin 	if (uap->timeout) {
28723858a1f4SJohn Baldwin 		/* Get timespec struct. */
28733858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
28743858a1f4SJohn Baldwin 		if (error)
28753858a1f4SJohn Baldwin 			return (error);
28763858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28773858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28783858a1f4SJohn Baldwin 		tsp = &ts;
28793858a1f4SJohn Baldwin 	} else
28803858a1f4SJohn Baldwin 		tsp = NULL;
28813858a1f4SJohn Baldwin 
28823858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
28833858a1f4SJohn Baldwin 	    &aiocb32_ops));
28843858a1f4SJohn Baldwin }
28853858a1f4SJohn Baldwin 
28863858a1f4SJohn Baldwin int
28873858a1f4SJohn Baldwin freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
28883858a1f4SJohn Baldwin {
28893858a1f4SJohn Baldwin 
28903858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
28913858a1f4SJohn Baldwin 	    &aiocb32_ops));
28923858a1f4SJohn Baldwin }
28933858a1f4SJohn Baldwin 
2894399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28953858a1f4SJohn Baldwin int
2896399e8c17SJohn Baldwin freebsd6_freebsd32_lio_listio(struct thread *td,
2897399e8c17SJohn Baldwin     struct freebsd6_freebsd32_lio_listio_args *uap)
28983858a1f4SJohn Baldwin {
28993858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29003858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29013858a1f4SJohn Baldwin 	struct osigevent32 osig;
29023858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29033858a1f4SJohn Baldwin 	int error, i, nent;
29043858a1f4SJohn Baldwin 
29053858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29063858a1f4SJohn Baldwin 		return (EINVAL);
29073858a1f4SJohn Baldwin 
29083858a1f4SJohn Baldwin 	nent = uap->nent;
2909913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
29103858a1f4SJohn Baldwin 		return (EINVAL);
29113858a1f4SJohn Baldwin 
29123858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29133858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
29143858a1f4SJohn Baldwin 		if (error)
29153858a1f4SJohn Baldwin 			return (error);
29163858a1f4SJohn Baldwin 		error = convert_old_sigevent32(&osig, &sig);
29173858a1f4SJohn Baldwin 		if (error)
29183858a1f4SJohn Baldwin 			return (error);
29193858a1f4SJohn Baldwin 		sigp = &sig;
29203858a1f4SJohn Baldwin 	} else
29213858a1f4SJohn Baldwin 		sigp = NULL;
29223858a1f4SJohn Baldwin 
29233858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29243858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29253858a1f4SJohn Baldwin 	if (error) {
29263858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29273858a1f4SJohn Baldwin 		return (error);
29283858a1f4SJohn Baldwin 	}
29293858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29303858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29313858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29323858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29333858a1f4SJohn Baldwin 
29343858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29353858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29363858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent);
29373858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29383858a1f4SJohn Baldwin 	return (error);
29393858a1f4SJohn Baldwin }
2940399e8c17SJohn Baldwin #endif
29413858a1f4SJohn Baldwin 
29423858a1f4SJohn Baldwin int
29433858a1f4SJohn Baldwin freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
29443858a1f4SJohn Baldwin {
29453858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29463858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29473858a1f4SJohn Baldwin 	struct sigevent32 sig32;
29483858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29493858a1f4SJohn Baldwin 	int error, i, nent;
29503858a1f4SJohn Baldwin 
29513858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29523858a1f4SJohn Baldwin 		return (EINVAL);
29533858a1f4SJohn Baldwin 
29543858a1f4SJohn Baldwin 	nent = uap->nent;
2955913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
29563858a1f4SJohn Baldwin 		return (EINVAL);
29573858a1f4SJohn Baldwin 
29583858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29593858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig32, sizeof(sig32));
29603858a1f4SJohn Baldwin 		if (error)
29613858a1f4SJohn Baldwin 			return (error);
29623858a1f4SJohn Baldwin 		error = convert_sigevent32(&sig32, &sig);
29633858a1f4SJohn Baldwin 		if (error)
29643858a1f4SJohn Baldwin 			return (error);
29653858a1f4SJohn Baldwin 		sigp = &sig;
29663858a1f4SJohn Baldwin 	} else
29673858a1f4SJohn Baldwin 		sigp = NULL;
29683858a1f4SJohn Baldwin 
29693858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29703858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29713858a1f4SJohn Baldwin 	if (error) {
29723858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29733858a1f4SJohn Baldwin 		return (error);
29743858a1f4SJohn Baldwin 	}
29753858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29763858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29773858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29783858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29793858a1f4SJohn Baldwin 
29803858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29813858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29823858a1f4SJohn Baldwin 	    &aiocb32_ops);
29833858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29843858a1f4SJohn Baldwin 	return (error);
29853858a1f4SJohn Baldwin }
29863858a1f4SJohn Baldwin 
29873858a1f4SJohn Baldwin #endif
2988