xref: /freebsd/sys/kern/vfs_aio.c (revision 7e40918452b94a522b619c2a980bb281b2649857)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4ee877a35SJohn Dyson  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
5ee877a35SJohn Dyson  *
6ee877a35SJohn Dyson  * Redistribution and use in source and binary forms, with or without
7ee877a35SJohn Dyson  * modification, are permitted provided that the following conditions
8ee877a35SJohn Dyson  * are met:
9ee877a35SJohn Dyson  * 1. Redistributions of source code must retain the above copyright
10ee877a35SJohn Dyson  *    notice, this list of conditions and the following disclaimer.
11ee877a35SJohn Dyson  * 2. John S. Dyson's name may not be used to endorse or promote products
12ee877a35SJohn Dyson  *    derived from this software without specific prior written permission.
13ee877a35SJohn Dyson  *
14ee877a35SJohn Dyson  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
15ee877a35SJohn Dyson  * bad that happens because of using this software isn't the responsibility
16ee877a35SJohn Dyson  * of the author.  This software is distributed AS-IS.
17ee877a35SJohn Dyson  */
18ee877a35SJohn Dyson 
19ee877a35SJohn Dyson /*
208a6472b7SPeter Dufault  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
21ee877a35SJohn Dyson  */
22ee877a35SJohn Dyson 
23677b542eSDavid E. O'Brien #include <sys/cdefs.h>
24677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
25677b542eSDavid E. O'Brien 
263858a1f4SJohn Baldwin #include "opt_compat.h"
273858a1f4SJohn Baldwin 
28ee877a35SJohn Dyson #include <sys/param.h>
29ee877a35SJohn Dyson #include <sys/systm.h>
30f591779bSSeigo Tanimura #include <sys/malloc.h>
319626b608SPoul-Henning Kamp #include <sys/bio.h>
32a5c9bce7SBruce Evans #include <sys/buf.h>
334a144410SRobert Watson #include <sys/capsicum.h>
3475b8b3b2SJohn Baldwin #include <sys/eventhandler.h>
35ee877a35SJohn Dyson #include <sys/sysproto.h>
36ee877a35SJohn Dyson #include <sys/filedesc.h>
37ee877a35SJohn Dyson #include <sys/kernel.h>
3877409fe1SPoul-Henning Kamp #include <sys/module.h>
39c9a970a7SAlan Cox #include <sys/kthread.h>
40ee877a35SJohn Dyson #include <sys/fcntl.h>
41ee877a35SJohn Dyson #include <sys/file.h>
42104a9b7eSAlexander Kabaev #include <sys/limits.h>
43fdebd4f0SBruce Evans #include <sys/lock.h>
4435e0e5b3SJohn Baldwin #include <sys/mutex.h>
45ee877a35SJohn Dyson #include <sys/unistd.h>
466aeb05d7STom Rhodes #include <sys/posix4.h>
47ee877a35SJohn Dyson #include <sys/proc.h>
482d2f8ae7SBruce Evans #include <sys/resourcevar.h>
49ee877a35SJohn Dyson #include <sys/signalvar.h>
50496ab053SKonstantin Belousov #include <sys/syscallsubr.h>
51bfbbc4aaSJason Evans #include <sys/protosw.h>
5289f6b863SAttilio Rao #include <sys/rwlock.h>
531ce91824SDavid Xu #include <sys/sema.h>
541ce91824SDavid Xu #include <sys/socket.h>
55bfbbc4aaSJason Evans #include <sys/socketvar.h>
5621d56e9cSAlfred Perlstein #include <sys/syscall.h>
5721d56e9cSAlfred Perlstein #include <sys/sysent.h>
58a624e84fSJohn Dyson #include <sys/sysctl.h>
599c20dc99SJohn Baldwin #include <sys/syslog.h>
60ee99e978SBruce Evans #include <sys/sx.h>
611ce91824SDavid Xu #include <sys/taskqueue.h>
62fd3bf775SJohn Dyson #include <sys/vnode.h>
63fd3bf775SJohn Dyson #include <sys/conf.h>
64cb679c38SJonathan Lemon #include <sys/event.h>
6599eee864SDavid Xu #include <sys/mount.h>
66f743d981SAlexander Motin #include <geom/geom.h>
67ee877a35SJohn Dyson 
681ce91824SDavid Xu #include <machine/atomic.h>
691ce91824SDavid Xu 
70ee877a35SJohn Dyson #include <vm/vm.h>
71f743d981SAlexander Motin #include <vm/vm_page.h>
72ee877a35SJohn Dyson #include <vm/vm_extern.h>
732244ea07SJohn Dyson #include <vm/pmap.h>
742244ea07SJohn Dyson #include <vm/vm_map.h>
7599eee864SDavid Xu #include <vm/vm_object.h>
76c897b813SJeff Roberson #include <vm/uma.h>
77ee877a35SJohn Dyson #include <sys/aio.h>
785aaef07cSJohn Dyson 
79eb8e6d52SEivind Eklund /*
80eb8e6d52SEivind Eklund  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
8199eee864SDavid Xu  * overflow. (XXX will be removed soon.)
82eb8e6d52SEivind Eklund  */
8399eee864SDavid Xu static u_long jobrefid;
842244ea07SJohn Dyson 
8599eee864SDavid Xu /*
8699eee864SDavid Xu  * Counter for aio_fsync.
8799eee864SDavid Xu  */
8899eee864SDavid Xu static uint64_t jobseqno;
8999eee864SDavid Xu 
9084af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC
912244ea07SJohn Dyson #define MAX_AIO_PER_PROC	32
9284af4da6SJohn Dyson #endif
9384af4da6SJohn Dyson 
9484af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC
95913b9329SAlan Somers #define MAX_AIO_QUEUE_PER_PROC	256
9684af4da6SJohn Dyson #endif
9784af4da6SJohn Dyson 
9884af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE
99913b9329SAlan Somers #define MAX_AIO_QUEUE		1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */
10084af4da6SJohn Dyson #endif
10184af4da6SJohn Dyson 
10284af4da6SJohn Dyson #ifndef MAX_BUF_AIO
10384af4da6SJohn Dyson #define MAX_BUF_AIO		16
10484af4da6SJohn Dyson #endif
10584af4da6SJohn Dyson 
106e603be7aSRobert Watson FEATURE(aio, "Asynchronous I/O");
107c45796d5SAlan Somers SYSCTL_DECL(_p1003_1b);
108e603be7aSRobert Watson 
1093858a1f4SJohn Baldwin static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");
110913b9329SAlan Somers static MALLOC_DEFINE(M_AIOS, "aios", "aio_suspend aio control block list");
1113858a1f4SJohn Baldwin 
1120dd6c035SJohn Baldwin static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,
1130dd6c035SJohn Baldwin     "Async IO management");
114eb8e6d52SEivind Eklund 
115f3215338SJohn Baldwin static int enable_aio_unsafe = 0;
116f3215338SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
117f3215338SJohn Baldwin     "Permit asynchronous IO on all file types, not just known-safe types");
118f3215338SJohn Baldwin 
1199c20dc99SJohn Baldwin static unsigned int unsafe_warningcnt = 1;
1209c20dc99SJohn Baldwin SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
1219c20dc99SJohn Baldwin     &unsafe_warningcnt, 0,
1229c20dc99SJohn Baldwin     "Warnings that will be triggered upon failed IO requests on unsafe files");
1239c20dc99SJohn Baldwin 
124303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS;
1250dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
12639314b7dSJohn Baldwin     "Maximum number of kernel processes to use for handling async IO");
127a624e84fSJohn Dyson 
128eb8e6d52SEivind Eklund static int num_aio_procs = 0;
1290dd6c035SJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
13039314b7dSJohn Baldwin     "Number of presently active kernel processes for async IO");
131a624e84fSJohn Dyson 
132eb8e6d52SEivind Eklund /*
133eb8e6d52SEivind Eklund  * The code will adjust the actual number of AIO processes towards this
134eb8e6d52SEivind Eklund  * number when it gets a chance.
135eb8e6d52SEivind Eklund  */
136eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
137eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
1380dd6c035SJohn Baldwin     0,
1390dd6c035SJohn Baldwin     "Preferred number of ready kernel processes for async IO");
140a624e84fSJohn Dyson 
141eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
142eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
143eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
144a624e84fSJohn Dyson 
145eb8e6d52SEivind Eklund static int num_queue_count = 0;
146eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
147eb8e6d52SEivind Eklund     "Number of queued aio requests");
148a624e84fSJohn Dyson 
149eb8e6d52SEivind Eklund static int num_buf_aio = 0;
150eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
151eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
152fd3bf775SJohn Dyson 
1538091e52bSJohn Baldwin static int num_unmapped_aio = 0;
1548091e52bSJohn Baldwin SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio,
1558091e52bSJohn Baldwin     0,
1568091e52bSJohn Baldwin     "Number of aio requests presently handled by unmapped I/O buffers");
1578091e52bSJohn Baldwin 
15839314b7dSJohn Baldwin /* Number of async I/O processes in the process of being started */
159a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
160eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
161fd3bf775SJohn Dyson 
162eb8e6d52SEivind Eklund static int aiod_lifetime;
163eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
164eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
16584af4da6SJohn Dyson 
166eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
167eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
1680dd6c035SJohn Baldwin     0,
1690dd6c035SJohn Baldwin     "Maximum active aio requests per process (stored in the process)");
170eb8e6d52SEivind Eklund 
171eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
172eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
173eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
174eb8e6d52SEivind Eklund     "Maximum queued aio requests per process (stored in the process)");
175eb8e6d52SEivind Eklund 
176eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
177eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
178eb8e6d52SEivind Eklund     "Maximum buf aio requests per process (stored in the process)");
179eb8e6d52SEivind Eklund 
180913b9329SAlan Somers /*
181913b9329SAlan Somers  * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires
182913b9329SAlan Somers  * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with
183913b9329SAlan Somers  * the p1003_1b.aio_listio_max sysctl declared below.
184913b9329SAlan Somers  */
185c45796d5SAlan Somers SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max,
186913b9329SAlan Somers     CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc,
187913b9329SAlan Somers     0, "Maximum aio requests for a single lio_listio call");
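
/*
 * Editor's illustrative note (not part of the original file): a minimal
 * sketch of how userland is expected to read this limit, assuming the
 * usual libc mapping of sysconf(3) onto the p1003_1b sysctl tree:
 *
 *	#include <unistd.h>
 *
 *	long lio_max = sysconf(_SC_AIO_LISTIO_MAX);
 *
 * Because the sysctl is backed by max_aio_queue_per_proc above, tuning
 * vfs.aio.max_aio_queue_per_proc is also reflected in the sysconf(3)
 * value.
 */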
188c45796d5SAlan Somers 
189399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
1900972628aSDavid Xu typedef struct oaiocb {
1910972628aSDavid Xu 	int	aio_fildes;		/* File descriptor */
1920972628aSDavid Xu 	off_t	aio_offset;		/* File offset for I/O */
1930972628aSDavid Xu 	volatile void *aio_buf;         /* I/O buffer in process space */
1940972628aSDavid Xu 	size_t	aio_nbytes;		/* Number of bytes for I/O */
1950972628aSDavid Xu 	struct	osigevent aio_sigevent;	/* Signal to deliver */
1960972628aSDavid Xu 	int	aio_lio_opcode;		/* LIO opcode */
1970972628aSDavid Xu 	int	aio_reqprio;		/* Request priority -- ignored */
1980972628aSDavid Xu 	struct	__aiocb_private	_aiocb_private;
1990972628aSDavid Xu } oaiocb_t;
200399e8c17SJohn Baldwin #endif
2010972628aSDavid Xu 
2021aa4c324SDavid Xu /*
2035652770dSJohn Baldwin  * Below is a key of locks used to protect each member of struct kaiocb,
2041aa4c324SDavid Xu  * aioliojob, and kaioinfo, and any backends.
2051aa4c324SDavid Xu  *
2061aa4c324SDavid Xu  * * - need not be protected
207759ccccaSDavid Xu  * a - locked by kaioinfo lock
2081aa4c324SDavid Xu  * b - locked by backend lock, the backend lock can be null in some cases,
2091aa4c324SDavid Xu  *     for example, BIO belongs to this type, in this case, proc lock is
2101aa4c324SDavid Xu  *     reused.
2111aa4c324SDavid Xu  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
2121aa4c324SDavid Xu  */
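
/*
 * Editor's illustrative note (not part of the original file): a short
 * sketch of how the annotations above are meant to be read.  A field
 * marked (a) is only touched with the owning process' kaioinfo lock
 * held, for example:
 *
 *	AIO_LOCK(ki);
 *	ki->kaio_count++;	(a) field, kaioinfo lock held
 *	AIO_UNLOCK(ki);
 *
 * whereas (c) state, such as the global aio_jobs list walked by
 * aio_selectjob(), is protected by aio_job_mtx, and (*) fields are
 * stable after initialization and need no locking at all.
 */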
2131aa4c324SDavid Xu 
2141aa4c324SDavid Xu /*
215f3215338SJohn Baldwin  * If the routine that services an AIO request blocks while running in an
216f3215338SJohn Baldwin  * AIO kernel process it can starve other I/O requests.  BIO requests
217f3215338SJohn Baldwin  * queued via aio_qphysio() complete in GEOM and do not use AIO kernel
218f3215338SJohn Baldwin  * processes at all.  Socket I/O requests use a separate pool of
219f3215338SJohn Baldwin  * kprocs and also force non-blocking I/O.  Other file I/O requests
220f3215338SJohn Baldwin  * use the generic fo_read/fo_write operations which can block.  The
221f3215338SJohn Baldwin  * fsync and mlock operations can also block while executing.  Ideally
222f3215338SJohn Baldwin  * none of these requests would block while executing.
223f3215338SJohn Baldwin  *
224f3215338SJohn Baldwin  * Note that the service routines cannot toggle O_NONBLOCK in the file
225f3215338SJohn Baldwin  * structure directly while handling a request due to races with
226f3215338SJohn Baldwin  * userland threads.
2271aa4c324SDavid Xu  */
2281aa4c324SDavid Xu 
22948dac059SAlan Cox /* jobflags */
230f3215338SJohn Baldwin #define	KAIOCB_QUEUEING		0x01
231f3215338SJohn Baldwin #define	KAIOCB_CANCELLED	0x02
232f3215338SJohn Baldwin #define	KAIOCB_CANCELLING	0x04
2335652770dSJohn Baldwin #define	KAIOCB_CHECKSYNC	0x08
234f3215338SJohn Baldwin #define	KAIOCB_CLEARED		0x10
235f3215338SJohn Baldwin #define	KAIOCB_FINISHED		0x20
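
/*
 * Editor's illustrative note (not part of the original file): a rough
 * lifecycle of these flags as used by the code below:
 *
 *	KAIOCB_QUEUEING    the request is still being queued; aio_complete()
 *	                   defers notification while it is set.
 *	KAIOCB_CANCELLED   cancellation has been requested (aio_cancel_job()).
 *	KAIOCB_CANCELLING  the cancel callback is running; completion
 *	                   notification is deferred until it returns.
 *	KAIOCB_CHECKSYNC   completion must be checked against aio_fsync()
 *	                   jobs waiting on kaio_syncqueue.
 *	KAIOCB_CLEARED     aio_clear_cancel_function() raced with a cancel
 *	                   already in progress.
 *	KAIOCB_FINISHED    aio_complete() has recorded the final status.
 */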
23648dac059SAlan Cox 
2372244ea07SJohn Dyson /*
2382244ea07SJohn Dyson  * AIO process info
2392244ea07SJohn Dyson  */
24084af4da6SJohn Dyson #define AIOP_FREE	0x1			/* proc on free queue */
24184af4da6SJohn Dyson 
24239314b7dSJohn Baldwin struct aioproc {
24339314b7dSJohn Baldwin 	int	aioprocflags;			/* (c) AIO proc flags */
24439314b7dSJohn Baldwin 	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
24539314b7dSJohn Baldwin 	struct	proc *aioproc;			/* (*) the AIO proc */
2462244ea07SJohn Dyson };
2472244ea07SJohn Dyson 
24884af4da6SJohn Dyson /*
24984af4da6SJohn Dyson  * data-structure for lio signal management
25084af4da6SJohn Dyson  */
2511ce91824SDavid Xu struct aioliojob {
2521aa4c324SDavid Xu 	int	lioj_flags;			/* (a) listio flags */
2531aa4c324SDavid Xu 	int	lioj_count;			/* (a) count of jobs in this lio */
2541aa4c324SDavid Xu 	int	lioj_finished_count;		/* (a) count of finished jobs */
2551aa4c324SDavid Xu 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
2561aa4c324SDavid Xu 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
2571aa4c324SDavid Xu 	struct	knlist klist;			/* (a) list of knotes */
2581aa4c324SDavid Xu 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
25984af4da6SJohn Dyson };
2601ce91824SDavid Xu 
26184af4da6SJohn Dyson #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
26284af4da6SJohn Dyson #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
26369cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
26484af4da6SJohn Dyson 
26584af4da6SJohn Dyson /*
26684af4da6SJohn Dyson  * per process aio data structure
26784af4da6SJohn Dyson  */
2682244ea07SJohn Dyson struct kaioinfo {
269759ccccaSDavid Xu 	struct	mtx kaio_mtx;		/* the lock to protect this struct */
2701aa4c324SDavid Xu 	int	kaio_flags;		/* (a) per process kaio flags */
2711aa4c324SDavid Xu 	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
2721aa4c324SDavid Xu 	int	kaio_active_count;	/* (c) number of currently used AIOs */
2731aa4c324SDavid Xu 	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
2741aa4c324SDavid Xu 	int	kaio_count;		/* (a) size of AIO queue */
2751aa4c324SDavid Xu 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
2761aa4c324SDavid Xu 	int	kaio_buffer_count;	/* (a) number of physio buffers */
2775652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
2785652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
2791aa4c324SDavid Xu 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
2805652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
2815652770dSJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
282f3215338SJohn Baldwin 	TAILQ_HEAD(,kaiocb) kaio_syncready;  /* (a) second q for aio_fsync */
28339314b7dSJohn Baldwin 	struct	task kaio_task;		/* (*) task to kick aio processes */
284f3215338SJohn Baldwin 	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
2852244ea07SJohn Dyson };
2862244ea07SJohn Dyson 
287759ccccaSDavid Xu #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
288759ccccaSDavid Xu #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
289759ccccaSDavid Xu #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
290759ccccaSDavid Xu #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
291759ccccaSDavid Xu 
29284af4da6SJohn Dyson #define KAIO_RUNDOWN	0x1	/* process is being run down */
2930dd6c035SJohn Baldwin #define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
294fd3bf775SJohn Dyson 
2953858a1f4SJohn Baldwin /*
2963858a1f4SJohn Baldwin  * Operations used to interact with userland aio control blocks.
2973858a1f4SJohn Baldwin  * Different ABIs provide their own operations.
2983858a1f4SJohn Baldwin  */
2993858a1f4SJohn Baldwin struct aiocb_ops {
3003858a1f4SJohn Baldwin 	int	(*copyin)(struct aiocb *ujob, struct aiocb *kjob);
3013858a1f4SJohn Baldwin 	long	(*fetch_status)(struct aiocb *ujob);
3023858a1f4SJohn Baldwin 	long	(*fetch_error)(struct aiocb *ujob);
3033858a1f4SJohn Baldwin 	int	(*store_status)(struct aiocb *ujob, long status);
3043858a1f4SJohn Baldwin 	int	(*store_error)(struct aiocb *ujob, long error);
3053858a1f4SJohn Baldwin 	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
3063858a1f4SJohn Baldwin 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
3073858a1f4SJohn Baldwin };
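
/*
 * Editor's illustrative note (not part of the original file): a native
 * ABI is expected to supply an instance of these operations that copies
 * fields to and from the userland control block.  As a hypothetical
 * sketch (the example_* names are placeholders, not from this file),
 * a store_error callback could be little more than:
 *
 *	static int
 *	example_store_error(struct aiocb *ujob, long error)
 *	{
 *
 *		return (suword(&ujob->_aiocb_private.error, error));
 *	}
 *
 * with the table built by designated initializers:
 *
 *	static struct aiocb_ops example_aiocb_ops = {
 *		.copyin = example_copyin,
 *		.store_error = example_store_error,
 *	};
 *
 * and the remaining callbacks filled in the same way.
 */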
3083858a1f4SJohn Baldwin 
30939314b7dSJohn Baldwin static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
3101ce91824SDavid Xu static struct sema aio_newproc_sem;
3111ce91824SDavid Xu static struct mtx aio_job_mtx;
3125652770dSJohn Baldwin static TAILQ_HEAD(,kaiocb) aio_jobs;			/* (c) Async job list */
3131ce91824SDavid Xu static struct unrhdr *aiod_unr;
3142244ea07SJohn Dyson 
3156a1162d4SAlexander Leidinger void		aio_init_aioinfo(struct proc *p);
316723d37c0SKonstantin Belousov static int	aio_onceonly(void);
3175652770dSJohn Baldwin static int	aio_free_entry(struct kaiocb *job);
3185652770dSJohn Baldwin static void	aio_process_rw(struct kaiocb *job);
3195652770dSJohn Baldwin static void	aio_process_sync(struct kaiocb *job);
3205652770dSJohn Baldwin static void	aio_process_mlock(struct kaiocb *job);
321f3215338SJohn Baldwin static void	aio_schedule_fsync(void *context, int pending);
3221ce91824SDavid Xu static int	aio_newproc(int *);
3235652770dSJohn Baldwin int		aio_aqueue(struct thread *td, struct aiocb *ujob,
3243858a1f4SJohn Baldwin 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
325f3215338SJohn Baldwin static int	aio_queue_file(struct file *fp, struct kaiocb *job);
326f743d981SAlexander Motin static void	aio_physwakeup(struct bio *bp);
32775b8b3b2SJohn Baldwin static void	aio_proc_rundown(void *arg, struct proc *p);
3280dd6c035SJohn Baldwin static void	aio_proc_rundown_exec(void *arg, struct proc *p,
3290dd6c035SJohn Baldwin 		    struct image_params *imgp);
3305652770dSJohn Baldwin static int	aio_qphysio(struct proc *p, struct kaiocb *job);
3311ce91824SDavid Xu static void	aio_daemon(void *param);
332f3215338SJohn Baldwin static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
333005ce8e4SJohn Baldwin static bool	aio_clear_cancel_function_locked(struct kaiocb *job);
334dbbccfe9SDavid Xu static int	aio_kick(struct proc *userp);
33599eee864SDavid Xu static void	aio_kick_nowait(struct proc *userp);
33699eee864SDavid Xu static void	aio_kick_helper(void *context, int pending);
33721d56e9cSAlfred Perlstein static int	filt_aioattach(struct knote *kn);
33821d56e9cSAlfred Perlstein static void	filt_aiodetach(struct knote *kn);
33921d56e9cSAlfred Perlstein static int	filt_aio(struct knote *kn, long hint);
34069cd28daSDoug Ambrisko static int	filt_lioattach(struct knote *kn);
34169cd28daSDoug Ambrisko static void	filt_liodetach(struct knote *kn);
34269cd28daSDoug Ambrisko static int	filt_lio(struct knote *kn, long hint);
3432244ea07SJohn Dyson 
344eb8e6d52SEivind Eklund /*
345eb8e6d52SEivind Eklund  * Zones for:
346eb8e6d52SEivind Eklund  * 	kaio	Per process async io info
34739314b7dSJohn Baldwin  *	aiop	async io process data
348eb8e6d52SEivind Eklund  *	aiocb	async io jobs
349eb8e6d52SEivind Eklund  *	aiolio	list io jobs
350eb8e6d52SEivind Eklund  */
351913b9329SAlan Somers static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiolio_zone;
352fd3bf775SJohn Dyson 
353eb8e6d52SEivind Eklund /* kqueue filters for aio */
354e76d823bSRobert Watson static struct filterops aio_filtops = {
355e76d823bSRobert Watson 	.f_isfd = 0,
356e76d823bSRobert Watson 	.f_attach = filt_aioattach,
357e76d823bSRobert Watson 	.f_detach = filt_aiodetach,
358e76d823bSRobert Watson 	.f_event = filt_aio,
359e76d823bSRobert Watson };
360e76d823bSRobert Watson static struct filterops lio_filtops = {
361e76d823bSRobert Watson 	.f_isfd = 0,
362e76d823bSRobert Watson 	.f_attach = filt_lioattach,
363e76d823bSRobert Watson 	.f_detach = filt_liodetach,
364e76d823bSRobert Watson 	.f_event = filt_lio
365e76d823bSRobert Watson };
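
/*
 * Editor's illustrative note (not part of the original file): these
 * filters back SIGEV_KEVENT completion notification.  A userland
 * caller might use them roughly as follows (sketch only):
 *
 *	struct aiocb cb = { .aio_fildes = fd, .aio_buf = buf,
 *	    .aio_nbytes = sizeof(buf) };
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_read(&cb);
 *
 *	struct kevent ev;
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 *	aio_return((struct aiocb *)ev.udata);
 *
 * EVFILT_LIO is the analogous filter for lio_listio(2) and fires once
 * when every job in the list has completed.
 */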
36621d56e9cSAlfred Perlstein 
36775b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag;
36875b8b3b2SJohn Baldwin 
369c85650caSJohn Baldwin TASKQUEUE_DEFINE_THREAD(aiod_kick);
3701ce91824SDavid Xu 
371eb8e6d52SEivind Eklund /*
372eb8e6d52SEivind Eklund  * Main operations function for use as a kernel module.
373eb8e6d52SEivind Eklund  */
37421d56e9cSAlfred Perlstein static int
37521d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg)
37621d56e9cSAlfred Perlstein {
37721d56e9cSAlfred Perlstein 	int error = 0;
37821d56e9cSAlfred Perlstein 
37921d56e9cSAlfred Perlstein 	switch (cmd) {
38021d56e9cSAlfred Perlstein 	case MOD_LOAD:
38121d56e9cSAlfred Perlstein 		aio_onceonly();
38221d56e9cSAlfred Perlstein 		break;
38321d56e9cSAlfred Perlstein 	case MOD_SHUTDOWN:
38421d56e9cSAlfred Perlstein 		break;
38521d56e9cSAlfred Perlstein 	default:
386f3215338SJohn Baldwin 		error = EOPNOTSUPP;
38721d56e9cSAlfred Perlstein 		break;
38821d56e9cSAlfred Perlstein 	}
38921d56e9cSAlfred Perlstein 	return (error);
39021d56e9cSAlfred Perlstein }
39121d56e9cSAlfred Perlstein 
39221d56e9cSAlfred Perlstein static moduledata_t aio_mod = {
39321d56e9cSAlfred Perlstein 	"aio",
39421d56e9cSAlfred Perlstein 	&aio_modload,
39521d56e9cSAlfred Perlstein 	NULL
39621d56e9cSAlfred Perlstein };
39721d56e9cSAlfred Perlstein 
398399e8c17SJohn Baldwin DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
39921d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1);
40021d56e9cSAlfred Perlstein 
401fd3bf775SJohn Dyson /*
4022244ea07SJohn Dyson  * Startup initialization
4032244ea07SJohn Dyson  */
404723d37c0SKonstantin Belousov static int
40521d56e9cSAlfred Perlstein aio_onceonly(void)
406fd3bf775SJohn Dyson {
40721d56e9cSAlfred Perlstein 
40875b8b3b2SJohn Baldwin 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
40975b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
4100dd6c035SJohn Baldwin 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
4110dd6c035SJohn Baldwin 	    NULL, EVENTHANDLER_PRI_ANY);
41221d56e9cSAlfred Perlstein 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
41369cd28daSDoug Ambrisko 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
4142244ea07SJohn Dyson 	TAILQ_INIT(&aio_freeproc);
4151ce91824SDavid Xu 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
4161ce91824SDavid Xu 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
4172244ea07SJohn Dyson 	TAILQ_INIT(&aio_jobs);
4181ce91824SDavid Xu 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
419c897b813SJeff Roberson 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
420c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
42139314b7dSJohn Baldwin 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
422c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4235652770dSJohn Baldwin 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
424c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4251ce91824SDavid Xu 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
426c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
42784af4da6SJohn Dyson 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
428fd3bf775SJohn Dyson 	jobrefid = 1;
429399e8c17SJohn Baldwin 	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
43086d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
43186d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
432723d37c0SKonstantin Belousov 
433723d37c0SKonstantin Belousov 	return (0);
4342244ea07SJohn Dyson }
4352244ea07SJohn Dyson 
436eb8e6d52SEivind Eklund /*
437bfbbc4aaSJason Evans  * Init the per-process aioinfo structure.  The aioinfo limits are set
438bfbbc4aaSJason Evans  * per-process for user limit (resource) management.
4392244ea07SJohn Dyson  */
4406a1162d4SAlexander Leidinger void
441fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p)
442fd3bf775SJohn Dyson {
4432244ea07SJohn Dyson 	struct kaioinfo *ki;
444ac41f2efSAlfred Perlstein 
445a163d034SWarner Losh 	ki = uma_zalloc(kaio_zone, M_WAITOK);
4469889bbacSKonstantin Belousov 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
44784af4da6SJohn Dyson 	ki->kaio_flags = 0;
448a624e84fSJohn Dyson 	ki->kaio_maxactive_count = max_aio_per_proc;
4492244ea07SJohn Dyson 	ki->kaio_active_count = 0;
450a624e84fSJohn Dyson 	ki->kaio_qallowed_count = max_aio_queue_per_proc;
4511ce91824SDavid Xu 	ki->kaio_count = 0;
45284af4da6SJohn Dyson 	ki->kaio_ballowed_count = max_buf_aio;
453fd3bf775SJohn Dyson 	ki->kaio_buffer_count = 0;
4541ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_all);
4551ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_done);
4562244ea07SJohn Dyson 	TAILQ_INIT(&ki->kaio_jobqueue);
45784af4da6SJohn Dyson 	TAILQ_INIT(&ki->kaio_liojoblist);
45899eee864SDavid Xu 	TAILQ_INIT(&ki->kaio_syncqueue);
459f3215338SJohn Baldwin 	TAILQ_INIT(&ki->kaio_syncready);
46099eee864SDavid Xu 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
461f3215338SJohn Baldwin 	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
4623999ebe3SAlan Cox 	PROC_LOCK(p);
4633999ebe3SAlan Cox 	if (p->p_aioinfo == NULL) {
4643999ebe3SAlan Cox 		p->p_aioinfo = ki;
4653999ebe3SAlan Cox 		PROC_UNLOCK(p);
4663999ebe3SAlan Cox 	} else {
4673999ebe3SAlan Cox 		PROC_UNLOCK(p);
468759ccccaSDavid Xu 		mtx_destroy(&ki->kaio_mtx);
4693999ebe3SAlan Cox 		uma_zfree(kaio_zone, ki);
4702244ea07SJohn Dyson 	}
471bfbbc4aaSJason Evans 
47222035f47SOleksandr Tymoshenko 	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
4731ce91824SDavid Xu 		aio_newproc(NULL);
4742244ea07SJohn Dyson }
4752244ea07SJohn Dyson 
4764c0fb2cfSDavid Xu static int
4774c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
4784c0fb2cfSDavid Xu {
479cf7d9a8cSDavid Xu 	struct thread *td;
480cf7d9a8cSDavid Xu 	int error;
481759ccccaSDavid Xu 
482cf7d9a8cSDavid Xu 	error = sigev_findtd(p, sigev, &td);
483cf7d9a8cSDavid Xu 	if (error)
484cf7d9a8cSDavid Xu 		return (error);
4854c0fb2cfSDavid Xu 	if (!KSI_ONQ(ksi)) {
486cf7d9a8cSDavid Xu 		ksiginfo_set_sigev(ksi, sigev);
4874c0fb2cfSDavid Xu 		ksi->ksi_code = SI_ASYNCIO;
4884c0fb2cfSDavid Xu 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
489cf7d9a8cSDavid Xu 		tdsendsignal(p, td, ksi->ksi_signo, ksi);
4904c0fb2cfSDavid Xu 	}
491759ccccaSDavid Xu 	PROC_UNLOCK(p);
492cf7d9a8cSDavid Xu 	return (error);
4934c0fb2cfSDavid Xu }
4944c0fb2cfSDavid Xu 
4952244ea07SJohn Dyson /*
496bfbbc4aaSJason Evans  * Free a job entry.  The job must already have finished (KAIOCB_FINISHED)
497bfbbc4aaSJason Evans  * and the caller must hold the kaioinfo lock; the lock is dropped and
498bfbbc4aaSJason Evans  * reacquired around releasing the job's file and credential references.
4992244ea07SJohn Dyson  */
50088ed460eSAlan Cox static int
5015652770dSJohn Baldwin aio_free_entry(struct kaiocb *job)
502fd3bf775SJohn Dyson {
5032244ea07SJohn Dyson 	struct kaioinfo *ki;
5041ce91824SDavid Xu 	struct aioliojob *lj;
5052244ea07SJohn Dyson 	struct proc *p;
5062244ea07SJohn Dyson 
5075652770dSJohn Baldwin 	p = job->userproc;
5081ce91824SDavid Xu 	MPASS(curproc == p);
5092244ea07SJohn Dyson 	ki = p->p_aioinfo;
5101ce91824SDavid Xu 	MPASS(ki != NULL);
5111ce91824SDavid Xu 
512759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
513f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
514759ccccaSDavid Xu 
5151ce91824SDavid Xu 	atomic_subtract_int(&num_queue_count, 1);
5161ce91824SDavid Xu 
5171ce91824SDavid Xu 	ki->kaio_count--;
5181ce91824SDavid Xu 	MPASS(ki->kaio_count >= 0);
5191ce91824SDavid Xu 
5205652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_done, job, plist);
5215652770dSJohn Baldwin 	TAILQ_REMOVE(&ki->kaio_all, job, allist);
52227b8220dSDavid Xu 
5235652770dSJohn Baldwin 	lj = job->lio;
52484af4da6SJohn Dyson 	if (lj) {
5251ce91824SDavid Xu 		lj->lioj_count--;
5261ce91824SDavid Xu 		lj->lioj_finished_count--;
5271ce91824SDavid Xu 
528a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
5291ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
5301ce91824SDavid Xu 			/* lio is going away, we need to destroy any knotes */
5311ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
532759ccccaSDavid Xu 			PROC_LOCK(p);
5331ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
534759ccccaSDavid Xu 			PROC_UNLOCK(p);
5351ce91824SDavid Xu 			uma_zfree(aiolio_zone, lj);
53684af4da6SJohn Dyson 		}
53784af4da6SJohn Dyson 	}
5381ce91824SDavid Xu 
5395652770dSJohn Baldwin 	/* job is going away, we need to destroy any knotes */
5405652770dSJohn Baldwin 	knlist_delete(&job->klist, curthread, 1);
541759ccccaSDavid Xu 	PROC_LOCK(p);
5425652770dSJohn Baldwin 	sigqueue_take(&job->ksi);
543759ccccaSDavid Xu 	PROC_UNLOCK(p);
5441ce91824SDavid Xu 
545759ccccaSDavid Xu 	AIO_UNLOCK(ki);
5462a522eb9SJohn Baldwin 
5472a522eb9SJohn Baldwin 	/*
5482a522eb9SJohn Baldwin 	 * The thread argument here is used to find the owning process
5492a522eb9SJohn Baldwin 	 * and is also passed to fo_close() which may pass it to various
5502a522eb9SJohn Baldwin 	 * places such as devsw close() routines.  Because of that, we
5512a522eb9SJohn Baldwin 	 * need a thread pointer from the process owning the job that is
5522a522eb9SJohn Baldwin 	 * persistent and won't disappear out from under us or move to
5532a522eb9SJohn Baldwin 	 * another process.
5542a522eb9SJohn Baldwin 	 *
5552a522eb9SJohn Baldwin 	 * Currently, all the callers of this function call it to remove
5565652770dSJohn Baldwin 	 * a kaiocb from the current process' job list either via a
5572a522eb9SJohn Baldwin 	 * syscall or due to the current process calling exit() or
5582a522eb9SJohn Baldwin 	 * execve().  Thus, we know that p == curproc.  We also know that
5592a522eb9SJohn Baldwin 	 * curthread can't exit since we are curthread.
5602a522eb9SJohn Baldwin 	 *
5612a522eb9SJohn Baldwin 	 * Therefore, we use curthread as the thread to pass to
5622a522eb9SJohn Baldwin 	 * knlist_delete().  This does mean that it is possible for the
5632a522eb9SJohn Baldwin 	 * thread pointer at close time to differ from the thread pointer
5642a522eb9SJohn Baldwin 	 * at open time, but this is already true of file descriptors in
5652a522eb9SJohn Baldwin 	 * a multithreaded process.
566b40ce416SJulian Elischer 	 */
5675652770dSJohn Baldwin 	if (job->fd_file)
5685652770dSJohn Baldwin 		fdrop(job->fd_file, curthread);
5695652770dSJohn Baldwin 	crfree(job->cred);
5705652770dSJohn Baldwin 	uma_zfree(aiocb_zone, job);
571759ccccaSDavid Xu 	AIO_LOCK(ki);
5721ce91824SDavid Xu 
573ac41f2efSAlfred Perlstein 	return (0);
5742244ea07SJohn Dyson }
5752244ea07SJohn Dyson 
576993182e5SAlexander Leidinger static void
5770dd6c035SJohn Baldwin aio_proc_rundown_exec(void *arg, struct proc *p,
5780dd6c035SJohn Baldwin     struct image_params *imgp __unused)
579993182e5SAlexander Leidinger {
580993182e5SAlexander Leidinger    	aio_proc_rundown(arg, p);
581993182e5SAlexander Leidinger }
582993182e5SAlexander Leidinger 
583f3215338SJohn Baldwin static int
584f3215338SJohn Baldwin aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
585f3215338SJohn Baldwin {
586f3215338SJohn Baldwin 	aio_cancel_fn_t *func;
587f3215338SJohn Baldwin 	int cancelled;
588f3215338SJohn Baldwin 
589f3215338SJohn Baldwin 	AIO_LOCK_ASSERT(ki, MA_OWNED);
590f3215338SJohn Baldwin 	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
591f3215338SJohn Baldwin 		return (0);
592f3215338SJohn Baldwin 	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
593f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLED;
594f3215338SJohn Baldwin 
595f3215338SJohn Baldwin 	func = job->cancel_fn;
596f3215338SJohn Baldwin 
597f3215338SJohn Baldwin 	/*
598f3215338SJohn Baldwin 	 * If there is no cancel routine, just leave the job marked as
599f3215338SJohn Baldwin 	 * cancelled.  The job should be in active use by a caller who
600f3215338SJohn Baldwin 	 * will either complete it normally or notice the cancellation
601f3215338SJohn Baldwin 	 * when it fails to install a cancel routine.
602f3215338SJohn Baldwin 	 */
603f3215338SJohn Baldwin 	if (func == NULL)
604f3215338SJohn Baldwin 		return (0);
605f3215338SJohn Baldwin 
606f3215338SJohn Baldwin 	/*
607f3215338SJohn Baldwin 	 * Set the CANCELLING flag so that aio_complete() will defer
608f3215338SJohn Baldwin 	 * completions of this job.  This prevents the job from being
609f3215338SJohn Baldwin 	 * freed out from under the cancel callback.  After the
610f3215338SJohn Baldwin 	 * callback any deferred completion (whether from the callback
611f3215338SJohn Baldwin 	 * or any other source) will be completed.
612f3215338SJohn Baldwin 	 */
613f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_CANCELLING;
614f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
615f3215338SJohn Baldwin 	func(job);
616f3215338SJohn Baldwin 	AIO_LOCK(ki);
617f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_CANCELLING;
618f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
619f3215338SJohn Baldwin 		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
620f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
621f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
622f3215338SJohn Baldwin 	} else {
623f3215338SJohn Baldwin 		/*
624f3215338SJohn Baldwin 		 * The cancel callback might have scheduled an
625f3215338SJohn Baldwin 		 * operation to cancel this request, but it is
626f3215338SJohn Baldwin 		 * only counted as cancelled if the request is
627f3215338SJohn Baldwin 		 * cancelled when the callback returns.
628f3215338SJohn Baldwin 		 */
629f3215338SJohn Baldwin 		cancelled = 0;
630f3215338SJohn Baldwin 	}
631f3215338SJohn Baldwin 	return (cancelled);
632f3215338SJohn Baldwin }
633f3215338SJohn Baldwin 
6342244ea07SJohn Dyson /*
6352244ea07SJohn Dyson  * Rundown the jobs for a given process.
6362244ea07SJohn Dyson  */
63721d56e9cSAlfred Perlstein static void
63875b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p)
639fd3bf775SJohn Dyson {
6402244ea07SJohn Dyson 	struct kaioinfo *ki;
6411ce91824SDavid Xu 	struct aioliojob *lj;
6425652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
6432244ea07SJohn Dyson 
6442a522eb9SJohn Baldwin 	KASSERT(curthread->td_proc == p,
6452a522eb9SJohn Baldwin 	    ("%s: called on non-curproc", __func__));
6462244ea07SJohn Dyson 	ki = p->p_aioinfo;
6472244ea07SJohn Dyson 	if (ki == NULL)
6482244ea07SJohn Dyson 		return;
6492244ea07SJohn Dyson 
650759ccccaSDavid Xu 	AIO_LOCK(ki);
65127b8220dSDavid Xu 	ki->kaio_flags |= KAIO_RUNDOWN;
6521ce91824SDavid Xu 
6531ce91824SDavid Xu restart:
654a624e84fSJohn Dyson 
655bfbbc4aaSJason Evans 	/*
6561ce91824SDavid Xu 	 * Try to cancel all pending requests. This code simulates
6571ce91824SDavid Xu 	 * aio_cancel on all pending I/O requests.
658bfbbc4aaSJason Evans 	 */
6595652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
660f3215338SJohn Baldwin 		aio_cancel_job(p, ki, job);
6612244ea07SJohn Dyson 	}
66284af4da6SJohn Dyson 
6631ce91824SDavid Xu 	/* Wait for all running I/O to be finished */
664f3215338SJohn Baldwin 	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
66584af4da6SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
666759ccccaSDavid Xu 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
6671ce91824SDavid Xu 		goto restart;
66884af4da6SJohn Dyson 	}
66984af4da6SJohn Dyson 
6701ce91824SDavid Xu 	/* Free all completed I/O requests. */
6715652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
6725652770dSJohn Baldwin 		aio_free_entry(job);
67384af4da6SJohn Dyson 
6741ce91824SDavid Xu 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
675a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
67684af4da6SJohn Dyson 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
6771ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
678759ccccaSDavid Xu 			PROC_LOCK(p);
6791ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
680759ccccaSDavid Xu 			PROC_UNLOCK(p);
681c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
682f4f0ecefSJohn Dyson 		} else {
683a9bf5e37SDavid Xu 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
684a9bf5e37SDavid Xu 			    lj->lioj_count, lj->lioj_finished_count);
68584af4da6SJohn Dyson 		}
686f4f0ecefSJohn Dyson 	}
687759ccccaSDavid Xu 	AIO_UNLOCK(ki);
688c85650caSJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
689f3215338SJohn Baldwin 	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
6905114048bSKonstantin Belousov 	mtx_destroy(&ki->kaio_mtx);
691c897b813SJeff Roberson 	uma_zfree(kaio_zone, ki);
692a624e84fSJohn Dyson 	p->p_aioinfo = NULL;
6932244ea07SJohn Dyson }
6942244ea07SJohn Dyson 
6952244ea07SJohn Dyson /*
696bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
6972244ea07SJohn Dyson  */
6985652770dSJohn Baldwin static struct kaiocb *
69939314b7dSJohn Baldwin aio_selectjob(struct aioproc *aiop)
700fd3bf775SJohn Dyson {
7015652770dSJohn Baldwin 	struct kaiocb *job;
702bfbbc4aaSJason Evans 	struct kaioinfo *ki;
703bfbbc4aaSJason Evans 	struct proc *userp;
7042244ea07SJohn Dyson 
7051ce91824SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
706f3215338SJohn Baldwin restart:
7075652770dSJohn Baldwin 	TAILQ_FOREACH(job, &aio_jobs, list) {
7085652770dSJohn Baldwin 		userp = job->userproc;
7092244ea07SJohn Dyson 		ki = userp->p_aioinfo;
7102244ea07SJohn Dyson 
7112244ea07SJohn Dyson 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
7125652770dSJohn Baldwin 			TAILQ_REMOVE(&aio_jobs, job, list);
713f3215338SJohn Baldwin 			if (!aio_clear_cancel_function(job))
714f3215338SJohn Baldwin 				goto restart;
715f3215338SJohn Baldwin 
7161ce91824SDavid Xu 			/* Account for currently active jobs. */
7171ce91824SDavid Xu 			ki->kaio_active_count++;
7181ce91824SDavid Xu 			break;
7191ce91824SDavid Xu 		}
7201ce91824SDavid Xu 	}
7215652770dSJohn Baldwin 	return (job);
7222244ea07SJohn Dyson }
7232244ea07SJohn Dyson 
7242244ea07SJohn Dyson /*
7250dd6c035SJohn Baldwin  * Move all data to a permanent storage device.  This code
7260dd6c035SJohn Baldwin  * simulates the fsync syscall.
72799eee864SDavid Xu  */
72899eee864SDavid Xu static int
72999eee864SDavid Xu aio_fsync_vnode(struct thread *td, struct vnode *vp)
73099eee864SDavid Xu {
73199eee864SDavid Xu 	struct mount *mp;
73299eee864SDavid Xu 	int error;
73399eee864SDavid Xu 
73499eee864SDavid Xu 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
73599eee864SDavid Xu 		goto drop;
736cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
73799eee864SDavid Xu 	if (vp->v_object != NULL) {
73889f6b863SAttilio Rao 		VM_OBJECT_WLOCK(vp->v_object);
73999eee864SDavid Xu 		vm_object_page_clean(vp->v_object, 0, 0, 0);
74089f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(vp->v_object);
74199eee864SDavid Xu 	}
74299eee864SDavid Xu 	error = VOP_FSYNC(vp, MNT_WAIT, td);
74399eee864SDavid Xu 
74422db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
74599eee864SDavid Xu 	vn_finished_write(mp);
74699eee864SDavid Xu drop:
74799eee864SDavid Xu 	return (error);
74899eee864SDavid Xu }
74999eee864SDavid Xu 
75099eee864SDavid Xu /*
751f95c13dbSGleb Smirnoff  * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
752f95c13dbSGleb Smirnoff  * does the I/O request for the non-physio version of the operations.  The
753f95c13dbSGleb Smirnoff  * normal vn operations are used, and this code should work in all instances
754f95c13dbSGleb Smirnoff  * for every type of file, including pipes, sockets, fifos, and regular files.
7551ce91824SDavid Xu  *
7561aa4c324SDavid Xu  * XXX I don't think it works well for socket, pipe, and fifo.
7572244ea07SJohn Dyson  */
75888ed460eSAlan Cox static void
7595652770dSJohn Baldwin aio_process_rw(struct kaiocb *job)
760fd3bf775SJohn Dyson {
761f8f750c5SRobert Watson 	struct ucred *td_savedcred;
762b40ce416SJulian Elischer 	struct thread *td;
7632244ea07SJohn Dyson 	struct aiocb *cb;
7642244ea07SJohn Dyson 	struct file *fp;
7652244ea07SJohn Dyson 	struct uio auio;
7662244ea07SJohn Dyson 	struct iovec aiov;
767bb430bc7SJohn Baldwin 	ssize_t cnt;
768b1012d80SJohn Baldwin 	long msgsnd_st, msgsnd_end;
769b1012d80SJohn Baldwin 	long msgrcv_st, msgrcv_end;
770b1012d80SJohn Baldwin 	long oublock_st, oublock_end;
771b1012d80SJohn Baldwin 	long inblock_st, inblock_end;
7722244ea07SJohn Dyson 	int error;
7732244ea07SJohn Dyson 
7745652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
7755652770dSJohn Baldwin 	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
7765652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
777f95c13dbSGleb Smirnoff 
778f3215338SJohn Baldwin 	aio_switch_vmspace(job);
779b40ce416SJulian Elischer 	td = curthread;
780f8f750c5SRobert Watson 	td_savedcred = td->td_ucred;
7815652770dSJohn Baldwin 	td->td_ucred = job->cred;
7825652770dSJohn Baldwin 	cb = &job->uaiocb;
7835652770dSJohn Baldwin 	fp = job->fd_file;
784bfbbc4aaSJason Evans 
78591369fc7SAlan Cox 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
7862244ea07SJohn Dyson 	aiov.iov_len = cb->aio_nbytes;
7872244ea07SJohn Dyson 
7882244ea07SJohn Dyson 	auio.uio_iov = &aiov;
7892244ea07SJohn Dyson 	auio.uio_iovcnt = 1;
7909b16adc1SAlan Cox 	auio.uio_offset = cb->aio_offset;
7912244ea07SJohn Dyson 	auio.uio_resid = cb->aio_nbytes;
7922244ea07SJohn Dyson 	cnt = cb->aio_nbytes;
7932244ea07SJohn Dyson 	auio.uio_segflg = UIO_USERSPACE;
794b40ce416SJulian Elischer 	auio.uio_td = td;
7952244ea07SJohn Dyson 
796b1012d80SJohn Baldwin 	msgrcv_st = td->td_ru.ru_msgrcv;
797b1012d80SJohn Baldwin 	msgsnd_st = td->td_ru.ru_msgsnd;
7981c4bcd05SJeff Roberson 	inblock_st = td->td_ru.ru_inblock;
7991c4bcd05SJeff Roberson 	oublock_st = td->td_ru.ru_oublock;
800b1012d80SJohn Baldwin 
801279d7226SMatthew Dillon 	/*
802a9bf5e37SDavid Xu 	 * aio_aqueue() acquires a reference to the file that is
8039b16adc1SAlan Cox 	 * released in aio_free_entry().
804279d7226SMatthew Dillon 	 */
8052244ea07SJohn Dyson 	if (cb->aio_lio_opcode == LIO_READ) {
8062244ea07SJohn Dyson 		auio.uio_rw = UIO_READ;
8075114048bSKonstantin Belousov 		if (auio.uio_resid == 0)
8085114048bSKonstantin Belousov 			error = 0;
8095114048bSKonstantin Belousov 		else
810b40ce416SJulian Elischer 			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8112244ea07SJohn Dyson 	} else {
8126d53aa62SDavid Xu 		if (fp->f_type == DTYPE_VNODE)
8136d53aa62SDavid Xu 			bwillwrite();
8142244ea07SJohn Dyson 		auio.uio_rw = UIO_WRITE;
815b40ce416SJulian Elischer 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8162244ea07SJohn Dyson 	}
817b1012d80SJohn Baldwin 	msgrcv_end = td->td_ru.ru_msgrcv;
818b1012d80SJohn Baldwin 	msgsnd_end = td->td_ru.ru_msgsnd;
8191c4bcd05SJeff Roberson 	inblock_end = td->td_ru.ru_inblock;
8201c4bcd05SJeff Roberson 	oublock_end = td->td_ru.ru_oublock;
821fd3bf775SJohn Dyson 
822b1012d80SJohn Baldwin 	job->msgrcv = msgrcv_end - msgrcv_st;
823b1012d80SJohn Baldwin 	job->msgsnd = msgsnd_end - msgsnd_st;
824b1012d80SJohn Baldwin 	job->inblock = inblock_end - inblock_st;
825b1012d80SJohn Baldwin 	job->outblock = oublock_end - oublock_st;
8262244ea07SJohn Dyson 
827bfbbc4aaSJason Evans 	if ((error) && (auio.uio_resid != cnt)) {
8282244ea07SJohn Dyson 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
8292244ea07SJohn Dyson 			error = 0;
83019eb87d2SJohn Baldwin 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
8315652770dSJohn Baldwin 			PROC_LOCK(job->userproc);
8325652770dSJohn Baldwin 			kern_psignal(job->userproc, SIGPIPE);
8335652770dSJohn Baldwin 			PROC_UNLOCK(job->userproc);
83419eb87d2SJohn Baldwin 		}
8352244ea07SJohn Dyson 	}
8362244ea07SJohn Dyson 
8372244ea07SJohn Dyson 	cnt -= auio.uio_resid;
838f8f750c5SRobert Watson 	td->td_ucred = td_savedcred;
839f0ec1740SJohn Baldwin 	if (error)
840f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
841f0ec1740SJohn Baldwin 	else
842f0ec1740SJohn Baldwin 		aio_complete(job, cnt, 0);
8432244ea07SJohn Dyson }
8442244ea07SJohn Dyson 
84569cd28daSDoug Ambrisko static void
8465652770dSJohn Baldwin aio_process_sync(struct kaiocb *job)
847f95c13dbSGleb Smirnoff {
848f95c13dbSGleb Smirnoff 	struct thread *td = curthread;
849f95c13dbSGleb Smirnoff 	struct ucred *td_savedcred = td->td_ucred;
8505652770dSJohn Baldwin 	struct file *fp = job->fd_file;
851f95c13dbSGleb Smirnoff 	int error = 0;
852f95c13dbSGleb Smirnoff 
8535652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
8545652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
855f95c13dbSGleb Smirnoff 
8565652770dSJohn Baldwin 	td->td_ucred = job->cred;
857f95c13dbSGleb Smirnoff 	if (fp->f_vnode != NULL)
858f95c13dbSGleb Smirnoff 		error = aio_fsync_vnode(td, fp->f_vnode);
859f95c13dbSGleb Smirnoff 	td->td_ucred = td_savedcred;
860f0ec1740SJohn Baldwin 	if (error)
861f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
862f0ec1740SJohn Baldwin 	else
863f0ec1740SJohn Baldwin 		aio_complete(job, 0, 0);
864f95c13dbSGleb Smirnoff }
865f95c13dbSGleb Smirnoff 
866f95c13dbSGleb Smirnoff static void
8675652770dSJohn Baldwin aio_process_mlock(struct kaiocb *job)
8686160e12cSGleb Smirnoff {
8695652770dSJohn Baldwin 	struct aiocb *cb = &job->uaiocb;
8706160e12cSGleb Smirnoff 	int error;
8716160e12cSGleb Smirnoff 
8725652770dSJohn Baldwin 	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
8735652770dSJohn Baldwin 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
8746160e12cSGleb Smirnoff 
875f3215338SJohn Baldwin 	aio_switch_vmspace(job);
876496ab053SKonstantin Belousov 	error = kern_mlock(job->userproc, job->cred,
877496ab053SKonstantin Belousov 	    __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes);
878496ab053SKonstantin Belousov 	aio_complete(job, error != 0 ? -1 : 0, error);
8796160e12cSGleb Smirnoff }
8806160e12cSGleb Smirnoff 
8816160e12cSGleb Smirnoff static void
882f3215338SJohn Baldwin aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
8831ce91824SDavid Xu {
8841ce91824SDavid Xu 	struct aioliojob *lj;
88569cd28daSDoug Ambrisko 	struct kaioinfo *ki;
8865652770dSJohn Baldwin 	struct kaiocb *sjob, *sjobn;
8871ce91824SDavid Xu 	int lj_done;
888f3215338SJohn Baldwin 	bool schedule_fsync;
88969cd28daSDoug Ambrisko 
89069cd28daSDoug Ambrisko 	ki = userp->p_aioinfo;
891759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
8925652770dSJohn Baldwin 	lj = job->lio;
89369cd28daSDoug Ambrisko 	lj_done = 0;
89469cd28daSDoug Ambrisko 	if (lj) {
8951ce91824SDavid Xu 		lj->lioj_finished_count++;
8961ce91824SDavid Xu 		if (lj->lioj_count == lj->lioj_finished_count)
89769cd28daSDoug Ambrisko 			lj_done = 1;
89869cd28daSDoug Ambrisko 	}
8995652770dSJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
900f3215338SJohn Baldwin 	MPASS(job->jobflags & KAIOCB_FINISHED);
90127b8220dSDavid Xu 
90227b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_RUNDOWN)
90327b8220dSDavid Xu 		goto notification_done;
90427b8220dSDavid Xu 
9055652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
9065652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
9075652770dSJohn Baldwin 		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
9081ce91824SDavid Xu 
9095652770dSJohn Baldwin 	KNOTE_LOCKED(&job->klist, 1);
9101ce91824SDavid Xu 
91169cd28daSDoug Ambrisko 	if (lj_done) {
9121ce91824SDavid Xu 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
91369cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
9141ce91824SDavid Xu 			KNOTE_LOCKED(&lj->klist, 1);
91569cd28daSDoug Ambrisko 		}
9161ce91824SDavid Xu 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
91769cd28daSDoug Ambrisko 		    == LIOJ_SIGNAL
9184c0fb2cfSDavid Xu 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
9194c0fb2cfSDavid Xu 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
9204c0fb2cfSDavid Xu 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
92169cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
92269cd28daSDoug Ambrisko 		}
92369cd28daSDoug Ambrisko 	}
92427b8220dSDavid Xu 
92527b8220dSDavid Xu notification_done:
9265652770dSJohn Baldwin 	if (job->jobflags & KAIOCB_CHECKSYNC) {
927f3215338SJohn Baldwin 		schedule_fsync = false;
9285652770dSJohn Baldwin 		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
929b9a53e16SJohn Baldwin 			if (job->fd_file != sjob->fd_file ||
930b9a53e16SJohn Baldwin 			    job->seqno >= sjob->seqno)
931b9a53e16SJohn Baldwin 				continue;
932b9a53e16SJohn Baldwin 			if (--sjob->pending > 0)
933b9a53e16SJohn Baldwin 				continue;
934b9a53e16SJohn Baldwin 			TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
935005ce8e4SJohn Baldwin 			if (!aio_clear_cancel_function_locked(sjob))
936f3215338SJohn Baldwin 				continue;
937b9a53e16SJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
938f3215338SJohn Baldwin 			schedule_fsync = true;
93999eee864SDavid Xu 		}
940f3215338SJohn Baldwin 		if (schedule_fsync)
941f3215338SJohn Baldwin 			taskqueue_enqueue(taskqueue_aiod_kick,
942f3215338SJohn Baldwin 			    &ki->kaio_sync_task);
94399eee864SDavid Xu 	}
94427b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_WAKEUP) {
94569cd28daSDoug Ambrisko 		ki->kaio_flags &= ~KAIO_WAKEUP;
9461ce91824SDavid Xu 		wakeup(&userp->p_aioinfo);
94769cd28daSDoug Ambrisko 	}
94869cd28daSDoug Ambrisko }
94969cd28daSDoug Ambrisko 
9508a4dc40fSJohn Baldwin static void
951f3215338SJohn Baldwin aio_schedule_fsync(void *context, int pending)
952f3215338SJohn Baldwin {
953f3215338SJohn Baldwin 	struct kaioinfo *ki;
954f3215338SJohn Baldwin 	struct kaiocb *job;
955f3215338SJohn Baldwin 
956f3215338SJohn Baldwin 	ki = context;
957f3215338SJohn Baldwin 	AIO_LOCK(ki);
958f3215338SJohn Baldwin 	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
959f3215338SJohn Baldwin 		job = TAILQ_FIRST(&ki->kaio_syncready);
960f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
961f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
962f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
963f3215338SJohn Baldwin 		AIO_LOCK(ki);
964f3215338SJohn Baldwin 	}
965f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
966f3215338SJohn Baldwin }
967f3215338SJohn Baldwin 
968f3215338SJohn Baldwin bool
969f3215338SJohn Baldwin aio_cancel_cleared(struct kaiocb *job)
970f3215338SJohn Baldwin {
971f3215338SJohn Baldwin 
972f3215338SJohn Baldwin 	/*
973f3215338SJohn Baldwin 	 * The caller should hold the same queue lock held when
974f3215338SJohn Baldwin 	 * aio_clear_cancel_function() was called and set this flag
975f3215338SJohn Baldwin 	 * ensuring this check sees an up-to-date value.  However,
976f3215338SJohn Baldwin 	 * there is no way to assert that.
977f3215338SJohn Baldwin 	 */
978f3215338SJohn Baldwin 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
979f3215338SJohn Baldwin }
980f3215338SJohn Baldwin 
981005ce8e4SJohn Baldwin static bool
982005ce8e4SJohn Baldwin aio_clear_cancel_function_locked(struct kaiocb *job)
983005ce8e4SJohn Baldwin {
984005ce8e4SJohn Baldwin 
985005ce8e4SJohn Baldwin 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
986005ce8e4SJohn Baldwin 	MPASS(job->cancel_fn != NULL);
987005ce8e4SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLING) {
988005ce8e4SJohn Baldwin 		job->jobflags |= KAIOCB_CLEARED;
989005ce8e4SJohn Baldwin 		return (false);
990005ce8e4SJohn Baldwin 	}
991005ce8e4SJohn Baldwin 	job->cancel_fn = NULL;
992005ce8e4SJohn Baldwin 	return (true);
993005ce8e4SJohn Baldwin }
994005ce8e4SJohn Baldwin 
995f3215338SJohn Baldwin bool
996f3215338SJohn Baldwin aio_clear_cancel_function(struct kaiocb *job)
997f3215338SJohn Baldwin {
998f3215338SJohn Baldwin 	struct kaioinfo *ki;
999005ce8e4SJohn Baldwin 	bool ret;
1000f3215338SJohn Baldwin 
1001f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1002f3215338SJohn Baldwin 	AIO_LOCK(ki);
1003005ce8e4SJohn Baldwin 	ret = aio_clear_cancel_function_locked(job);
1004f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1005005ce8e4SJohn Baldwin 	return (ret);
1006f3215338SJohn Baldwin }
1007005ce8e4SJohn Baldwin 
1008005ce8e4SJohn Baldwin static bool
1009005ce8e4SJohn Baldwin aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
1010005ce8e4SJohn Baldwin {
1011005ce8e4SJohn Baldwin 
1012005ce8e4SJohn Baldwin 	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
1013005ce8e4SJohn Baldwin 	if (job->jobflags & KAIOCB_CANCELLED)
1014005ce8e4SJohn Baldwin 		return (false);
1015005ce8e4SJohn Baldwin 	job->cancel_fn = func;
1016f3215338SJohn Baldwin 	return (true);
1017f3215338SJohn Baldwin }
1018f3215338SJohn Baldwin 
1019f3215338SJohn Baldwin bool
1020f3215338SJohn Baldwin aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1021f3215338SJohn Baldwin {
1022f3215338SJohn Baldwin 	struct kaioinfo *ki;
1023005ce8e4SJohn Baldwin 	bool ret;
1024f3215338SJohn Baldwin 
1025f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1026f3215338SJohn Baldwin 	AIO_LOCK(ki);
1027005ce8e4SJohn Baldwin 	ret = aio_set_cancel_function_locked(job, func);
1028f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1029005ce8e4SJohn Baldwin 	return (ret);
1030f3215338SJohn Baldwin }
1031f3215338SJohn Baldwin 
1032f3215338SJohn Baldwin void
1033f3215338SJohn Baldwin aio_complete(struct kaiocb *job, long status, int error)
1034f3215338SJohn Baldwin {
1035f3215338SJohn Baldwin 	struct kaioinfo *ki;
1036f3215338SJohn Baldwin 	struct proc *userp;
1037f3215338SJohn Baldwin 
1038f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.error = error;
1039f3215338SJohn Baldwin 	job->uaiocb._aiocb_private.status = status;
1040f3215338SJohn Baldwin 
1041f3215338SJohn Baldwin 	userp = job->userproc;
1042f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
1043f3215338SJohn Baldwin 
1044f3215338SJohn Baldwin 	AIO_LOCK(ki);
1045f3215338SJohn Baldwin 	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1046f3215338SJohn Baldwin 	    ("duplicate aio_complete"));
1047f3215338SJohn Baldwin 	job->jobflags |= KAIOCB_FINISHED;
1048f3215338SJohn Baldwin 	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1049f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1050f3215338SJohn Baldwin 		aio_bio_done_notify(userp, job);
1051f3215338SJohn Baldwin 	}
1052f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1053f3215338SJohn Baldwin }
1054f3215338SJohn Baldwin 
1055f3215338SJohn Baldwin void
1056f3215338SJohn Baldwin aio_cancel(struct kaiocb *job)
1057f3215338SJohn Baldwin {
1058f3215338SJohn Baldwin 
1059f3215338SJohn Baldwin 	aio_complete(job, -1, ECANCELED);
1060f3215338SJohn Baldwin }
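
/*
 * Editor's illustrative note (not part of the original file): a backend
 * that parks jobs on its own queue cooperates with the functions above
 * roughly as follows (the example_* names and queue are placeholders):
 *
 *	static void
 *	example_cancel(struct kaiocb *job)
 *	{
 *
 *		mtx_lock(&example_lock);
 *		if (!aio_cancel_cleared(job))
 *			TAILQ_REMOVE(&example_queue, job, list);
 *		mtx_unlock(&example_lock);
 *		aio_cancel(job);
 *	}
 *
 * When queueing a job the backend calls
 * aio_set_cancel_function(job, example_cancel); if that returns false
 * the job was already cancelled, so the backend cancels it instead of
 * queueing it.  When it later dequeues the job to service it, it calls
 * aio_clear_cancel_function(job) and drops the job if that returns
 * false, because a cancel is then in flight.
 */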
1061f3215338SJohn Baldwin 
1062f3215338SJohn Baldwin void
10635652770dSJohn Baldwin aio_switch_vmspace(struct kaiocb *job)
10648a4dc40fSJohn Baldwin {
10658a4dc40fSJohn Baldwin 
10665652770dSJohn Baldwin 	vmspace_switch_aio(job->userproc->p_vmspace);
10678a4dc40fSJohn Baldwin }
10688a4dc40fSJohn Baldwin 
10692244ea07SJohn Dyson /*
1070f95c13dbSGleb Smirnoff  * The AIO daemon, most of the actual work is done in aio_process_*,
107184af4da6SJohn Dyson  * but the setup (and address space mgmt) is done in this routine.
10722244ea07SJohn Dyson  */
10732244ea07SJohn Dyson static void
10741ce91824SDavid Xu aio_daemon(void *_id)
10752244ea07SJohn Dyson {
10765652770dSJohn Baldwin 	struct kaiocb *job;
107739314b7dSJohn Baldwin 	struct aioproc *aiop;
1078bfbbc4aaSJason Evans 	struct kaioinfo *ki;
1079f3215338SJohn Baldwin 	struct proc *p;
10808a4dc40fSJohn Baldwin 	struct vmspace *myvm;
1081b40ce416SJulian Elischer 	struct thread *td = curthread;
10821ce91824SDavid Xu 	int id = (intptr_t)_id;
10832244ea07SJohn Dyson 
10842244ea07SJohn Dyson 	/*
10858a4dc40fSJohn Baldwin 	 * Grab an extra reference on the daemon's vmspace so that it
10868a4dc40fSJohn Baldwin 	 * doesn't get freed by jobs that switch to a different
10878a4dc40fSJohn Baldwin 	 * vmspace.
10882244ea07SJohn Dyson 	 */
10898a4dc40fSJohn Baldwin 	p = td->td_proc;
10908a4dc40fSJohn Baldwin 	myvm = vmspace_acquire_ref(p);
1091fd3bf775SJohn Dyson 
10928a4dc40fSJohn Baldwin 	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));
1093fd3bf775SJohn Dyson 
1094fd3bf775SJohn Dyson 	/*
1095bfbbc4aaSJason Evans 	 * Allocate and ready the aio control info.  There is one aiop structure
1096bfbbc4aaSJason Evans 	 * per daemon.
1097fd3bf775SJohn Dyson 	 */
1098a163d034SWarner Losh 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
109939314b7dSJohn Baldwin 	aiop->aioproc = p;
110039314b7dSJohn Baldwin 	aiop->aioprocflags = 0;
1101bfbbc4aaSJason Evans 
1102fd3bf775SJohn Dyson 	/*
1103fd3bf775SJohn Dyson 	 * Wake up the parent process.  (The parent sleeps to keep from
1104b40ce416SJulian Elischer 	 * blasting away and creating too many daemons.)
1105fd3bf775SJohn Dyson 	 */
11061ce91824SDavid Xu 	sema_post(&aio_newproc_sem);
11072244ea07SJohn Dyson 
11081ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
1109bfbbc4aaSJason Evans 	for (;;) {
1110fd3bf775SJohn Dyson 		/*
1111fd3bf775SJohn Dyson 		 * Take the daemon off the free queue.
1112fd3bf775SJohn Dyson 		 */
111339314b7dSJohn Baldwin 		if (aiop->aioprocflags & AIOP_FREE) {
11142244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
111539314b7dSJohn Baldwin 			aiop->aioprocflags &= ~AIOP_FREE;
11162244ea07SJohn Dyson 		}
11172244ea07SJohn Dyson 
1118fd3bf775SJohn Dyson 		/*
1119bfbbc4aaSJason Evans 		 * Check for jobs.
1120fd3bf775SJohn Dyson 		 */
11215652770dSJohn Baldwin 		while ((job = aio_selectjob(aiop)) != NULL) {
11221ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11232244ea07SJohn Dyson 
1124f3215338SJohn Baldwin 			ki = job->userproc->p_aioinfo;
1125f3215338SJohn Baldwin 			job->handle_fn(job);
112684af4da6SJohn Dyson 
11279b84335cSDavid Xu 			mtx_lock(&aio_job_mtx);
11289b84335cSDavid Xu 			/* Decrement the active job count. */
11299b84335cSDavid Xu 			ki->kaio_active_count--;
11302244ea07SJohn Dyson 		}
11312244ea07SJohn Dyson 
1132fd3bf775SJohn Dyson 		/*
1133bfbbc4aaSJason Evans 		 * Disconnect from user address space.
1134fd3bf775SJohn Dyson 		 */
11358a4dc40fSJohn Baldwin 		if (p->p_vmspace != myvm) {
11361ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
11378a4dc40fSJohn Baldwin 			vmspace_switch_aio(myvm);
11381ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
11391ce91824SDavid Xu 			/*
11401ce91824SDavid Xu 			 * We have to restart to avoid a race; we only sleep if
11418a4dc40fSJohn Baldwin 			 * no job can be selected.
11421ce91824SDavid Xu 			 */
11431ce91824SDavid Xu 			continue;
1144fd3bf775SJohn Dyson 		}
1145fd3bf775SJohn Dyson 
11461ce91824SDavid Xu 		mtx_assert(&aio_job_mtx, MA_OWNED);
11471ce91824SDavid Xu 
1148fd3bf775SJohn Dyson 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
114939314b7dSJohn Baldwin 		aiop->aioprocflags |= AIOP_FREE;
1150fd3bf775SJohn Dyson 
1151fd3bf775SJohn Dyson 		/*
1152bfbbc4aaSJason Evans 		 * If the daemon is inactive for a long time, allow it to exit,
1153bfbbc4aaSJason Evans 		 * thereby freeing resources.
1154fd3bf775SJohn Dyson 		 */
115539314b7dSJohn Baldwin 		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
11568a4dc40fSJohn Baldwin 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
115739314b7dSJohn Baldwin 		    (aiop->aioprocflags & AIOP_FREE) &&
11588a4dc40fSJohn Baldwin 		    num_aio_procs > target_aio_procs)
11598a4dc40fSJohn Baldwin 			break;
11608a4dc40fSJohn Baldwin 	}
1161fd3bf775SJohn Dyson 	TAILQ_REMOVE(&aio_freeproc, aiop, list);
116284af4da6SJohn Dyson 	num_aio_procs--;
11631ce91824SDavid Xu 	mtx_unlock(&aio_job_mtx);
11641ce91824SDavid Xu 	uma_zfree(aiop_zone, aiop);
11651ce91824SDavid Xu 	free_unr(aiod_unr, id);
11668a4dc40fSJohn Baldwin 	vmspace_free(myvm);
11678a4dc40fSJohn Baldwin 
11688a4dc40fSJohn Baldwin 	KASSERT(p->p_vmspace == myvm,
11698a4dc40fSJohn Baldwin 	    ("AIOD: bad vmspace for exiting daemon"));
11708a4dc40fSJohn Baldwin 	KASSERT(myvm->vm_refcnt > 1,
11718a4dc40fSJohn Baldwin 	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
11723745c395SJulian Elischer 	kproc_exit(0);
1173fd3bf775SJohn Dyson }
11742244ea07SJohn Dyson 
11752244ea07SJohn Dyson /*
1176bfbbc4aaSJason Evans  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1177bfbbc4aaSJason Evans  * AIO daemon modifies its environment itself.
11782244ea07SJohn Dyson  */
11792244ea07SJohn Dyson static int
11801ce91824SDavid Xu aio_newproc(int *start)
1181fd3bf775SJohn Dyson {
11822244ea07SJohn Dyson 	int error;
1183c9a970a7SAlan Cox 	struct proc *p;
11841ce91824SDavid Xu 	int id;
11852244ea07SJohn Dyson 
11861ce91824SDavid Xu 	id = alloc_unr(aiod_unr);
11873745c395SJulian Elischer 	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
11881ce91824SDavid Xu 		RFNOWAIT, 0, "aiod%d", id);
11891ce91824SDavid Xu 	if (error == 0) {
1190fd3bf775SJohn Dyson 		/*
11911ce91824SDavid Xu 		 * Wait until the daemon has started.
1192fd3bf775SJohn Dyson 		 */
11931ce91824SDavid Xu 		sema_wait(&aio_newproc_sem);
11941ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
119584af4da6SJohn Dyson 		num_aio_procs++;
11961ce91824SDavid Xu 		if (start != NULL)
11977f34b521SDavid Xu 			(*start)--;
11981ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
11991ce91824SDavid Xu 	} else {
12001ce91824SDavid Xu 		free_unr(aiod_unr, id);
12011ce91824SDavid Xu 	}
1202ac41f2efSAlfred Perlstein 	return (error);
12032244ea07SJohn Dyson }
12042244ea07SJohn Dyson 
12052244ea07SJohn Dyson /*
120688ed460eSAlan Cox  * Try the high-performance, low-overhead physio method for eligible
120788ed460eSAlan Cox  * VCHR devices.  This method doesn't use an aio helper thread, and
120888ed460eSAlan Cox  * thus has very low overhead.
120988ed460eSAlan Cox  *
1210a9bf5e37SDavid Xu  * Assumes that the caller, aio_aqueue(), has incremented the file
121188ed460eSAlan Cox  * structure's reference count, preventing its deallocation for the
121288ed460eSAlan Cox  * duration of this call.
1213fd3bf775SJohn Dyson  */
121488ed460eSAlan Cox static int
12155652770dSJohn Baldwin aio_qphysio(struct proc *p, struct kaiocb *job)
1216fd3bf775SJohn Dyson {
1217fd3bf775SJohn Dyson 	struct aiocb *cb;
1218fd3bf775SJohn Dyson 	struct file *fp;
1219f743d981SAlexander Motin 	struct bio *bp;
1220f743d981SAlexander Motin 	struct buf *pbuf;
1221fd3bf775SJohn Dyson 	struct vnode *vp;
1222f3215a60SKonstantin Belousov 	struct cdevsw *csw;
1223f3215a60SKonstantin Belousov 	struct cdev *dev;
1224fd3bf775SJohn Dyson 	struct kaioinfo *ki;
12254d805eacSJohn Baldwin 	int error, ref, poff;
1226f743d981SAlexander Motin 	vm_prot_t prot;
1227fd3bf775SJohn Dyson 
12285652770dSJohn Baldwin 	cb = &job->uaiocb;
12295652770dSJohn Baldwin 	fp = job->fd_file;
1230fd3bf775SJohn Dyson 
12316160e12cSGleb Smirnoff 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
1232008626c3SPoul-Henning Kamp 		return (-1);
1233fd3bf775SJohn Dyson 
12343b6d9652SPoul-Henning Kamp 	vp = fp->f_vnode;
1235f743d981SAlexander Motin 	if (vp->v_type != VCHR)
1236f582ac06SBrian Feldman 		return (-1);
1237ad8de0f2SDavid Xu 	if (vp->v_bufobj.bo_bsize == 0)
1238ad8de0f2SDavid Xu 		return (-1);
12395d9d81e7SPoul-Henning Kamp 	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1240008626c3SPoul-Henning Kamp 		return (-1);
1241fd3bf775SJohn Dyson 
1242f3215a60SKonstantin Belousov 	ref = 0;
1243f3215a60SKonstantin Belousov 	csw = devvn_refthread(vp, &dev, &ref);
1244f3215a60SKonstantin Belousov 	if (csw == NULL)
1245f3215a60SKonstantin Belousov 		return (ENXIO);
1246f743d981SAlexander Motin 
1247f743d981SAlexander Motin 	if ((csw->d_flags & D_DISK) == 0) {
1248f743d981SAlexander Motin 		error = -1;
1249f743d981SAlexander Motin 		goto unref;
1250f743d981SAlexander Motin 	}
1251f3215a60SKonstantin Belousov 	if (cb->aio_nbytes > dev->si_iosize_max) {
1252f3215a60SKonstantin Belousov 		error = -1;
1253f3215a60SKonstantin Belousov 		goto unref;
1254f3215a60SKonstantin Belousov 	}
1255f3215a60SKonstantin Belousov 
1256f743d981SAlexander Motin 	ki = p->p_aioinfo;
1257f743d981SAlexander Motin 	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
12584d805eacSJohn Baldwin 	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
1259f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS) {
1260f743d981SAlexander Motin 			error = -1;
1261f743d981SAlexander Motin 			goto unref;
1262f743d981SAlexander Motin 		}
12634d805eacSJohn Baldwin 
12644d805eacSJohn Baldwin 		pbuf = NULL;
1265f743d981SAlexander Motin 	} else {
1266f743d981SAlexander Motin 		if (cb->aio_nbytes > MAXPHYS - poff) {
1267f743d981SAlexander Motin 			error = -1;
1268f743d981SAlexander Motin 			goto unref;
1269f743d981SAlexander Motin 		}
1270f743d981SAlexander Motin 		if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
1271f743d981SAlexander Motin 			error = -1;
1272f743d981SAlexander Motin 			goto unref;
1273f743d981SAlexander Motin 		}
12744d805eacSJohn Baldwin 
12755652770dSJohn Baldwin 		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
1276f743d981SAlexander Motin 		BUF_KERNPROC(pbuf);
1277759ccccaSDavid Xu 		AIO_LOCK(ki);
12781ce91824SDavid Xu 		ki->kaio_buffer_count++;
1279759ccccaSDavid Xu 		AIO_UNLOCK(ki);
12804d805eacSJohn Baldwin 	}
12814d805eacSJohn Baldwin 	job->bp = bp = g_alloc_bio();
12821ce91824SDavid Xu 
1283f743d981SAlexander Motin 	bp->bio_length = cb->aio_nbytes;
1284f743d981SAlexander Motin 	bp->bio_bcount = cb->aio_nbytes;
1285f743d981SAlexander Motin 	bp->bio_done = aio_physwakeup;
1286f743d981SAlexander Motin 	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
1287f743d981SAlexander Motin 	bp->bio_offset = cb->aio_offset;
1288f743d981SAlexander Motin 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1289f743d981SAlexander Motin 	bp->bio_dev = dev;
12905652770dSJohn Baldwin 	bp->bio_caller1 = (void *)job;
1291f743d981SAlexander Motin 
1292f743d981SAlexander Motin 	prot = VM_PROT_READ;
1293f743d981SAlexander Motin 	if (cb->aio_lio_opcode == LIO_READ)
1294f743d981SAlexander Motin 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
12954d805eacSJohn Baldwin 	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
12965652770dSJohn Baldwin 	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
12974d805eacSJohn Baldwin 	    nitems(job->pages));
12984d805eacSJohn Baldwin 	if (job->npages < 0) {
1299f743d981SAlexander Motin 		error = EFAULT;
1300f743d981SAlexander Motin 		goto doerror;
1301f743d981SAlexander Motin 	}
13024d805eacSJohn Baldwin 	if (pbuf != NULL) {
1303f743d981SAlexander Motin 		pmap_qenter((vm_offset_t)pbuf->b_data,
13045652770dSJohn Baldwin 		    job->pages, job->npages);
1305f743d981SAlexander Motin 		bp->bio_data = pbuf->b_data + poff;
13064d805eacSJohn Baldwin 		atomic_add_int(&num_buf_aio, 1);
1307f743d981SAlexander Motin 	} else {
13085652770dSJohn Baldwin 		bp->bio_ma = job->pages;
13095652770dSJohn Baldwin 		bp->bio_ma_n = job->npages;
1310f743d981SAlexander Motin 		bp->bio_ma_offset = poff;
1311f743d981SAlexander Motin 		bp->bio_data = unmapped_buf;
1312f743d981SAlexander Motin 		bp->bio_flags |= BIO_UNMAPPED;
13138091e52bSJohn Baldwin 		atomic_add_int(&num_unmapped_aio, 1);
1314f743d981SAlexander Motin 	}
1315f743d981SAlexander Motin 
1316bfbbc4aaSJason Evans 	/* Perform transfer. */
1317f743d981SAlexander Motin 	csw->d_strategy(bp);
1318f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1319ac41f2efSAlfred Perlstein 	return (0);
1320fd3bf775SJohn Dyson 
1321fd3bf775SJohn Dyson doerror:
13224d805eacSJohn Baldwin 	if (pbuf != NULL) {
1323759ccccaSDavid Xu 		AIO_LOCK(ki);
1324fd3bf775SJohn Dyson 		ki->kaio_buffer_count--;
1325759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1326f743d981SAlexander Motin 		relpbuf(pbuf, NULL);
13275652770dSJohn Baldwin 		job->pbuf = NULL;
1328f743d981SAlexander Motin 	}
1329f743d981SAlexander Motin 	g_destroy_bio(bp);
13305652770dSJohn Baldwin 	job->bp = NULL;
1331f3215a60SKonstantin Belousov unref:
1332f3215a60SKonstantin Belousov 	dev_relthread(dev, ref);
1333fd3bf775SJohn Dyson 	return (error);
1334fd3bf775SJohn Dyson }
1335fd3bf775SJohn Dyson 
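/*
 * Illustrative userland sketch (not part of this file): a request that can
 * take the physio fast path above is an aio_read() on a character disk
 * device whose transfer size is a multiple of the device block size.  The
 * device path and sizes below are assumptions for the example only.
 */
#if 0
#include <aio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>

static void
example_physio_read(void)
{
	struct aiocb cb;
	int fd;

	fd = open("/dev/ada0", O_RDONLY);	/* hypothetical disk device */
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = malloc(4096);
	cb.aio_nbytes = 4096;	/* multiple of an assumed 512-byte sector */
	cb.aio_offset = 0;
	(void)aio_read(&cb);	/* may be serviced via the BIO path, no aiod */
}
#endif
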
1336399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
13373858a1f4SJohn Baldwin static int
13383858a1f4SJohn Baldwin convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
13393858a1f4SJohn Baldwin {
13403858a1f4SJohn Baldwin 
13413858a1f4SJohn Baldwin 	/*
13423858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
13433858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
13443858a1f4SJohn Baldwin 	 */
13453858a1f4SJohn Baldwin 	nsig->sigev_notify = osig->sigev_notify;
13463858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
13473858a1f4SJohn Baldwin 	case SIGEV_NONE:
13483858a1f4SJohn Baldwin 		break;
13493858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
13503858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
13513858a1f4SJohn Baldwin 		break;
13523858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
13533858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
13543858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
13553858a1f4SJohn Baldwin 		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
13563858a1f4SJohn Baldwin 		break;
13573858a1f4SJohn Baldwin 	default:
13583858a1f4SJohn Baldwin 		return (EINVAL);
13593858a1f4SJohn Baldwin 	}
13603858a1f4SJohn Baldwin 	return (0);
13613858a1f4SJohn Baldwin }
13623858a1f4SJohn Baldwin 
13633858a1f4SJohn Baldwin static int
13643858a1f4SJohn Baldwin aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
13653858a1f4SJohn Baldwin {
13663858a1f4SJohn Baldwin 	struct oaiocb *ojob;
13673858a1f4SJohn Baldwin 	int error;
13683858a1f4SJohn Baldwin 
13693858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
13703858a1f4SJohn Baldwin 	error = copyin(ujob, kjob, sizeof(struct oaiocb));
13713858a1f4SJohn Baldwin 	if (error)
13723858a1f4SJohn Baldwin 		return (error);
13733858a1f4SJohn Baldwin 	ojob = (struct oaiocb *)kjob;
13743858a1f4SJohn Baldwin 	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
13753858a1f4SJohn Baldwin }
1376399e8c17SJohn Baldwin #endif
13773858a1f4SJohn Baldwin 
13783858a1f4SJohn Baldwin static int
13793858a1f4SJohn Baldwin aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
13803858a1f4SJohn Baldwin {
13813858a1f4SJohn Baldwin 
13823858a1f4SJohn Baldwin 	return (copyin(ujob, kjob, sizeof(struct aiocb)));
13833858a1f4SJohn Baldwin }
13843858a1f4SJohn Baldwin 
13853858a1f4SJohn Baldwin static long
13863858a1f4SJohn Baldwin aiocb_fetch_status(struct aiocb *ujob)
13873858a1f4SJohn Baldwin {
13883858a1f4SJohn Baldwin 
13893858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.status));
13903858a1f4SJohn Baldwin }
13913858a1f4SJohn Baldwin 
13923858a1f4SJohn Baldwin static long
13933858a1f4SJohn Baldwin aiocb_fetch_error(struct aiocb *ujob)
13943858a1f4SJohn Baldwin {
13953858a1f4SJohn Baldwin 
13963858a1f4SJohn Baldwin 	return (fuword(&ujob->_aiocb_private.error));
13973858a1f4SJohn Baldwin }
13983858a1f4SJohn Baldwin 
13993858a1f4SJohn Baldwin static int
14003858a1f4SJohn Baldwin aiocb_store_status(struct aiocb *ujob, long status)
14013858a1f4SJohn Baldwin {
14023858a1f4SJohn Baldwin 
14033858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.status, status));
14043858a1f4SJohn Baldwin }
14053858a1f4SJohn Baldwin 
14063858a1f4SJohn Baldwin static int
14073858a1f4SJohn Baldwin aiocb_store_error(struct aiocb *ujob, long error)
14083858a1f4SJohn Baldwin {
14093858a1f4SJohn Baldwin 
14103858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.error, error));
14113858a1f4SJohn Baldwin }
14123858a1f4SJohn Baldwin 
14133858a1f4SJohn Baldwin static int
14143858a1f4SJohn Baldwin aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
14153858a1f4SJohn Baldwin {
14163858a1f4SJohn Baldwin 
14173858a1f4SJohn Baldwin 	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
14183858a1f4SJohn Baldwin }
14193858a1f4SJohn Baldwin 
14203858a1f4SJohn Baldwin static int
14213858a1f4SJohn Baldwin aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
14223858a1f4SJohn Baldwin {
14233858a1f4SJohn Baldwin 
14243858a1f4SJohn Baldwin 	return (suword(ujobp, (long)ujob));
14253858a1f4SJohn Baldwin }
14263858a1f4SJohn Baldwin 
14273858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops = {
14283858a1f4SJohn Baldwin 	.copyin = aiocb_copyin,
14293858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14303858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14313858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14323858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14333858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14343858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14353858a1f4SJohn Baldwin };
14363858a1f4SJohn Baldwin 
1437399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
14383858a1f4SJohn Baldwin static struct aiocb_ops aiocb_ops_osigevent = {
14393858a1f4SJohn Baldwin 	.copyin = aiocb_copyin_old_sigevent,
14403858a1f4SJohn Baldwin 	.fetch_status = aiocb_fetch_status,
14413858a1f4SJohn Baldwin 	.fetch_error = aiocb_fetch_error,
14423858a1f4SJohn Baldwin 	.store_status = aiocb_store_status,
14433858a1f4SJohn Baldwin 	.store_error = aiocb_store_error,
14443858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb_store_kernelinfo,
14453858a1f4SJohn Baldwin 	.store_aiocb = aiocb_store_aiocb,
14463858a1f4SJohn Baldwin };
1447399e8c17SJohn Baldwin #endif
14483858a1f4SJohn Baldwin 
1449bfbbc4aaSJason Evans /*
1450bfbbc4aaSJason Evans  * Queue a new AIO request.  The choice between the threaded technique and
1451bfbbc4aaSJason Evans  * the direct physio technique for VCHR devices is made in this code.
14522244ea07SJohn Dyson  */
14536a1162d4SAlexander Leidinger int
14545652770dSJohn Baldwin aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
14553858a1f4SJohn Baldwin     int type, struct aiocb_ops *ops)
1456fd3bf775SJohn Dyson {
1457b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
14587008be5bSPawel Jakub Dawidek 	cap_rights_t rights;
14592244ea07SJohn Dyson 	struct file *fp;
1460f3215338SJohn Baldwin 	struct kaiocb *job;
14612244ea07SJohn Dyson 	struct kaioinfo *ki;
1462c6fa9f78SAlan Cox 	struct kevent kev;
14631ce91824SDavid Xu 	int opcode;
14641ce91824SDavid Xu 	int error;
14654db71d27SJohn-Mark Gurney 	int fd, kqfd;
14661ce91824SDavid Xu 	int jid;
1467fde80935SDavid Xu 	u_short evflags;
14682244ea07SJohn Dyson 
1469a9bf5e37SDavid Xu 	if (p->p_aioinfo == NULL)
1470a9bf5e37SDavid Xu 		aio_init_aioinfo(p);
1471a9bf5e37SDavid Xu 
14721ce91824SDavid Xu 	ki = p->p_aioinfo;
14731ce91824SDavid Xu 
14745652770dSJohn Baldwin 	ops->store_status(ujob, -1);
14755652770dSJohn Baldwin 	ops->store_error(ujob, 0);
14765652770dSJohn Baldwin 	ops->store_kernelinfo(ujob, -1);
1477a9bf5e37SDavid Xu 
1478a9bf5e37SDavid Xu 	if (num_queue_count >= max_queue_count ||
1479a9bf5e37SDavid Xu 	    ki->kaio_count >= ki->kaio_qallowed_count) {
14805652770dSJohn Baldwin 		ops->store_error(ujob, EAGAIN);
1481a9bf5e37SDavid Xu 		return (EAGAIN);
1482a9bf5e37SDavid Xu 	}
1483a9bf5e37SDavid Xu 
14845652770dSJohn Baldwin 	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
14855652770dSJohn Baldwin 	knlist_init_mtx(&job->klist, AIO_MTX(ki));
1486fd3bf775SJohn Dyson 
14875652770dSJohn Baldwin 	error = ops->copyin(ujob, &job->uaiocb);
14882244ea07SJohn Dyson 	if (error) {
14895652770dSJohn Baldwin 		ops->store_error(ujob, error);
14905652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1491ac41f2efSAlfred Perlstein 		return (error);
14922244ea07SJohn Dyson 	}
149368d71118SDavid Xu 
1494bb430bc7SJohn Baldwin 	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
14955652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1496434ea137SGleb Smirnoff 		return (EINVAL);
1497434ea137SGleb Smirnoff 	}
1498434ea137SGleb Smirnoff 
14995652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
15005652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
15015652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
15025652770dSJohn Baldwin 	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
15035652770dSJohn Baldwin 		ops->store_error(ujob, EINVAL);
15045652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
150568d71118SDavid Xu 		return (EINVAL);
150668d71118SDavid Xu 	}
150768d71118SDavid Xu 
15085652770dSJohn Baldwin 	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
15095652770dSJohn Baldwin 	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
15105652770dSJohn Baldwin 		!_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
15115652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1512ac41f2efSAlfred Perlstein 		return (EINVAL);
15132f3cf918SAlfred Perlstein 	}
15142244ea07SJohn Dyson 
15155652770dSJohn Baldwin 	ksiginfo_init(&job->ksi);
15164c0fb2cfSDavid Xu 
1517bfbbc4aaSJason Evans 	/* Save userspace address of the job info. */
15185652770dSJohn Baldwin 	job->ujob = ujob;
151911783b14SJohn Dyson 
1520bfbbc4aaSJason Evans 	/* Get the opcode. */
1521bfbbc4aaSJason Evans 	if (type != LIO_NOP)
15225652770dSJohn Baldwin 		job->uaiocb.aio_lio_opcode = type;
15235652770dSJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
15242244ea07SJohn Dyson 
1525a9d2f8d8SRobert Watson 	/*
1526a9d2f8d8SRobert Watson 	 * Validate the opcode and fetch the file object for the specified
1527a9d2f8d8SRobert Watson 	 * file descriptor.
1528a9d2f8d8SRobert Watson 	 *
1529a9d2f8d8SRobert Watson 	 * XXXRW: Moved the opcode validation up here so that we don't
1530a9d2f8d8SRobert Watson 	 * retrieve a file descriptor without knowing what the capability
1531a9d2f8d8SRobert Watson 	 * should be.
1532a9d2f8d8SRobert Watson 	 */
15335652770dSJohn Baldwin 	fd = job->uaiocb.aio_fildes;
15342a522eb9SJohn Baldwin 	switch (opcode) {
15352a522eb9SJohn Baldwin 	case LIO_WRITE:
15367008be5bSPawel Jakub Dawidek 		error = fget_write(td, fd,
15377008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PWRITE), &fp);
15382a522eb9SJohn Baldwin 		break;
15392a522eb9SJohn Baldwin 	case LIO_READ:
15407008be5bSPawel Jakub Dawidek 		error = fget_read(td, fd,
15417008be5bSPawel Jakub Dawidek 		    cap_rights_init(&rights, CAP_PREAD), &fp);
1542a9d2f8d8SRobert Watson 		break;
1543a9d2f8d8SRobert Watson 	case LIO_SYNC:
15447008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp);
1545a9d2f8d8SRobert Watson 		break;
15466160e12cSGleb Smirnoff 	case LIO_MLOCK:
15476160e12cSGleb Smirnoff 		fp = NULL;
15486160e12cSGleb Smirnoff 		break;
1549a9d2f8d8SRobert Watson 	case LIO_NOP:
15507008be5bSPawel Jakub Dawidek 		error = fget(td, fd, cap_rights_init(&rights), &fp);
15512a522eb9SJohn Baldwin 		break;
15522a522eb9SJohn Baldwin 	default:
1553a9d2f8d8SRobert Watson 		error = EINVAL;
15542a522eb9SJohn Baldwin 	}
15552a522eb9SJohn Baldwin 	if (error) {
15565652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
15575652770dSJohn Baldwin 		ops->store_error(ujob, error);
1558af56abaaSJohn Baldwin 		return (error);
15592244ea07SJohn Dyson 	}
156099eee864SDavid Xu 
156199eee864SDavid Xu 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
156299eee864SDavid Xu 		error = EINVAL;
156399eee864SDavid Xu 		goto aqueue_fail;
156499eee864SDavid Xu 	}
15652244ea07SJohn Dyson 
1566711dba24SKonstantin Belousov 	if ((opcode == LIO_READ || opcode == LIO_WRITE) &&
1567711dba24SKonstantin Belousov 	    job->uaiocb.aio_offset < 0 &&
1568711dba24SKonstantin Belousov 	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) {
1569ae124fc4SAlan Cox 		error = EINVAL;
1570ae124fc4SAlan Cox 		goto aqueue_fail;
15712244ea07SJohn Dyson 	}
15721ce91824SDavid Xu 
15735652770dSJohn Baldwin 	job->fd_file = fp;
15741ce91824SDavid Xu 
157599eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
157699eee864SDavid Xu 	jid = jobrefid++;
15775652770dSJohn Baldwin 	job->seqno = jobseqno++;
157899eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
15795652770dSJohn Baldwin 	error = ops->store_kernelinfo(ujob, jid);
15801ce91824SDavid Xu 	if (error) {
15811ce91824SDavid Xu 		error = EINVAL;
15821ce91824SDavid Xu 		goto aqueue_fail;
15831ce91824SDavid Xu 	}
15845652770dSJohn Baldwin 	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
15852244ea07SJohn Dyson 
15862244ea07SJohn Dyson 	if (opcode == LIO_NOP) {
1587a5c0b1c0SAlan Cox 		fdrop(fp, td);
15885652770dSJohn Baldwin 		uma_zfree(aiocb_zone, job);
1589ac41f2efSAlfred Perlstein 		return (0);
15902244ea07SJohn Dyson 	}
15912244ea07SJohn Dyson 
15925652770dSJohn Baldwin 	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1593cb679c38SJonathan Lemon 		goto no_kqueue;
15945652770dSJohn Baldwin 	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1595fde80935SDavid Xu 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
1596fde80935SDavid Xu 		error = EINVAL;
1597fde80935SDavid Xu 		goto aqueue_fail;
1598fde80935SDavid Xu 	}
15995652770dSJohn Baldwin 	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
16005652770dSJohn Baldwin 	kev.ident = (uintptr_t)job->ujob;
1601cb679c38SJonathan Lemon 	kev.filter = EVFILT_AIO;
1602fde80935SDavid Xu 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
16035652770dSJohn Baldwin 	kev.data = (intptr_t)job;
16045652770dSJohn Baldwin 	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
16054db71d27SJohn-Mark Gurney 	error = kqfd_register(kqfd, &kev, td, 1);
1606f3215338SJohn Baldwin 	if (error)
1607f3215338SJohn Baldwin 		goto aqueue_fail;
1608f3215338SJohn Baldwin 
1609cb679c38SJonathan Lemon no_kqueue:
1610cb679c38SJonathan Lemon 
16115652770dSJohn Baldwin 	ops->store_error(ujob, EINPROGRESS);
16125652770dSJohn Baldwin 	job->uaiocb._aiocb_private.error = EINPROGRESS;
16135652770dSJohn Baldwin 	job->userproc = p;
16145652770dSJohn Baldwin 	job->cred = crhold(td->td_ucred);
1615f3215338SJohn Baldwin 	job->jobflags = KAIOCB_QUEUEING;
16165652770dSJohn Baldwin 	job->lio = lj;
16172244ea07SJohn Dyson 
1618f3215338SJohn Baldwin 	if (opcode == LIO_MLOCK) {
1619f3215338SJohn Baldwin 		aio_schedule(job, aio_process_mlock);
1620f3215338SJohn Baldwin 		error = 0;
1621f3215338SJohn Baldwin 	} else if (fp->f_ops->fo_aio_queue == NULL)
1622f3215338SJohn Baldwin 		error = aio_queue_file(fp, job);
1623f3215338SJohn Baldwin 	else
1624f3215338SJohn Baldwin 		error = fo_aio_queue(fp, job);
1625f3215338SJohn Baldwin 	if (error)
1626f3215338SJohn Baldwin 		goto aqueue_fail;
1627f3215338SJohn Baldwin 
1628f3215338SJohn Baldwin 	AIO_LOCK(ki);
1629f3215338SJohn Baldwin 	job->jobflags &= ~KAIOCB_QUEUEING;
1630f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1631f3215338SJohn Baldwin 	ki->kaio_count++;
1632f3215338SJohn Baldwin 	if (lj)
1633f3215338SJohn Baldwin 		lj->lioj_count++;
1634f3215338SJohn Baldwin 	atomic_add_int(&num_queue_count, 1);
1635f3215338SJohn Baldwin 	if (job->jobflags & KAIOCB_FINISHED) {
1636f3215338SJohn Baldwin 		/*
1637f3215338SJohn Baldwin 		 * The queue callback completed the request synchronously.
1638f3215338SJohn Baldwin 		 * The bulk of the completion is deferred in that case
1639f3215338SJohn Baldwin 		 * until this point.
1640f3215338SJohn Baldwin 		 */
1641f3215338SJohn Baldwin 		aio_bio_done_notify(p, job);
1642f3215338SJohn Baldwin 	} else
1643f3215338SJohn Baldwin 		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1644f3215338SJohn Baldwin 	AIO_UNLOCK(ki);
1645f3215338SJohn Baldwin 	return (0);
1646f3215338SJohn Baldwin 
1647f3215338SJohn Baldwin aqueue_fail:
1648f3215338SJohn Baldwin 	knlist_delete(&job->klist, curthread, 0);
1649f3215338SJohn Baldwin 	if (fp)
1650f3215338SJohn Baldwin 		fdrop(fp, td);
1651f3215338SJohn Baldwin 	uma_zfree(aiocb_zone, job);
1652f3215338SJohn Baldwin 	ops->store_error(ujob, error);
1653f3215338SJohn Baldwin 	return (error);
1654f3215338SJohn Baldwin }
1655f3215338SJohn Baldwin 
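/*
 * Illustrative userland sketch (not part of this file): the SIGEV_KEVENT
 * branch above registers an EVFILT_AIO kevent whose ident is the userspace
 * aiocb pointer, so completions can be collected with kevent().  The helper
 * name below is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <aio.h>
#include <string.h>

static ssize_t
example_kevent_completion(int fd, int kq, void *buf, size_t len)
{
	struct aiocb cb;
	struct kevent ev;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
	cb.aio_sigevent.sigev_notify_kqueue = kq;
	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
	if (aio_read(&cb) != 0)
		return (-1);

	/* The completion arrives as an EVFILT_AIO event; ident is the aiocb. */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) != 1)
		return (-1);
	return (aio_return((struct aiocb *)ev.ident));
}
#endif
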
1656f3215338SJohn Baldwin static void
1657f3215338SJohn Baldwin aio_cancel_daemon_job(struct kaiocb *job)
1658f3215338SJohn Baldwin {
1659f3215338SJohn Baldwin 
1660f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1661f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1662f3215338SJohn Baldwin 		TAILQ_REMOVE(&aio_jobs, job, list);
1663f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1664f3215338SJohn Baldwin 	aio_cancel(job);
1665f3215338SJohn Baldwin }
1666f3215338SJohn Baldwin 
1667f3215338SJohn Baldwin void
1668f3215338SJohn Baldwin aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1669f3215338SJohn Baldwin {
1670f3215338SJohn Baldwin 
1671f3215338SJohn Baldwin 	mtx_lock(&aio_job_mtx);
1672f3215338SJohn Baldwin 	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1673f3215338SJohn Baldwin 		mtx_unlock(&aio_job_mtx);
1674f3215338SJohn Baldwin 		aio_cancel(job);
1675f3215338SJohn Baldwin 		return;
1676f3215338SJohn Baldwin 	}
1677f3215338SJohn Baldwin 	job->handle_fn = func;
1678f3215338SJohn Baldwin 	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1679f3215338SJohn Baldwin 	aio_kick_nowait(job->userproc);
1680f3215338SJohn Baldwin 	mtx_unlock(&aio_job_mtx);
1681f3215338SJohn Baldwin }
1682f3215338SJohn Baldwin 
1683f3215338SJohn Baldwin static void
1684f3215338SJohn Baldwin aio_cancel_sync(struct kaiocb *job)
1685f3215338SJohn Baldwin {
1686f3215338SJohn Baldwin 	struct kaioinfo *ki;
1687f3215338SJohn Baldwin 
1688f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1689005ce8e4SJohn Baldwin 	AIO_LOCK(ki);
1690f3215338SJohn Baldwin 	if (!aio_cancel_cleared(job))
1691f3215338SJohn Baldwin 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1692005ce8e4SJohn Baldwin 	AIO_UNLOCK(ki);
1693f3215338SJohn Baldwin 	aio_cancel(job);
1694f3215338SJohn Baldwin }
1695f3215338SJohn Baldwin 
1696f3215338SJohn Baldwin int
1697f3215338SJohn Baldwin aio_queue_file(struct file *fp, struct kaiocb *job)
1698f3215338SJohn Baldwin {
1699f3215338SJohn Baldwin 	struct kaioinfo *ki;
1700f3215338SJohn Baldwin 	struct kaiocb *job2;
17019fe297bbSKonstantin Belousov 	struct vnode *vp;
17029fe297bbSKonstantin Belousov 	struct mount *mp;
1703f3215338SJohn Baldwin 	int error, opcode;
17049fe297bbSKonstantin Belousov 	bool safe;
1705f3215338SJohn Baldwin 
1706f3215338SJohn Baldwin 	ki = job->userproc->p_aioinfo;
1707f3215338SJohn Baldwin 	opcode = job->uaiocb.aio_lio_opcode;
170899eee864SDavid Xu 	if (opcode == LIO_SYNC)
170999eee864SDavid Xu 		goto queueit;
171099eee864SDavid Xu 
1711f3215338SJohn Baldwin 	if ((error = aio_qphysio(job->userproc, job)) == 0)
1712279d7226SMatthew Dillon 		goto done;
17131ce91824SDavid Xu #if 0
1714f3215338SJohn Baldwin 	/*
1715f3215338SJohn Baldwin 	 * XXX: This means qphysio() failed with EFAULT.  The current
1716f3215338SJohn Baldwin 	 * behavior is to retry the operation via fo_read/fo_write.
1717f3215338SJohn Baldwin 	 * Wouldn't it be better to just complete the request with an
1718f3215338SJohn Baldwin 	 * error here?
1719f3215338SJohn Baldwin 	 */
1720f3215338SJohn Baldwin 	if (error > 0)
1721279d7226SMatthew Dillon 		goto done;
17221ce91824SDavid Xu #endif
172399eee864SDavid Xu queueit:
17249fe297bbSKonstantin Belousov 	safe = false;
17259fe297bbSKonstantin Belousov 	if (fp->f_type == DTYPE_VNODE) {
17269fe297bbSKonstantin Belousov 		vp = fp->f_vnode;
17279fe297bbSKonstantin Belousov 		if (vp->v_type == VREG || vp->v_type == VDIR) {
17289fe297bbSKonstantin Belousov 			mp = fp->f_vnode->v_mount;
17299fe297bbSKonstantin Belousov 			if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
17309fe297bbSKonstantin Belousov 				safe = true;
17319fe297bbSKonstantin Belousov 		}
17329fe297bbSKonstantin Belousov 	}
17339c20dc99SJohn Baldwin 	if (!(safe || enable_aio_unsafe)) {
17349c20dc99SJohn Baldwin 		counted_warning(&unsafe_warningcnt,
17359c20dc99SJohn Baldwin 		    "is attempting to use unsafe AIO requests");
1736f3215338SJohn Baldwin 		return (EOPNOTSUPP);
17379c20dc99SJohn Baldwin 	}
173884af4da6SJohn Dyson 
1739*7e409184SJohn Baldwin 	switch (job->uaiocb.aio_lio_opcode) {
1740*7e409184SJohn Baldwin 	case LIO_READ:
1741*7e409184SJohn Baldwin 	case LIO_WRITE:
1742*7e409184SJohn Baldwin 		aio_schedule(job, aio_process_rw);
1743*7e409184SJohn Baldwin 		error = 0;
1744*7e409184SJohn Baldwin 		break;
1745*7e409184SJohn Baldwin 	case LIO_SYNC:
1746f3215338SJohn Baldwin 		AIO_LOCK(ki);
17475652770dSJohn Baldwin 		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
17485652770dSJohn Baldwin 			if (job2->fd_file == job->fd_file &&
17495652770dSJohn Baldwin 			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
17505652770dSJohn Baldwin 			    job2->seqno < job->seqno) {
17515652770dSJohn Baldwin 				job2->jobflags |= KAIOCB_CHECKSYNC;
17525652770dSJohn Baldwin 				job->pending++;
1753dbbccfe9SDavid Xu 			}
1754dbbccfe9SDavid Xu 		}
17555652770dSJohn Baldwin 		if (job->pending != 0) {
1756005ce8e4SJohn Baldwin 			if (!aio_set_cancel_function_locked(job,
1757005ce8e4SJohn Baldwin 				aio_cancel_sync)) {
1758f3215338SJohn Baldwin 				AIO_UNLOCK(ki);
1759f3215338SJohn Baldwin 				aio_cancel(job);
1760f3215338SJohn Baldwin 				return (0);
1761f3215338SJohn Baldwin 			}
17625652770dSJohn Baldwin 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1763759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1764f3215338SJohn Baldwin 			return (0);
1765dbbccfe9SDavid Xu 		}
1766759ccccaSDavid Xu 		AIO_UNLOCK(ki);
1767f3215338SJohn Baldwin 		aio_schedule(job, aio_process_sync);
1768f3215338SJohn Baldwin 		error = 0;
1769f3215338SJohn Baldwin 		break;
1770f3215338SJohn Baldwin 	default:
1771f3215338SJohn Baldwin 		error = EINVAL;
1772f3215338SJohn Baldwin 	}
177399eee864SDavid Xu done:
177499eee864SDavid Xu 	return (error);
177599eee864SDavid Xu }
177699eee864SDavid Xu 
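/*
 * Illustrative sketch (not part of this file): a file type supplying its own
 * fo_aio_queue method, as dispatched from aio_aqueue() above, would either
 * finish the request immediately with aio_complete() or hand it to an aio
 * daemon with aio_schedule().  The my_* names are hypothetical.
 */
#if 0
static void
my_process(struct kaiocb *job)
{

	/* ... perform the I/O described by job->uaiocb here ... */
	aio_complete(job, job->uaiocb.aio_nbytes, 0);
}

static int
my_fo_aio_queue(struct file *fp, struct kaiocb *job)
{

	aio_schedule(job, my_process);
	return (0);
}
#endif
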
177799eee864SDavid Xu static void
177899eee864SDavid Xu aio_kick_nowait(struct proc *userp)
177999eee864SDavid Xu {
178099eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
178139314b7dSJohn Baldwin 	struct aioproc *aiop;
178299eee864SDavid Xu 
178399eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
178499eee864SDavid Xu 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
178599eee864SDavid Xu 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
178639314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
178739314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
17880dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
17890dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
17900dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1791c85650caSJohn Baldwin 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
179299eee864SDavid Xu 	}
179399eee864SDavid Xu }
179499eee864SDavid Xu 
1795dbbccfe9SDavid Xu static int
179699eee864SDavid Xu aio_kick(struct proc *userp)
179799eee864SDavid Xu {
179899eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
179939314b7dSJohn Baldwin 	struct aioproc *aiop;
1800dbbccfe9SDavid Xu 	int error, ret = 0;
180199eee864SDavid Xu 
180299eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
180399eee864SDavid Xu retryproc:
1804d254af07SMatthew Dillon 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
18052244ea07SJohn Dyson 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
180639314b7dSJohn Baldwin 		aiop->aioprocflags &= ~AIOP_FREE;
180739314b7dSJohn Baldwin 		wakeup(aiop->aioproc);
18080dd6c035SJohn Baldwin 	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
18090dd6c035SJohn Baldwin 	    ki->kaio_active_count + num_aio_resv_start <
18100dd6c035SJohn Baldwin 	    ki->kaio_maxactive_count) {
1811fd3bf775SJohn Dyson 		num_aio_resv_start++;
18121ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
18131ce91824SDavid Xu 		error = aio_newproc(&num_aio_resv_start);
18141ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
18151ce91824SDavid Xu 		if (error) {
181684af4da6SJohn Dyson 			num_aio_resv_start--;
18172244ea07SJohn Dyson 			goto retryproc;
1818fd3bf775SJohn Dyson 		}
1819dbbccfe9SDavid Xu 	} else {
1820dbbccfe9SDavid Xu 		ret = -1;
18211ce91824SDavid Xu 	}
1822dbbccfe9SDavid Xu 	return (ret);
182399eee864SDavid Xu }
18241ce91824SDavid Xu 
182599eee864SDavid Xu static void
182699eee864SDavid Xu aio_kick_helper(void *context, int pending)
182799eee864SDavid Xu {
182899eee864SDavid Xu 	struct proc *userp = context;
182999eee864SDavid Xu 
183099eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
1831dbbccfe9SDavid Xu 	while (--pending >= 0) {
1832dbbccfe9SDavid Xu 		if (aio_kick(userp))
1833dbbccfe9SDavid Xu 			break;
1834dbbccfe9SDavid Xu 	}
183599eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
18362244ea07SJohn Dyson }
18372244ea07SJohn Dyson 
1838fd3bf775SJohn Dyson /*
1839bfbbc4aaSJason Evans  * Support the aio_return system call; as a side effect, kernel resources are
1840bfbbc4aaSJason Evans  * released.
18412244ea07SJohn Dyson  */
18423858a1f4SJohn Baldwin static int
18435652770dSJohn Baldwin kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
1844fd3bf775SJohn Dyson {
1845b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18465652770dSJohn Baldwin 	struct kaiocb *job;
18472244ea07SJohn Dyson 	struct kaioinfo *ki;
1848bb430bc7SJohn Baldwin 	long status, error;
18492244ea07SJohn Dyson 
1850c0bf5caaSAlan Cox 	ki = p->p_aioinfo;
1851c0bf5caaSAlan Cox 	if (ki == NULL)
1852ac41f2efSAlfred Perlstein 		return (EINVAL);
1853759ccccaSDavid Xu 	AIO_LOCK(ki);
18545652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
18555652770dSJohn Baldwin 		if (job->ujob == ujob)
1856c0bf5caaSAlan Cox 			break;
1857c0bf5caaSAlan Cox 	}
18585652770dSJohn Baldwin 	if (job != NULL) {
1859f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
18605652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
18615652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
18621ce91824SDavid Xu 		td->td_retval[0] = status;
1863b1012d80SJohn Baldwin 		td->td_ru.ru_oublock += job->outblock;
1864b1012d80SJohn Baldwin 		td->td_ru.ru_inblock += job->inblock;
1865b1012d80SJohn Baldwin 		td->td_ru.ru_msgsnd += job->msgsnd;
1866b1012d80SJohn Baldwin 		td->td_ru.ru_msgrcv += job->msgrcv;
18675652770dSJohn Baldwin 		aio_free_entry(job);
1868759ccccaSDavid Xu 		AIO_UNLOCK(ki);
18695652770dSJohn Baldwin 		ops->store_error(ujob, error);
18705652770dSJohn Baldwin 		ops->store_status(ujob, status);
187155a122bfSDavid Xu 	} else {
18721ce91824SDavid Xu 		error = EINVAL;
1873759ccccaSDavid Xu 		AIO_UNLOCK(ki);
187455a122bfSDavid Xu 	}
18751ce91824SDavid Xu 	return (error);
18762244ea07SJohn Dyson }
18772244ea07SJohn Dyson 
18783858a1f4SJohn Baldwin int
18798451d0ddSKip Macy sys_aio_return(struct thread *td, struct aio_return_args *uap)
18803858a1f4SJohn Baldwin {
18813858a1f4SJohn Baldwin 
18823858a1f4SJohn Baldwin 	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
18833858a1f4SJohn Baldwin }
18843858a1f4SJohn Baldwin 
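/*
 * Illustrative userland sketch (not part of this file): the usual
 * counterpart of kern_aio_return() is to poll aio_error() until the request
 * leaves EINPROGRESS and then reap it with aio_return(), which releases the
 * kernel-side job.  Busy-waiting is for brevity only.
 */
#if 0
#include <aio.h>
#include <errno.h>

static ssize_t
example_wait_and_return(struct aiocb *cb)
{

	while (aio_error(cb) == EINPROGRESS)
		;			/* or sleep and retry */
	return (aio_return(cb));	/* frees kernel resources for the job */
}
#endif
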
18852244ea07SJohn Dyson /*
1886bfbbc4aaSJason Evans  * Allow a process to wake up when any of the I/O requests are completed.
18872244ea07SJohn Dyson  */
18883858a1f4SJohn Baldwin static int
18893858a1f4SJohn Baldwin kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
18903858a1f4SJohn Baldwin     struct timespec *ts)
1891fd3bf775SJohn Dyson {
1892b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18934a11ca4eSPoul-Henning Kamp 	struct timeval atv;
18942244ea07SJohn Dyson 	struct kaioinfo *ki;
18955652770dSJohn Baldwin 	struct kaiocb *firstjob, *job;
18963858a1f4SJohn Baldwin 	int error, i, timo;
18972244ea07SJohn Dyson 
18982244ea07SJohn Dyson 	timo = 0;
18993858a1f4SJohn Baldwin 	if (ts) {
19003858a1f4SJohn Baldwin 		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
19012244ea07SJohn Dyson 			return (EINVAL);
19022244ea07SJohn Dyson 
19033858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
19042244ea07SJohn Dyson 		if (itimerfix(&atv))
19052244ea07SJohn Dyson 			return (EINVAL);
1906227ee8a1SPoul-Henning Kamp 		timo = tvtohz(&atv);
19072244ea07SJohn Dyson 	}
19082244ea07SJohn Dyson 
19092244ea07SJohn Dyson 	ki = p->p_aioinfo;
19102244ea07SJohn Dyson 	if (ki == NULL)
1911ac41f2efSAlfred Perlstein 		return (EAGAIN);
19122244ea07SJohn Dyson 
19133858a1f4SJohn Baldwin 	if (njoblist == 0)
1914ac41f2efSAlfred Perlstein 		return (0);
19152244ea07SJohn Dyson 
1916759ccccaSDavid Xu 	AIO_LOCK(ki);
19171ce91824SDavid Xu 	for (;;) {
19185652770dSJohn Baldwin 		firstjob = NULL;
19191ce91824SDavid Xu 		error = 0;
19205652770dSJohn Baldwin 		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
192184af4da6SJohn Dyson 			for (i = 0; i < njoblist; i++) {
19225652770dSJohn Baldwin 				if (job->ujob == ujoblist[i]) {
19235652770dSJohn Baldwin 					if (firstjob == NULL)
19245652770dSJohn Baldwin 						firstjob = job;
1925f3215338SJohn Baldwin 					if (job->jobflags & KAIOCB_FINISHED)
19261ce91824SDavid Xu 						goto RETURN;
192784af4da6SJohn Dyson 				}
192884af4da6SJohn Dyson 			}
192984af4da6SJohn Dyson 		}
19301ce91824SDavid Xu 		/* All tasks were finished. */
19315652770dSJohn Baldwin 		if (firstjob == NULL)
19321ce91824SDavid Xu 			break;
19332244ea07SJohn Dyson 
1934fd3bf775SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
1935759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
19361ce91824SDavid Xu 		    "aiospn", timo);
19371ce91824SDavid Xu 		if (error == ERESTART)
19381ce91824SDavid Xu 			error = EINTR;
19391ce91824SDavid Xu 		if (error)
19401ce91824SDavid Xu 			break;
19412244ea07SJohn Dyson 	}
19421ce91824SDavid Xu RETURN:
1943759ccccaSDavid Xu 	AIO_UNLOCK(ki);
19443858a1f4SJohn Baldwin 	return (error);
19453858a1f4SJohn Baldwin }
19463858a1f4SJohn Baldwin 
19473858a1f4SJohn Baldwin int
19488451d0ddSKip Macy sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
19493858a1f4SJohn Baldwin {
19503858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
19513858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
19523858a1f4SJohn Baldwin 	int error;
19533858a1f4SJohn Baldwin 
1954913b9329SAlan Somers 	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
19553858a1f4SJohn Baldwin 		return (EINVAL);
19563858a1f4SJohn Baldwin 
19573858a1f4SJohn Baldwin 	if (uap->timeout) {
19583858a1f4SJohn Baldwin 		/* Get timespec struct. */
19593858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
19603858a1f4SJohn Baldwin 			return (error);
19613858a1f4SJohn Baldwin 		tsp = &ts;
19623858a1f4SJohn Baldwin 	} else
19633858a1f4SJohn Baldwin 		tsp = NULL;
19643858a1f4SJohn Baldwin 
1965913b9329SAlan Somers 	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
19663858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
19673858a1f4SJohn Baldwin 	if (error == 0)
19683858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
1969913b9329SAlan Somers 	free(ujoblist, M_AIOS);
19701ce91824SDavid Xu 	return (error);
19712244ea07SJohn Dyson }
1972ee877a35SJohn Dyson 
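/*
 * Illustrative userland sketch (not part of this file): aio_suspend() maps
 * onto kern_aio_suspend() above, sleeping until one of the listed requests
 * finishes or the timeout expires.  The one-second timeout is an assumption
 * for the example.
 */
#if 0
#include <aio.h>
#include <time.h>

static void
example_suspend(const struct aiocb *a, const struct aiocb *b)
{
	const struct aiocb *list[2] = { a, b };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	/* Returns 0 if a listed request completed, -1/EAGAIN on timeout. */
	(void)aio_suspend(list, 2, &ts);
}
#endif
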
1973ee877a35SJohn Dyson /*
1974dd85920aSJason Evans  * aio_cancel cancels any non-physio aio operations not currently in
1975dd85920aSJason Evans  * progress.
1976ee877a35SJohn Dyson  */
1977ee877a35SJohn Dyson int
19788451d0ddSKip Macy sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1979fd3bf775SJohn Dyson {
1980b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
1981dd85920aSJason Evans 	struct kaioinfo *ki;
19825652770dSJohn Baldwin 	struct kaiocb *job, *jobn;
1983dd85920aSJason Evans 	struct file *fp;
1984f131759fSMateusz Guzik 	cap_rights_t rights;
19851ce91824SDavid Xu 	int error;
1986dd85920aSJason Evans 	int cancelled = 0;
1987dd85920aSJason Evans 	int notcancelled = 0;
1988dd85920aSJason Evans 	struct vnode *vp;
1989dd85920aSJason Evans 
19902a522eb9SJohn Baldwin 	/* Lookup file object. */
1991f131759fSMateusz Guzik 	error = fget(td, uap->fd, cap_rights_init(&rights), &fp);
19922a522eb9SJohn Baldwin 	if (error)
19932a522eb9SJohn Baldwin 		return (error);
1994dd85920aSJason Evans 
19951ce91824SDavid Xu 	ki = p->p_aioinfo;
19961ce91824SDavid Xu 	if (ki == NULL)
19971ce91824SDavid Xu 		goto done;
19981ce91824SDavid Xu 
1999dd85920aSJason Evans 	if (fp->f_type == DTYPE_VNODE) {
20003b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
2001dd85920aSJason Evans 		if (vn_isdisk(vp, &error)) {
20022a522eb9SJohn Baldwin 			fdrop(fp, td);
2003b40ce416SJulian Elischer 			td->td_retval[0] = AIO_NOTCANCELED;
2004ac41f2efSAlfred Perlstein 			return (0);
2005dd85920aSJason Evans 		}
2006dd85920aSJason Evans 	}
2007dd85920aSJason Evans 
2008759ccccaSDavid Xu 	AIO_LOCK(ki);
20095652770dSJohn Baldwin 	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
20105652770dSJohn Baldwin 		if ((uap->fd == job->uaiocb.aio_fildes) &&
2011dd85920aSJason Evans 		    ((uap->aiocbp == NULL) ||
20125652770dSJohn Baldwin 		     (uap->aiocbp == job->ujob))) {
2013f3215338SJohn Baldwin 			if (aio_cancel_job(p, ki, job)) {
20141ce91824SDavid Xu 				cancelled++;
2015dd85920aSJason Evans 			} else {
2016dd85920aSJason Evans 				notcancelled++;
2017dd85920aSJason Evans 			}
20181aa4c324SDavid Xu 			if (uap->aiocbp != NULL)
20191aa4c324SDavid Xu 				break;
2020dd85920aSJason Evans 		}
2021dd85920aSJason Evans 	}
2022759ccccaSDavid Xu 	AIO_UNLOCK(ki);
20231ce91824SDavid Xu 
2024ad49abc0SAlan Cox done:
20252a522eb9SJohn Baldwin 	fdrop(fp, td);
20261aa4c324SDavid Xu 
20271aa4c324SDavid Xu 	if (uap->aiocbp != NULL) {
2028dd85920aSJason Evans 		if (cancelled) {
2029b40ce416SJulian Elischer 			td->td_retval[0] = AIO_CANCELED;
2030ac41f2efSAlfred Perlstein 			return (0);
2031dd85920aSJason Evans 		}
20321aa4c324SDavid Xu 	}
20331aa4c324SDavid Xu 
20341aa4c324SDavid Xu 	if (notcancelled) {
20351aa4c324SDavid Xu 		td->td_retval[0] = AIO_NOTCANCELED;
20361aa4c324SDavid Xu 		return (0);
20371aa4c324SDavid Xu 	}
20381aa4c324SDavid Xu 
20391aa4c324SDavid Xu 	if (cancelled) {
20401aa4c324SDavid Xu 		td->td_retval[0] = AIO_CANCELED;
20411aa4c324SDavid Xu 		return (0);
20421aa4c324SDavid Xu 	}
20431aa4c324SDavid Xu 
2044b40ce416SJulian Elischer 	td->td_retval[0] = AIO_ALLDONE;
2045dd85920aSJason Evans 
2046ac41f2efSAlfred Perlstein 	return (0);
2047ee877a35SJohn Dyson }
2048ee877a35SJohn Dyson 
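/*
 * Illustrative userland sketch (not part of this file): how a caller
 * interprets the return values produced by sys_aio_cancel() above.
 */
#if 0
#include <aio.h>

static void
example_cancel(int fd, struct aiocb *cb)
{

	switch (aio_cancel(fd, cb)) {
	case AIO_CANCELED:
		/* Cancelled; aio_return() will report ECANCELED. */
		break;
	case AIO_NOTCANCELED:
		/* Still in progress (e.g. physio on a disk); reap it later. */
		break;
	case AIO_ALLDONE:
		/* Already finished before the cancellation request. */
		break;
	}
}
#endif
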
2049ee877a35SJohn Dyson /*
2050873fbcd7SRobert Watson  * aio_error is implemented at the kernel level for compatibility purposes
2051873fbcd7SRobert Watson  * only.  For a user-mode async implementation, it would be best to do it in
2052873fbcd7SRobert Watson  * a userland subroutine.
2053ee877a35SJohn Dyson  */
20543858a1f4SJohn Baldwin static int
20555652770dSJohn Baldwin kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
2056fd3bf775SJohn Dyson {
2057b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
20585652770dSJohn Baldwin 	struct kaiocb *job;
20592244ea07SJohn Dyson 	struct kaioinfo *ki;
20601ce91824SDavid Xu 	int status;
2061ee877a35SJohn Dyson 
20622244ea07SJohn Dyson 	ki = p->p_aioinfo;
20631ce91824SDavid Xu 	if (ki == NULL) {
20641ce91824SDavid Xu 		td->td_retval[0] = EINVAL;
20651ce91824SDavid Xu 		return (0);
20661ce91824SDavid Xu 	}
2067ee877a35SJohn Dyson 
2068759ccccaSDavid Xu 	AIO_LOCK(ki);
20695652770dSJohn Baldwin 	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
20705652770dSJohn Baldwin 		if (job->ujob == ujob) {
2071f3215338SJohn Baldwin 			if (job->jobflags & KAIOCB_FINISHED)
20721ce91824SDavid Xu 				td->td_retval[0] =
20735652770dSJohn Baldwin 					job->uaiocb._aiocb_private.error;
20741ce91824SDavid Xu 			else
2075b40ce416SJulian Elischer 				td->td_retval[0] = EINPROGRESS;
2076759ccccaSDavid Xu 			AIO_UNLOCK(ki);
2077ac41f2efSAlfred Perlstein 			return (0);
20782244ea07SJohn Dyson 		}
20792244ea07SJohn Dyson 	}
2080759ccccaSDavid Xu 	AIO_UNLOCK(ki);
208184af4da6SJohn Dyson 
20822244ea07SJohn Dyson 	/*
2083a9bf5e37SDavid Xu 	 * Hack for failure of aio_aqueue.
20842244ea07SJohn Dyson 	 */
20855652770dSJohn Baldwin 	status = ops->fetch_status(ujob);
20861ce91824SDavid Xu 	if (status == -1) {
20875652770dSJohn Baldwin 		td->td_retval[0] = ops->fetch_error(ujob);
20881ce91824SDavid Xu 		return (0);
20891ce91824SDavid Xu 	}
20901ce91824SDavid Xu 
20911ce91824SDavid Xu 	td->td_retval[0] = EINVAL;
20921ce91824SDavid Xu 	return (0);
2093ee877a35SJohn Dyson }
2094ee877a35SJohn Dyson 
20953858a1f4SJohn Baldwin int
20968451d0ddSKip Macy sys_aio_error(struct thread *td, struct aio_error_args *uap)
20973858a1f4SJohn Baldwin {
20983858a1f4SJohn Baldwin 
20993858a1f4SJohn Baldwin 	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
21003858a1f4SJohn Baldwin }
21013858a1f4SJohn Baldwin 
2102eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */
2103399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2104ee877a35SJohn Dyson int
2105399e8c17SJohn Baldwin freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
21060972628aSDavid Xu {
21070972628aSDavid Xu 
21083858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
21093858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21100972628aSDavid Xu }
2111399e8c17SJohn Baldwin #endif
21120972628aSDavid Xu 
21130972628aSDavid Xu int
21148451d0ddSKip Macy sys_aio_read(struct thread *td, struct aio_read_args *uap)
2115fd3bf775SJohn Dyson {
211621d56e9cSAlfred Perlstein 
21173858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
2118ee877a35SJohn Dyson }
2119ee877a35SJohn Dyson 
2120eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */
2121399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
2122ee877a35SJohn Dyson int
2123399e8c17SJohn Baldwin freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
21240972628aSDavid Xu {
21250972628aSDavid Xu 
21263858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
21273858a1f4SJohn Baldwin 	    &aiocb_ops_osigevent));
21280972628aSDavid Xu }
2129399e8c17SJohn Baldwin #endif
21300972628aSDavid Xu 
21310972628aSDavid Xu int
21328451d0ddSKip Macy sys_aio_write(struct thread *td, struct aio_write_args *uap)
2133fd3bf775SJohn Dyson {
213421d56e9cSAlfred Perlstein 
21353858a1f4SJohn Baldwin 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
21360972628aSDavid Xu }
21370972628aSDavid Xu 
21386160e12cSGleb Smirnoff int
21396160e12cSGleb Smirnoff sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
21406160e12cSGleb Smirnoff {
21416160e12cSGleb Smirnoff 
21426160e12cSGleb Smirnoff 	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
21436160e12cSGleb Smirnoff }
21446160e12cSGleb Smirnoff 
21450972628aSDavid Xu static int
21463858a1f4SJohn Baldwin kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
21473858a1f4SJohn Baldwin     struct aiocb **acb_list, int nent, struct sigevent *sig,
21483858a1f4SJohn Baldwin     struct aiocb_ops *ops)
21490972628aSDavid Xu {
2150b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
21515652770dSJohn Baldwin 	struct aiocb *job;
21522244ea07SJohn Dyson 	struct kaioinfo *ki;
21531ce91824SDavid Xu 	struct aioliojob *lj;
215469cd28daSDoug Ambrisko 	struct kevent kev;
21551ce91824SDavid Xu 	int error;
2156fd3bf775SJohn Dyson 	int nerror;
2157ee877a35SJohn Dyson 	int i;
2158ee877a35SJohn Dyson 
21593858a1f4SJohn Baldwin 	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
2160ac41f2efSAlfred Perlstein 		return (EINVAL);
21612244ea07SJohn Dyson 
2162913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
2163ac41f2efSAlfred Perlstein 		return (EINVAL);
21642244ea07SJohn Dyson 
2165bfbbc4aaSJason Evans 	if (p->p_aioinfo == NULL)
21662244ea07SJohn Dyson 		aio_init_aioinfo(p);
21672244ea07SJohn Dyson 
21682244ea07SJohn Dyson 	ki = p->p_aioinfo;
21692244ea07SJohn Dyson 
2170a163d034SWarner Losh 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
217184af4da6SJohn Dyson 	lj->lioj_flags = 0;
21721ce91824SDavid Xu 	lj->lioj_count = 0;
21731ce91824SDavid Xu 	lj->lioj_finished_count = 0;
2174d8b0556cSKonstantin Belousov 	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
21754c0fb2cfSDavid Xu 	ksiginfo_init(&lj->lioj_ksi);
217669cd28daSDoug Ambrisko 
217784af4da6SJohn Dyson 	/*
2178bfbbc4aaSJason Evans 	 * Set up the signal.
217984af4da6SJohn Dyson 	 */
21803858a1f4SJohn Baldwin 	if (sig && (mode == LIO_NOWAIT)) {
21813858a1f4SJohn Baldwin 		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
218269cd28daSDoug Ambrisko 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
218369cd28daSDoug Ambrisko 			/* Assume only new style KEVENT */
218469cd28daSDoug Ambrisko 			kev.filter = EVFILT_LIO;
218569cd28daSDoug Ambrisko 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
21863858a1f4SJohn Baldwin 			kev.ident = (uintptr_t)uacb_list; /* something unique */
218769cd28daSDoug Ambrisko 			kev.data = (intptr_t)lj;
21881ce91824SDavid Xu 			/* pass user defined sigval data */
21891ce91824SDavid Xu 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
21904db71d27SJohn-Mark Gurney 			error = kqfd_register(
21914db71d27SJohn-Mark Gurney 			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
219269cd28daSDoug Ambrisko 			if (error) {
219369cd28daSDoug Ambrisko 				uma_zfree(aiolio_zone, lj);
219469cd28daSDoug Ambrisko 				return (error);
219569cd28daSDoug Ambrisko 			}
21961ce91824SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
21971ce91824SDavid Xu 			;
219868d71118SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
219968d71118SDavid Xu 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
220068d71118SDavid Xu 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
220169cd28daSDoug Ambrisko 					uma_zfree(aiolio_zone, lj);
220269cd28daSDoug Ambrisko 					return EINVAL;
220368d71118SDavid Xu 				}
220484af4da6SJohn Dyson 				lj->lioj_flags |= LIOJ_SIGNAL;
220568d71118SDavid Xu 		} else {
220668d71118SDavid Xu 			uma_zfree(aiolio_zone, lj);
220768d71118SDavid Xu 			return EINVAL;
22084d752b01SAlan Cox 		}
22091ce91824SDavid Xu 	}
221069cd28daSDoug Ambrisko 
2211759ccccaSDavid Xu 	AIO_LOCK(ki);
22122f3cf918SAlfred Perlstein 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
22132244ea07SJohn Dyson 	/*
22141ce91824SDavid Xu 	 * Add an extra aiocb count to prevent the lio from being freed
22151ce91824SDavid Xu 	 * by other threads doing aio_waitcomplete or aio_return, and to
22161ce91824SDavid Xu 	 * prevent the event from being sent until we have queued all
22171ce91824SDavid Xu 	 * tasks.
22181ce91824SDavid Xu 	 */
22191ce91824SDavid Xu 	lj->lioj_count = 1;
2220759ccccaSDavid Xu 	AIO_UNLOCK(ki);
22211ce91824SDavid Xu 
22221ce91824SDavid Xu 	/*
2223bfbbc4aaSJason Evans 	 * Get pointers to the list of I/O requests.
22242244ea07SJohn Dyson 	 */
2225fd3bf775SJohn Dyson 	nerror = 0;
22263858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++) {
22275652770dSJohn Baldwin 		job = acb_list[i];
22285652770dSJohn Baldwin 		if (job != NULL) {
22295652770dSJohn Baldwin 			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
22301ce91824SDavid Xu 			if (error != 0)
2231fd3bf775SJohn Dyson 				nerror++;
2232fd3bf775SJohn Dyson 		}
2233fd3bf775SJohn Dyson 	}
22342244ea07SJohn Dyson 
22351ce91824SDavid Xu 	error = 0;
2236759ccccaSDavid Xu 	AIO_LOCK(ki);
22373858a1f4SJohn Baldwin 	if (mode == LIO_WAIT) {
22381ce91824SDavid Xu 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2239fd3bf775SJohn Dyson 			ki->kaio_flags |= KAIO_WAKEUP;
2240759ccccaSDavid Xu 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
22411ce91824SDavid Xu 			    PRIBIO | PCATCH, "aiospn", 0);
22421ce91824SDavid Xu 			if (error == ERESTART)
22431ce91824SDavid Xu 				error = EINTR;
22441ce91824SDavid Xu 			if (error)
22451ce91824SDavid Xu 				break;
22461ce91824SDavid Xu 		}
22471ce91824SDavid Xu 	} else {
22481ce91824SDavid Xu 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
22491ce91824SDavid Xu 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
22501ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
22511ce91824SDavid Xu 				KNOTE_LOCKED(&lj->klist, 1);
22521ce91824SDavid Xu 			}
22531ce91824SDavid Xu 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
22541ce91824SDavid Xu 			    == LIOJ_SIGNAL
22551ce91824SDavid Xu 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
22561ce91824SDavid Xu 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
22571ce91824SDavid Xu 				aio_sendsig(p, &lj->lioj_signal,
22581ce91824SDavid Xu 					    &lj->lioj_ksi);
22591ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
22602244ea07SJohn Dyson 			}
22612244ea07SJohn Dyson 		}
22621ce91824SDavid Xu 	}
22631ce91824SDavid Xu 	lj->lioj_count--;
22641ce91824SDavid Xu 	if (lj->lioj_count == 0) {
22651ce91824SDavid Xu 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
22661ce91824SDavid Xu 		knlist_delete(&lj->klist, curthread, 1);
2267759ccccaSDavid Xu 		PROC_LOCK(p);
22681ce91824SDavid Xu 		sigqueue_take(&lj->lioj_ksi);
22691ce91824SDavid Xu 		PROC_UNLOCK(p);
2270759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22711ce91824SDavid Xu 		uma_zfree(aiolio_zone, lj);
22721ce91824SDavid Xu 	} else
2273759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22742244ea07SJohn Dyson 
22751ce91824SDavid Xu 	if (nerror)
22761ce91824SDavid Xu 		return (EIO);
22771ce91824SDavid Xu 	return (error);
2278ee877a35SJohn Dyson }
2279fd3bf775SJohn Dyson 
22803858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
2281399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
22823858a1f4SJohn Baldwin int
2283399e8c17SJohn Baldwin freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
22843858a1f4SJohn Baldwin {
22853858a1f4SJohn Baldwin 	struct aiocb **acb_list;
22863858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
22873858a1f4SJohn Baldwin 	struct osigevent osig;
22883858a1f4SJohn Baldwin 	int error, nent;
22893858a1f4SJohn Baldwin 
22903858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
22913858a1f4SJohn Baldwin 		return (EINVAL);
22923858a1f4SJohn Baldwin 
22933858a1f4SJohn Baldwin 	nent = uap->nent;
2294913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
22953858a1f4SJohn Baldwin 		return (EINVAL);
22963858a1f4SJohn Baldwin 
22973858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
22983858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
22993858a1f4SJohn Baldwin 		if (error)
23003858a1f4SJohn Baldwin 			return (error);
23013858a1f4SJohn Baldwin 		error = convert_old_sigevent(&osig, &sig);
23023858a1f4SJohn Baldwin 		if (error)
23033858a1f4SJohn Baldwin 			return (error);
23043858a1f4SJohn Baldwin 		sigp = &sig;
23053858a1f4SJohn Baldwin 	} else
23063858a1f4SJohn Baldwin 		sigp = NULL;
23073858a1f4SJohn Baldwin 
23083858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
23093858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
23103858a1f4SJohn Baldwin 	if (error == 0)
23113858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode,
23123858a1f4SJohn Baldwin 		    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
23133858a1f4SJohn Baldwin 		    &aiocb_ops_osigevent);
23143858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23153858a1f4SJohn Baldwin 	return (error);
23163858a1f4SJohn Baldwin }
2317399e8c17SJohn Baldwin #endif
23183858a1f4SJohn Baldwin 
23193858a1f4SJohn Baldwin /* syscall - list directed I/O (REALTIME) */
23203858a1f4SJohn Baldwin int
23218451d0ddSKip Macy sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
23223858a1f4SJohn Baldwin {
23233858a1f4SJohn Baldwin 	struct aiocb **acb_list;
23243858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
23253858a1f4SJohn Baldwin 	int error, nent;
23263858a1f4SJohn Baldwin 
23273858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
23283858a1f4SJohn Baldwin 		return (EINVAL);
23293858a1f4SJohn Baldwin 
23303858a1f4SJohn Baldwin 	nent = uap->nent;
2331913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
23323858a1f4SJohn Baldwin 		return (EINVAL);
23333858a1f4SJohn Baldwin 
23343858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
23353858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig, sizeof(sig));
23363858a1f4SJohn Baldwin 		if (error)
23373858a1f4SJohn Baldwin 			return (error);
23383858a1f4SJohn Baldwin 		sigp = &sig;
23393858a1f4SJohn Baldwin 	} else
23403858a1f4SJohn Baldwin 		sigp = NULL;
23413858a1f4SJohn Baldwin 
23423858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
23433858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
23443858a1f4SJohn Baldwin 	if (error == 0)
23453858a1f4SJohn Baldwin 		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
23463858a1f4SJohn Baldwin 		    nent, sigp, &aiocb_ops);
23473858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
23483858a1f4SJohn Baldwin 	return (error);
23493858a1f4SJohn Baldwin }
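
/*
 * A minimal userland sketch of lio_listio(2) usage (an illustration
 * only; it assumes <aio.h> and <err.h> are included, "fd" is an open
 * descriptor, and "buf" is a suitably sized buffer):
 *
 *	struct aiocb cb = {
 *		.aio_fildes = fd,
 *		.aio_buf = buf,
 *		.aio_nbytes = sizeof(buf),
 *		.aio_offset = 0,
 *		.aio_lio_opcode = LIO_READ,
 *	};
 *	struct aiocb *list[1] = { &cb };
 *
 *	if (lio_listio(LIO_WAIT, list, 1, NULL) == -1)
 *		err(1, "lio_listio");
 *
 * With LIO_NOWAIT and a non-NULL sigevent, completion of the whole list
 * is instead reported by signal or kevent, as queued above.
 */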
23503858a1f4SJohn Baldwin 
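/*
 * Completion callback for bio-based AIO requests.  Releases the kernel
 * mapping and the held user pages, updates the job's block I/O
 * accounting, completes the job with either the byte count transferred
 * or the bio error, and frees the bio.
 */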
2351fd3bf775SJohn Dyson static void
2352f743d981SAlexander Motin aio_physwakeup(struct bio *bp)
2353fd3bf775SJohn Dyson {
23545652770dSJohn Baldwin 	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
23551ce91824SDavid Xu 	struct proc *userp;
235627b8220dSDavid Xu 	struct kaioinfo *ki;
2357f3215338SJohn Baldwin 	size_t nbytes;
2358f3215338SJohn Baldwin 	int error, nblks;
23591ce91824SDavid Xu 
2360f743d981SAlexander Motin 	/* Release mapping into kernel space. */
2361f3215338SJohn Baldwin 	userp = job->userproc;
2362f3215338SJohn Baldwin 	ki = userp->p_aioinfo;
23635652770dSJohn Baldwin 	if (job->pbuf) {
23645652770dSJohn Baldwin 		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
23655652770dSJohn Baldwin 		relpbuf(job->pbuf, NULL);
23665652770dSJohn Baldwin 		job->pbuf = NULL;
2367f743d981SAlexander Motin 		atomic_subtract_int(&num_buf_aio, 1);
2368f3215338SJohn Baldwin 		AIO_LOCK(ki);
2369f3215338SJohn Baldwin 		ki->kaio_buffer_count--;
2370f3215338SJohn Baldwin 		AIO_UNLOCK(ki);
23718091e52bSJohn Baldwin 	} else
23728091e52bSJohn Baldwin 		atomic_subtract_int(&num_unmapped_aio, 1);
23735652770dSJohn Baldwin 	vm_page_unhold_pages(job->pages, job->npages);
2374f743d981SAlexander Motin 
23755652770dSJohn Baldwin 	bp = job->bp;
23765652770dSJohn Baldwin 	job->bp = NULL;
2377f3215338SJohn Baldwin 	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
2378f3215338SJohn Baldwin 	error = 0;
2379f743d981SAlexander Motin 	if (bp->bio_flags & BIO_ERROR)
2380f3215338SJohn Baldwin 		error = bp->bio_error;
2381f3215338SJohn Baldwin 	nblks = btodb(nbytes);
23825652770dSJohn Baldwin 	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
2383b1012d80SJohn Baldwin 		job->outblock += nblks;
23841ce91824SDavid Xu 	else
2385b1012d80SJohn Baldwin 		job->inblock += nblks;
2386f3215338SJohn Baldwin 
2387f0ec1740SJohn Baldwin 	if (error)
2388f0ec1740SJohn Baldwin 		aio_complete(job, -1, error);
2389f0ec1740SJohn Baldwin 	else
2390f0ec1740SJohn Baldwin 		aio_complete(job, nbytes, 0);
23911ce91824SDavid Xu 
2392f743d981SAlexander Motin 	g_destroy_bio(bp);
239384af4da6SJohn Dyson }
2394bfbbc4aaSJason Evans 
2395eb8e6d52SEivind Eklund /* syscall - wait for the next completion of an aio request */
23963858a1f4SJohn Baldwin static int
23975652770dSJohn Baldwin kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
23983858a1f4SJohn Baldwin     struct timespec *ts, struct aiocb_ops *ops)
2399bfbbc4aaSJason Evans {
2400b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
2401bfbbc4aaSJason Evans 	struct timeval atv;
2402bfbbc4aaSJason Evans 	struct kaioinfo *ki;
24035652770dSJohn Baldwin 	struct kaiocb *job;
24045652770dSJohn Baldwin 	struct aiocb *ujob;
2405bb430bc7SJohn Baldwin 	long error, status;
2406bb430bc7SJohn Baldwin 	int timo;
2407bfbbc4aaSJason Evans 
24085652770dSJohn Baldwin 	ops->store_aiocb(ujobp, NULL);
2409dd85920aSJason Evans 
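	/*
	 * Decode the timeout: a NULL timespec sleeps until a request
	 * completes, an all-zero timespec polls (the timo of -1 yields
	 * EWOULDBLOCK below if nothing has finished), and anything else
	 * is converted to ticks.
	 */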
241038d68e2dSPawel Jakub Dawidek 	if (ts == NULL) {
2411bfbbc4aaSJason Evans 		timo = 0;
241238d68e2dSPawel Jakub Dawidek 	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
241338d68e2dSPawel Jakub Dawidek 		timo = -1;
241438d68e2dSPawel Jakub Dawidek 	} else {
24153858a1f4SJohn Baldwin 		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
2416bfbbc4aaSJason Evans 			return (EINVAL);
2417bfbbc4aaSJason Evans 
24183858a1f4SJohn Baldwin 		TIMESPEC_TO_TIMEVAL(&atv, ts);
2419bfbbc4aaSJason Evans 		if (itimerfix(&atv))
2420bfbbc4aaSJason Evans 			return (EINVAL);
2421bfbbc4aaSJason Evans 		timo = tvtohz(&atv);
2422bfbbc4aaSJason Evans 	}
2423bfbbc4aaSJason Evans 
24248213baf0SChristian S.J. Peron 	if (p->p_aioinfo == NULL)
2425323fe565SDavid Xu 		aio_init_aioinfo(p);
24268213baf0SChristian S.J. Peron 	ki = p->p_aioinfo;
2427bfbbc4aaSJason Evans 
24281ce91824SDavid Xu 	error = 0;
24295652770dSJohn Baldwin 	job = NULL;
2430759ccccaSDavid Xu 	AIO_LOCK(ki);
24315652770dSJohn Baldwin 	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
243238d68e2dSPawel Jakub Dawidek 		if (timo == -1) {
243338d68e2dSPawel Jakub Dawidek 			error = EWOULDBLOCK;
243438d68e2dSPawel Jakub Dawidek 			break;
243538d68e2dSPawel Jakub Dawidek 		}
24361ce91824SDavid Xu 		ki->kaio_flags |= KAIO_WAKEUP;
2437759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
24381ce91824SDavid Xu 		    "aiowc", timo);
243927b8220dSDavid Xu 		if (timo && error == ERESTART)
24401ce91824SDavid Xu 			error = EINTR;
24411ce91824SDavid Xu 		if (error)
24421ce91824SDavid Xu 			break;
24431ce91824SDavid Xu 	}
24441ce91824SDavid Xu 
24455652770dSJohn Baldwin 	if (job != NULL) {
2446f3215338SJohn Baldwin 		MPASS(job->jobflags & KAIOCB_FINISHED);
24475652770dSJohn Baldwin 		ujob = job->ujob;
24485652770dSJohn Baldwin 		status = job->uaiocb._aiocb_private.status;
24495652770dSJohn Baldwin 		error = job->uaiocb._aiocb_private.error;
24501ce91824SDavid Xu 		td->td_retval[0] = status;
2451b1012d80SJohn Baldwin 		td->td_ru.ru_oublock += job->outblock;
2452b1012d80SJohn Baldwin 		td->td_ru.ru_inblock += job->inblock;
2453b1012d80SJohn Baldwin 		td->td_ru.ru_msgsnd += job->msgsnd;
2454b1012d80SJohn Baldwin 		td->td_ru.ru_msgrcv += job->msgrcv;
24555652770dSJohn Baldwin 		aio_free_entry(job);
2456759ccccaSDavid Xu 		AIO_UNLOCK(ki);
24575652770dSJohn Baldwin 		ops->store_aiocb(ujobp, ujob);
24585652770dSJohn Baldwin 		ops->store_error(ujob, error);
24595652770dSJohn Baldwin 		ops->store_status(ujob, status);
24601ce91824SDavid Xu 	} else
2461759ccccaSDavid Xu 		AIO_UNLOCK(ki);
2462bfbbc4aaSJason Evans 
2463ac41f2efSAlfred Perlstein 	return (error);
2464bfbbc4aaSJason Evans }
2465cb679c38SJonathan Lemon 
246699eee864SDavid Xu int
24678451d0ddSKip Macy sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
24683858a1f4SJohn Baldwin {
24693858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
24703858a1f4SJohn Baldwin 	int error;
24713858a1f4SJohn Baldwin 
24723858a1f4SJohn Baldwin 	if (uap->timeout) {
24733858a1f4SJohn Baldwin 		/* Get timespec struct. */
24743858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts, sizeof(ts));
24753858a1f4SJohn Baldwin 		if (error)
24763858a1f4SJohn Baldwin 			return (error);
24773858a1f4SJohn Baldwin 		tsp = &ts;
24783858a1f4SJohn Baldwin 	} else
24793858a1f4SJohn Baldwin 		tsp = NULL;
24803858a1f4SJohn Baldwin 
24813858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
24823858a1f4SJohn Baldwin }
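
/*
 * A minimal userland sketch of aio_waitcomplete(2) (an illustration
 * only; it assumes <aio.h> and <err.h> are included and that requests
 * were queued earlier with aio_read() or aio_write()):
 *
 *	struct aiocb *donep;
 *	ssize_t n;
 *
 *	n = aio_waitcomplete(&donep, NULL);
 *	if (n == -1)
 *		err(1, "aio_waitcomplete");
 *
 * On return "donep" points at the completed control block and "n" holds
 * its aio_return()-style result.  Passing an all-zero timespec instead
 * of NULL makes the call non-blocking, matching the timo handling in
 * kern_aio_waitcomplete() above.
 */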
24833858a1f4SJohn Baldwin 
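/*
 * Common code for aio_fsync(2): only O_SYNC is accepted (O_DSYNC is not
 * implemented), and the request is queued as an LIO_SYNC job.
 */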
24843858a1f4SJohn Baldwin static int
24855652770dSJohn Baldwin kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
24863858a1f4SJohn Baldwin     struct aiocb_ops *ops)
248799eee864SDavid Xu {
248899eee864SDavid Xu 
24893858a1f4SJohn Baldwin 	if (op != O_SYNC) /* XXX lack of O_DSYNC */
249099eee864SDavid Xu 		return (EINVAL);
24915652770dSJohn Baldwin 	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
24923858a1f4SJohn Baldwin }
24933858a1f4SJohn Baldwin 
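/* syscall - asynchronous file synchronization (REALTIME) */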
24943858a1f4SJohn Baldwin int
24958451d0ddSKip Macy sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
24963858a1f4SJohn Baldwin {
24973858a1f4SJohn Baldwin 
24983858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
249999eee864SDavid Xu }
250099eee864SDavid Xu 
2501eb8e6d52SEivind Eklund /* kqueue attach function */
2502cb679c38SJonathan Lemon static int
2503cb679c38SJonathan Lemon filt_aioattach(struct knote *kn)
2504cb679c38SJonathan Lemon {
25052b34e843SKonstantin Belousov 	struct kaiocb *job;
25062b34e843SKonstantin Belousov 
25072b34e843SKonstantin Belousov 	job = (struct kaiocb *)(uintptr_t)kn->kn_sdata;
2508cb679c38SJonathan Lemon 
2509cb679c38SJonathan Lemon 	/*
25105652770dSJohn Baldwin 	 * The job pointer must be validated before using it, so
2511cb679c38SJonathan Lemon 	 * registration is restricted to the kernel; the user cannot
2512cb679c38SJonathan Lemon 	 * set EV_FLAG1.
2513cb679c38SJonathan Lemon 	 */
2514cb679c38SJonathan Lemon 	if ((kn->kn_flags & EV_FLAG1) == 0)
2515cb679c38SJonathan Lemon 		return (EPERM);
25165652770dSJohn Baldwin 	kn->kn_ptr.p_aio = job;
2517cb679c38SJonathan Lemon 	kn->kn_flags &= ~EV_FLAG1;
2518cb679c38SJonathan Lemon 
25195652770dSJohn Baldwin 	knlist_add(&job->klist, kn, 0);
2520cb679c38SJonathan Lemon 
2521cb679c38SJonathan Lemon 	return (0);
2522cb679c38SJonathan Lemon }
2523cb679c38SJonathan Lemon 
2524eb8e6d52SEivind Eklund /* kqueue detach function */
2525cb679c38SJonathan Lemon static void
2526cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn)
2527cb679c38SJonathan Lemon {
25288e9fc278SDoug Ambrisko 	struct knlist *knl;
2529cb679c38SJonathan Lemon 
25308e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_aio->klist;
25318e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25328e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25338e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25348e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
2535cb679c38SJonathan Lemon }
2536cb679c38SJonathan Lemon 
2537eb8e6d52SEivind Eklund /* kqueue filter function */
2538cb679c38SJonathan Lemon /*ARGSUSED*/
2539cb679c38SJonathan Lemon static int
2540cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint)
2541cb679c38SJonathan Lemon {
25425652770dSJohn Baldwin 	struct kaiocb *job = kn->kn_ptr.p_aio;
2543cb679c38SJonathan Lemon 
25445652770dSJohn Baldwin 	kn->kn_data = job->uaiocb._aiocb_private.error;
2545f3215338SJohn Baldwin 	if (!(job->jobflags & KAIOCB_FINISHED))
2546cb679c38SJonathan Lemon 		return (0);
2547cb679c38SJonathan Lemon 	kn->kn_flags |= EV_EOF;
2548cb679c38SJonathan Lemon 	return (1);
2549cb679c38SJonathan Lemon }
255069cd28daSDoug Ambrisko 
255169cd28daSDoug Ambrisko /* kqueue attach function */
255269cd28daSDoug Ambrisko static int
255369cd28daSDoug Ambrisko filt_lioattach(struct knote *kn)
255469cd28daSDoug Ambrisko {
25552b34e843SKonstantin Belousov 	struct aioliojob *lj;
25562b34e843SKonstantin Belousov 
25572b34e843SKonstantin Belousov 	lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata;
255869cd28daSDoug Ambrisko 
255969cd28daSDoug Ambrisko 	/*
25601ce91824SDavid Xu 	 * The aioliojob pointer must be validated before using it, so
256169cd28daSDoug Ambrisko 	 * registration is restricted to the kernel; the user cannot
256269cd28daSDoug Ambrisko 	 * set EV_FLAG1.
256369cd28daSDoug Ambrisko 	 */
256469cd28daSDoug Ambrisko 	if ((kn->kn_flags & EV_FLAG1) == 0)
256569cd28daSDoug Ambrisko 		return (EPERM);
2566a8afa221SJean-Sébastien Pédron 	kn->kn_ptr.p_lio = lj;
256769cd28daSDoug Ambrisko 	kn->kn_flags &= ~EV_FLAG1;
256869cd28daSDoug Ambrisko 
256969cd28daSDoug Ambrisko 	knlist_add(&lj->klist, kn, 0);
257069cd28daSDoug Ambrisko 
257169cd28daSDoug Ambrisko 	return (0);
257269cd28daSDoug Ambrisko }
257369cd28daSDoug Ambrisko 
257469cd28daSDoug Ambrisko /* kqueue detach function */
257569cd28daSDoug Ambrisko static void
257669cd28daSDoug Ambrisko filt_liodetach(struct knote *kn)
257769cd28daSDoug Ambrisko {
25788e9fc278SDoug Ambrisko 	struct knlist *knl;
257969cd28daSDoug Ambrisko 
25808e9fc278SDoug Ambrisko 	knl = &kn->kn_ptr.p_lio->klist;
25818e9fc278SDoug Ambrisko 	knl->kl_lock(knl->kl_lockarg);
25828e9fc278SDoug Ambrisko 	if (!knlist_empty(knl))
25838e9fc278SDoug Ambrisko 		knlist_remove(knl, kn, 1);
25848e9fc278SDoug Ambrisko 	knl->kl_unlock(knl->kl_lockarg);
258569cd28daSDoug Ambrisko }
258669cd28daSDoug Ambrisko 
258769cd28daSDoug Ambrisko /* kqueue filter function */
258869cd28daSDoug Ambrisko /*ARGSUSED*/
258969cd28daSDoug Ambrisko static int
259069cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint)
259169cd28daSDoug Ambrisko {
2592a8afa221SJean-Sébastien Pédron 	struct aioliojob *lj = kn->kn_ptr.p_lio;
25931ce91824SDavid Xu 
259469cd28daSDoug Ambrisko 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
259569cd28daSDoug Ambrisko }
25963858a1f4SJohn Baldwin 
2597841c0c7eSNathan Whitehorn #ifdef COMPAT_FREEBSD32
2598399e8c17SJohn Baldwin #include <sys/mount.h>
2599399e8c17SJohn Baldwin #include <sys/socket.h>
2600399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32.h>
2601399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_proto.h>
2602399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_signal.h>
2603399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_syscall.h>
2604399e8c17SJohn Baldwin #include <compat/freebsd32/freebsd32_util.h>
26053858a1f4SJohn Baldwin 
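/*
 * 32-bit layouts of the aiocb control blocks and of their private
 * kernel fields, used when servicing requests from 32-bit processes.
 * Pointers are carried as uint32_t and the 64-bit file offset is packed
 * so that the structures match the 32-bit ABI.
 */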
26063858a1f4SJohn Baldwin struct __aiocb_private32 {
26073858a1f4SJohn Baldwin 	int32_t	status;
26083858a1f4SJohn Baldwin 	int32_t	error;
26093858a1f4SJohn Baldwin 	uint32_t kernelinfo;
26103858a1f4SJohn Baldwin };
26113858a1f4SJohn Baldwin 
2612399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
26133858a1f4SJohn Baldwin typedef struct oaiocb32 {
26143858a1f4SJohn Baldwin 	int	aio_fildes;		/* File descriptor */
26153858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
26163858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
26173858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26183858a1f4SJohn Baldwin 	struct	osigevent32 aio_sigevent; /* Signal to deliver */
26193858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26203858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26213858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26223858a1f4SJohn Baldwin } oaiocb32_t;
2623399e8c17SJohn Baldwin #endif
26243858a1f4SJohn Baldwin 
26253858a1f4SJohn Baldwin typedef struct aiocb32 {
26263858a1f4SJohn Baldwin 	int32_t	aio_fildes;		/* File descriptor */
26273858a1f4SJohn Baldwin 	uint64_t aio_offset __packed;	/* File offset for I/O */
26283858a1f4SJohn Baldwin 	uint32_t aio_buf;		/* I/O buffer in process space */
26293858a1f4SJohn Baldwin 	uint32_t aio_nbytes;		/* Number of bytes for I/O */
26303858a1f4SJohn Baldwin 	int	__spare__[2];
26313858a1f4SJohn Baldwin 	uint32_t __spare2__;
26323858a1f4SJohn Baldwin 	int	aio_lio_opcode;		/* LIO opcode */
26333858a1f4SJohn Baldwin 	int	aio_reqprio;		/* Request priority -- ignored */
26343858a1f4SJohn Baldwin 	struct	__aiocb_private32 _aiocb_private;
26353858a1f4SJohn Baldwin 	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
26363858a1f4SJohn Baldwin } aiocb32_t;
26373858a1f4SJohn Baldwin 
2638399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
26393858a1f4SJohn Baldwin static int
26403858a1f4SJohn Baldwin convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
26413858a1f4SJohn Baldwin {
26423858a1f4SJohn Baldwin 
26433858a1f4SJohn Baldwin 	/*
26443858a1f4SJohn Baldwin 	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
26453858a1f4SJohn Baldwin 	 * supported by AIO with the old sigevent structure.
26463858a1f4SJohn Baldwin 	 */
26473858a1f4SJohn Baldwin 	CP(*osig, *nsig, sigev_notify);
26483858a1f4SJohn Baldwin 	switch (nsig->sigev_notify) {
26493858a1f4SJohn Baldwin 	case SIGEV_NONE:
26503858a1f4SJohn Baldwin 		break;
26513858a1f4SJohn Baldwin 	case SIGEV_SIGNAL:
26523858a1f4SJohn Baldwin 		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
26533858a1f4SJohn Baldwin 		break;
26543858a1f4SJohn Baldwin 	case SIGEV_KEVENT:
26553858a1f4SJohn Baldwin 		nsig->sigev_notify_kqueue =
26563858a1f4SJohn Baldwin 		    osig->__sigev_u.__sigev_notify_kqueue;
26573858a1f4SJohn Baldwin 		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
26583858a1f4SJohn Baldwin 		break;
26593858a1f4SJohn Baldwin 	default:
26603858a1f4SJohn Baldwin 		return (EINVAL);
26613858a1f4SJohn Baldwin 	}
26623858a1f4SJohn Baldwin 	return (0);
26633858a1f4SJohn Baldwin }
26643858a1f4SJohn Baldwin 
26653858a1f4SJohn Baldwin static int
26663858a1f4SJohn Baldwin aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
26673858a1f4SJohn Baldwin {
26683858a1f4SJohn Baldwin 	struct oaiocb32 job32;
26693858a1f4SJohn Baldwin 	int error;
26703858a1f4SJohn Baldwin 
26713858a1f4SJohn Baldwin 	bzero(kjob, sizeof(struct aiocb));
26723858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26733858a1f4SJohn Baldwin 	if (error)
26743858a1f4SJohn Baldwin 		return (error);
26753858a1f4SJohn Baldwin 
26763858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
26773858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
26783858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
26793858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
26803858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
26813858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
26823858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
26833858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
26843858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
26853858a1f4SJohn Baldwin 	return (convert_old_sigevent32(&job32.aio_sigevent,
26863858a1f4SJohn Baldwin 	    &kjob->aio_sigevent));
26873858a1f4SJohn Baldwin }
2688399e8c17SJohn Baldwin #endif
26893858a1f4SJohn Baldwin 
26903858a1f4SJohn Baldwin static int
26913858a1f4SJohn Baldwin aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
26923858a1f4SJohn Baldwin {
26933858a1f4SJohn Baldwin 	struct aiocb32 job32;
26943858a1f4SJohn Baldwin 	int error;
26953858a1f4SJohn Baldwin 
26963858a1f4SJohn Baldwin 	error = copyin(ujob, &job32, sizeof(job32));
26973858a1f4SJohn Baldwin 	if (error)
26983858a1f4SJohn Baldwin 		return (error);
26993858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_fildes);
27003858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_offset);
27013858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, aio_buf);
27023858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_nbytes);
27033858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_lio_opcode);
27043858a1f4SJohn Baldwin 	CP(job32, *kjob, aio_reqprio);
27053858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.status);
27063858a1f4SJohn Baldwin 	CP(job32, *kjob, _aiocb_private.error);
27073858a1f4SJohn Baldwin 	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
27083858a1f4SJohn Baldwin 	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
27093858a1f4SJohn Baldwin }
27103858a1f4SJohn Baldwin 
27113858a1f4SJohn Baldwin static long
27123858a1f4SJohn Baldwin aiocb32_fetch_status(struct aiocb *ujob)
27133858a1f4SJohn Baldwin {
27143858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27153858a1f4SJohn Baldwin 
27163858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27173858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.status));
27183858a1f4SJohn Baldwin }
27193858a1f4SJohn Baldwin 
27203858a1f4SJohn Baldwin static long
27213858a1f4SJohn Baldwin aiocb32_fetch_error(struct aiocb *ujob)
27223858a1f4SJohn Baldwin {
27233858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27243858a1f4SJohn Baldwin 
27253858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27263858a1f4SJohn Baldwin 	return (fuword32(&ujob32->_aiocb_private.error));
27273858a1f4SJohn Baldwin }
27283858a1f4SJohn Baldwin 
27293858a1f4SJohn Baldwin static int
27303858a1f4SJohn Baldwin aiocb32_store_status(struct aiocb *ujob, long status)
27313858a1f4SJohn Baldwin {
27323858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27333858a1f4SJohn Baldwin 
27343858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27353858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.status, status));
27363858a1f4SJohn Baldwin }
27373858a1f4SJohn Baldwin 
27383858a1f4SJohn Baldwin static int
27393858a1f4SJohn Baldwin aiocb32_store_error(struct aiocb *ujob, long error)
27403858a1f4SJohn Baldwin {
27413858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27423858a1f4SJohn Baldwin 
27433858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27443858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.error, error));
27453858a1f4SJohn Baldwin }
27463858a1f4SJohn Baldwin 
27473858a1f4SJohn Baldwin static int
27483858a1f4SJohn Baldwin aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
27493858a1f4SJohn Baldwin {
27503858a1f4SJohn Baldwin 	struct aiocb32 *ujob32;
27513858a1f4SJohn Baldwin 
27523858a1f4SJohn Baldwin 	ujob32 = (struct aiocb32 *)ujob;
27533858a1f4SJohn Baldwin 	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
27543858a1f4SJohn Baldwin }
27553858a1f4SJohn Baldwin 
27563858a1f4SJohn Baldwin static int
27573858a1f4SJohn Baldwin aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
27583858a1f4SJohn Baldwin {
27593858a1f4SJohn Baldwin 
27603858a1f4SJohn Baldwin 	return (suword32(ujobp, (long)ujob));
27613858a1f4SJohn Baldwin }
27623858a1f4SJohn Baldwin 
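/*
 * Operations vector used by the 32-bit compat syscalls to copy in
 * aiocb32 control blocks and to store status, error, kernelinfo, and
 * aiocb pointer values back into the 32-bit user structures.
 */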
27633858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops = {
27643858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin,
27653858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27663858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27673858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27683858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27693858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27703858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27713858a1f4SJohn Baldwin };
27723858a1f4SJohn Baldwin 
2773399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
27743858a1f4SJohn Baldwin static struct aiocb_ops aiocb32_ops_osigevent = {
27753858a1f4SJohn Baldwin 	.copyin = aiocb32_copyin_old_sigevent,
27763858a1f4SJohn Baldwin 	.fetch_status = aiocb32_fetch_status,
27773858a1f4SJohn Baldwin 	.fetch_error = aiocb32_fetch_error,
27783858a1f4SJohn Baldwin 	.store_status = aiocb32_store_status,
27793858a1f4SJohn Baldwin 	.store_error = aiocb32_store_error,
27803858a1f4SJohn Baldwin 	.store_kernelinfo = aiocb32_store_kernelinfo,
27813858a1f4SJohn Baldwin 	.store_aiocb = aiocb32_store_aiocb,
27823858a1f4SJohn Baldwin };
2783399e8c17SJohn Baldwin #endif
27843858a1f4SJohn Baldwin 
27853858a1f4SJohn Baldwin int
27863858a1f4SJohn Baldwin freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
27873858a1f4SJohn Baldwin {
27883858a1f4SJohn Baldwin 
27893858a1f4SJohn Baldwin 	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
27903858a1f4SJohn Baldwin }
27913858a1f4SJohn Baldwin 
27923858a1f4SJohn Baldwin int
27933858a1f4SJohn Baldwin freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
27943858a1f4SJohn Baldwin {
27953858a1f4SJohn Baldwin 	struct timespec32 ts32;
27963858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
27973858a1f4SJohn Baldwin 	struct aiocb **ujoblist;
27983858a1f4SJohn Baldwin 	uint32_t *ujoblist32;
27993858a1f4SJohn Baldwin 	int error, i;
28003858a1f4SJohn Baldwin 
2801913b9329SAlan Somers 	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
28023858a1f4SJohn Baldwin 		return (EINVAL);
28033858a1f4SJohn Baldwin 
28043858a1f4SJohn Baldwin 	if (uap->timeout) {
28053858a1f4SJohn Baldwin 		/* Get timespec struct. */
28063858a1f4SJohn Baldwin 		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
28073858a1f4SJohn Baldwin 			return (error);
28083858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28093858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28103858a1f4SJohn Baldwin 		tsp = &ts;
28113858a1f4SJohn Baldwin 	} else
28123858a1f4SJohn Baldwin 		tsp = NULL;
28133858a1f4SJohn Baldwin 
2814913b9329SAlan Somers 	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
28153858a1f4SJohn Baldwin 	ujoblist32 = (uint32_t *)ujoblist;
28163858a1f4SJohn Baldwin 	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
28173858a1f4SJohn Baldwin 	    sizeof(ujoblist32[0]));
28183858a1f4SJohn Baldwin 	if (error == 0) {
2819df485bdbSAlan Somers 		for (i = uap->nent - 1; i >= 0; i--)
28203858a1f4SJohn Baldwin 			ujoblist[i] = PTRIN(ujoblist32[i]);
28213858a1f4SJohn Baldwin 
28223858a1f4SJohn Baldwin 		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
28233858a1f4SJohn Baldwin 	}
2824913b9329SAlan Somers 	free(ujoblist, M_AIOS);
28253858a1f4SJohn Baldwin 	return (error);
28263858a1f4SJohn Baldwin }
28273858a1f4SJohn Baldwin 
28283858a1f4SJohn Baldwin int
28293858a1f4SJohn Baldwin freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
28303858a1f4SJohn Baldwin {
28313858a1f4SJohn Baldwin 
28323858a1f4SJohn Baldwin 	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
28333858a1f4SJohn Baldwin }
28343858a1f4SJohn Baldwin 
2835399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28363858a1f4SJohn Baldwin int
2837399e8c17SJohn Baldwin freebsd6_freebsd32_aio_read(struct thread *td,
2838399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_read_args *uap)
28393858a1f4SJohn Baldwin {
28403858a1f4SJohn Baldwin 
28413858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28423858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28433858a1f4SJohn Baldwin }
2844399e8c17SJohn Baldwin #endif
28453858a1f4SJohn Baldwin 
28463858a1f4SJohn Baldwin int
28473858a1f4SJohn Baldwin freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
28483858a1f4SJohn Baldwin {
28493858a1f4SJohn Baldwin 
28503858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
28513858a1f4SJohn Baldwin 	    &aiocb32_ops));
28523858a1f4SJohn Baldwin }
28533858a1f4SJohn Baldwin 
2854399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
28553858a1f4SJohn Baldwin int
2856399e8c17SJohn Baldwin freebsd6_freebsd32_aio_write(struct thread *td,
2857399e8c17SJohn Baldwin     struct freebsd6_freebsd32_aio_write_args *uap)
28583858a1f4SJohn Baldwin {
28593858a1f4SJohn Baldwin 
28603858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28613858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent));
28623858a1f4SJohn Baldwin }
2863399e8c17SJohn Baldwin #endif
28643858a1f4SJohn Baldwin 
28653858a1f4SJohn Baldwin int
28663858a1f4SJohn Baldwin freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
28673858a1f4SJohn Baldwin {
28683858a1f4SJohn Baldwin 
28693858a1f4SJohn Baldwin 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
28703858a1f4SJohn Baldwin 	    &aiocb32_ops));
28713858a1f4SJohn Baldwin }
28723858a1f4SJohn Baldwin 
28733858a1f4SJohn Baldwin int
28746160e12cSGleb Smirnoff freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
28756160e12cSGleb Smirnoff {
28766160e12cSGleb Smirnoff 
28776160e12cSGleb Smirnoff 	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
28786160e12cSGleb Smirnoff 	    &aiocb32_ops));
28796160e12cSGleb Smirnoff }
28806160e12cSGleb Smirnoff 
28816160e12cSGleb Smirnoff int
28823858a1f4SJohn Baldwin freebsd32_aio_waitcomplete(struct thread *td,
28833858a1f4SJohn Baldwin     struct freebsd32_aio_waitcomplete_args *uap)
28843858a1f4SJohn Baldwin {
2885e588eeb1SJohn Baldwin 	struct timespec32 ts32;
28863858a1f4SJohn Baldwin 	struct timespec ts, *tsp;
28873858a1f4SJohn Baldwin 	int error;
28883858a1f4SJohn Baldwin 
28893858a1f4SJohn Baldwin 	if (uap->timeout) {
28903858a1f4SJohn Baldwin 		/* Get timespec struct. */
28913858a1f4SJohn Baldwin 		error = copyin(uap->timeout, &ts32, sizeof(ts32));
28923858a1f4SJohn Baldwin 		if (error)
28933858a1f4SJohn Baldwin 			return (error);
28943858a1f4SJohn Baldwin 		CP(ts32, ts, tv_sec);
28953858a1f4SJohn Baldwin 		CP(ts32, ts, tv_nsec);
28963858a1f4SJohn Baldwin 		tsp = &ts;
28973858a1f4SJohn Baldwin 	} else
28983858a1f4SJohn Baldwin 		tsp = NULL;
28993858a1f4SJohn Baldwin 
29003858a1f4SJohn Baldwin 	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
29013858a1f4SJohn Baldwin 	    &aiocb32_ops));
29023858a1f4SJohn Baldwin }
29033858a1f4SJohn Baldwin 
29043858a1f4SJohn Baldwin int
29053858a1f4SJohn Baldwin freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
29063858a1f4SJohn Baldwin {
29073858a1f4SJohn Baldwin 
29083858a1f4SJohn Baldwin 	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
29093858a1f4SJohn Baldwin 	    &aiocb32_ops));
29103858a1f4SJohn Baldwin }
29113858a1f4SJohn Baldwin 
2912399e8c17SJohn Baldwin #ifdef COMPAT_FREEBSD6
29133858a1f4SJohn Baldwin int
2914399e8c17SJohn Baldwin freebsd6_freebsd32_lio_listio(struct thread *td,
2915399e8c17SJohn Baldwin     struct freebsd6_freebsd32_lio_listio_args *uap)
29163858a1f4SJohn Baldwin {
29173858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29183858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29193858a1f4SJohn Baldwin 	struct osigevent32 osig;
29203858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29213858a1f4SJohn Baldwin 	int error, i, nent;
29223858a1f4SJohn Baldwin 
29233858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29243858a1f4SJohn Baldwin 		return (EINVAL);
29253858a1f4SJohn Baldwin 
29263858a1f4SJohn Baldwin 	nent = uap->nent;
2927913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
29283858a1f4SJohn Baldwin 		return (EINVAL);
29293858a1f4SJohn Baldwin 
29303858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29313858a1f4SJohn Baldwin 		error = copyin(uap->sig, &osig, sizeof(osig));
29323858a1f4SJohn Baldwin 		if (error)
29333858a1f4SJohn Baldwin 			return (error);
29343858a1f4SJohn Baldwin 		error = convert_old_sigevent32(&osig, &sig);
29353858a1f4SJohn Baldwin 		if (error)
29363858a1f4SJohn Baldwin 			return (error);
29373858a1f4SJohn Baldwin 		sigp = &sig;
29383858a1f4SJohn Baldwin 	} else
29393858a1f4SJohn Baldwin 		sigp = NULL;
29403858a1f4SJohn Baldwin 
29413858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29423858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29433858a1f4SJohn Baldwin 	if (error) {
29443858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29453858a1f4SJohn Baldwin 		return (error);
29463858a1f4SJohn Baldwin 	}
29473858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29483858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29493858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29503858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29513858a1f4SJohn Baldwin 
29523858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29533858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
29543858a1f4SJohn Baldwin 	    &aiocb32_ops_osigevent);
29553858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
29563858a1f4SJohn Baldwin 	return (error);
29573858a1f4SJohn Baldwin }
2958399e8c17SJohn Baldwin #endif
29593858a1f4SJohn Baldwin 
29603858a1f4SJohn Baldwin int
29613858a1f4SJohn Baldwin freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
29623858a1f4SJohn Baldwin {
29633858a1f4SJohn Baldwin 	struct aiocb **acb_list;
29643858a1f4SJohn Baldwin 	struct sigevent *sigp, sig;
29653858a1f4SJohn Baldwin 	struct sigevent32 sig32;
29663858a1f4SJohn Baldwin 	uint32_t *acb_list32;
29673858a1f4SJohn Baldwin 	int error, i, nent;
29683858a1f4SJohn Baldwin 
29693858a1f4SJohn Baldwin 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
29703858a1f4SJohn Baldwin 		return (EINVAL);
29713858a1f4SJohn Baldwin 
29723858a1f4SJohn Baldwin 	nent = uap->nent;
2973913b9329SAlan Somers 	if (nent < 0 || nent > max_aio_queue_per_proc)
29743858a1f4SJohn Baldwin 		return (EINVAL);
29753858a1f4SJohn Baldwin 
29763858a1f4SJohn Baldwin 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
29773858a1f4SJohn Baldwin 		error = copyin(uap->sig, &sig32, sizeof(sig32));
29783858a1f4SJohn Baldwin 		if (error)
29793858a1f4SJohn Baldwin 			return (error);
29803858a1f4SJohn Baldwin 		error = convert_sigevent32(&sig32, &sig);
29813858a1f4SJohn Baldwin 		if (error)
29823858a1f4SJohn Baldwin 			return (error);
29833858a1f4SJohn Baldwin 		sigp = &sig;
29843858a1f4SJohn Baldwin 	} else
29853858a1f4SJohn Baldwin 		sigp = NULL;
29863858a1f4SJohn Baldwin 
29873858a1f4SJohn Baldwin 	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
29883858a1f4SJohn Baldwin 	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
29893858a1f4SJohn Baldwin 	if (error) {
29903858a1f4SJohn Baldwin 		free(acb_list32, M_LIO);
29913858a1f4SJohn Baldwin 		return (error);
29923858a1f4SJohn Baldwin 	}
29933858a1f4SJohn Baldwin 	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
29943858a1f4SJohn Baldwin 	for (i = 0; i < nent; i++)
29953858a1f4SJohn Baldwin 		acb_list[i] = PTRIN(acb_list32[i]);
29963858a1f4SJohn Baldwin 	free(acb_list32, M_LIO);
29973858a1f4SJohn Baldwin 
29983858a1f4SJohn Baldwin 	error = kern_lio_listio(td, uap->mode,
29993858a1f4SJohn Baldwin 	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
30003858a1f4SJohn Baldwin 	    &aiocb32_ops);
30013858a1f4SJohn Baldwin 	free(acb_list, M_LIO);
30023858a1f4SJohn Baldwin 	return (error);
30033858a1f4SJohn Baldwin }
30043858a1f4SJohn Baldwin 
30053858a1f4SJohn Baldwin #endif
3006