xref: /freebsd/sys/kern/vfs_aio.c (revision 759ccccadb11320701e1dbba91639f605aa7067a)
19454b2d8SWarner Losh /*-
2ee877a35SJohn Dyson  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
3ee877a35SJohn Dyson  *
4ee877a35SJohn Dyson  * Redistribution and use in source and binary forms, with or without
5ee877a35SJohn Dyson  * modification, are permitted provided that the following conditions
6ee877a35SJohn Dyson  * are met:
7ee877a35SJohn Dyson  * 1. Redistributions of source code must retain the above copyright
8ee877a35SJohn Dyson  *    notice, this list of conditions and the following disclaimer.
9ee877a35SJohn Dyson  * 2. John S. Dyson's name may not be used to endorse or promote products
10ee877a35SJohn Dyson  *    derived from this software without specific prior written permission.
11ee877a35SJohn Dyson  *
12ee877a35SJohn Dyson  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
13ee877a35SJohn Dyson  * bad that happens because of using this software isn't the responsibility
14ee877a35SJohn Dyson  * of the author.  This software is distributed AS-IS.
15ee877a35SJohn Dyson  */
16ee877a35SJohn Dyson 
17ee877a35SJohn Dyson /*
188a6472b7SPeter Dufault  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19ee877a35SJohn Dyson  */
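/*
 * For reference, a minimal userland sketch (not part of this file, and with
 * "fd" assumed to be an already-open descriptor) of how the POSIX AIO API
 * implemented here is driven:
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <errno.h>
 *
 *	char buf[512];
 *	struct aiocb cb = {
 *		.aio_fildes = fd,
 *		.aio_buf = buf,
 *		.aio_nbytes = sizeof(buf),
 *		.aio_offset = 0,
 *	};
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;				(or block in aio_suspend())
 *	ssize_t n = aio_return(&cb);		(collects the completion status)
 *
 * Each such request enters the kernel through aio_aqueue() below and is
 * tracked as an aiocblist until its completion is collected.
 */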
20ee877a35SJohn Dyson 
21677b542eSDavid E. O'Brien #include <sys/cdefs.h>
22677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
23677b542eSDavid E. O'Brien 
24ee877a35SJohn Dyson #include <sys/param.h>
25ee877a35SJohn Dyson #include <sys/systm.h>
26f591779bSSeigo Tanimura #include <sys/malloc.h>
279626b608SPoul-Henning Kamp #include <sys/bio.h>
28a5c9bce7SBruce Evans #include <sys/buf.h>
2975b8b3b2SJohn Baldwin #include <sys/eventhandler.h>
30ee877a35SJohn Dyson #include <sys/sysproto.h>
31ee877a35SJohn Dyson #include <sys/filedesc.h>
32ee877a35SJohn Dyson #include <sys/kernel.h>
3377409fe1SPoul-Henning Kamp #include <sys/module.h>
34c9a970a7SAlan Cox #include <sys/kthread.h>
35ee877a35SJohn Dyson #include <sys/fcntl.h>
36ee877a35SJohn Dyson #include <sys/file.h>
37104a9b7eSAlexander Kabaev #include <sys/limits.h>
38fdebd4f0SBruce Evans #include <sys/lock.h>
3935e0e5b3SJohn Baldwin #include <sys/mutex.h>
40ee877a35SJohn Dyson #include <sys/unistd.h>
41ee877a35SJohn Dyson #include <sys/proc.h>
422d2f8ae7SBruce Evans #include <sys/resourcevar.h>
43ee877a35SJohn Dyson #include <sys/signalvar.h>
44bfbbc4aaSJason Evans #include <sys/protosw.h>
451ce91824SDavid Xu #include <sys/sema.h>
461ce91824SDavid Xu #include <sys/socket.h>
47bfbbc4aaSJason Evans #include <sys/socketvar.h>
4821d56e9cSAlfred Perlstein #include <sys/syscall.h>
4921d56e9cSAlfred Perlstein #include <sys/sysent.h>
50a624e84fSJohn Dyson #include <sys/sysctl.h>
51ee99e978SBruce Evans #include <sys/sx.h>
521ce91824SDavid Xu #include <sys/taskqueue.h>
53fd3bf775SJohn Dyson #include <sys/vnode.h>
54fd3bf775SJohn Dyson #include <sys/conf.h>
55cb679c38SJonathan Lemon #include <sys/event.h>
5699eee864SDavid Xu #include <sys/mount.h>
57ee877a35SJohn Dyson 
581ce91824SDavid Xu #include <machine/atomic.h>
591ce91824SDavid Xu 
60c844abc9SAlfred Perlstein #include <posix4/posix4.h>
61ee877a35SJohn Dyson #include <vm/vm.h>
62ee877a35SJohn Dyson #include <vm/vm_extern.h>
632244ea07SJohn Dyson #include <vm/pmap.h>
642244ea07SJohn Dyson #include <vm/vm_map.h>
6599eee864SDavid Xu #include <vm/vm_object.h>
66c897b813SJeff Roberson #include <vm/uma.h>
67ee877a35SJohn Dyson #include <sys/aio.h>
685aaef07cSJohn Dyson 
69dd85920aSJason Evans #include "opt_vfs_aio.h"
70ee877a35SJohn Dyson 
71eb8e6d52SEivind Eklund /*
72eb8e6d52SEivind Eklund  * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
7399eee864SDavid Xu  * overflow. (XXX will be removed soon.)
74eb8e6d52SEivind Eklund  */
7599eee864SDavid Xu static u_long jobrefid;
762244ea07SJohn Dyson 
7799eee864SDavid Xu /*
7899eee864SDavid Xu  * Counter for aio_fsync.
7999eee864SDavid Xu  */
8099eee864SDavid Xu static uint64_t jobseqno;
8199eee864SDavid Xu 
8299eee864SDavid Xu #define JOBST_NULL		0
8399eee864SDavid Xu #define JOBST_JOBQSOCK		1
8499eee864SDavid Xu #define JOBST_JOBQGLOBAL	2
8599eee864SDavid Xu #define JOBST_JOBRUNNING	3
8699eee864SDavid Xu #define JOBST_JOBFINISHED	4
8799eee864SDavid Xu #define JOBST_JOBQBUF		5
8899eee864SDavid Xu #define JOBST_JOBQSYNC		6
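
/*
 * Rough lifecycle of a job through the states above (see aio_aqueue(),
 * aio_selectjob(), aio_bio_done_notify() and aio_free_entry() below):
 * a queued request starts in one of the JOBST_JOBQ* states, becomes
 * JOBST_JOBRUNNING while a daemon (or the BIO backend) services it, is
 * parked at JOBST_JOBFINISHED on the per-process done queue until the
 * completion is collected, and is reset to JOBST_NULL when the entry is
 * freed.
 */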
892244ea07SJohn Dyson 
9084af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC
912244ea07SJohn Dyson #define MAX_AIO_PER_PROC	32
9284af4da6SJohn Dyson #endif
9384af4da6SJohn Dyson 
9484af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC
952244ea07SJohn Dyson #define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
9684af4da6SJohn Dyson #endif
9784af4da6SJohn Dyson 
9884af4da6SJohn Dyson #ifndef MAX_AIO_PROCS
99fd3bf775SJohn Dyson #define MAX_AIO_PROCS		32
10084af4da6SJohn Dyson #endif
10184af4da6SJohn Dyson 
10284af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE
1032244ea07SJohn Dyson #define	MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
10484af4da6SJohn Dyson #endif
10584af4da6SJohn Dyson 
10684af4da6SJohn Dyson #ifndef TARGET_AIO_PROCS
107bfbbc4aaSJason Evans #define TARGET_AIO_PROCS	4
10884af4da6SJohn Dyson #endif
10984af4da6SJohn Dyson 
11084af4da6SJohn Dyson #ifndef MAX_BUF_AIO
11184af4da6SJohn Dyson #define MAX_BUF_AIO		16
11284af4da6SJohn Dyson #endif
11384af4da6SJohn Dyson 
11484af4da6SJohn Dyson #ifndef AIOD_TIMEOUT_DEFAULT
11584af4da6SJohn Dyson #define	AIOD_TIMEOUT_DEFAULT	(10 * hz)
11684af4da6SJohn Dyson #endif
11784af4da6SJohn Dyson 
11884af4da6SJohn Dyson #ifndef AIOD_LIFETIME_DEFAULT
11984af4da6SJohn Dyson #define AIOD_LIFETIME_DEFAULT	(30 * hz)
12084af4da6SJohn Dyson #endif
1212244ea07SJohn Dyson 
1225ece08f5SPoul-Henning Kamp static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
123eb8e6d52SEivind Eklund 
124303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS;
125a624e84fSJohn Dyson SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
126eb8e6d52SEivind Eklund 	CTLFLAG_RW, &max_aio_procs, 0,
127eb8e6d52SEivind Eklund 	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");
128a624e84fSJohn Dyson 
129eb8e6d52SEivind Eklund static int num_aio_procs = 0;
130a624e84fSJohn Dyson SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
131eb8e6d52SEivind Eklund 	CTLFLAG_RD, &num_aio_procs, 0,
132eb8e6d52SEivind Eklund 	"Number of presently active kernel threads for async IO");
133a624e84fSJohn Dyson 
134eb8e6d52SEivind Eklund /*
135eb8e6d52SEivind Eklund  * The code will adjust the actual number of AIO processes towards this
136eb8e6d52SEivind Eklund  * number when it gets a chance.
137eb8e6d52SEivind Eklund  */
138eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
139eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
140eb8e6d52SEivind Eklund 	0, "Preferred number of ready kernel threads for async IO");
141a624e84fSJohn Dyson 
142eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
143eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
144eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
145a624e84fSJohn Dyson 
146eb8e6d52SEivind Eklund static int num_queue_count = 0;
147eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
148eb8e6d52SEivind Eklund     "Number of queued aio requests");
149a624e84fSJohn Dyson 
150eb8e6d52SEivind Eklund static int num_buf_aio = 0;
151eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
152eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
153fd3bf775SJohn Dyson 
154eb8e6d52SEivind Eklund /* Number of async I/O threads in the process of being started */
155a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
156eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
157fd3bf775SJohn Dyson 
158eb8e6d52SEivind Eklund static int aiod_timeout;
159eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
160eb8e6d52SEivind Eklund     "Timeout value for synchronous aio operations");
16184af4da6SJohn Dyson 
162eb8e6d52SEivind Eklund static int aiod_lifetime;
163eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
164eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
16584af4da6SJohn Dyson 
166eb8e6d52SEivind Eklund static int unloadable = 0;
16721d56e9cSAlfred Perlstein SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
16821d56e9cSAlfred Perlstein     "Allow unload of aio (not recommended)");
16921d56e9cSAlfred Perlstein 
170eb8e6d52SEivind Eklund 
171eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
172eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
173eb8e6d52SEivind Eklund     0, "Maximum active aio requests per process (stored in the process)");
174eb8e6d52SEivind Eklund 
175eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
176eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
177eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
178eb8e6d52SEivind Eklund     "Maximum queued aio requests per process (stored in the process)");
179eb8e6d52SEivind Eklund 
180eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
181eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
182eb8e6d52SEivind Eklund     "Maximum buf aio requests per process (stored in the process)");
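
/*
 * The knobs above appear under the vfs.aio sysctl tree (for example
 * vfs.aio.max_aio_procs and vfs.aio.num_queue_count).  A hedged userland
 * sketch for inspecting one of them with sysctlbyname(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int queued;
 *	size_t len = sizeof(queued);
 *	if (sysctlbyname("vfs.aio.num_queue_count", &queued, &len,
 *	    NULL, 0) == 0)
 *		printf("%d aio requests currently queued\n", queued);
 */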
183eb8e6d52SEivind Eklund 
1840972628aSDavid Xu typedef struct oaiocb {
1850972628aSDavid Xu 	int	aio_fildes;		/* File descriptor */
1860972628aSDavid Xu 	off_t	aio_offset;		/* File offset for I/O */
1870972628aSDavid Xu 	volatile void *aio_buf;         /* I/O buffer in process space */
1880972628aSDavid Xu 	size_t	aio_nbytes;		/* Number of bytes for I/O */
1890972628aSDavid Xu 	struct	osigevent aio_sigevent;	/* Signal to deliver */
1900972628aSDavid Xu 	int	aio_lio_opcode;		/* LIO opcode */
1910972628aSDavid Xu 	int	aio_reqprio;		/* Request priority -- ignored */
1920972628aSDavid Xu 	struct	__aiocb_private	_aiocb_private;
1930972628aSDavid Xu } oaiocb_t;
1940972628aSDavid Xu 
1951aa4c324SDavid Xu /*
1961aa4c324SDavid Xu  * Below is a key of locks used to protect each member of struct aiocblist
1971aa4c324SDavid Xu  * aioliojob and kaioinfo and any backends.
1981aa4c324SDavid Xu  *
1991aa4c324SDavid Xu  * * - need not be protected
200759ccccaSDavid Xu  * a - locked by kaioinfo lock
2011aa4c324SDavid Xu  * b - locked by the backend lock; the backend lock can be null in some
2021aa4c324SDavid Xu  *     cases (for example, BIO belongs to this type), in which case the
2031aa4c324SDavid Xu  *     proc lock is reused.
2041aa4c324SDavid Xu  * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
2051aa4c324SDavid Xu  */
2061aa4c324SDavid Xu 
2071aa4c324SDavid Xu /*
2081aa4c324SDavid Xu  * Currently there are only two backends: BIO and generic file I/O.
2091aa4c324SDavid Xu  * Socket I/O is served by the generic file I/O backend; this is not a good
2101aa4c324SDavid Xu  * idea, since disk file I/O and any other type opened without O_NONBLOCK
2111aa4c324SDavid Xu  * can block the daemon threads, and if no thread is left to serve socket
2121aa4c324SDavid Xu  * I/O, the socket I/O will be delayed too long or starved.  We should
2131aa4c324SDavid Xu  * create threads dedicated to sockets for non-blocking I/O, and likewise
2141aa4c324SDavid Xu  * for pipes and fifos; for these I/O systems we really need a non-blocking
2151aa4c324SDavid Xu  * interface.  Fiddling with O_NONBLOCK in the file structure is not safe
2161aa4c324SDavid Xu  * because there is a race between userland and the aio daemons.
2171aa4c324SDavid Xu  */
2181aa4c324SDavid Xu 
21948dac059SAlan Cox struct aiocblist {
2201aa4c324SDavid Xu 	TAILQ_ENTRY(aiocblist) list;	/* (b) internal list for the backend */
2211aa4c324SDavid Xu 	TAILQ_ENTRY(aiocblist) plist;	/* (a) list of jobs for each backend */
2221aa4c324SDavid Xu 	TAILQ_ENTRY(aiocblist) allist;  /* (a) list of all jobs in proc */
2231aa4c324SDavid Xu 	int	jobflags;		/* (a) job flags */
2241aa4c324SDavid Xu 	int	jobstate;		/* (b) job state */
2251aa4c324SDavid Xu 	int	inputcharge;		/* (*) input blocks */
2261aa4c324SDavid Xu 	int	outputcharge;		/* (*) output blocks */
2271aa4c324SDavid Xu 	struct	buf *bp;		/* (*) private to BIO backend,
2281aa4c324SDavid Xu 				  	 * buffer pointer
2291aa4c324SDavid Xu 					 */
2301aa4c324SDavid Xu 	struct	proc *userproc;		/* (*) user process */
2311aa4c324SDavid Xu 	struct  ucred *cred;		/* (*) active credential when created */
2321aa4c324SDavid Xu 	struct	file *fd_file;		/* (*) pointer to file structure */
2331aa4c324SDavid Xu 	struct	aioliojob *lio;		/* (*) optional lio job */
2341aa4c324SDavid Xu 	struct	aiocb *uuaiocb;		/* (*) pointer in userspace of aiocb */
2351aa4c324SDavid Xu 	struct	knlist klist;		/* (a) list of knotes */
2361aa4c324SDavid Xu 	struct	aiocb uaiocb;		/* (*) kernel I/O control block */
2371aa4c324SDavid Xu 	ksiginfo_t ksi;			/* (a) realtime signal info */
2381aa4c324SDavid Xu 	struct	task biotask;		/* (*) private to BIO backend */
23999eee864SDavid Xu 	uint64_t seqno;			/* (*) job number */
24099eee864SDavid Xu 	int	pending;		/* (a) number of pending I/O, aio_fsync only */
24148dac059SAlan Cox };
24248dac059SAlan Cox 
24348dac059SAlan Cox /* jobflags */
24499eee864SDavid Xu #define AIOCBLIST_DONE		0x01
24599eee864SDavid Xu #define AIOCBLIST_BUFDONE	0x02
2461ce91824SDavid Xu #define AIOCBLIST_RUNDOWN	0x04
24799eee864SDavid Xu #define AIOCBLIST_CHECKSYNC	0x08
24848dac059SAlan Cox 
2492244ea07SJohn Dyson /*
2502244ea07SJohn Dyson  * AIO process info
2512244ea07SJohn Dyson  */
25284af4da6SJohn Dyson #define AIOP_FREE	0x1			/* proc on free queue */
25384af4da6SJohn Dyson 
254b40ce416SJulian Elischer struct aiothreadlist {
2551aa4c324SDavid Xu 	int aiothreadflags;			/* (c) AIO proc flags */
2561aa4c324SDavid Xu 	TAILQ_ENTRY(aiothreadlist) list;	/* (c) list of processes */
2571aa4c324SDavid Xu 	struct thread *aiothread;		/* (*) the AIO thread */
2582244ea07SJohn Dyson };
2592244ea07SJohn Dyson 
26084af4da6SJohn Dyson /*
26184af4da6SJohn Dyson  * data-structure for lio signal management
26284af4da6SJohn Dyson  */
2631ce91824SDavid Xu struct aioliojob {
2641aa4c324SDavid Xu 	int	lioj_flags;			/* (a) listio flags */
2651aa4c324SDavid Xu 	int	lioj_count;			/* (a) count of jobs in this lio */
2661aa4c324SDavid Xu 	int	lioj_finished_count;		/* (a) count of finished jobs */
2671aa4c324SDavid Xu 	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
2681aa4c324SDavid Xu 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
2691aa4c324SDavid Xu 	struct  knlist klist;			/* (a) list of knotes */
2701aa4c324SDavid Xu 	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
27184af4da6SJohn Dyson };
2721ce91824SDavid Xu 
27384af4da6SJohn Dyson #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
27484af4da6SJohn Dyson #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
27569cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
27684af4da6SJohn Dyson 
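/*
 * A hedged userland sketch of the list I/O interface that aioliojob backs:
 * lio_listio(2) submits a batch of aiocbs and, with LIO_WAIT, blocks until
 * the whole batch has finished:
 *
 *	struct aiocb *list[2] = { &cb0, &cb1 };	(prepared as for aio_read())
 *	cb0.aio_lio_opcode = LIO_READ;
 *	cb1.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *		err(1, "lio_listio");
 *
 * With LIO_NOWAIT, the final argument may carry a struct sigevent that is
 * delivered once every job in the batch has completed (LIOJ_SIGNAL above).
 */
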
27784af4da6SJohn Dyson /*
27884af4da6SJohn Dyson  * per process aio data structure
27984af4da6SJohn Dyson  */
2802244ea07SJohn Dyson struct kaioinfo {
281759ccccaSDavid Xu 	struct mtx	kaio_mtx;	/* the lock to protect this struct */
2821aa4c324SDavid Xu 	int	kaio_flags;		/* (a) per process kaio flags */
2831aa4c324SDavid Xu 	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
2841aa4c324SDavid Xu 	int	kaio_active_count;	/* (c) number of currently used AIOs */
2851aa4c324SDavid Xu 	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
2861aa4c324SDavid Xu 	int	kaio_count;		/* (a) size of AIO queue */
2871aa4c324SDavid Xu 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
2881aa4c324SDavid Xu 	int	kaio_buffer_count;	/* (a) number of physio buffers */
2891aa4c324SDavid Xu 	TAILQ_HEAD(,aiocblist) kaio_all;	/* (a) all AIOs in the process */
2901aa4c324SDavid Xu 	TAILQ_HEAD(,aiocblist) kaio_done;	/* (a) done queue for process */
2911aa4c324SDavid Xu 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
2921aa4c324SDavid Xu 	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
2931aa4c324SDavid Xu 	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue for process */
2941aa4c324SDavid Xu 	TAILQ_HEAD(,aiocblist) kaio_sockqueue;  /* (a) queue for aios waiting on sockets,
29599eee864SDavid Xu 						 *  NOT USED YET.
2961aa4c324SDavid Xu 						 */
29799eee864SDavid Xu 	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
29899eee864SDavid Xu 	struct	task	kaio_task;	/* (*) task to kick aio threads */
2992244ea07SJohn Dyson };
3002244ea07SJohn Dyson 
301759ccccaSDavid Xu #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
302759ccccaSDavid Xu #define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
303759ccccaSDavid Xu #define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
304759ccccaSDavid Xu #define AIO_MTX(ki)		(&(ki)->kaio_mtx)
305759ccccaSDavid Xu 
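/*
 * Canonical use of the per-process lock: everything marked (a) in the key
 * above is only touched between AIO_LOCK() and AIO_UNLOCK() on the owning
 * process' kaioinfo.  A sketch of the pattern used by aio_daemon() when it
 * retires a finished job:
 *
 *	AIO_LOCK(ki);
 *	TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
 *	aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
 *	AIO_UNLOCK(ki);
 */
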
30684af4da6SJohn Dyson #define KAIO_RUNDOWN	0x1	/* process is being run down */
307bfbbc4aaSJason Evans #define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
308fd3bf775SJohn Dyson 
3091aa4c324SDavid Xu static TAILQ_HEAD(,aiothreadlist) aio_freeproc;		/* (c) Idle daemons */
3101ce91824SDavid Xu static struct sema aio_newproc_sem;
3111ce91824SDavid Xu static struct mtx aio_job_mtx;
3121ce91824SDavid Xu static struct mtx aio_sock_mtx;
3131aa4c324SDavid Xu static TAILQ_HEAD(,aiocblist) aio_jobs;			/* (c) Async job list */
3141ce91824SDavid Xu static struct unrhdr *aiod_unr;
3152244ea07SJohn Dyson 
316fd3bf775SJohn Dyson static void	aio_init_aioinfo(struct proc *p);
31721d56e9cSAlfred Perlstein static void	aio_onceonly(void);
318fd3bf775SJohn Dyson static int	aio_free_entry(struct aiocblist *aiocbe);
319fd3bf775SJohn Dyson static void	aio_process(struct aiocblist *aiocbe);
3201ce91824SDavid Xu static int	aio_newproc(int *);
321a9bf5e37SDavid Xu static int	aio_aqueue(struct thread *td, struct aiocb *job,
322dbbccfe9SDavid Xu 			struct aioliojob *lio, int type, int osigev);
323fd3bf775SJohn Dyson static void	aio_physwakeup(struct buf *bp);
32475b8b3b2SJohn Baldwin static void	aio_proc_rundown(void *arg, struct proc *p);
325fd3bf775SJohn Dyson static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
3261ce91824SDavid Xu static void	biohelper(void *, int);
3271ce91824SDavid Xu static void	aio_daemon(void *param);
32848dac059SAlan Cox static void	aio_swake_cb(struct socket *, struct sockbuf *);
32921d56e9cSAlfred Perlstein static int	aio_unload(void);
33099eee864SDavid Xu static void	aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type);
33199eee864SDavid Xu #define DONE_BUF	1
33299eee864SDavid Xu #define DONE_QUEUE	2
33399eee864SDavid Xu static int	do_lio_listio(struct thread *td, struct lio_listio_args *uap, int oldsigev);
334dbbccfe9SDavid Xu static int	aio_kick(struct proc *userp);
33599eee864SDavid Xu static void	aio_kick_nowait(struct proc *userp);
33699eee864SDavid Xu static void	aio_kick_helper(void *context, int pending);
33721d56e9cSAlfred Perlstein static int	filt_aioattach(struct knote *kn);
33821d56e9cSAlfred Perlstein static void	filt_aiodetach(struct knote *kn);
33921d56e9cSAlfred Perlstein static int	filt_aio(struct knote *kn, long hint);
34069cd28daSDoug Ambrisko static int	filt_lioattach(struct knote *kn);
34169cd28daSDoug Ambrisko static void	filt_liodetach(struct knote *kn);
34269cd28daSDoug Ambrisko static int	filt_lio(struct knote *kn, long hint);
3432244ea07SJohn Dyson 
344eb8e6d52SEivind Eklund /*
345eb8e6d52SEivind Eklund  * Zones for:
346eb8e6d52SEivind Eklund  * 	kaio	Per process async io info
347eb8e6d52SEivind Eklund  *	aiop	async io thread data
348eb8e6d52SEivind Eklund  *	aiocb	async io jobs
349eb8e6d52SEivind Eklund  *	aiol	list io job pointer - internal to aio_suspend XXX
350eb8e6d52SEivind Eklund  *	aiolio	list io jobs
351eb8e6d52SEivind Eklund  */
352c897b813SJeff Roberson static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
353fd3bf775SJohn Dyson 
354eb8e6d52SEivind Eklund /* kqueue filters for aio */
35521d56e9cSAlfred Perlstein static struct filterops aio_filtops =
35621d56e9cSAlfred Perlstein 	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
35769cd28daSDoug Ambrisko static struct filterops lio_filtops =
35869cd28daSDoug Ambrisko 	{ 0, filt_lioattach, filt_liodetach, filt_lio };
35921d56e9cSAlfred Perlstein 
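/*
 * The filters above back kqueue(2) completion notification.  A hedged
 * userland sketch: the request asks for SIGEV_KEVENT delivery, and the
 * completed aiocb comes back as the kevent ident with sigev_value in udata:
 *
 *	int kq = kqueue();
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	aio_read(&cb);
 *
 *	struct kevent ev;
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *		aio_return((struct aiocb *)ev.ident);
 */
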
36075b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag;
36175b8b3b2SJohn Baldwin 
3621ce91824SDavid Xu TASKQUEUE_DEFINE_THREAD(aiod_bio);
3631ce91824SDavid Xu 
364eb8e6d52SEivind Eklund /*
365eb8e6d52SEivind Eklund  * Main operations function for use as a kernel module.
366eb8e6d52SEivind Eklund  */
36721d56e9cSAlfred Perlstein static int
36821d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg)
36921d56e9cSAlfred Perlstein {
37021d56e9cSAlfred Perlstein 	int error = 0;
37121d56e9cSAlfred Perlstein 
37221d56e9cSAlfred Perlstein 	switch (cmd) {
37321d56e9cSAlfred Perlstein 	case MOD_LOAD:
37421d56e9cSAlfred Perlstein 		aio_onceonly();
37521d56e9cSAlfred Perlstein 		break;
37621d56e9cSAlfred Perlstein 	case MOD_UNLOAD:
37721d56e9cSAlfred Perlstein 		error = aio_unload();
37821d56e9cSAlfred Perlstein 		break;
37921d56e9cSAlfred Perlstein 	case MOD_SHUTDOWN:
38021d56e9cSAlfred Perlstein 		break;
38121d56e9cSAlfred Perlstein 	default:
38221d56e9cSAlfred Perlstein 		error = EINVAL;
38321d56e9cSAlfred Perlstein 		break;
38421d56e9cSAlfred Perlstein 	}
38521d56e9cSAlfred Perlstein 	return (error);
38621d56e9cSAlfred Perlstein }
38721d56e9cSAlfred Perlstein 
38821d56e9cSAlfred Perlstein static moduledata_t aio_mod = {
38921d56e9cSAlfred Perlstein 	"aio",
39021d56e9cSAlfred Perlstein 	&aio_modload,
39121d56e9cSAlfred Perlstein 	NULL
39221d56e9cSAlfred Perlstein };
39321d56e9cSAlfred Perlstein 
39421d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_cancel);
39521d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_error);
39699eee864SDavid Xu SYSCALL_MODULE_HELPER(aio_fsync);
39721d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_read);
39899eee864SDavid Xu SYSCALL_MODULE_HELPER(aio_return);
39999eee864SDavid Xu SYSCALL_MODULE_HELPER(aio_suspend);
40021d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_waitcomplete);
40199eee864SDavid Xu SYSCALL_MODULE_HELPER(aio_write);
40221d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(lio_listio);
4030972628aSDavid Xu SYSCALL_MODULE_HELPER(oaio_read);
4040972628aSDavid Xu SYSCALL_MODULE_HELPER(oaio_write);
4050972628aSDavid Xu SYSCALL_MODULE_HELPER(olio_listio);
40621d56e9cSAlfred Perlstein 
40721d56e9cSAlfred Perlstein DECLARE_MODULE(aio, aio_mod,
40821d56e9cSAlfred Perlstein 	SI_SUB_VFS, SI_ORDER_ANY);
40921d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1);
41021d56e9cSAlfred Perlstein 
411fd3bf775SJohn Dyson /*
4122244ea07SJohn Dyson  * Startup initialization
4132244ea07SJohn Dyson  */
41488ed460eSAlan Cox static void
41521d56e9cSAlfred Perlstein aio_onceonly(void)
416fd3bf775SJohn Dyson {
41721d56e9cSAlfred Perlstein 
41821d56e9cSAlfred Perlstein 	/* XXX: should probably just use so->callback */
41921d56e9cSAlfred Perlstein 	aio_swake = &aio_swake_cb;
42075b8b3b2SJohn Baldwin 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
42175b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
42275b8b3b2SJohn Baldwin 	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown, NULL,
42375b8b3b2SJohn Baldwin 	    EVENTHANDLER_PRI_ANY);
42421d56e9cSAlfred Perlstein 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
42569cd28daSDoug Ambrisko 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
4262244ea07SJohn Dyson 	TAILQ_INIT(&aio_freeproc);
4271ce91824SDavid Xu 	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
4281ce91824SDavid Xu 	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
4291ce91824SDavid Xu 	mtx_init(&aio_sock_mtx, "aio_sock", NULL, MTX_DEF);
4302244ea07SJohn Dyson 	TAILQ_INIT(&aio_jobs);
4311ce91824SDavid Xu 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
432c897b813SJeff Roberson 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
433c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
434c897b813SJeff Roberson 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
435c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
436c897b813SJeff Roberson 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
437c897b813SJeff Roberson 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
438c897b813SJeff Roberson 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
439c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4401ce91824SDavid Xu 	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
441c897b813SJeff Roberson 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
44284af4da6SJohn Dyson 	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
44384af4da6SJohn Dyson 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
444fd3bf775SJohn Dyson 	jobrefid = 1;
445c7047e52SGarrett Wollman 	async_io_version = _POSIX_VERSION;
446c844abc9SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
44786d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
44886d52125SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
4492244ea07SJohn Dyson }
4502244ea07SJohn Dyson 
451eb8e6d52SEivind Eklund /*
452eb8e6d52SEivind Eklund  * Callback for unload of AIO when used as a module.
453eb8e6d52SEivind Eklund  */
45421d56e9cSAlfred Perlstein static int
45521d56e9cSAlfred Perlstein aio_unload(void)
45621d56e9cSAlfred Perlstein {
457ad3b9257SJohn-Mark Gurney 	int error;
45821d56e9cSAlfred Perlstein 
45921d56e9cSAlfred Perlstein 	/*
46021d56e9cSAlfred Perlstein 	 * XXX: no unloads by default, it's too dangerous.
46121d56e9cSAlfred Perlstein 	 * perhaps we could do it if we locked out callers and then
46221d56e9cSAlfred Perlstein 	 * did an aio_proc_rundown() on each process.
4632a522eb9SJohn Baldwin 	 *
4642a522eb9SJohn Baldwin 	 * jhb: aio_proc_rundown() needs to run on curproc though,
4652a522eb9SJohn Baldwin 	 * so I don't think that would fly.
46621d56e9cSAlfred Perlstein 	 */
46721d56e9cSAlfred Perlstein 	if (!unloadable)
46821d56e9cSAlfred Perlstein 		return (EOPNOTSUPP);
46921d56e9cSAlfred Perlstein 
470ad3b9257SJohn-Mark Gurney 	error = kqueue_del_filteropts(EVFILT_AIO);
471ad3b9257SJohn-Mark Gurney 	if (error)
472ad3b9257SJohn-Mark Gurney 		return error;
473bd793be3SDavid Xu 	error = kqueue_del_filteropts(EVFILT_LIO);
474bd793be3SDavid Xu 	if (error)
475bd793be3SDavid Xu 		return error;
476c7047e52SGarrett Wollman 	async_io_version = 0;
47721d56e9cSAlfred Perlstein 	aio_swake = NULL;
4781ce91824SDavid Xu 	taskqueue_free(taskqueue_aiod_bio);
4791ce91824SDavid Xu 	delete_unrhdr(aiod_unr);
480bd793be3SDavid Xu 	uma_zdestroy(kaio_zone);
481bd793be3SDavid Xu 	uma_zdestroy(aiop_zone);
482bd793be3SDavid Xu 	uma_zdestroy(aiocb_zone);
483bd793be3SDavid Xu 	uma_zdestroy(aiol_zone);
484bd793be3SDavid Xu 	uma_zdestroy(aiolio_zone);
48575b8b3b2SJohn Baldwin 	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
48675b8b3b2SJohn Baldwin 	EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
4871ce91824SDavid Xu 	mtx_destroy(&aio_job_mtx);
4881ce91824SDavid Xu 	mtx_destroy(&aio_sock_mtx);
4891ce91824SDavid Xu 	sema_destroy(&aio_newproc_sem);
490f51c1e89SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
491f51c1e89SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
492f51c1e89SAlfred Perlstein 	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
49321d56e9cSAlfred Perlstein 	return (0);
49421d56e9cSAlfred Perlstein }
49521d56e9cSAlfred Perlstein 
4962244ea07SJohn Dyson /*
497bfbbc4aaSJason Evans  * Init the per-process aioinfo structure.  The aioinfo limits are set
498bfbbc4aaSJason Evans  * per-process for user limit (resource) management.
4992244ea07SJohn Dyson  */
50088ed460eSAlan Cox static void
501fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p)
502fd3bf775SJohn Dyson {
5032244ea07SJohn Dyson 	struct kaioinfo *ki;
504ac41f2efSAlfred Perlstein 
505a163d034SWarner Losh 	ki = uma_zalloc(kaio_zone, M_WAITOK);
506759ccccaSDavid Xu 	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF);
50784af4da6SJohn Dyson 	ki->kaio_flags = 0;
508a624e84fSJohn Dyson 	ki->kaio_maxactive_count = max_aio_per_proc;
5092244ea07SJohn Dyson 	ki->kaio_active_count = 0;
510a624e84fSJohn Dyson 	ki->kaio_qallowed_count = max_aio_queue_per_proc;
5111ce91824SDavid Xu 	ki->kaio_count = 0;
51284af4da6SJohn Dyson 	ki->kaio_ballowed_count = max_buf_aio;
513fd3bf775SJohn Dyson 	ki->kaio_buffer_count = 0;
5141ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_all);
5151ce91824SDavid Xu 	TAILQ_INIT(&ki->kaio_done);
5162244ea07SJohn Dyson 	TAILQ_INIT(&ki->kaio_jobqueue);
517fd3bf775SJohn Dyson 	TAILQ_INIT(&ki->kaio_bufqueue);
51884af4da6SJohn Dyson 	TAILQ_INIT(&ki->kaio_liojoblist);
519bfbbc4aaSJason Evans 	TAILQ_INIT(&ki->kaio_sockqueue);
52099eee864SDavid Xu 	TAILQ_INIT(&ki->kaio_syncqueue);
52199eee864SDavid Xu 	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
5223999ebe3SAlan Cox 	PROC_LOCK(p);
5233999ebe3SAlan Cox 	if (p->p_aioinfo == NULL) {
5243999ebe3SAlan Cox 		p->p_aioinfo = ki;
5253999ebe3SAlan Cox 		PROC_UNLOCK(p);
5263999ebe3SAlan Cox 	} else {
5273999ebe3SAlan Cox 		PROC_UNLOCK(p);
528759ccccaSDavid Xu 		mtx_destroy(&ki->kaio_mtx);
5293999ebe3SAlan Cox 		uma_zfree(kaio_zone, ki);
5302244ea07SJohn Dyson 	}
531bfbbc4aaSJason Evans 
532bfbbc4aaSJason Evans 	while (num_aio_procs < target_aio_procs)
5331ce91824SDavid Xu 		aio_newproc(NULL);
5342244ea07SJohn Dyson }
5352244ea07SJohn Dyson 
5364c0fb2cfSDavid Xu static int
5374c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
5384c0fb2cfSDavid Xu {
539759ccccaSDavid Xu 	int ret = 0;
540759ccccaSDavid Xu 
541759ccccaSDavid Xu 	PROC_LOCK(p);
5424c0fb2cfSDavid Xu 	if (!KSI_ONQ(ksi)) {
5434c0fb2cfSDavid Xu 		ksi->ksi_code = SI_ASYNCIO;
5444c0fb2cfSDavid Xu 		ksi->ksi_flags |= KSI_EXT | KSI_INS;
545759ccccaSDavid Xu 		ret = psignal_event(p, sigev, ksi);
5464c0fb2cfSDavid Xu 	}
547759ccccaSDavid Xu 	PROC_UNLOCK(p);
548759ccccaSDavid Xu 	return (ret);
5494c0fb2cfSDavid Xu }
5504c0fb2cfSDavid Xu 
5512244ea07SJohn Dyson /*
552bfbbc4aaSJason Evans  * Free a job entry.  Wait for completion if it is currently active, but don't
553bfbbc4aaSJason Evans  * delay forever.  If we delay, we return a flag that says that we have to
554bfbbc4aaSJason Evans  * restart the queue scan.
5552244ea07SJohn Dyson  */
55688ed460eSAlan Cox static int
557fd3bf775SJohn Dyson aio_free_entry(struct aiocblist *aiocbe)
558fd3bf775SJohn Dyson {
5592244ea07SJohn Dyson 	struct kaioinfo *ki;
5601ce91824SDavid Xu 	struct aioliojob *lj;
5612244ea07SJohn Dyson 	struct proc *p;
5622244ea07SJohn Dyson 
5632244ea07SJohn Dyson 	p = aiocbe->userproc;
5641ce91824SDavid Xu 	MPASS(curproc == p);
5652244ea07SJohn Dyson 	ki = p->p_aioinfo;
5661ce91824SDavid Xu 	MPASS(ki != NULL);
5671ce91824SDavid Xu 
568759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
569759ccccaSDavid Xu 	MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);
570759ccccaSDavid Xu 
5711ce91824SDavid Xu 	atomic_subtract_int(&num_queue_count, 1);
5721ce91824SDavid Xu 
5731ce91824SDavid Xu 	ki->kaio_count--;
5741ce91824SDavid Xu 	MPASS(ki->kaio_count >= 0);
5751ce91824SDavid Xu 
57627b8220dSDavid Xu 	TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
57727b8220dSDavid Xu 	TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
57827b8220dSDavid Xu 
57984af4da6SJohn Dyson 	lj = aiocbe->lio;
58084af4da6SJohn Dyson 	if (lj) {
5811ce91824SDavid Xu 		lj->lioj_count--;
5821ce91824SDavid Xu 		lj->lioj_finished_count--;
5831ce91824SDavid Xu 
584a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
5851ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
5861ce91824SDavid Xu 			/* lio is going away, we need to destroy any knotes */
5871ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
588759ccccaSDavid Xu 			PROC_LOCK(p);
5891ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
590759ccccaSDavid Xu 			PROC_UNLOCK(p);
5911ce91824SDavid Xu 			uma_zfree(aiolio_zone, lj);
59284af4da6SJohn Dyson 		}
59384af4da6SJohn Dyson 	}
5941ce91824SDavid Xu 
595cb679c38SJonathan Lemon 	/* aiocbe is going away, we need to destroy any knotes */
5961ce91824SDavid Xu 	knlist_delete(&aiocbe->klist, curthread, 1);
597759ccccaSDavid Xu 	PROC_LOCK(p);
5981ce91824SDavid Xu 	sigqueue_take(&aiocbe->ksi);
599759ccccaSDavid Xu 	PROC_UNLOCK(p);
6001ce91824SDavid Xu 
6011ce91824SDavid Xu 	MPASS(aiocbe->bp == NULL);
6021ce91824SDavid Xu 	aiocbe->jobstate = JOBST_NULL;
603759ccccaSDavid Xu 	AIO_UNLOCK(ki);
6042a522eb9SJohn Baldwin 
6052a522eb9SJohn Baldwin 	/*
6062a522eb9SJohn Baldwin 	 * The thread argument here is used to find the owning process
6072a522eb9SJohn Baldwin 	 * and is also passed to fo_close() which may pass it to various
6082a522eb9SJohn Baldwin 	 * places such as devsw close() routines.  Because of that, we
6092a522eb9SJohn Baldwin 	 * need a thread pointer from the process owning the job that is
6102a522eb9SJohn Baldwin 	 * persistent and won't disappear out from under us or move to
6112a522eb9SJohn Baldwin 	 * another process.
6122a522eb9SJohn Baldwin 	 *
6132a522eb9SJohn Baldwin 	 * Currently, all the callers of this function call it to remove
6142a522eb9SJohn Baldwin 	 * an aiocblist from the current process' job list either via a
6152a522eb9SJohn Baldwin 	 * syscall or due to the current process calling exit() or
6162a522eb9SJohn Baldwin 	 * execve().  Thus, we know that p == curproc.  We also know that
6172a522eb9SJohn Baldwin 	 * curthread can't exit since we are curthread.
6182a522eb9SJohn Baldwin 	 *
6192a522eb9SJohn Baldwin 	 * Therefore, we use curthread as the thread to pass to
6202a522eb9SJohn Baldwin 	 * knlist_delete().  This does mean that it is possible for the
6212a522eb9SJohn Baldwin 	 * thread pointer at close time to differ from the thread pointer
6222a522eb9SJohn Baldwin 	 * at open time, but this is already true of file descriptors in
6232a522eb9SJohn Baldwin 	 * a multithreaded process.
624b40ce416SJulian Elischer 	 */
625a5c0b1c0SAlan Cox 	fdrop(aiocbe->fd_file, curthread);
626f8f750c5SRobert Watson 	crfree(aiocbe->cred);
627c897b813SJeff Roberson 	uma_zfree(aiocb_zone, aiocbe);
628759ccccaSDavid Xu 	AIO_LOCK(ki);
6291ce91824SDavid Xu 
630ac41f2efSAlfred Perlstein 	return (0);
6312244ea07SJohn Dyson }
6322244ea07SJohn Dyson 
6332244ea07SJohn Dyson /*
6342244ea07SJohn Dyson  * Rundown the jobs for a given process.
6352244ea07SJohn Dyson  */
63621d56e9cSAlfred Perlstein static void
63775b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p)
638fd3bf775SJohn Dyson {
6392244ea07SJohn Dyson 	struct kaioinfo *ki;
6401ce91824SDavid Xu 	struct aioliojob *lj;
6411ce91824SDavid Xu 	struct aiocblist *cbe, *cbn;
642bfbbc4aaSJason Evans 	struct file *fp;
643bfbbc4aaSJason Evans 	struct socket *so;
6441aa4c324SDavid Xu 	int remove;
6452244ea07SJohn Dyson 
6462a522eb9SJohn Baldwin 	KASSERT(curthread->td_proc == p,
6472a522eb9SJohn Baldwin 	    ("%s: called on non-curproc", __func__));
6482244ea07SJohn Dyson 	ki = p->p_aioinfo;
6492244ea07SJohn Dyson 	if (ki == NULL)
6502244ea07SJohn Dyson 		return;
6512244ea07SJohn Dyson 
652759ccccaSDavid Xu 	AIO_LOCK(ki);
65327b8220dSDavid Xu 	ki->kaio_flags |= KAIO_RUNDOWN;
6541ce91824SDavid Xu 
6551ce91824SDavid Xu restart:
656a624e84fSJohn Dyson 
657bfbbc4aaSJason Evans 	/*
6581ce91824SDavid Xu 	 * Try to cancel all pending requests. This code simulates
6591ce91824SDavid Xu 	 * aio_cancel on all pending I/O requests.
660bfbbc4aaSJason Evans 	 */
6611ce91824SDavid Xu 	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
6621aa4c324SDavid Xu 		remove = 0;
6631ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
6641ce91824SDavid Xu 		if (cbe->jobstate == JOBST_JOBQGLOBAL) {
6651ce91824SDavid Xu 			TAILQ_REMOVE(&aio_jobs, cbe, list);
6661aa4c324SDavid Xu 			remove = 1;
6671aa4c324SDavid Xu 		} else if (cbe->jobstate == JOBST_JOBQSOCK) {
6681aa4c324SDavid Xu 			fp = cbe->fd_file;
6691aa4c324SDavid Xu 			MPASS(fp->f_type == DTYPE_SOCKET);
6701aa4c324SDavid Xu 			so = fp->f_data;
6711aa4c324SDavid Xu 			TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
6721aa4c324SDavid Xu 			remove = 1;
67399eee864SDavid Xu 		} else if (cbe->jobstate == JOBST_JOBQSYNC) {
67499eee864SDavid Xu 			TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
67599eee864SDavid Xu 			remove = 1;
6761aa4c324SDavid Xu 		}
6771ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
6781aa4c324SDavid Xu 
6791aa4c324SDavid Xu 		if (remove) {
6801ce91824SDavid Xu 			cbe->jobstate = JOBST_JOBFINISHED;
6811ce91824SDavid Xu 			cbe->uaiocb._aiocb_private.status = -1;
6821ce91824SDavid Xu 			cbe->uaiocb._aiocb_private.error = ECANCELED;
6831ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
6841ce91824SDavid Xu 			aio_bio_done_notify(p, cbe, DONE_QUEUE);
6851ce91824SDavid Xu 		}
6862244ea07SJohn Dyson 	}
68784af4da6SJohn Dyson 
6881ce91824SDavid Xu 	/* Wait for all running I/O to be finished */
6891ce91824SDavid Xu 	if (TAILQ_FIRST(&ki->kaio_bufqueue) ||
6901ce91824SDavid Xu 	    TAILQ_FIRST(&ki->kaio_jobqueue)) {
69184af4da6SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
692759ccccaSDavid Xu 		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
6931ce91824SDavid Xu 		goto restart;
69484af4da6SJohn Dyson 	}
69584af4da6SJohn Dyson 
6961ce91824SDavid Xu 	/* Free all completed I/O requests. */
6971ce91824SDavid Xu 	while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
6981ce91824SDavid Xu 		aio_free_entry(cbe);
69984af4da6SJohn Dyson 
7001ce91824SDavid Xu 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
701a9bf5e37SDavid Xu 		if (lj->lioj_count == 0) {
70284af4da6SJohn Dyson 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
7031ce91824SDavid Xu 			knlist_delete(&lj->klist, curthread, 1);
704759ccccaSDavid Xu 			PROC_LOCK(p);
7051ce91824SDavid Xu 			sigqueue_take(&lj->lioj_ksi);
706759ccccaSDavid Xu 			PROC_UNLOCK(p);
707c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
708f4f0ecefSJohn Dyson 		} else {
709a9bf5e37SDavid Xu 			panic("LIO job not cleaned up: C:%d, FC:%d\n",
710a9bf5e37SDavid Xu 			    lj->lioj_count, lj->lioj_finished_count);
71184af4da6SJohn Dyson 		}
712f4f0ecefSJohn Dyson 	}
713759ccccaSDavid Xu 	AIO_UNLOCK(ki);
71499eee864SDavid Xu 	taskqueue_drain(taskqueue_aiod_bio, &ki->kaio_task);
715c897b813SJeff Roberson 	uma_zfree(kaio_zone, ki);
716a624e84fSJohn Dyson 	p->p_aioinfo = NULL;
7172244ea07SJohn Dyson }
7182244ea07SJohn Dyson 
7192244ea07SJohn Dyson /*
720bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
7212244ea07SJohn Dyson  */
7222244ea07SJohn Dyson static struct aiocblist *
723b40ce416SJulian Elischer aio_selectjob(struct aiothreadlist *aiop)
724fd3bf775SJohn Dyson {
7252244ea07SJohn Dyson 	struct aiocblist *aiocbe;
726bfbbc4aaSJason Evans 	struct kaioinfo *ki;
727bfbbc4aaSJason Evans 	struct proc *userp;
7282244ea07SJohn Dyson 
7291ce91824SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
7302a522eb9SJohn Baldwin 	TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
7312244ea07SJohn Dyson 		userp = aiocbe->userproc;
7322244ea07SJohn Dyson 		ki = userp->p_aioinfo;
7332244ea07SJohn Dyson 
7342244ea07SJohn Dyson 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
7352244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
7361ce91824SDavid Xu 			/* Account for currently active jobs. */
7371ce91824SDavid Xu 			ki->kaio_active_count++;
7381ce91824SDavid Xu 			aiocbe->jobstate = JOBST_JOBRUNNING;
7391ce91824SDavid Xu 			break;
7401ce91824SDavid Xu 		}
7411ce91824SDavid Xu 	}
742ac41f2efSAlfred Perlstein 	return (aiocbe);
7432244ea07SJohn Dyson }
7442244ea07SJohn Dyson 
7452244ea07SJohn Dyson /*
74699eee864SDavid Xu  *  Move all data to a permanent storage device; this code
74799eee864SDavid Xu  *  simulates the fsync syscall.
74899eee864SDavid Xu  */
74999eee864SDavid Xu static int
75099eee864SDavid Xu aio_fsync_vnode(struct thread *td, struct vnode *vp)
75199eee864SDavid Xu {
75299eee864SDavid Xu 	struct mount *mp;
75399eee864SDavid Xu 	int vfslocked;
75499eee864SDavid Xu 	int error;
75599eee864SDavid Xu 
75699eee864SDavid Xu 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
75799eee864SDavid Xu 	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
75899eee864SDavid Xu 		goto drop;
75999eee864SDavid Xu 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
76099eee864SDavid Xu 	if (vp->v_object != NULL) {
76199eee864SDavid Xu 		VM_OBJECT_LOCK(vp->v_object);
76299eee864SDavid Xu 		vm_object_page_clean(vp->v_object, 0, 0, 0);
76399eee864SDavid Xu 		VM_OBJECT_UNLOCK(vp->v_object);
76499eee864SDavid Xu 	}
76599eee864SDavid Xu 	error = VOP_FSYNC(vp, MNT_WAIT, td);
76699eee864SDavid Xu 
76799eee864SDavid Xu 	VOP_UNLOCK(vp, 0, td);
76899eee864SDavid Xu 	vn_finished_write(mp);
76999eee864SDavid Xu drop:
77099eee864SDavid Xu 	VFS_UNLOCK_GIANT(vfslocked);
77199eee864SDavid Xu 	return (error);
77299eee864SDavid Xu }
77399eee864SDavid Xu 
77499eee864SDavid Xu /*
775bfbbc4aaSJason Evans  * The AIO processing activity.  This is the code that does the I/O request for
776bfbbc4aaSJason Evans  * the non-physio version of the operations.  The normal vn operations are used,
777bfbbc4aaSJason Evans  * and this code should work in all instances for every type of file, including
778bfbbc4aaSJason Evans  * pipes, sockets, fifos, and regular files.
7791ce91824SDavid Xu  *
7801aa4c324SDavid Xu  * XXX I don't think it works well for socket, pipe, and fifo.
7812244ea07SJohn Dyson  */
78288ed460eSAlan Cox static void
783fd3bf775SJohn Dyson aio_process(struct aiocblist *aiocbe)
784fd3bf775SJohn Dyson {
785f8f750c5SRobert Watson 	struct ucred *td_savedcred;
786b40ce416SJulian Elischer 	struct thread *td;
787b40ce416SJulian Elischer 	struct proc *mycp;
7882244ea07SJohn Dyson 	struct aiocb *cb;
7892244ea07SJohn Dyson 	struct file *fp;
7901ce91824SDavid Xu 	struct socket *so;
7912244ea07SJohn Dyson 	struct uio auio;
7922244ea07SJohn Dyson 	struct iovec aiov;
7932244ea07SJohn Dyson 	int cnt;
7942244ea07SJohn Dyson 	int error;
795fd3bf775SJohn Dyson 	int oublock_st, oublock_end;
796fd3bf775SJohn Dyson 	int inblock_st, inblock_end;
7972244ea07SJohn Dyson 
798b40ce416SJulian Elischer 	td = curthread;
799f8f750c5SRobert Watson 	td_savedcred = td->td_ucred;
800f8f750c5SRobert Watson 	td->td_ucred = aiocbe->cred;
801b40ce416SJulian Elischer 	mycp = td->td_proc;
8022244ea07SJohn Dyson 	cb = &aiocbe->uaiocb;
80300e73160SAlan Cox 	fp = aiocbe->fd_file;
804bfbbc4aaSJason Evans 
80599eee864SDavid Xu 	if (cb->aio_lio_opcode == LIO_SYNC) {
80699eee864SDavid Xu 		error = 0;
80799eee864SDavid Xu 		cnt = 0;
80899eee864SDavid Xu 		if (fp->f_vnode != NULL)
80999eee864SDavid Xu 			error = aio_fsync_vnode(td, fp->f_vnode);
81099eee864SDavid Xu 		cb->_aiocb_private.error = error;
81199eee864SDavid Xu 		cb->_aiocb_private.status = 0;
81299eee864SDavid Xu 		td->td_ucred = td_savedcred;
81399eee864SDavid Xu 		return;
81499eee864SDavid Xu 	}
81599eee864SDavid Xu 
81691369fc7SAlan Cox 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
8172244ea07SJohn Dyson 	aiov.iov_len = cb->aio_nbytes;
8182244ea07SJohn Dyson 
8192244ea07SJohn Dyson 	auio.uio_iov = &aiov;
8202244ea07SJohn Dyson 	auio.uio_iovcnt = 1;
8219b16adc1SAlan Cox 	auio.uio_offset = cb->aio_offset;
8222244ea07SJohn Dyson 	auio.uio_resid = cb->aio_nbytes;
8232244ea07SJohn Dyson 	cnt = cb->aio_nbytes;
8242244ea07SJohn Dyson 	auio.uio_segflg = UIO_USERSPACE;
825b40ce416SJulian Elischer 	auio.uio_td = td;
8262244ea07SJohn Dyson 
827fd3bf775SJohn Dyson 	inblock_st = mycp->p_stats->p_ru.ru_inblock;
828fd3bf775SJohn Dyson 	oublock_st = mycp->p_stats->p_ru.ru_oublock;
829279d7226SMatthew Dillon 	/*
830a9bf5e37SDavid Xu 	 * aio_aqueue() acquires a reference to the file that is
8319b16adc1SAlan Cox 	 * released in aio_free_entry().
832279d7226SMatthew Dillon 	 */
8332244ea07SJohn Dyson 	if (cb->aio_lio_opcode == LIO_READ) {
8342244ea07SJohn Dyson 		auio.uio_rw = UIO_READ;
835b40ce416SJulian Elischer 		error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8362244ea07SJohn Dyson 	} else {
8376d53aa62SDavid Xu 		if (fp->f_type == DTYPE_VNODE)
8386d53aa62SDavid Xu 			bwillwrite();
8392244ea07SJohn Dyson 		auio.uio_rw = UIO_WRITE;
840b40ce416SJulian Elischer 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
8412244ea07SJohn Dyson 	}
842fd3bf775SJohn Dyson 	inblock_end = mycp->p_stats->p_ru.ru_inblock;
843fd3bf775SJohn Dyson 	oublock_end = mycp->p_stats->p_ru.ru_oublock;
844fd3bf775SJohn Dyson 
845fd3bf775SJohn Dyson 	aiocbe->inputcharge = inblock_end - inblock_st;
846fd3bf775SJohn Dyson 	aiocbe->outputcharge = oublock_end - oublock_st;
8472244ea07SJohn Dyson 
848bfbbc4aaSJason Evans 	if ((error) && (auio.uio_resid != cnt)) {
8492244ea07SJohn Dyson 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
8502244ea07SJohn Dyson 			error = 0;
85119eb87d2SJohn Baldwin 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
8521ce91824SDavid Xu 			int sigpipe = 1;
8531ce91824SDavid Xu 			if (fp->f_type == DTYPE_SOCKET) {
8541ce91824SDavid Xu 				so = fp->f_data;
8551ce91824SDavid Xu 				if (so->so_options & SO_NOSIGPIPE)
8561ce91824SDavid Xu 					sigpipe = 0;
8571ce91824SDavid Xu 			}
8581ce91824SDavid Xu 			if (sigpipe) {
8599b16adc1SAlan Cox 				PROC_LOCK(aiocbe->userproc);
8609b16adc1SAlan Cox 				psignal(aiocbe->userproc, SIGPIPE);
8619b16adc1SAlan Cox 				PROC_UNLOCK(aiocbe->userproc);
86219eb87d2SJohn Baldwin 			}
8632244ea07SJohn Dyson 		}
8641ce91824SDavid Xu 	}
8652244ea07SJohn Dyson 
8662244ea07SJohn Dyson 	cnt -= auio.uio_resid;
8672244ea07SJohn Dyson 	cb->_aiocb_private.error = error;
8682244ea07SJohn Dyson 	cb->_aiocb_private.status = cnt;
869f8f750c5SRobert Watson 	td->td_ucred = td_savedcred;
8702244ea07SJohn Dyson }
8712244ea07SJohn Dyson 
87269cd28daSDoug Ambrisko static void
8731ce91824SDavid Xu aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
8741ce91824SDavid Xu {
8751ce91824SDavid Xu 	struct aioliojob *lj;
87669cd28daSDoug Ambrisko 	struct kaioinfo *ki;
87799eee864SDavid Xu 	struct aiocblist *scb, *scbn;
8781ce91824SDavid Xu 	int lj_done;
87969cd28daSDoug Ambrisko 
88069cd28daSDoug Ambrisko 	ki = userp->p_aioinfo;
881759ccccaSDavid Xu 	AIO_LOCK_ASSERT(ki, MA_OWNED);
88269cd28daSDoug Ambrisko 	lj = aiocbe->lio;
88369cd28daSDoug Ambrisko 	lj_done = 0;
88469cd28daSDoug Ambrisko 	if (lj) {
8851ce91824SDavid Xu 		lj->lioj_finished_count++;
8861ce91824SDavid Xu 		if (lj->lioj_count == lj->lioj_finished_count)
88769cd28daSDoug Ambrisko 			lj_done = 1;
88869cd28daSDoug Ambrisko 	}
88969cd28daSDoug Ambrisko 	if (type == DONE_QUEUE) {
8901ce91824SDavid Xu 		aiocbe->jobflags |= AIOCBLIST_DONE;
89169cd28daSDoug Ambrisko 	} else {
8921ce91824SDavid Xu 		aiocbe->jobflags |= AIOCBLIST_BUFDONE;
89369cd28daSDoug Ambrisko 	}
8941ce91824SDavid Xu 	TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
8951ce91824SDavid Xu 	aiocbe->jobstate = JOBST_JOBFINISHED;
89627b8220dSDavid Xu 
89727b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_RUNDOWN)
89827b8220dSDavid Xu 		goto notification_done;
89927b8220dSDavid Xu 
9001ce91824SDavid Xu 	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
9011ce91824SDavid Xu 	    aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
9021ce91824SDavid Xu 		aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);
9031ce91824SDavid Xu 
9041ce91824SDavid Xu 	KNOTE_LOCKED(&aiocbe->klist, 1);
9051ce91824SDavid Xu 
90669cd28daSDoug Ambrisko 	if (lj_done) {
9071ce91824SDavid Xu 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
90869cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
9091ce91824SDavid Xu 			KNOTE_LOCKED(&lj->klist, 1);
91069cd28daSDoug Ambrisko 		}
9111ce91824SDavid Xu 		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
91269cd28daSDoug Ambrisko 		    == LIOJ_SIGNAL
9134c0fb2cfSDavid Xu 		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
9144c0fb2cfSDavid Xu 		        lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
9154c0fb2cfSDavid Xu 			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
91669cd28daSDoug Ambrisko 			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
91769cd28daSDoug Ambrisko 		}
91869cd28daSDoug Ambrisko 	}
91927b8220dSDavid Xu 
92027b8220dSDavid Xu notification_done:
92199eee864SDavid Xu 	if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
92299eee864SDavid Xu 		TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
923dbbccfe9SDavid Xu 			if (aiocbe->fd_file == scb->fd_file &&
92499eee864SDavid Xu 			    aiocbe->seqno < scb->seqno) {
92599eee864SDavid Xu 				if (--scb->pending == 0) {
92699eee864SDavid Xu 					mtx_lock(&aio_job_mtx);
92799eee864SDavid Xu 					scb->jobstate = JOBST_JOBQGLOBAL;
92899eee864SDavid Xu 					TAILQ_REMOVE(&ki->kaio_syncqueue, scb, list);
92999eee864SDavid Xu 					TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
93099eee864SDavid Xu 					aio_kick_nowait(userp);
93199eee864SDavid Xu 					mtx_unlock(&aio_job_mtx);
93299eee864SDavid Xu 				}
93399eee864SDavid Xu 			}
93499eee864SDavid Xu 		}
93599eee864SDavid Xu 	}
93627b8220dSDavid Xu 	if (ki->kaio_flags & KAIO_WAKEUP) {
93769cd28daSDoug Ambrisko 		ki->kaio_flags &= ~KAIO_WAKEUP;
9381ce91824SDavid Xu 		wakeup(&userp->p_aioinfo);
93969cd28daSDoug Ambrisko 	}
94069cd28daSDoug Ambrisko }
94169cd28daSDoug Ambrisko 
9422244ea07SJohn Dyson /*
94384af4da6SJohn Dyson  * The AIO daemon.  Most of the actual work is done in aio_process(),
94484af4da6SJohn Dyson  * but the setup (and address space management) is done in this routine.
9452244ea07SJohn Dyson  */
9462244ea07SJohn Dyson static void
9471ce91824SDavid Xu aio_daemon(void *_id)
9482244ea07SJohn Dyson {
949bfbbc4aaSJason Evans 	struct aiocblist *aiocbe;
950b40ce416SJulian Elischer 	struct aiothreadlist *aiop;
951bfbbc4aaSJason Evans 	struct kaioinfo *ki;
952bfbbc4aaSJason Evans 	struct proc *curcp, *mycp, *userp;
953bfbbc4aaSJason Evans 	struct vmspace *myvm, *tmpvm;
954b40ce416SJulian Elischer 	struct thread *td = curthread;
9551ce91824SDavid Xu 	int id = (intptr_t)_id;
9562244ea07SJohn Dyson 
9572244ea07SJohn Dyson 	/*
958fd3bf775SJohn Dyson 	 * Local copies of curproc (mycp) and vmspace (myvm)
9592244ea07SJohn Dyson 	 */
960b40ce416SJulian Elischer 	mycp = td->td_proc;
961fd3bf775SJohn Dyson 	myvm = mycp->p_vmspace;
962fd3bf775SJohn Dyson 
963cd4ed3b5SJohn Baldwin 	KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
964fd3bf775SJohn Dyson 
965fd3bf775SJohn Dyson 	/*
966bfbbc4aaSJason Evans 	 * Allocate and ready the aio control info.  There is one aiop structure
967bfbbc4aaSJason Evans 	 * per daemon.
968fd3bf775SJohn Dyson 	 */
969a163d034SWarner Losh 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
970b40ce416SJulian Elischer 	aiop->aiothread = td;
97199eee864SDavid Xu 	aiop->aiothreadflags = 0;
972bfbbc4aaSJason Evans 
9732244ea07SJohn Dyson 	/*
974fd3bf775SJohn Dyson 	 * Get rid of our current filedescriptors.  AIOD's don't need any
974fd3bf775SJohn Dyson 	 * Get rid of our current file descriptors.  AIODs don't need any
975fd3bf775SJohn Dyson 	 * file descriptors, except as temporarily inherited from the client.
977b40ce416SJulian Elischer 	fdfree(td);
978fd3bf775SJohn Dyson 
979e6bdc05fSDavid Xu 	/* The daemon resides in its own pgrp. */
980bd793be3SDavid Xu 	setsid(td, NULL);
981fd3bf775SJohn Dyson 
982fd3bf775SJohn Dyson 	/*
983fd3bf775SJohn Dyson 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
984b40ce416SJulian Elischer 	 * and creating too many daemons.)
985fd3bf775SJohn Dyson 	 */
9861ce91824SDavid Xu 	sema_post(&aio_newproc_sem);
9872244ea07SJohn Dyson 
9881ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
989bfbbc4aaSJason Evans 	for (;;) {
990fd3bf775SJohn Dyson 		/*
991fd3bf775SJohn Dyson 		 * curcp is the current daemon process context.
992fd3bf775SJohn Dyson 		 * userp is the current user process context.
993fd3bf775SJohn Dyson 		 */
994fd3bf775SJohn Dyson 		curcp = mycp;
995c4860686SJohn Dyson 
996fd3bf775SJohn Dyson 		/*
997fd3bf775SJohn Dyson 		 * Take daemon off of free queue
998fd3bf775SJohn Dyson 		 */
999b40ce416SJulian Elischer 		if (aiop->aiothreadflags & AIOP_FREE) {
10002244ea07SJohn Dyson 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
1001b40ce416SJulian Elischer 			aiop->aiothreadflags &= ~AIOP_FREE;
10022244ea07SJohn Dyson 		}
10032244ea07SJohn Dyson 
1004fd3bf775SJohn Dyson 		/*
1005bfbbc4aaSJason Evans 		 * Check for jobs.
1006fd3bf775SJohn Dyson 		 */
1007d254af07SMatthew Dillon 		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
10081ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
10092244ea07SJohn Dyson 			userp = aiocbe->userproc;
10102244ea07SJohn Dyson 
1011fd3bf775SJohn Dyson 			/*
1012bfbbc4aaSJason Evans 			 * Connect to process address space for user program.
1013fd3bf775SJohn Dyson 			 */
1014fd3bf775SJohn Dyson 			if (userp != curcp) {
1015fd3bf775SJohn Dyson 				/*
1016bfbbc4aaSJason Evans 				 * Save the current address space that we are
1017bfbbc4aaSJason Evans 				 * connected to.
1018fd3bf775SJohn Dyson 				 */
1019fd3bf775SJohn Dyson 				tmpvm = mycp->p_vmspace;
1020bfbbc4aaSJason Evans 
1021fd3bf775SJohn Dyson 				/*
1022bfbbc4aaSJason Evans 				 * Point to the new user address space, and
1023bfbbc4aaSJason Evans 				 * refer to it.
1024fd3bf775SJohn Dyson 				 */
1025fd3bf775SJohn Dyson 				mycp->p_vmspace = userp->p_vmspace;
10261a276a3fSAlan Cox 				atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
1027bfbbc4aaSJason Evans 
1028bfbbc4aaSJason Evans 				/* Activate the new mapping. */
1029079b7badSJulian Elischer 				pmap_activate(FIRST_THREAD_IN_PROC(mycp));
1030bfbbc4aaSJason Evans 
1031fd3bf775SJohn Dyson 				/*
1032bfbbc4aaSJason Evans 				 * If the old address space wasn't the daemon's
1033bfbbc4aaSJason Evans 				 * own address space, then we need to remove the
1034bfbbc4aaSJason Evans 				 * daemon's reference from the other process
1035bfbbc4aaSJason Evans 				 * that it was acting on behalf of.
1036fd3bf775SJohn Dyson 				 */
10372244ea07SJohn Dyson 				if (tmpvm != myvm) {
10382244ea07SJohn Dyson 					vmspace_free(tmpvm);
10392244ea07SJohn Dyson 				}
1040fd3bf775SJohn Dyson 				curcp = userp;
10412244ea07SJohn Dyson 			}
10422244ea07SJohn Dyson 
1043fd3bf775SJohn Dyson 			ki = userp->p_aioinfo;
104484af4da6SJohn Dyson 
1045bfbbc4aaSJason Evans 			/* Do the I/O function. */
10462244ea07SJohn Dyson 			aio_process(aiocbe);
104784af4da6SJohn Dyson 
10489b84335cSDavid Xu 			mtx_lock(&aio_job_mtx);
10499b84335cSDavid Xu 			/* Decrement the active job count. */
10509b84335cSDavid Xu 			ki->kaio_active_count--;
10519b84335cSDavid Xu 			mtx_unlock(&aio_job_mtx);
10529b84335cSDavid Xu 
1053759ccccaSDavid Xu 			AIO_LOCK(ki);
10541ce91824SDavid Xu 			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
105569cd28daSDoug Ambrisko 			aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
1056759ccccaSDavid Xu 			AIO_UNLOCK(ki);
10571ce91824SDavid Xu 
10581ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
10592244ea07SJohn Dyson 		}
10602244ea07SJohn Dyson 
1061fd3bf775SJohn Dyson 		/*
1062bfbbc4aaSJason Evans 		 * Disconnect from user address space.
1063fd3bf775SJohn Dyson 		 */
1064fd3bf775SJohn Dyson 		if (curcp != mycp) {
10651ce91824SDavid Xu 
10661ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
10671ce91824SDavid Xu 
1068bfbbc4aaSJason Evans 			/* Get the user address space to disconnect from. */
1069fd3bf775SJohn Dyson 			tmpvm = mycp->p_vmspace;
1070bfbbc4aaSJason Evans 
1071bfbbc4aaSJason Evans 			/* Get original address space for daemon. */
1072fd3bf775SJohn Dyson 			mycp->p_vmspace = myvm;
1073bfbbc4aaSJason Evans 
1074bfbbc4aaSJason Evans 			/* Activate the daemon's address space. */
1075079b7badSJulian Elischer 			pmap_activate(FIRST_THREAD_IN_PROC(mycp));
1076bfbbc4aaSJason Evans #ifdef DIAGNOSTIC
1077bfbbc4aaSJason Evans 			if (tmpvm == myvm) {
1078bfbbc4aaSJason Evans 				printf("AIOD: vmspace problem -- %d\n",
1079bfbbc4aaSJason Evans 				    mycp->p_pid);
1080bfbbc4aaSJason Evans 			}
108111783b14SJohn Dyson #endif
1082bfbbc4aaSJason Evans 			/* Remove our vmspace reference. */
10832244ea07SJohn Dyson 			vmspace_free(tmpvm);
1084bfbbc4aaSJason Evans 
1085fd3bf775SJohn Dyson 			curcp = mycp;
10861ce91824SDavid Xu 
10871ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
10881ce91824SDavid Xu 			/*
10891ce91824SDavid Xu 			 * We have to restart to avoid a race; we only sleep
10901ce91824SDavid Xu 			 * if no job can be selected, which should mean
10911ce91824SDavid Xu 			 * curcp == mycp.
10921ce91824SDavid Xu 			 */
10931ce91824SDavid Xu 			continue;
1094fd3bf775SJohn Dyson 		}
1095fd3bf775SJohn Dyson 
10961ce91824SDavid Xu 		mtx_assert(&aio_job_mtx, MA_OWNED);
10971ce91824SDavid Xu 
1098fd3bf775SJohn Dyson 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
1099b40ce416SJulian Elischer 		aiop->aiothreadflags |= AIOP_FREE;
1100fd3bf775SJohn Dyson 
1101fd3bf775SJohn Dyson 		/*
1102bfbbc4aaSJason Evans 		 * If the daemon is inactive for a long time, allow it to exit,
1103bfbbc4aaSJason Evans 		 * thereby freeing resources.
1104fd3bf775SJohn Dyson 		 */
11051ce91824SDavid Xu 		if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
11061ce91824SDavid Xu 		    aiod_lifetime)) {
1107c3869e4bSAlan Cox 			if (TAILQ_EMPTY(&aio_jobs)) {
1108b40ce416SJulian Elischer 				if ((aiop->aiothreadflags & AIOP_FREE) &&
110984af4da6SJohn Dyson 				    (num_aio_procs > target_aio_procs)) {
1110fd3bf775SJohn Dyson 					TAILQ_REMOVE(&aio_freeproc, aiop, list);
111184af4da6SJohn Dyson 					num_aio_procs--;
11121ce91824SDavid Xu 					mtx_unlock(&aio_job_mtx);
11131ce91824SDavid Xu 					uma_zfree(aiop_zone, aiop);
11141ce91824SDavid Xu 					free_unr(aiod_unr, id);
1115bfbbc4aaSJason Evans #ifdef DIAGNOSTIC
1116bfbbc4aaSJason Evans 					if (mycp->p_vmspace->vm_refcnt <= 1) {
1117bfbbc4aaSJason Evans 						printf("AIOD: bad vm refcnt for"
1118bfbbc4aaSJason Evans 						    " exiting daemon: %d\n",
1119fd3bf775SJohn Dyson 						    mycp->p_vmspace->vm_refcnt);
1120bfbbc4aaSJason Evans 					}
112111783b14SJohn Dyson #endif
1122c9a970a7SAlan Cox 					kthread_exit(0);
1123fd3bf775SJohn Dyson 				}
11242244ea07SJohn Dyson 			}
11252244ea07SJohn Dyson 		}
11262244ea07SJohn Dyson 	}
11271ce91824SDavid Xu 	mtx_unlock(&aio_job_mtx);
11281ce91824SDavid Xu 	panic("shouldn't be here\n");
11291ce91824SDavid Xu }
11302244ea07SJohn Dyson 
11312244ea07SJohn Dyson /*
1132bfbbc4aaSJason Evans  * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1133bfbbc4aaSJason Evans  * AIO daemon modifies its environment itself.
11342244ea07SJohn Dyson  */
11352244ea07SJohn Dyson static int
11361ce91824SDavid Xu aio_newproc(int *start)
1137fd3bf775SJohn Dyson {
11382244ea07SJohn Dyson 	int error;
1139c9a970a7SAlan Cox 	struct proc *p;
11401ce91824SDavid Xu 	int id;
11412244ea07SJohn Dyson 
11421ce91824SDavid Xu 	id = alloc_unr(aiod_unr);
11431ce91824SDavid Xu 	error = kthread_create(aio_daemon, (void *)(intptr_t)id, &p,
11441ce91824SDavid Xu 		RFNOWAIT, 0, "aiod%d", id);
11451ce91824SDavid Xu 	if (error == 0) {
1146fd3bf775SJohn Dyson 		/*
11471ce91824SDavid Xu 		 * Wait until daemon is started.
1148fd3bf775SJohn Dyson 		 */
11491ce91824SDavid Xu 		sema_wait(&aio_newproc_sem);
11501ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
115184af4da6SJohn Dyson 		num_aio_procs++;
11521ce91824SDavid Xu 		if (start != NULL)
11537f34b521SDavid Xu 			(*start)--;
11541ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
11551ce91824SDavid Xu 	} else {
11561ce91824SDavid Xu 		free_unr(aiod_unr, id);
11571ce91824SDavid Xu 	}
1158ac41f2efSAlfred Perlstein 	return (error);
11592244ea07SJohn Dyson }
11602244ea07SJohn Dyson 
11612244ea07SJohn Dyson /*
116288ed460eSAlan Cox  * Try the high-performance, low-overhead physio method for eligible
116388ed460eSAlan Cox  * VCHR devices.  This method doesn't use an aio helper thread, and
116488ed460eSAlan Cox  * thus has very low overhead.
116588ed460eSAlan Cox  *
1166a9bf5e37SDavid Xu  * Assumes that the caller, aio_aqueue(), has incremented the file
116788ed460eSAlan Cox  * structure's reference count, preventing its deallocation for the
116888ed460eSAlan Cox  * duration of this call.
1169fd3bf775SJohn Dyson  */
117088ed460eSAlan Cox static int
1171bfbbc4aaSJason Evans aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
1172fd3bf775SJohn Dyson {
1173fd3bf775SJohn Dyson 	struct aiocb *cb;
1174fd3bf775SJohn Dyson 	struct file *fp;
1175fd3bf775SJohn Dyson 	struct buf *bp;
1176fd3bf775SJohn Dyson 	struct vnode *vp;
1177fd3bf775SJohn Dyson 	struct kaioinfo *ki;
11781ce91824SDavid Xu 	struct aioliojob *lj;
11791ce91824SDavid Xu 	int error;
1180fd3bf775SJohn Dyson 
118184af4da6SJohn Dyson 	cb = &aiocbe->uaiocb;
11829fbd7ccfSAlan Cox 	fp = aiocbe->fd_file;
1183fd3bf775SJohn Dyson 
1184008626c3SPoul-Henning Kamp 	if (fp->f_type != DTYPE_VNODE)
1185008626c3SPoul-Henning Kamp 		return (-1);
1186fd3bf775SJohn Dyson 
11873b6d9652SPoul-Henning Kamp 	vp = fp->f_vnode;
118811783b14SJohn Dyson 
1189f582ac06SBrian Feldman 	/*
1190f582ac06SBrian Feldman 	 * If it's not a disk, we don't want to return a positive error;
1191f582ac06SBrian Feldman 	 * that would keep the aio code from falling through to the
1192f582ac06SBrian Feldman 	 * helper-thread path when the request is against a regular file.
1193f582ac06SBrian Feldman 	 */
1194f582ac06SBrian Feldman 	if (!vn_isdisk(vp, &error)) {
1195f582ac06SBrian Feldman 		if (error == ENOTBLK)
1196f582ac06SBrian Feldman 			return (-1);
1197f582ac06SBrian Feldman 		else
1198ba4ad1fcSPoul-Henning Kamp 			return (error);
1199f582ac06SBrian Feldman 	}
1200fd3bf775SJohn Dyson 
1201ad8de0f2SDavid Xu 	if (vp->v_bufobj.bo_bsize == 0)
1202ad8de0f2SDavid Xu 		return (-1);
1203ad8de0f2SDavid Xu 
12045d9d81e7SPoul-Henning Kamp  	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
1205008626c3SPoul-Henning Kamp 		return (-1);
1206fd3bf775SJohn Dyson 
120769cd28daSDoug Ambrisko 	if (cb->aio_nbytes > vp->v_rdev->si_iosize_max)
120869cd28daSDoug Ambrisko 		return (-1);
120969cd28daSDoug Ambrisko 
121013644654SAlan Cox 	if (cb->aio_nbytes >
121113644654SAlan Cox 	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
1212008626c3SPoul-Henning Kamp 		return (-1);
1213fd3bf775SJohn Dyson 
1214fd3bf775SJohn Dyson 	ki = p->p_aioinfo;
1215008626c3SPoul-Henning Kamp 	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
1216008626c3SPoul-Henning Kamp 		return (-1);
1217fd3bf775SJohn Dyson 
1218bfbbc4aaSJason Evans 	/* Create and build a buffer header for a transfer. */
12191c7c3c6aSMatthew Dillon 	bp = (struct buf *)getpbuf(NULL);
122013644654SAlan Cox 	BUF_KERNPROC(bp);
1221fd3bf775SJohn Dyson 
1222759ccccaSDavid Xu 	AIO_LOCK(ki);
12231ce91824SDavid Xu 	ki->kaio_count++;
12241ce91824SDavid Xu 	ki->kaio_buffer_count++;
12251ce91824SDavid Xu 	lj = aiocbe->lio;
12261ce91824SDavid Xu 	if (lj)
12271ce91824SDavid Xu 		lj->lioj_count++;
1228759ccccaSDavid Xu 	AIO_UNLOCK(ki);
12291ce91824SDavid Xu 
1230fd3bf775SJohn Dyson 	/*
1231bfbbc4aaSJason Evans 	 * Get a copy of the kva from the physical buffer.
1232fd3bf775SJohn Dyson 	 */
1233ef38cda1SAlan Cox 	error = 0;
1234fd3bf775SJohn Dyson 
1235fd3bf775SJohn Dyson 	bp->b_bcount = cb->aio_nbytes;
1236fd3bf775SJohn Dyson 	bp->b_bufsize = cb->aio_nbytes;
1237fd3bf775SJohn Dyson 	bp->b_iodone = aio_physwakeup;
1238fd3bf775SJohn Dyson 	bp->b_saveaddr = bp->b_data;
123991369fc7SAlan Cox 	bp->b_data = (void *)(uintptr_t)cb->aio_buf;
1240a44ca4f0SHidetoshi Shimokawa 	bp->b_offset = cb->aio_offset;
1241a44ca4f0SHidetoshi Shimokawa 	bp->b_iooffset = cb->aio_offset;
1242fd3bf775SJohn Dyson 	bp->b_blkno = btodb(cb->aio_offset);
124306363906SAlan Cox 	bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1244fd3bf775SJohn Dyson 
12452d5c7e45SMatthew Dillon 	/*
12462d5c7e45SMatthew Dillon 	 * Bring buffer into kernel space.
12472d5c7e45SMatthew Dillon 	 */
12482d5c7e45SMatthew Dillon 	if (vmapbuf(bp) < 0) {
12492d5c7e45SMatthew Dillon 		error = EFAULT;
12502d5c7e45SMatthew Dillon 		goto doerror;
12512d5c7e45SMatthew Dillon 	}
1252fd3bf775SJohn Dyson 
1253759ccccaSDavid Xu 	AIO_LOCK(ki);
1254fd3bf775SJohn Dyson 	aiocbe->bp = bp;
12558edbaf85SHidetoshi Shimokawa 	bp->b_caller1 = (void *)aiocbe;
125684af4da6SJohn Dyson 	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
12571ce91824SDavid Xu 	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1258fd3bf775SJohn Dyson 	aiocbe->jobstate = JOBST_JOBQBUF;
125984af4da6SJohn Dyson 	cb->_aiocb_private.status = cb->aio_nbytes;
1260759ccccaSDavid Xu 	AIO_UNLOCK(ki);
12611ce91824SDavid Xu 
12621ce91824SDavid Xu 	atomic_add_int(&num_queue_count, 1);
12631ce91824SDavid Xu 	atomic_add_int(&num_buf_aio, 1);
12641ce91824SDavid Xu 
1265fd3bf775SJohn Dyson 	bp->b_error = 0;
1266fd3bf775SJohn Dyson 
12671ce91824SDavid Xu 	TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe);
1268bfbbc4aaSJason Evans 
1269bfbbc4aaSJason Evans 	/* Perform transfer. */
12706afb3b1cSPoul-Henning Kamp 	dev_strategy(vp->v_rdev, bp);
1271ac41f2efSAlfred Perlstein 	return (0);
1272fd3bf775SJohn Dyson 
1273fd3bf775SJohn Dyson doerror:
1274759ccccaSDavid Xu 	AIO_LOCK(ki);
12751ce91824SDavid Xu 	ki->kaio_count--;
1276fd3bf775SJohn Dyson 	ki->kaio_buffer_count--;
1277bfbbc4aaSJason Evans 	if (lj)
12781ce91824SDavid Xu 		lj->lioj_count--;
127984af4da6SJohn Dyson 	aiocbe->bp = NULL;
1280759ccccaSDavid Xu 	AIO_UNLOCK(ki);
12811c7c3c6aSMatthew Dillon 	relpbuf(bp, NULL);
1282fd3bf775SJohn Dyson 	return (error);
1283fd3bf775SJohn Dyson }
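
/*
 * Illustrative userland sketch of a request that can take the physio fast
 * path above: the target must be a disk character device and the transfer
 * length a multiple of the device block size.  The device path and sizes
 * below are placeholders, not requirements of the API.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	static struct aiocb rcb;
 *	static char rbuf[8 * 512];		// multiple of the sector size
 *
 *	int
 *	raw_read_example(void)
 *	{
 *		int fd = open("/dev/md0", O_RDONLY);	// assumed disk device
 *
 *		if (fd == -1)
 *			return (-1);
 *		memset(&rcb, 0, sizeof(rcb));
 *		rcb.aio_fildes = fd;
 *		rcb.aio_buf = rbuf;
 *		rcb.aio_nbytes = sizeof(rbuf);
 *		rcb.aio_offset = 0;
 *		rcb.aio_sigevent.sigev_notify = SIGEV_NONE;
 *		return (aio_read(&rcb));	// may be queued via aio_qphysio()
 *	}
 */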
1284fd3bf775SJohn Dyson 
1285fd3bf775SJohn Dyson /*
1286bfbbc4aaSJason Evans  * Wake up aio requests that may be serviceable now.
1287bfbbc4aaSJason Evans  */
128848dac059SAlan Cox static void
128921d56e9cSAlfred Perlstein aio_swake_cb(struct socket *so, struct sockbuf *sb)
1290bfbbc4aaSJason Evans {
1291bfbbc4aaSJason Evans 	struct aiocblist *cb, *cbn;
129299eee864SDavid Xu 	int opcode;
1293bfbbc4aaSJason Evans 
12941aa4c324SDavid Xu 	if (sb == &so->so_snd)
1295bfbbc4aaSJason Evans 		opcode = LIO_WRITE;
12961aa4c324SDavid Xu 	else
1297bfbbc4aaSJason Evans 		opcode = LIO_READ;
1298bfbbc4aaSJason Evans 
12991aa4c324SDavid Xu 	SOCKBUF_LOCK(sb);
13001aa4c324SDavid Xu 	sb->sb_flags &= ~SB_AIO;
13011aa4c324SDavid Xu 	mtx_lock(&aio_job_mtx);
13022a522eb9SJohn Baldwin 	TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
1303bfbbc4aaSJason Evans 		if (opcode == cb->uaiocb.aio_lio_opcode) {
13048c0d9af5SDavid Xu 			if (cb->jobstate != JOBST_JOBQSOCK)
13051ce91824SDavid Xu 				panic("invalid queue value");
13061aa4c324SDavid Xu 			/* XXX
13071aa4c324SDavid Xu 			 * We don't have an actual socket backend yet,
13081aa4c324SDavid Xu 			 * so we simply move the requests to the generic
13091aa4c324SDavid Xu 			 * file I/O backend.
13101ce91824SDavid Xu 			 */
13111aa4c324SDavid Xu 			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
13121ce91824SDavid Xu 			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
131399eee864SDavid Xu 			aio_kick_nowait(cb->userproc);
1314bfbbc4aaSJason Evans 		}
1315bfbbc4aaSJason Evans 	}
13161aa4c324SDavid Xu 	mtx_unlock(&aio_job_mtx);
13171aa4c324SDavid Xu 	SOCKBUF_UNLOCK(sb);
1318bfbbc4aaSJason Evans }
1319bfbbc4aaSJason Evans 
1320bfbbc4aaSJason Evans /*
1321bfbbc4aaSJason Evans  * Queue a new AIO request.  The choice between the helper-thread and the
1322bfbbc4aaSJason Evans  * direct physio (VCHR) technique is made in this code.
13232244ea07SJohn Dyson  */
13242244ea07SJohn Dyson static int
1325a9bf5e37SDavid Xu aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
1326dbbccfe9SDavid Xu 	int type, int oldsigev)
1327fd3bf775SJohn Dyson {
1328b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
13292244ea07SJohn Dyson 	struct file *fp;
1330bfbbc4aaSJason Evans 	struct socket *so;
1331dbbccfe9SDavid Xu 	struct aiocblist *aiocbe, *cb;
13322244ea07SJohn Dyson 	struct kaioinfo *ki;
1333c6fa9f78SAlan Cox 	struct kevent kev;
1334c6fa9f78SAlan Cox 	struct kqueue *kq;
1335c6fa9f78SAlan Cox 	struct file *kq_fp;
1336576c004fSAlfred Perlstein 	struct sockbuf *sb;
13371ce91824SDavid Xu 	int opcode;
13381ce91824SDavid Xu 	int error;
13391ce91824SDavid Xu 	int fd;
13401ce91824SDavid Xu 	int jid;
13412244ea07SJohn Dyson 
1342a9bf5e37SDavid Xu 	if (p->p_aioinfo == NULL)
1343a9bf5e37SDavid Xu 		aio_init_aioinfo(p);
1344a9bf5e37SDavid Xu 
13451ce91824SDavid Xu 	ki = p->p_aioinfo;
13461ce91824SDavid Xu 
1347a9bf5e37SDavid Xu 	suword(&job->_aiocb_private.status, -1);
1348a9bf5e37SDavid Xu 	suword(&job->_aiocb_private.error, 0);
1349a9bf5e37SDavid Xu 	suword(&job->_aiocb_private.kernelinfo, -1);
1350a9bf5e37SDavid Xu 
1351a9bf5e37SDavid Xu 	if (num_queue_count >= max_queue_count ||
1352a9bf5e37SDavid Xu 	    ki->kaio_count >= ki->kaio_qallowed_count) {
1353a9bf5e37SDavid Xu 		suword(&job->_aiocb_private.error, EAGAIN);
1354a9bf5e37SDavid Xu 		return (EAGAIN);
1355a9bf5e37SDavid Xu 	}
1356a9bf5e37SDavid Xu 
13571ce91824SDavid Xu 	aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1358fd3bf775SJohn Dyson 	aiocbe->inputcharge = 0;
1359fd3bf775SJohn Dyson 	aiocbe->outputcharge = 0;
1360759ccccaSDavid Xu 	knlist_init(&aiocbe->klist, AIO_MTX(ki), NULL, NULL, NULL);
1361fd3bf775SJohn Dyson 
13620972628aSDavid Xu 	if (oldsigev) {
13630972628aSDavid Xu 		bzero(&aiocbe->uaiocb, sizeof(struct aiocb));
13640972628aSDavid Xu 		error = copyin(job, &aiocbe->uaiocb, sizeof(struct oaiocb));
13650972628aSDavid Xu 		bcopy(&aiocbe->uaiocb.__spare__, &aiocbe->uaiocb.aio_sigevent,
13660972628aSDavid Xu 			sizeof(struct osigevent));
13670972628aSDavid Xu 	} else {
13680972628aSDavid Xu 		error = copyin(job, &aiocbe->uaiocb, sizeof(struct aiocb));
13690972628aSDavid Xu 	}
13702244ea07SJohn Dyson 	if (error) {
1371fd3bf775SJohn Dyson 		suword(&job->_aiocb_private.error, error);
1372c897b813SJeff Roberson 		uma_zfree(aiocb_zone, aiocbe);
1373ac41f2efSAlfred Perlstein 		return (error);
13742244ea07SJohn Dyson 	}
137568d71118SDavid Xu 
137668d71118SDavid Xu 	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
137768d71118SDavid Xu 	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
137868d71118SDavid Xu 	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
137968d71118SDavid Xu 	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
138068d71118SDavid Xu 		suword(&job->_aiocb_private.error, EINVAL);
138168d71118SDavid Xu 		uma_zfree(aiocb_zone, aiocbe);
138268d71118SDavid Xu 		return (EINVAL);
138368d71118SDavid Xu 	}
138468d71118SDavid Xu 
13854c0fb2cfSDavid Xu 	if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
13864c0fb2cfSDavid Xu 	     aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
13872f3cf918SAlfred Perlstein 		!_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
1388c897b813SJeff Roberson 		uma_zfree(aiocb_zone, aiocbe);
1389ac41f2efSAlfred Perlstein 		return (EINVAL);
13902f3cf918SAlfred Perlstein 	}
13912244ea07SJohn Dyson 
13924c0fb2cfSDavid Xu 	ksiginfo_init(&aiocbe->ksi);
13934c0fb2cfSDavid Xu 
1394bfbbc4aaSJason Evans 	/* Save userspace address of the job info. */
139511783b14SJohn Dyson 	aiocbe->uuaiocb = job;
139611783b14SJohn Dyson 
1397bfbbc4aaSJason Evans 	/* Get the opcode. */
1398bfbbc4aaSJason Evans 	if (type != LIO_NOP)
1399a624e84fSJohn Dyson 		aiocbe->uaiocb.aio_lio_opcode = type;
1400a624e84fSJohn Dyson 	opcode = aiocbe->uaiocb.aio_lio_opcode;
14012244ea07SJohn Dyson 
14022a522eb9SJohn Baldwin 	/* Fetch the file object for the specified file descriptor. */
14032244ea07SJohn Dyson 	fd = aiocbe->uaiocb.aio_fildes;
14042a522eb9SJohn Baldwin 	switch (opcode) {
14052a522eb9SJohn Baldwin 	case LIO_WRITE:
14062a522eb9SJohn Baldwin 		error = fget_write(td, fd, &fp);
14072a522eb9SJohn Baldwin 		break;
14082a522eb9SJohn Baldwin 	case LIO_READ:
14092a522eb9SJohn Baldwin 		error = fget_read(td, fd, &fp);
14102a522eb9SJohn Baldwin 		break;
14112a522eb9SJohn Baldwin 	default:
14122a522eb9SJohn Baldwin 		error = fget(td, fd, &fp);
14132a522eb9SJohn Baldwin 	}
14142a522eb9SJohn Baldwin 	if (error) {
1415c897b813SJeff Roberson 		uma_zfree(aiocb_zone, aiocbe);
141655a122bfSDavid Xu 		suword(&job->_aiocb_private.error, error);
1417af56abaaSJohn Baldwin 		return (error);
14182244ea07SJohn Dyson 	}
141999eee864SDavid Xu 
142099eee864SDavid Xu 	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
142199eee864SDavid Xu 		error = EINVAL;
142299eee864SDavid Xu 		goto aqueue_fail;
142399eee864SDavid Xu 	}
14242244ea07SJohn Dyson 
1425dbbccfe9SDavid Xu 	if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
1426ae124fc4SAlan Cox 		error = EINVAL;
1427ae124fc4SAlan Cox 		goto aqueue_fail;
14282244ea07SJohn Dyson 	}
14291ce91824SDavid Xu 
143099eee864SDavid Xu 	aiocbe->fd_file = fp;
14311ce91824SDavid Xu 
143299eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
143399eee864SDavid Xu 	jid = jobrefid++;
143499eee864SDavid Xu 	aiocbe->seqno = jobseqno++;
143599eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
14361ce91824SDavid Xu 	error = suword(&job->_aiocb_private.kernelinfo, jid);
14371ce91824SDavid Xu 	if (error) {
14381ce91824SDavid Xu 		error = EINVAL;
14391ce91824SDavid Xu 		goto aqueue_fail;
14401ce91824SDavid Xu 	}
14411ce91824SDavid Xu 	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
14422244ea07SJohn Dyson 
14432244ea07SJohn Dyson 	if (opcode == LIO_NOP) {
1444a5c0b1c0SAlan Cox 		fdrop(fp, td);
1445c897b813SJeff Roberson 		uma_zfree(aiocb_zone, aiocbe);
1446ac41f2efSAlfred Perlstein 		return (0);
14472244ea07SJohn Dyson 	}
144899eee864SDavid Xu 	if ((opcode != LIO_READ) && (opcode != LIO_WRITE) &&
144999eee864SDavid Xu 	    (opcode != LIO_SYNC)) {
1450ae124fc4SAlan Cox 		error = EINVAL;
1451ae124fc4SAlan Cox 		goto aqueue_fail;
14522244ea07SJohn Dyson 	}
14532244ea07SJohn Dyson 
145499eee864SDavid Xu 	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT)
1455c6fa9f78SAlan Cox 		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
145699eee864SDavid Xu 	else
1457cb679c38SJonathan Lemon 		goto no_kqueue;
14582a522eb9SJohn Baldwin 	error = fget(td, (u_int)kev.ident, &kq_fp);
14592a522eb9SJohn Baldwin 	if (error)
14602a522eb9SJohn Baldwin 		goto aqueue_fail;
14612a522eb9SJohn Baldwin 	if (kq_fp->f_type != DTYPE_KQUEUE) {
14622a522eb9SJohn Baldwin 		fdrop(kq_fp, td);
1463cb679c38SJonathan Lemon 		error = EBADF;
1464cb679c38SJonathan Lemon 		goto aqueue_fail;
1465cb679c38SJonathan Lemon 	}
146648e3128bSMatthew Dillon 	kq = kq_fp->f_data;
1467b46f1c55SAlan Cox 	kev.ident = (uintptr_t)aiocbe->uuaiocb;
1468cb679c38SJonathan Lemon 	kev.filter = EVFILT_AIO;
1469cb679c38SJonathan Lemon 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
1470b46f1c55SAlan Cox 	kev.data = (intptr_t)aiocbe;
14711ce91824SDavid Xu 	kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1472ad3b9257SJohn-Mark Gurney 	error = kqueue_register(kq, &kev, td, 1);
14732a522eb9SJohn Baldwin 	fdrop(kq_fp, td);
1474cb679c38SJonathan Lemon aqueue_fail:
1475cb679c38SJonathan Lemon 	if (error) {
1476a5c0b1c0SAlan Cox 		fdrop(fp, td);
1477c897b813SJeff Roberson 		uma_zfree(aiocb_zone, aiocbe);
1478cb679c38SJonathan Lemon 		suword(&job->_aiocb_private.error, error);
1479279d7226SMatthew Dillon 		goto done;
1480cb679c38SJonathan Lemon 	}
1481cb679c38SJonathan Lemon no_kqueue:
1482cb679c38SJonathan Lemon 
1483fd3bf775SJohn Dyson 	suword(&job->_aiocb_private.error, EINPROGRESS);
1484fd3bf775SJohn Dyson 	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
14852244ea07SJohn Dyson 	aiocbe->userproc = p;
1486f8f750c5SRobert Watson 	aiocbe->cred = crhold(td->td_ucred);
14872244ea07SJohn Dyson 	aiocbe->jobflags = 0;
148884af4da6SJohn Dyson 	aiocbe->lio = lj;
14892244ea07SJohn Dyson 
149099eee864SDavid Xu 	if (opcode == LIO_SYNC)
149199eee864SDavid Xu 		goto queueit;
149299eee864SDavid Xu 
1493bfbbc4aaSJason Evans 	if (fp->f_type == DTYPE_SOCKET) {
1494bfbbc4aaSJason Evans 		/*
1495bfbbc4aaSJason Evans 		 * Alternate queueing for socket ops: Reach down into the
1496bfbbc4aaSJason Evans 		 * descriptor to get the socket data.  Then check to see if the
1497bfbbc4aaSJason Evans 		 * socket is ready to be read or written (based on the requested
1498bfbbc4aaSJason Evans 		 * operation).
1499bfbbc4aaSJason Evans 		 *
1500bfbbc4aaSJason Evans 		 * If it is not ready for I/O, then queue the aiocbe on the
1501bfbbc4aaSJason Evans 		 * socket, and set the flags so we get a call when sbnotify()
1502bfbbc4aaSJason Evans 		 * happens.
1503576c004fSAlfred Perlstein 		 *
1504576c004fSAlfred Perlstein 		 * Note that if the opcode is neither LIO_WRITE nor LIO_READ, we lock
1505576c004fSAlfred Perlstein 		 * and unlock the snd sockbuf for no reason.
1506bfbbc4aaSJason Evans 		 */
150748e3128bSMatthew Dillon 		so = fp->f_data;
1508576c004fSAlfred Perlstein 		sb = (opcode == LIO_READ) ? &so->so_rcv : &so->so_snd;
1509576c004fSAlfred Perlstein 		SOCKBUF_LOCK(sb);
1510bfbbc4aaSJason Evans 		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1511bfbbc4aaSJason Evans 		    LIO_WRITE) && (!sowriteable(so)))) {
1512576c004fSAlfred Perlstein 			sb->sb_flags |= SB_AIO;
15131aa4c324SDavid Xu 
15141aa4c324SDavid Xu 			mtx_lock(&aio_job_mtx);
15151aa4c324SDavid Xu 			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
15161aa4c324SDavid Xu 			mtx_unlock(&aio_job_mtx);
15171aa4c324SDavid Xu 
1518759ccccaSDavid Xu 			AIO_LOCK(ki);
15191ce91824SDavid Xu 			TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
15201aa4c324SDavid Xu 			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
15211ce91824SDavid Xu 			aiocbe->jobstate = JOBST_JOBQSOCK;
15221ce91824SDavid Xu 			ki->kaio_count++;
15231ce91824SDavid Xu 			if (lj)
15241ce91824SDavid Xu 				lj->lioj_count++;
1525759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1526576c004fSAlfred Perlstein 			SOCKBUF_UNLOCK(sb);
15271ce91824SDavid Xu 			atomic_add_int(&num_queue_count, 1);
1528279d7226SMatthew Dillon 			error = 0;
1529279d7226SMatthew Dillon 			goto done;
1530bfbbc4aaSJason Evans 		}
1531576c004fSAlfred Perlstein 		SOCKBUF_UNLOCK(sb);
1532bfbbc4aaSJason Evans 	}
1533bfbbc4aaSJason Evans 
1534bfbbc4aaSJason Evans 	if ((error = aio_qphysio(p, aiocbe)) == 0)
1535279d7226SMatthew Dillon 		goto done;
15361ce91824SDavid Xu #if 0
1537279d7226SMatthew Dillon 	if (error > 0) {
1538fd3bf775SJohn Dyson 		aiocbe->uaiocb._aiocb_private.error = error;
1539fd3bf775SJohn Dyson 		suword(&job->_aiocb_private.error, error);
1540279d7226SMatthew Dillon 		goto done;
1541fd3bf775SJohn Dyson 	}
15421ce91824SDavid Xu #endif
154399eee864SDavid Xu queueit:
1544bfbbc4aaSJason Evans 	/* No buffer for daemon I/O. */
154584af4da6SJohn Dyson 	aiocbe->bp = NULL;
154699eee864SDavid Xu 	atomic_add_int(&num_queue_count, 1);
154784af4da6SJohn Dyson 
1548759ccccaSDavid Xu 	AIO_LOCK(ki);
15491ce91824SDavid Xu 	ki->kaio_count++;
1550bfbbc4aaSJason Evans 	if (lj)
15511ce91824SDavid Xu 		lj->lioj_count++;
1552fd3bf775SJohn Dyson 	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
15531ce91824SDavid Xu 	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
155499eee864SDavid Xu 	if (opcode == LIO_SYNC) {
1555dbbccfe9SDavid Xu 		TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
1556dbbccfe9SDavid Xu 			if (cb->fd_file == aiocbe->fd_file &&
1557dbbccfe9SDavid Xu 			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
1558dbbccfe9SDavid Xu 			    cb->seqno < aiocbe->seqno) {
1559dbbccfe9SDavid Xu 				cb->jobflags |= AIOCBLIST_CHECKSYNC;
1560dbbccfe9SDavid Xu 				aiocbe->pending++;
1561dbbccfe9SDavid Xu 			}
1562dbbccfe9SDavid Xu 		}
1563dbbccfe9SDavid Xu 		TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
1564dbbccfe9SDavid Xu 			if (cb->fd_file == aiocbe->fd_file &&
1565dbbccfe9SDavid Xu 			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
1566dbbccfe9SDavid Xu 			    cb->seqno < aiocbe->seqno) {
1567dbbccfe9SDavid Xu 				cb->jobflags |= AIOCBLIST_CHECKSYNC;
1568dbbccfe9SDavid Xu 				aiocbe->pending++;
1569dbbccfe9SDavid Xu 			}
1570dbbccfe9SDavid Xu 		}
1571dbbccfe9SDavid Xu 		if (aiocbe->pending != 0) {
157299eee864SDavid Xu 			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
157399eee864SDavid Xu 			aiocbe->jobstate = JOBST_JOBQSYNC;
1574759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1575dbbccfe9SDavid Xu 			goto done;
1576dbbccfe9SDavid Xu 		}
1577dbbccfe9SDavid Xu 	}
15781ce91824SDavid Xu 	mtx_lock(&aio_job_mtx);
15791ce91824SDavid Xu 	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
15801ce91824SDavid Xu 	aiocbe->jobstate = JOBST_JOBQGLOBAL;
158199eee864SDavid Xu 	aio_kick_nowait(p);
158299eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
1583759ccccaSDavid Xu 	AIO_UNLOCK(ki);
15841ce91824SDavid Xu 	error = 0;
158599eee864SDavid Xu done:
158699eee864SDavid Xu 	return (error);
158799eee864SDavid Xu }
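
/*
 * Illustrative userland sketch of queueing a request through this path with
 * signal notification on completion; the file name and signal number are
 * placeholders chosen for the example.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static struct aiocb wcb;
 *	static char data[] = "hello\n";
 *
 *	int
 *	queue_write_example(void)
 *	{
 *		int fd = open("/tmp/aio.out", O_WRONLY | O_CREAT, 0644);
 *
 *		if (fd == -1)
 *			return (-1);
 *		memset(&wcb, 0, sizeof(wcb));
 *		wcb.aio_fildes = fd;
 *		wcb.aio_buf = data;
 *		wcb.aio_nbytes = sizeof(data) - 1;
 *		wcb.aio_offset = 0;
 *		wcb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *		wcb.aio_sigevent.sigev_signo = SIGUSR1;	// delivered on completion
 *		return (aio_write(&wcb));
 *	}
 */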
158899eee864SDavid Xu 
158999eee864SDavid Xu static void
159099eee864SDavid Xu aio_kick_nowait(struct proc *userp)
159199eee864SDavid Xu {
159299eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
159399eee864SDavid Xu 	struct aiothreadlist *aiop;
159499eee864SDavid Xu 
159599eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
159699eee864SDavid Xu 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
159799eee864SDavid Xu 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
159899eee864SDavid Xu 		aiop->aiothreadflags &= ~AIOP_FREE;
159999eee864SDavid Xu 		wakeup(aiop->aiothread);
1600dbbccfe9SDavid Xu 	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1601dbbccfe9SDavid Xu 	    ((ki->kaio_active_count + num_aio_resv_start) <
1602dbbccfe9SDavid Xu 	    ki->kaio_maxactive_count)) {
160399eee864SDavid Xu 		taskqueue_enqueue(taskqueue_aiod_bio, &ki->kaio_task);
160499eee864SDavid Xu 	}
160599eee864SDavid Xu }
160699eee864SDavid Xu 
1607dbbccfe9SDavid Xu static int
160899eee864SDavid Xu aio_kick(struct proc *userp)
160999eee864SDavid Xu {
161099eee864SDavid Xu 	struct kaioinfo *ki = userp->p_aioinfo;
161199eee864SDavid Xu 	struct aiothreadlist *aiop;
1612dbbccfe9SDavid Xu 	int error, ret = 0;
161399eee864SDavid Xu 
161499eee864SDavid Xu 	mtx_assert(&aio_job_mtx, MA_OWNED);
161599eee864SDavid Xu retryproc:
1616d254af07SMatthew Dillon 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
16172244ea07SJohn Dyson 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
1618b40ce416SJulian Elischer 		aiop->aiothreadflags &= ~AIOP_FREE;
1619b40ce416SJulian Elischer 		wakeup(aiop->aiothread);
1620fd3bf775SJohn Dyson 	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1621fd3bf775SJohn Dyson 	    ((ki->kaio_active_count + num_aio_resv_start) <
1622fd3bf775SJohn Dyson 	    ki->kaio_maxactive_count)) {
1623fd3bf775SJohn Dyson 		num_aio_resv_start++;
16241ce91824SDavid Xu 		mtx_unlock(&aio_job_mtx);
16251ce91824SDavid Xu 		error = aio_newproc(&num_aio_resv_start);
16261ce91824SDavid Xu 		mtx_lock(&aio_job_mtx);
16271ce91824SDavid Xu 		if (error) {
162884af4da6SJohn Dyson 			num_aio_resv_start--;
16292244ea07SJohn Dyson 			goto retryproc;
1630fd3bf775SJohn Dyson 		}
1631dbbccfe9SDavid Xu 	} else {
1632dbbccfe9SDavid Xu 		ret = -1;
16331ce91824SDavid Xu 	}
1634dbbccfe9SDavid Xu 	return (ret);
163599eee864SDavid Xu }
16361ce91824SDavid Xu 
163799eee864SDavid Xu static void
163899eee864SDavid Xu aio_kick_helper(void *context, int pending)
163999eee864SDavid Xu {
164099eee864SDavid Xu 	struct proc *userp = context;
164199eee864SDavid Xu 
164299eee864SDavid Xu 	mtx_lock(&aio_job_mtx);
1643dbbccfe9SDavid Xu 	while (--pending >= 0) {
1644dbbccfe9SDavid Xu 		if (aio_kick(userp))
1645dbbccfe9SDavid Xu 			break;
1646dbbccfe9SDavid Xu 	}
164799eee864SDavid Xu 	mtx_unlock(&aio_job_mtx);
16482244ea07SJohn Dyson }
16492244ea07SJohn Dyson 
1650fd3bf775SJohn Dyson /*
1651bfbbc4aaSJason Evans  * Support the aio_return system call; as a side effect, kernel resources
1652bfbbc4aaSJason Evans  * are released.
16532244ea07SJohn Dyson  */
16542244ea07SJohn Dyson int
1655b40ce416SJulian Elischer aio_return(struct thread *td, struct aio_return_args *uap)
1656fd3bf775SJohn Dyson {
1657b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
16581ce91824SDavid Xu 	struct aiocblist *cb;
16591ce91824SDavid Xu 	struct aiocb *uaiocb;
16602244ea07SJohn Dyson 	struct kaioinfo *ki;
16611ce91824SDavid Xu 	int status, error;
16622244ea07SJohn Dyson 
1663c0bf5caaSAlan Cox 	ki = p->p_aioinfo;
1664c0bf5caaSAlan Cox 	if (ki == NULL)
1665ac41f2efSAlfred Perlstein 		return (EINVAL);
16661ce91824SDavid Xu 	uaiocb = uap->aiocbp;
1667759ccccaSDavid Xu 	AIO_LOCK(ki);
16681ce91824SDavid Xu 	TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
16691ce91824SDavid Xu 		if (cb->uuaiocb == uaiocb)
1670c0bf5caaSAlan Cox 			break;
1671c0bf5caaSAlan Cox 	}
1672c0bf5caaSAlan Cox 	if (cb != NULL) {
16731ce91824SDavid Xu 		MPASS(cb->jobstate == JOBST_JOBFINISHED);
16741ce91824SDavid Xu 		status = cb->uaiocb._aiocb_private.status;
16751ce91824SDavid Xu 		error = cb->uaiocb._aiocb_private.error;
16761ce91824SDavid Xu 		td->td_retval[0] = status;
167769cd28daSDoug Ambrisko 		if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
167869cd28daSDoug Ambrisko 			p->p_stats->p_ru.ru_oublock +=
167969cd28daSDoug Ambrisko 			    cb->outputcharge;
168069cd28daSDoug Ambrisko 			cb->outputcharge = 0;
168169cd28daSDoug Ambrisko 		} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
168269cd28daSDoug Ambrisko 			p->p_stats->p_ru.ru_inblock += cb->inputcharge;
168369cd28daSDoug Ambrisko 			cb->inputcharge = 0;
168469cd28daSDoug Ambrisko 		}
168584af4da6SJohn Dyson 		aio_free_entry(cb);
1686759ccccaSDavid Xu 		AIO_UNLOCK(ki);
16871ce91824SDavid Xu 		suword(&uaiocb->_aiocb_private.error, error);
16881ce91824SDavid Xu 		suword(&uaiocb->_aiocb_private.status, status);
168955a122bfSDavid Xu 	} else {
16901ce91824SDavid Xu 		error = EINVAL;
1691759ccccaSDavid Xu 		AIO_UNLOCK(ki);
169255a122bfSDavid Xu 	}
16931ce91824SDavid Xu 	return (error);
16942244ea07SJohn Dyson }
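
/*
 * Illustrative userland sketch of reaping a request: poll aio_error() until
 * it stops returning EINPROGRESS, then call aio_return() exactly once to
 * collect the result and release the kernel resources.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	ssize_t
 *	reap_example(struct aiocb *cb)
 *	{
 *		int error;
 *
 *		while ((error = aio_error(cb)) == EINPROGRESS)
 *			;			// or do other work / sleep
 *		if (error != 0) {
 *			errno = error;
 *			return (-1);
 *		}
 *		return (aio_return(cb));	// frees the kernel aiocblist
 *	}
 */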
16952244ea07SJohn Dyson 
16962244ea07SJohn Dyson /*
1697bfbbc4aaSJason Evans  * Allow a process to wake up when any of the I/O requests are completed.
16982244ea07SJohn Dyson  */
16992244ea07SJohn Dyson int
1700b40ce416SJulian Elischer aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1701fd3bf775SJohn Dyson {
1702b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
17034a11ca4eSPoul-Henning Kamp 	struct timeval atv;
17042244ea07SJohn Dyson 	struct timespec ts;
17052244ea07SJohn Dyson 	struct aiocb *const *cbptr, *cbp;
17062244ea07SJohn Dyson 	struct kaioinfo *ki;
17071ce91824SDavid Xu 	struct aiocblist *cb, *cbfirst;
170811783b14SJohn Dyson 	struct aiocb **ujoblist;
17091ce91824SDavid Xu 	int njoblist;
17101ce91824SDavid Xu 	int error;
17111ce91824SDavid Xu 	int timo;
17121ce91824SDavid Xu 	int i;
17132244ea07SJohn Dyson 
1714ae3b195fSTim J. Robbins 	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
1715ac41f2efSAlfred Perlstein 		return (EINVAL);
17162244ea07SJohn Dyson 
17172244ea07SJohn Dyson 	timo = 0;
17182244ea07SJohn Dyson 	if (uap->timeout) {
1719bfbbc4aaSJason Evans 		/* Get timespec struct. */
1720bfbbc4aaSJason Evans 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1721ac41f2efSAlfred Perlstein 			return (error);
17222244ea07SJohn Dyson 
17232244ea07SJohn Dyson 		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
17242244ea07SJohn Dyson 			return (EINVAL);
17252244ea07SJohn Dyson 
1726e3b3ba2dSDag-Erling Smørgrav 		TIMESPEC_TO_TIMEVAL(&atv, &ts);
17272244ea07SJohn Dyson 		if (itimerfix(&atv))
17282244ea07SJohn Dyson 			return (EINVAL);
1729227ee8a1SPoul-Henning Kamp 		timo = tvtohz(&atv);
17302244ea07SJohn Dyson 	}
17312244ea07SJohn Dyson 
17322244ea07SJohn Dyson 	ki = p->p_aioinfo;
17332244ea07SJohn Dyson 	if (ki == NULL)
1734ac41f2efSAlfred Perlstein 		return (EAGAIN);
17352244ea07SJohn Dyson 
173684af4da6SJohn Dyson 	njoblist = 0;
1737a163d034SWarner Losh 	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
17382244ea07SJohn Dyson 	cbptr = uap->aiocbp;
17392244ea07SJohn Dyson 
17402244ea07SJohn Dyson 	for (i = 0; i < uap->nent; i++) {
1741a739e09cSAlan Cox 		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
174284af4da6SJohn Dyson 		if (cbp == 0)
174384af4da6SJohn Dyson 			continue;
174411783b14SJohn Dyson 		ujoblist[njoblist] = cbp;
174584af4da6SJohn Dyson 		njoblist++;
17462244ea07SJohn Dyson 	}
1747bfbbc4aaSJason Evans 
174811783b14SJohn Dyson 	if (njoblist == 0) {
1749c897b813SJeff Roberson 		uma_zfree(aiol_zone, ujoblist);
1750ac41f2efSAlfred Perlstein 		return (0);
175111783b14SJohn Dyson 	}
17522244ea07SJohn Dyson 
1753759ccccaSDavid Xu 	AIO_LOCK(ki);
17541ce91824SDavid Xu 	for (;;) {
17551ce91824SDavid Xu 		cbfirst = NULL;
17561ce91824SDavid Xu 		error = 0;
17571ce91824SDavid Xu 		TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
175884af4da6SJohn Dyson 			for (i = 0; i < njoblist; i++) {
17591ce91824SDavid Xu 				if (cb->uuaiocb == ujoblist[i]) {
17601ce91824SDavid Xu 					if (cbfirst == NULL)
17611ce91824SDavid Xu 						cbfirst = cb;
17621ce91824SDavid Xu 					if (cb->jobstate == JOBST_JOBFINISHED)
17631ce91824SDavid Xu 						goto RETURN;
176484af4da6SJohn Dyson 				}
176584af4da6SJohn Dyson 			}
176684af4da6SJohn Dyson 		}
17671ce91824SDavid Xu 		/* All tasks were finished. */
17681ce91824SDavid Xu 		if (cbfirst == NULL)
17691ce91824SDavid Xu 			break;
17702244ea07SJohn Dyson 
1771fd3bf775SJohn Dyson 		ki->kaio_flags |= KAIO_WAKEUP;
1772759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
17731ce91824SDavid Xu 		    "aiospn", timo);
17741ce91824SDavid Xu 		if (error == ERESTART)
17751ce91824SDavid Xu 			error = EINTR;
17761ce91824SDavid Xu 		if (error)
17771ce91824SDavid Xu 			break;
17782244ea07SJohn Dyson 	}
17791ce91824SDavid Xu RETURN:
1780759ccccaSDavid Xu 	AIO_UNLOCK(ki);
17811ce91824SDavid Xu 	uma_zfree(aiol_zone, ujoblist);
17821ce91824SDavid Xu 	return (error);
17832244ea07SJohn Dyson }
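
/*
 * Illustrative userland sketch of blocking until one of several outstanding
 * requests completes; the one-second timeout is a placeholder.
 *
 *	#include <aio.h>
 *	#include <time.h>
 *
 *	int
 *	suspend_example(struct aiocb *a, struct aiocb *b)
 *	{
 *		const struct aiocb *list[2] = { a, b };
 *		struct timespec ts = { 1, 0 };		// wait at most one second
 *
 *		return (aio_suspend(list, 2, &ts));	// 0, or -1 with EAGAIN/EINTR
 *	}
 */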
1784ee877a35SJohn Dyson 
1785ee877a35SJohn Dyson /*
1786dd85920aSJason Evans  * aio_cancel cancels any non-physio aio operations not currently in
1787dd85920aSJason Evans  * progress.
1788ee877a35SJohn Dyson  */
1789ee877a35SJohn Dyson int
1790b40ce416SJulian Elischer aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1791fd3bf775SJohn Dyson {
1792b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
1793dd85920aSJason Evans 	struct kaioinfo *ki;
1794dd85920aSJason Evans 	struct aiocblist *cbe, *cbn;
1795dd85920aSJason Evans 	struct file *fp;
1796dd85920aSJason Evans 	struct socket *so;
17971ce91824SDavid Xu 	int error;
17981aa4c324SDavid Xu 	int remove;
1799dd85920aSJason Evans 	int cancelled = 0;
1800dd85920aSJason Evans 	int notcancelled = 0;
1801dd85920aSJason Evans 	struct vnode *vp;
1802dd85920aSJason Evans 
18032a522eb9SJohn Baldwin 	/* Lookup file object. */
18041ce91824SDavid Xu 	error = fget(td, uap->fd, &fp);
18052a522eb9SJohn Baldwin 	if (error)
18062a522eb9SJohn Baldwin 		return (error);
1807dd85920aSJason Evans 
18081ce91824SDavid Xu 	ki = p->p_aioinfo;
18091ce91824SDavid Xu 	if (ki == NULL)
18101ce91824SDavid Xu 		goto done;
18111ce91824SDavid Xu 
1812dd85920aSJason Evans 	if (fp->f_type == DTYPE_VNODE) {
18133b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
1814dd85920aSJason Evans 		if (vn_isdisk(vp, &error)) {
18152a522eb9SJohn Baldwin 			fdrop(fp, td);
1816b40ce416SJulian Elischer 			td->td_retval[0] = AIO_NOTCANCELED;
1817ac41f2efSAlfred Perlstein 			return (0);
1818dd85920aSJason Evans 		}
1819dd85920aSJason Evans 	}
1820dd85920aSJason Evans 
1821759ccccaSDavid Xu 	AIO_LOCK(ki);
18222a522eb9SJohn Baldwin 	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
1823dd85920aSJason Evans 		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
1824dd85920aSJason Evans 		    ((uap->aiocbp == NULL) ||
1825dd85920aSJason Evans 		     (uap->aiocbp == cbe->uuaiocb))) {
18261aa4c324SDavid Xu 			remove = 0;
18271aa4c324SDavid Xu 
18281ce91824SDavid Xu 			mtx_lock(&aio_job_mtx);
1829dd85920aSJason Evans 			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
1830dd85920aSJason Evans 				TAILQ_REMOVE(&aio_jobs, cbe, list);
18311aa4c324SDavid Xu 				remove = 1;
18321aa4c324SDavid Xu 			} else if (cbe->jobstate == JOBST_JOBQSOCK) {
18331aa4c324SDavid Xu 				MPASS(fp->f_type == DTYPE_SOCKET);
18341aa4c324SDavid Xu 				so = fp->f_data;
18351aa4c324SDavid Xu 				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
18361aa4c324SDavid Xu 				remove = 1;
183799eee864SDavid Xu 			} else if (cbe->jobstate == JOBST_JOBQSYNC) {
183899eee864SDavid Xu 				TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
183999eee864SDavid Xu 				remove = 1;
18401aa4c324SDavid Xu 			}
18411ce91824SDavid Xu 			mtx_unlock(&aio_job_mtx);
18421aa4c324SDavid Xu 
18431aa4c324SDavid Xu 			if (remove) {
18441ce91824SDavid Xu 				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
1845dd85920aSJason Evans 				cbe->uaiocb._aiocb_private.status = -1;
1846dd85920aSJason Evans 				cbe->uaiocb._aiocb_private.error = ECANCELED;
18471ce91824SDavid Xu 				aio_bio_done_notify(p, cbe, DONE_QUEUE);
18481ce91824SDavid Xu 				cancelled++;
1849dd85920aSJason Evans 			} else {
1850dd85920aSJason Evans 				notcancelled++;
1851dd85920aSJason Evans 			}
18521aa4c324SDavid Xu 			if (uap->aiocbp != NULL)
18531aa4c324SDavid Xu 				break;
1854dd85920aSJason Evans 		}
1855dd85920aSJason Evans 	}
1856759ccccaSDavid Xu 	AIO_UNLOCK(ki);
18571ce91824SDavid Xu 
1858ad49abc0SAlan Cox done:
18592a522eb9SJohn Baldwin 	fdrop(fp, td);
18601aa4c324SDavid Xu 
18611aa4c324SDavid Xu 	if (uap->aiocbp != NULL) {
1862dd85920aSJason Evans 		if (cancelled) {
1863b40ce416SJulian Elischer 			td->td_retval[0] = AIO_CANCELED;
1864ac41f2efSAlfred Perlstein 			return (0);
1865dd85920aSJason Evans 		}
18661aa4c324SDavid Xu 	}
18671aa4c324SDavid Xu 
18681aa4c324SDavid Xu 	if (notcancelled) {
18691aa4c324SDavid Xu 		td->td_retval[0] = AIO_NOTCANCELED;
18701aa4c324SDavid Xu 		return (0);
18711aa4c324SDavid Xu 	}
18721aa4c324SDavid Xu 
18731aa4c324SDavid Xu 	if (cancelled) {
18741aa4c324SDavid Xu 		td->td_retval[0] = AIO_CANCELED;
18751aa4c324SDavid Xu 		return (0);
18761aa4c324SDavid Xu 	}
18771aa4c324SDavid Xu 
1878b40ce416SJulian Elischer 	td->td_retval[0] = AIO_ALLDONE;
1879dd85920aSJason Evans 
1880ac41f2efSAlfred Perlstein 	return (0);
1881ee877a35SJohn Dyson }
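
/*
 * Illustrative userland sketch of cancelling whatever is still queued on a
 * descriptor and interpreting the three possible results.
 *
 *	#include <aio.h>
 *	#include <stdio.h>
 *
 *	void
 *	cancel_example(int fd)
 *	{
 *		switch (aio_cancel(fd, NULL)) {		// NULL: all requests on fd
 *		case AIO_CANCELED:
 *			printf("all outstanding requests cancelled\n");
 *			break;
 *		case AIO_NOTCANCELED:
 *			printf("some requests already in progress\n");
 *			break;
 *		case AIO_ALLDONE:
 *			printf("nothing left to cancel\n");
 *			break;
 *		}
 *	}
 */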
1882ee877a35SJohn Dyson 
1883ee877a35SJohn Dyson /*
1884bfbbc4aaSJason Evans  * aio_error is implemented at the kernel level for compatibility purposes
1885bfbbc4aaSJason Evans  * only.  For a user-mode async implementation, it would be best done in a
1886bfbbc4aaSJason Evans  * userland subroutine.
1887ee877a35SJohn Dyson  */
1888ee877a35SJohn Dyson int
1889b40ce416SJulian Elischer aio_error(struct thread *td, struct aio_error_args *uap)
1890fd3bf775SJohn Dyson {
1891b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
18922244ea07SJohn Dyson 	struct aiocblist *cb;
18932244ea07SJohn Dyson 	struct kaioinfo *ki;
18941ce91824SDavid Xu 	int status;
1895ee877a35SJohn Dyson 
18962244ea07SJohn Dyson 	ki = p->p_aioinfo;
18971ce91824SDavid Xu 	if (ki == NULL) {
18981ce91824SDavid Xu 		td->td_retval[0] = EINVAL;
18991ce91824SDavid Xu 		return (0);
19001ce91824SDavid Xu 	}
1901ee877a35SJohn Dyson 
1902759ccccaSDavid Xu 	AIO_LOCK(ki);
19031ce91824SDavid Xu 	TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
19041ce91824SDavid Xu 		if (cb->uuaiocb == uap->aiocbp) {
19051ce91824SDavid Xu 			if (cb->jobstate == JOBST_JOBFINISHED)
19061ce91824SDavid Xu 				td->td_retval[0] =
19071ce91824SDavid Xu 					cb->uaiocb._aiocb_private.error;
19081ce91824SDavid Xu 			else
1909b40ce416SJulian Elischer 				td->td_retval[0] = EINPROGRESS;
1910759ccccaSDavid Xu 			AIO_UNLOCK(ki);
1911ac41f2efSAlfred Perlstein 			return (0);
19122244ea07SJohn Dyson 		}
19132244ea07SJohn Dyson 	}
1914759ccccaSDavid Xu 	AIO_UNLOCK(ki);
191584af4da6SJohn Dyson 
19162244ea07SJohn Dyson 	/*
1917a9bf5e37SDavid Xu 	 * Hack for failure of aio_aqueue.
19182244ea07SJohn Dyson 	 */
19192244ea07SJohn Dyson 	status = fuword(&uap->aiocbp->_aiocb_private.status);
19201ce91824SDavid Xu 	if (status == -1) {
19211ce91824SDavid Xu 		td->td_retval[0] = fuword(&uap->aiocbp->_aiocb_private.error);
19221ce91824SDavid Xu 		return (0);
19231ce91824SDavid Xu 	}
19241ce91824SDavid Xu 
19251ce91824SDavid Xu 	td->td_retval[0] = EINVAL;
19261ce91824SDavid Xu 	return (0);
1927ee877a35SJohn Dyson }
1928ee877a35SJohn Dyson 
1929eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */
1930ee877a35SJohn Dyson int
19310972628aSDavid Xu oaio_read(struct thread *td, struct oaio_read_args *uap)
19320972628aSDavid Xu {
19330972628aSDavid Xu 
1934dbbccfe9SDavid Xu 	return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 1);
19350972628aSDavid Xu }
19360972628aSDavid Xu 
19370972628aSDavid Xu int
1938b40ce416SJulian Elischer aio_read(struct thread *td, struct aio_read_args *uap)
1939fd3bf775SJohn Dyson {
194021d56e9cSAlfred Perlstein 
1941dbbccfe9SDavid Xu 	return aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, 0);
1942ee877a35SJohn Dyson }
1943ee877a35SJohn Dyson 
1944eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */
1945ee877a35SJohn Dyson int
19460972628aSDavid Xu oaio_write(struct thread *td, struct oaio_write_args *uap)
19470972628aSDavid Xu {
19480972628aSDavid Xu 
1949dbbccfe9SDavid Xu 	return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 1);
19500972628aSDavid Xu }
19510972628aSDavid Xu 
19520972628aSDavid Xu int
1953b40ce416SJulian Elischer aio_write(struct thread *td, struct aio_write_args *uap)
1954fd3bf775SJohn Dyson {
195521d56e9cSAlfred Perlstein 
1956dbbccfe9SDavid Xu 	return aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, 0);
19570972628aSDavid Xu }
19580972628aSDavid Xu 
19590972628aSDavid Xu /* syscall - list directed I/O (REALTIME) */
19600972628aSDavid Xu int
19610972628aSDavid Xu olio_listio(struct thread *td, struct olio_listio_args *uap)
19620972628aSDavid Xu {
19630972628aSDavid Xu 	return do_lio_listio(td, (struct lio_listio_args *)uap, 1);
1964ee877a35SJohn Dyson }
1965ee877a35SJohn Dyson 
196644a2c818STim J. Robbins /* syscall - list directed I/O (REALTIME) */
1967ee877a35SJohn Dyson int
1968b40ce416SJulian Elischer lio_listio(struct thread *td, struct lio_listio_args *uap)
1969fd3bf775SJohn Dyson {
19700972628aSDavid Xu 	return do_lio_listio(td, uap, 0);
19710972628aSDavid Xu }
19720972628aSDavid Xu 
19730972628aSDavid Xu static int
19740972628aSDavid Xu do_lio_listio(struct thread *td, struct lio_listio_args *uap, int oldsigev)
19750972628aSDavid Xu {
1976b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
19772244ea07SJohn Dyson 	struct aiocb *iocb, * const *cbptr;
19782244ea07SJohn Dyson 	struct kaioinfo *ki;
19791ce91824SDavid Xu 	struct aioliojob *lj;
198069cd28daSDoug Ambrisko 	struct kevent kev;
198169cd28daSDoug Ambrisko 	struct kqueue * kq;
198269cd28daSDoug Ambrisko 	struct file *kq_fp;
19831ce91824SDavid Xu 	int nent;
19841ce91824SDavid Xu 	int error;
1985fd3bf775SJohn Dyson 	int nerror;
1986ee877a35SJohn Dyson 	int i;
1987ee877a35SJohn Dyson 
1988bfbbc4aaSJason Evans 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
1989ac41f2efSAlfred Perlstein 		return (EINVAL);
19902244ea07SJohn Dyson 
19912244ea07SJohn Dyson 	nent = uap->nent;
1992ae3b195fSTim J. Robbins 	if (nent < 0 || nent > AIO_LISTIO_MAX)
1993ac41f2efSAlfred Perlstein 		return (EINVAL);
19942244ea07SJohn Dyson 
1995bfbbc4aaSJason Evans 	if (p->p_aioinfo == NULL)
19962244ea07SJohn Dyson 		aio_init_aioinfo(p);
19972244ea07SJohn Dyson 
19982244ea07SJohn Dyson 	ki = p->p_aioinfo;
19992244ea07SJohn Dyson 
2000a163d034SWarner Losh 	lj = uma_zalloc(aiolio_zone, M_WAITOK);
200184af4da6SJohn Dyson 	lj->lioj_flags = 0;
20021ce91824SDavid Xu 	lj->lioj_count = 0;
20031ce91824SDavid Xu 	lj->lioj_finished_count = 0;
2004759ccccaSDavid Xu 	knlist_init(&lj->klist, AIO_MTX(ki), NULL, NULL, NULL);
20054c0fb2cfSDavid Xu 	ksiginfo_init(&lj->lioj_ksi);
200669cd28daSDoug Ambrisko 
200784af4da6SJohn Dyson 	/*
2008bfbbc4aaSJason Evans 	 * Set up the signal.
200984af4da6SJohn Dyson 	 */
201084af4da6SJohn Dyson 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
20110972628aSDavid Xu 		bzero(&lj->lioj_signal, sizeof(lj->lioj_signal));
2012bfbbc4aaSJason Evans 		error = copyin(uap->sig, &lj->lioj_signal,
20130972628aSDavid Xu 				oldsigev ? sizeof(struct osigevent) :
20140972628aSDavid Xu 					   sizeof(struct sigevent));
20152f3cf918SAlfred Perlstein 		if (error) {
2016c897b813SJeff Roberson 			uma_zfree(aiolio_zone, lj);
2017ac41f2efSAlfred Perlstein 			return (error);
20182f3cf918SAlfred Perlstein 		}
201969cd28daSDoug Ambrisko 
202069cd28daSDoug Ambrisko 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
202169cd28daSDoug Ambrisko 			/* Assume only new style KEVENT */
20221ce91824SDavid Xu 			error = fget(td, lj->lioj_signal.sigev_notify_kqueue,
20231ce91824SDavid Xu 				&kq_fp);
20242a522eb9SJohn Baldwin 			if (error) {
20252a522eb9SJohn Baldwin 				uma_zfree(aiolio_zone, lj);
20262a522eb9SJohn Baldwin 				return (error);
20272a522eb9SJohn Baldwin 			}
20282a522eb9SJohn Baldwin 			if (kq_fp->f_type != DTYPE_KQUEUE) {
20292a522eb9SJohn Baldwin 				fdrop(kq_fp, td);
2030c897b813SJeff Roberson 				uma_zfree(aiolio_zone, lj);
203169cd28daSDoug Ambrisko 				return (EBADF);
20322f3cf918SAlfred Perlstein 			}
203369cd28daSDoug Ambrisko 			kq = (struct kqueue *)kq_fp->f_data;
203469cd28daSDoug Ambrisko 			kev.filter = EVFILT_LIO;
203569cd28daSDoug Ambrisko 			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
203669cd28daSDoug Ambrisko 			kev.ident = (uintptr_t)lj; /* something unique */
203769cd28daSDoug Ambrisko 			kev.data = (intptr_t)lj;
20381ce91824SDavid Xu 			/* pass user defined sigval data */
20391ce91824SDavid Xu 			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
204069cd28daSDoug Ambrisko 			error = kqueue_register(kq, &kev, td, 1);
20412a522eb9SJohn Baldwin 			fdrop(kq_fp, td);
204269cd28daSDoug Ambrisko 			if (error) {
204369cd28daSDoug Ambrisko 				uma_zfree(aiolio_zone, lj);
204469cd28daSDoug Ambrisko 				return (error);
204569cd28daSDoug Ambrisko 			}
20461ce91824SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
20471ce91824SDavid Xu 			;
204868d71118SDavid Xu 		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
204968d71118SDavid Xu 			   lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
205068d71118SDavid Xu 				if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
205169cd28daSDoug Ambrisko 					uma_zfree(aiolio_zone, lj);
205269cd28daSDoug Ambrisko 					return EINVAL;
205368d71118SDavid Xu 				}
205484af4da6SJohn Dyson 				lj->lioj_flags |= LIOJ_SIGNAL;
205568d71118SDavid Xu 		} else {
205668d71118SDavid Xu 			uma_zfree(aiolio_zone, lj);
205768d71118SDavid Xu 			return EINVAL;
20584d752b01SAlan Cox 		}
20591ce91824SDavid Xu 	}
206069cd28daSDoug Ambrisko 
2061759ccccaSDavid Xu 	AIO_LOCK(ki);
20622f3cf918SAlfred Perlstein 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
20632244ea07SJohn Dyson 	/*
20641ce91824SDavid Xu 	 * Add an extra aiocb count to keep the lio from being freed
20651ce91824SDavid Xu 	 * by other threads doing aio_waitcomplete or aio_return, and
20661ce91824SDavid Xu 	 * to prevent the event from being sent until we have queued
20671ce91824SDavid Xu 	 * all the tasks.
20681ce91824SDavid Xu 	 */
20691ce91824SDavid Xu 	lj->lioj_count = 1;
2070759ccccaSDavid Xu 	AIO_UNLOCK(ki);
20711ce91824SDavid Xu 
20721ce91824SDavid Xu 	/*
2073bfbbc4aaSJason Evans 	 * Get pointers to the list of I/O requests.
20742244ea07SJohn Dyson 	 */
2075fd3bf775SJohn Dyson 	nerror = 0;
20762244ea07SJohn Dyson 	cbptr = uap->acb_list;
20772244ea07SJohn Dyson 	for (i = 0; i < uap->nent; i++) {
2078a739e09cSAlan Cox 		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
20794a6a94d8SArchie Cobbs 		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
2080dbbccfe9SDavid Xu 			error = aio_aqueue(td, iocb, lj, LIO_NOP, oldsigev);
20811ce91824SDavid Xu 			if (error != 0)
2082fd3bf775SJohn Dyson 				nerror++;
2083fd3bf775SJohn Dyson 		}
2084fd3bf775SJohn Dyson 	}
20852244ea07SJohn Dyson 
20861ce91824SDavid Xu 	error = 0;
2087759ccccaSDavid Xu 	AIO_LOCK(ki);
20882244ea07SJohn Dyson 	if (uap->mode == LIO_WAIT) {
20891ce91824SDavid Xu 		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
2090fd3bf775SJohn Dyson 			ki->kaio_flags |= KAIO_WAKEUP;
2091759ccccaSDavid Xu 			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
20921ce91824SDavid Xu 			    PRIBIO | PCATCH, "aiospn", 0);
20931ce91824SDavid Xu 			if (error == ERESTART)
20941ce91824SDavid Xu 				error = EINTR;
20951ce91824SDavid Xu 			if (error)
20961ce91824SDavid Xu 				break;
20971ce91824SDavid Xu 		}
20981ce91824SDavid Xu 	} else {
20991ce91824SDavid Xu 		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
21001ce91824SDavid Xu 			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
21011ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
21021ce91824SDavid Xu 				KNOTE_LOCKED(&lj->klist, 1);
21031ce91824SDavid Xu 			}
21041ce91824SDavid Xu 			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
21051ce91824SDavid Xu 			    == LIOJ_SIGNAL
21061ce91824SDavid Xu 			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
21071ce91824SDavid Xu 			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
21081ce91824SDavid Xu 				aio_sendsig(p, &lj->lioj_signal,
21091ce91824SDavid Xu 					    &lj->lioj_ksi);
21101ce91824SDavid Xu 				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
21112244ea07SJohn Dyson 			}
21122244ea07SJohn Dyson 		}
21131ce91824SDavid Xu 	}
21141ce91824SDavid Xu 	lj->lioj_count--;
21151ce91824SDavid Xu 	if (lj->lioj_count == 0) {
21161ce91824SDavid Xu 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
21171ce91824SDavid Xu 		knlist_delete(&lj->klist, curthread, 1);
2118759ccccaSDavid Xu 		PROC_LOCK(p);
21191ce91824SDavid Xu 		sigqueue_take(&lj->lioj_ksi);
21201ce91824SDavid Xu 		PROC_UNLOCK(p);
2121759ccccaSDavid Xu 		AIO_UNLOCK(ki);
21221ce91824SDavid Xu 		uma_zfree(aiolio_zone, lj);
21231ce91824SDavid Xu 	} else
2124759ccccaSDavid Xu 		AIO_UNLOCK(ki);
21252244ea07SJohn Dyson 
21261ce91824SDavid Xu 	if (nerror)
21271ce91824SDavid Xu 		return (EIO);
21281ce91824SDavid Xu 	return (error);
2129ee877a35SJohn Dyson }
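
/*
 * Illustrative userland sketch of submitting two requests as one list and
 * waiting for both; the aiocbs (descriptors, buffers, offsets) are assumed
 * to have been filled in by the caller.
 *
 *	#include <aio.h>
 *	#include <stddef.h>
 *
 *	int
 *	listio_example(struct aiocb *rd, struct aiocb *wr)
 *	{
 *		struct aiocb *list[2];
 *
 *		rd->aio_lio_opcode = LIO_READ;
 *		wr->aio_lio_opcode = LIO_WRITE;
 *		list[0] = rd;
 *		list[1] = wr;
 *		return (lio_listio(LIO_WAIT, list, 2, NULL));	// block until done
 *	}
 */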
2130fd3bf775SJohn Dyson 
213184af4da6SJohn Dyson /*
21321ce91824SDavid Xu  * Called from the interrupt thread for physio; we should return as fast
21331ce91824SDavid Xu  * as possible, so we schedule a biohelper task.
213484af4da6SJohn Dyson  */
2135fd3bf775SJohn Dyson static void
2136bfbbc4aaSJason Evans aio_physwakeup(struct buf *bp)
2137fd3bf775SJohn Dyson {
213884af4da6SJohn Dyson 	struct aiocblist *aiocbe;
2139fd3bf775SJohn Dyson 
21408edbaf85SHidetoshi Shimokawa 	aiocbe = (struct aiocblist *)bp->b_caller1;
21411ce91824SDavid Xu 	taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask);
21421ce91824SDavid Xu }
214384af4da6SJohn Dyson 
21441ce91824SDavid Xu /*
21451ce91824SDavid Xu  * Task routine to perform the heavier work: process wakeups and signals.
21461ce91824SDavid Xu  */
21471ce91824SDavid Xu static void
21481ce91824SDavid Xu biohelper(void *context, int pending)
21491ce91824SDavid Xu {
21501ce91824SDavid Xu 	struct aiocblist *aiocbe = context;
21511ce91824SDavid Xu 	struct buf *bp;
21521ce91824SDavid Xu 	struct proc *userp;
215327b8220dSDavid Xu 	struct kaioinfo *ki;
21541ce91824SDavid Xu 	int nblks;
21551ce91824SDavid Xu 
21561ce91824SDavid Xu 	bp = aiocbe->bp;
21571ce91824SDavid Xu 	userp = aiocbe->userproc;
215827b8220dSDavid Xu 	ki = userp->p_aioinfo;
2159759ccccaSDavid Xu 	AIO_LOCK(ki);
216084af4da6SJohn Dyson 	aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
216184af4da6SJohn Dyson 	aiocbe->uaiocb._aiocb_private.error = 0;
2162c244d2deSPoul-Henning Kamp 	if (bp->b_ioflags & BIO_ERROR)
216384af4da6SJohn Dyson 		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
21641ce91824SDavid Xu 	nblks = btodb(aiocbe->uaiocb.aio_nbytes);
21651ce91824SDavid Xu 	if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE)
21661ce91824SDavid Xu 		aiocbe->outputcharge += nblks;
21671ce91824SDavid Xu 	else
21681ce91824SDavid Xu 		aiocbe->inputcharge += nblks;
21691ce91824SDavid Xu 	aiocbe->bp = NULL;
21701ce91824SDavid Xu 	TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist);
217127b8220dSDavid Xu 	ki->kaio_buffer_count--;
217269cd28daSDoug Ambrisko 	aio_bio_done_notify(userp, aiocbe, DONE_BUF);
2173759ccccaSDavid Xu 	AIO_UNLOCK(ki);
21741ce91824SDavid Xu 
21751ce91824SDavid Xu 	/* Release mapping into kernel space. */
21761ce91824SDavid Xu 	vunmapbuf(bp);
21771ce91824SDavid Xu 	relpbuf(bp, NULL);
21781ce91824SDavid Xu 	atomic_subtract_int(&num_buf_aio, 1);
217984af4da6SJohn Dyson }
2180bfbbc4aaSJason Evans 
2181eb8e6d52SEivind Eklund /* syscall - wait for the next completion of an aio request */
2182bfbbc4aaSJason Evans int
2183b40ce416SJulian Elischer aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2184bfbbc4aaSJason Evans {
2185b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
2186bfbbc4aaSJason Evans 	struct timeval atv;
2187bfbbc4aaSJason Evans 	struct timespec ts;
2188bfbbc4aaSJason Evans 	struct kaioinfo *ki;
21891ce91824SDavid Xu 	struct aiocblist *cb;
21901ce91824SDavid Xu 	struct aiocb *uuaiocb;
21911ce91824SDavid Xu 	int error, status, timo;
2192bfbbc4aaSJason Evans 
21931ce91824SDavid Xu 	suword(uap->aiocbp, (long)NULL);
2194dd85920aSJason Evans 
2195bfbbc4aaSJason Evans 	timo = 0;
2196bfbbc4aaSJason Evans 	if (uap->timeout) {
2197bfbbc4aaSJason Evans 		/* Get timespec struct. */
219888ed460eSAlan Cox 		error = copyin(uap->timeout, &ts, sizeof(ts));
2199bfbbc4aaSJason Evans 		if (error)
2200ac41f2efSAlfred Perlstein 			return (error);
2201bfbbc4aaSJason Evans 
2202bfbbc4aaSJason Evans 		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
2203bfbbc4aaSJason Evans 			return (EINVAL);
2204bfbbc4aaSJason Evans 
2205bfbbc4aaSJason Evans 		TIMESPEC_TO_TIMEVAL(&atv, &ts);
2206bfbbc4aaSJason Evans 		if (itimerfix(&atv))
2207bfbbc4aaSJason Evans 			return (EINVAL);
2208bfbbc4aaSJason Evans 		timo = tvtohz(&atv);
2209bfbbc4aaSJason Evans 	}
2210bfbbc4aaSJason Evans 
22118213baf0SChristian S.J. Peron 	if (p->p_aioinfo == NULL)
2212323fe565SDavid Xu 		aio_init_aioinfo(p);
22138213baf0SChristian S.J. Peron 	ki = p->p_aioinfo;
2214bfbbc4aaSJason Evans 
22151ce91824SDavid Xu 	error = 0;
22161ce91824SDavid Xu 	cb = NULL;
2217759ccccaSDavid Xu 	AIO_LOCK(ki);
22181ce91824SDavid Xu 	while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
22191ce91824SDavid Xu 		ki->kaio_flags |= KAIO_WAKEUP;
2220759ccccaSDavid Xu 		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
22211ce91824SDavid Xu 		    "aiowc", timo);
222227b8220dSDavid Xu 		if (timo && error == ERESTART)
22231ce91824SDavid Xu 			error = EINTR;
22241ce91824SDavid Xu 		if (error)
22251ce91824SDavid Xu 			break;
22261ce91824SDavid Xu 	}
22271ce91824SDavid Xu 
22281ce91824SDavid Xu 	if (cb != NULL) {
22291ce91824SDavid Xu 		MPASS(cb->jobstate == JOBST_JOBFINISHED);
22301ce91824SDavid Xu 		uuaiocb = cb->uuaiocb;
22311ce91824SDavid Xu 		status = cb->uaiocb._aiocb_private.status;
22321ce91824SDavid Xu 		error = cb->uaiocb._aiocb_private.error;
22331ce91824SDavid Xu 		td->td_retval[0] = status;
2234bfbbc4aaSJason Evans 		if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
22351ce91824SDavid Xu 			p->p_stats->p_ru.ru_oublock += cb->outputcharge;
2236bfbbc4aaSJason Evans 			cb->outputcharge = 0;
2237bfbbc4aaSJason Evans 		} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
2238516d2564SAlan Cox 			p->p_stats->p_ru.ru_inblock += cb->inputcharge;
2239bfbbc4aaSJason Evans 			cb->inputcharge = 0;
2240bfbbc4aaSJason Evans 		}
2241bfbbc4aaSJason Evans 		aio_free_entry(cb);
2242759ccccaSDavid Xu 		AIO_UNLOCK(ki);
22431ce91824SDavid Xu 		suword(uap->aiocbp, (long)uuaiocb);
22441ce91824SDavid Xu 		suword(&uuaiocb->_aiocb_private.error, error);
22451ce91824SDavid Xu 		suword(&uuaiocb->_aiocb_private.status, status);
22461ce91824SDavid Xu 	} else
2247759ccccaSDavid Xu 		AIO_UNLOCK(ki);
2248bfbbc4aaSJason Evans 
2249ac41f2efSAlfred Perlstein 	return (error);
2250bfbbc4aaSJason Evans }
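
/*
 * Illustrative userland sketch of the FreeBSD-specific completion loop:
 * aio_waitcomplete() blocks for the next finished request, returns its
 * aio_return() value, and reports which aiocb it was; the loop runs until
 * the call fails (for example, when interrupted by a signal).
 *
 *	#include <aio.h>
 *	#include <stdio.h>
 *
 *	void
 *	drain_example(void)
 *	{
 *		struct aiocb *done;
 *		ssize_t n;
 *
 *		while ((n = aio_waitcomplete(&done, NULL)) != -1)
 *			printf("aiocb %p finished, %zd bytes\n",
 *			    (void *)done, n);
 *	}
 */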
2251cb679c38SJonathan Lemon 
225299eee864SDavid Xu int
225399eee864SDavid Xu aio_fsync(struct thread *td, struct aio_fsync_args *uap)
225499eee864SDavid Xu {
225599eee864SDavid Xu 	struct proc *p = td->td_proc;
225699eee864SDavid Xu 	struct kaioinfo *ki;
225799eee864SDavid Xu 
225899eee864SDavid Xu 	if (uap->op != O_SYNC) /* XXX lack of O_DSYNC */
225999eee864SDavid Xu 		return (EINVAL);
226099eee864SDavid Xu 	ki = p->p_aioinfo;
226199eee864SDavid Xu 	if (ki == NULL)
226299eee864SDavid Xu 		aio_init_aioinfo(p);
2263dbbccfe9SDavid Xu 	return aio_aqueue(td, uap->aiocbp, NULL, LIO_SYNC, 0);
226499eee864SDavid Xu }
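
/*
 * Illustrative userland sketch of queueing an asynchronous fsync that is
 * ordered after the writes already queued on the same descriptor; only
 * O_SYNC is accepted here (no O_DSYNC).
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	static struct aiocb scb;
 *
 *	int
 *	fsync_example(int fd)
 *	{
 *		memset(&scb, 0, sizeof(scb));
 *		scb.aio_fildes = fd;
 *		scb.aio_sigevent.sigev_notify = SIGEV_NONE;
 *		return (aio_fsync(O_SYNC, &scb));
 *	}
 */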
226599eee864SDavid Xu 
2266eb8e6d52SEivind Eklund /* kqueue attach function */
2267cb679c38SJonathan Lemon static int
2268cb679c38SJonathan Lemon filt_aioattach(struct knote *kn)
2269cb679c38SJonathan Lemon {
2270b46f1c55SAlan Cox 	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2271cb679c38SJonathan Lemon 
2272cb679c38SJonathan Lemon 	/*
2273cb679c38SJonathan Lemon 	 * The aiocbe pointer must be validated before using it, so
2274cb679c38SJonathan Lemon 	 * registration is restricted to the kernel; the user cannot
2275cb679c38SJonathan Lemon 	 * set EV_FLAG1.
2276cb679c38SJonathan Lemon 	 */
2277cb679c38SJonathan Lemon 	if ((kn->kn_flags & EV_FLAG1) == 0)
2278cb679c38SJonathan Lemon 		return (EPERM);
2279cb679c38SJonathan Lemon 	kn->kn_flags &= ~EV_FLAG1;
2280cb679c38SJonathan Lemon 
2281ad3b9257SJohn-Mark Gurney 	knlist_add(&aiocbe->klist, kn, 0);
2282cb679c38SJonathan Lemon 
2283cb679c38SJonathan Lemon 	return (0);
2284cb679c38SJonathan Lemon }
2285cb679c38SJonathan Lemon 
2286eb8e6d52SEivind Eklund /* kqueue detach function */
2287cb679c38SJonathan Lemon static void
2288cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn)
2289cb679c38SJonathan Lemon {
2290b46f1c55SAlan Cox 	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2291cb679c38SJonathan Lemon 
229269cd28daSDoug Ambrisko 	if (!knlist_empty(&aiocbe->klist))
2293ad3b9257SJohn-Mark Gurney 		knlist_remove(&aiocbe->klist, kn, 0);
2294cb679c38SJonathan Lemon }
2295cb679c38SJonathan Lemon 
2296eb8e6d52SEivind Eklund /* kqueue filter function */
2297cb679c38SJonathan Lemon /*ARGSUSED*/
2298cb679c38SJonathan Lemon static int
2299cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint)
2300cb679c38SJonathan Lemon {
2301b46f1c55SAlan Cox 	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2302cb679c38SJonathan Lemon 
230391369fc7SAlan Cox 	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
23041ce91824SDavid Xu 	if (aiocbe->jobstate != JOBST_JOBFINISHED)
2305cb679c38SJonathan Lemon 		return (0);
2306cb679c38SJonathan Lemon 	kn->kn_flags |= EV_EOF;
2307cb679c38SJonathan Lemon 	return (1);
2308cb679c38SJonathan Lemon }
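
/*
 * Illustrative userland sketch of completion notification through kqueue:
 * the aiocb requests SIGEV_KEVENT and names the kqueue, and the completion
 * is read back later with kevent().  The kqueue descriptor and the aiocb's
 * buffer setup are assumed to be provided by the caller.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <aio.h>
 *	#include <signal.h>
 *
 *	int
 *	kevent_aio_example(int kq, struct aiocb *cb)
 *	{
 *		struct kevent ev;
 *
 *		cb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *		cb->aio_sigevent.sigev_notify_kqueue = kq;
 *		cb->aio_sigevent.sigev_value.sival_ptr = cb;	// returned as udata
 *		if (aio_read(cb) == -1)
 *			return (-1);
 *		return (kevent(kq, NULL, 0, &ev, 1, NULL));	// EVFILT_AIO event
 *	}
 */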
230969cd28daSDoug Ambrisko 
231069cd28daSDoug Ambrisko /* kqueue attach function */
231169cd28daSDoug Ambrisko static int
231269cd28daSDoug Ambrisko filt_lioattach(struct knote *kn)
231369cd28daSDoug Ambrisko {
23141ce91824SDavid Xu 	struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata;
231569cd28daSDoug Ambrisko 
231669cd28daSDoug Ambrisko 	/*
23171ce91824SDavid Xu 	 * The aioliojob pointer must be validated before using it, so
231869cd28daSDoug Ambrisko 	 * registration is restricted to the kernel; the user cannot
231969cd28daSDoug Ambrisko 	 * set EV_FLAG1.
232069cd28daSDoug Ambrisko 	 */
232169cd28daSDoug Ambrisko 	if ((kn->kn_flags & EV_FLAG1) == 0)
232269cd28daSDoug Ambrisko 		return (EPERM);
232369cd28daSDoug Ambrisko 	kn->kn_flags &= ~EV_FLAG1;
232469cd28daSDoug Ambrisko 
232569cd28daSDoug Ambrisko 	knlist_add(&lj->klist, kn, 0);
232669cd28daSDoug Ambrisko 
232769cd28daSDoug Ambrisko 	return (0);
232869cd28daSDoug Ambrisko }
232969cd28daSDoug Ambrisko 
233069cd28daSDoug Ambrisko /* kqueue detach function */
233169cd28daSDoug Ambrisko static void
233269cd28daSDoug Ambrisko filt_liodetach(struct knote *kn)
233369cd28daSDoug Ambrisko {
23341ce91824SDavid Xu 	struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata;
233569cd28daSDoug Ambrisko 
233669cd28daSDoug Ambrisko 	if (!knlist_empty(&lj->klist))
233769cd28daSDoug Ambrisko 		knlist_remove(&lj->klist, kn, 0);
233869cd28daSDoug Ambrisko }
233969cd28daSDoug Ambrisko 
234069cd28daSDoug Ambrisko /* kqueue filter function */
234169cd28daSDoug Ambrisko /*ARGSUSED*/
234269cd28daSDoug Ambrisko static int
234369cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint)
234469cd28daSDoug Ambrisko {
23451ce91824SDavid Xu 	struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata;
23461ce91824SDavid Xu 
234769cd28daSDoug Ambrisko 	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
234869cd28daSDoug Ambrisko }
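
/*
 * Illustrative userland sketch of list completion notification: a
 * LIO_NOWAIT lio_listio() posts a single EVFILT_LIO event to the named
 * kqueue once every request in the list has finished.  The kqueue
 * descriptor and the populated aiocb list are assumed to exist.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <aio.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	int
 *	kevent_lio_example(int kq, struct aiocb *list[], int nent)
 *	{
 *		struct sigevent sig;
 *		struct kevent ev;
 *
 *		memset(&sig, 0, sizeof(sig));
 *		sig.sigev_notify = SIGEV_KEVENT;
 *		sig.sigev_notify_kqueue = kq;
 *		sig.sigev_value.sival_ptr = list;	// returned as udata
 *		if (lio_listio(LIO_NOWAIT, list, nent, &sig) == -1)
 *			return (-1);
 *		return (kevent(kq, NULL, 0, &ev, 1, NULL));
 *	}
 */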
2349