xref: /freebsd/sys/sys/aio.h (revision 82397d791966b09d344251bc709cd9db2b3a1902)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD$
 */

#ifndef _SYS_AIO_H_
#define	_SYS_AIO_H_

#include <sys/types.h>
#include <sys/signal.h>
#ifdef _KERNEL
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#endif

/*
 * Returned by aio_cancel:
 */
#define	AIO_CANCELED		0x1
#define	AIO_NOTCANCELED		0x2
#define	AIO_ALLDONE		0x3

/*
 * LIO opcodes
 */
#define	LIO_NOP			0x0
#define	LIO_WRITE		0x1
#define	LIO_READ		0x2
#ifdef _KERNEL
#define	LIO_SYNC		0x3
#define	LIO_MLOCK		0x4
#define	LIO_WRITEV		0x5
#define	LIO_READV		0x6
#endif

/*
 * LIO modes
 */
#define	LIO_NOWAIT		0x0
#define	LIO_WAIT		0x1

/*
 * Maximum number of operations in a single lio_listio call
 */
#define	AIO_LISTIO_MAX		16

#ifdef _KERNEL

/* Default values of tunables for the AIO worker pool. */

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

#endif

/*
 * Private members for aiocb -- don't access
 * directly.
 */
struct __aiocb_private {
	long	status;
	long	error;
	void	*kernelinfo;
};

/*
 * I/O control block
 */
typedef struct aiocb {
	int	aio_fildes;		/* File descriptor */
	off_t	aio_offset;		/* File offset for I/O */
	volatile void *aio_buf;		/* I/O buffer in process space */
	size_t	aio_nbytes;		/* Number of bytes for I/O */
	int	__spare__[2];
	void	*__spare2__;
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private	_aiocb_private;
	struct	sigevent aio_sigevent;	/* Signal to deliver */
} aiocb_t;

#define	aio_iov	aio_buf			/* I/O scatter/gather list */
#define	aio_iovcnt	aio_nbytes	/* Length of aio_iov */

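/*
 * Illustrative sketch (not part of this interface): filling in an aiocb
 * before submitting it with aio_read().  The descriptor "fd" and the
 * buffer are hypothetical; <string.h> and <err.h> are assumed for
 * memset() and err().
 *
 *	struct aiocb cb;
 *	char buf[4096];
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_offset = 0;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_sigevent.sigev_notify = SIGEV_NONE;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *
 * Completion can then be observed with aio_error()/aio_return() or
 * aio_suspend(), declared below.
 */
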
#ifdef _KERNEL

typedef void aio_cancel_fn_t(struct kaiocb *);
typedef void aio_handle_fn_t(struct kaiocb *);

/*
 * Kernel version of an I/O control block.
 *
 * Locking key:
 * * - need not be protected
 * a - locked by kaioinfo lock
 * b - locked by backend lock
 * c - locked by aio_job_mtx
 */
struct kaiocb {
	TAILQ_ENTRY(kaiocb) list;	/* (b) backend-specific list of jobs */
	TAILQ_ENTRY(kaiocb) plist;	/* (a) lists of pending / done jobs */
	TAILQ_ENTRY(kaiocb) allist;	/* (a) list of all jobs in proc */
	int	jobflags;		/* (a) job flags */
	int	inblock;		/* (*) input blocks */
	int	outblock;		/* (*) output blocks */
	int	msgsnd;			/* (*) messages sent */
	int	msgrcv;			/* (*) messages received */
	struct	proc *userproc;		/* (*) user process */
	struct	ucred *cred;		/* (*) active credential when created */
	struct	file *fd_file;		/* (*) pointer to file structure */
	struct	aioliojob *lio;		/* (*) optional lio job */
	struct	aiocb *ujob;		/* (*) pointer in userspace of aiocb */
	struct	knlist klist;		/* (a) list of knotes */
	struct	aiocb uaiocb;		/* (*) copy of user I/O control block */
	struct	uio uio;		/* (*) storage for non-vectored uio */
	struct	iovec iov[1];		/* (*) storage for non-vectored uio */
	struct	uio *uiop;		/* (*) Possibly malloced uio */
	ksiginfo_t ksi;			/* (a) realtime signal info */
	uint64_t seqno;			/* (*) job number */
	aio_cancel_fn_t *cancel_fn;	/* (a) backend cancel function */
	aio_handle_fn_t *handle_fn;	/* (c) backend handle function */
	union {				/* Backend-specific data fields */
		struct {		/* BIO backend */
			int	nbio;	/* Number of remaining bios */
			int	error;	/* Worst error of all bios */
			long	nbytes;	/* Bytes completed so far */
		};
		struct {		/* fsync() requests */
			int	pending; /* (a) number of pending I/O */
		};
		struct {		/* socket backend */
			void	*backend1;
			long	backend3;
			int	backend4;
		};
	};
};

struct socket;
struct sockbuf;

/*
 * AIO backends should permit cancellation of queued requests waiting to
 * be serviced by installing a cancel routine while the request is
 * queued.  The cancellation routine should dequeue the request if
 * necessary and cancel it.  Care must be taken to handle races between
 * queueing and dequeueing requests and cancellation.
 *
 * When queueing a request somewhere such that it can be cancelled, the
 * caller should:
 *
 *  1) Acquire the lock that protects the associated queue.
 *  2) Call aio_set_cancel_function() to install the cancel routine.
 *  3) If that fails, the request has a pending cancel and should be
 *     cancelled via aio_cancel().
 *  4) Queue the request.
 *
 * When dequeueing a request to service it or hand it off to somewhere else,
 * the caller should:
 *
 *  1) Acquire the lock that protects the associated queue.
 *  2) Dequeue the request.
 *  3) Call aio_clear_cancel_function() to clear the cancel routine.
 *  4) If that fails, the cancel routine is about to be called.  The
 *     caller should ignore the request.
 *
 * The cancel routine should:
 *
 *  1) Acquire the lock that protects the associated queue.
 *  2) Call aio_cancel_cleared() to determine whether the request has
 *     already been dequeued due to a race with the dequeueing thread.
 *  3) If it has not, dequeue the request.
 *  4) Cancel the request via aio_cancel().
 *
 * An illustrative sketch of a backend following this protocol appears
 * after the function declarations below.
 */

bool	aio_cancel_cleared(struct kaiocb *job);
void	aio_cancel(struct kaiocb *job);
bool	aio_clear_cancel_function(struct kaiocb *job);
void	aio_complete(struct kaiocb *job, long status, int error);
void	aio_schedule(struct kaiocb *job, aio_handle_fn_t *func);
bool	aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func);
void	aio_switch_vmspace(struct kaiocb *job);
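
/*
 * Illustrative sketch (not part of this interface): a hypothetical
 * backend using the protocol described above.  The names foo_mtx,
 * foo_jobs and foo_cancel are invented for this example; initialization
 * of the mutex is omitted.
 *
 *	static struct mtx foo_mtx;
 *	static TAILQ_HEAD(, kaiocb) foo_jobs =
 *	    TAILQ_HEAD_INITIALIZER(foo_jobs);
 *
 *	static void
 *	foo_cancel(struct kaiocb *job)
 *	{
 *		mtx_lock(&foo_mtx);
 *		if (!aio_cancel_cleared(job))
 *			TAILQ_REMOVE(&foo_jobs, job, list);
 *		mtx_unlock(&foo_mtx);
 *		aio_cancel(job);
 *	}
 *
 * Queueing a request so that it can later be cancelled:
 *
 *	mtx_lock(&foo_mtx);
 *	if (!aio_set_cancel_function(job, foo_cancel)) {
 *		mtx_unlock(&foo_mtx);
 *		aio_cancel(job);
 *		return;
 *	}
 *	TAILQ_INSERT_TAIL(&foo_jobs, job, list);
 *	mtx_unlock(&foo_mtx);
 *
 * Dequeueing a request to service it:
 *
 *	mtx_lock(&foo_mtx);
 *	job = TAILQ_FIRST(&foo_jobs);
 *	if (job != NULL) {
 *		TAILQ_REMOVE(&foo_jobs, job, list);
 *		if (!aio_clear_cancel_function(job))
 *			job = NULL;
 *	}
 *	mtx_unlock(&foo_mtx);
 *
 * A job obtained this way is serviced and finished with aio_complete()
 * (or handed to a worker via aio_schedule()); a NULL result means the
 * cancel routine owns the request and it must be ignored.
 */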

#else /* !_KERNEL */

struct timespec;

__BEGIN_DECLS
/*
 * Asynchronously read from a file
 */
int	aio_read(struct aiocb *);
#if __BSD_VISIBLE
int	aio_readv(struct aiocb *);
#endif

/*
 * Asynchronously write to a file
 */
int	aio_write(struct aiocb *);
#if __BSD_VISIBLE
int	aio_writev(struct aiocb *);
#endif

/*
 * List I/O: asynchronously or synchronously read from / write to files.
 *	The first argument selects the mode (LIO_WAIT or LIO_NOWAIT).
 *	The second argument is an array of pointers to I/O control blocks
 *	and the third is the number of entries (at most AIO_LISTIO_MAX).
 *	When all I/Os are complete, the optional signal described by the
 *	final sigevent argument is delivered.
 */
int	lio_listio(int, struct aiocb *__restrict const *__restrict, int,
    struct sigevent *);
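
/*
 * Illustrative sketch (not part of this interface): issuing two reads as
 * a single synchronous batch.  "fd", "buf0" and "buf1" are hypothetical.
 *
 *	struct aiocb cbs[2], *list[2];
 *
 *	memset(cbs, 0, sizeof(cbs));
 *	cbs[0].aio_fildes = fd;
 *	cbs[0].aio_buf = buf0;
 *	cbs[0].aio_nbytes = sizeof(buf0);
 *	cbs[0].aio_offset = 0;
 *	cbs[0].aio_lio_opcode = LIO_READ;
 *	cbs[1] = cbs[0];
 *	cbs[1].aio_buf = buf1;
 *	cbs[1].aio_nbytes = sizeof(buf1);
 *	cbs[1].aio_offset = sizeof(buf0);
 *	list[0] = &cbs[0];
 *	list[1] = &cbs[1];
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *		err(1, "lio_listio");
 *
 * With LIO_WAIT the call returns only once every listed request has
 * finished; each control block's result can then be retrieved with
 * aio_error() and aio_return().
 */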

/*
 * Get completion status
 *	Returns EINPROGRESS until the I/O is complete.
 *	This routine does not block.
 */
int	aio_error(const struct aiocb *);

/*
 * Finish up I/O, releasing I/O resources, and return the value that
 *	would have been associated with a synchronous I/O request.
 *	This routine must be called exactly once for each I/O control
 *	block that has had I/O associated with it.
 */
ssize_t	aio_return(struct aiocb *);
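
/*
 * Illustrative sketch: polling a request submitted with aio_read() (as in
 * the earlier sketch) until it completes, then collecting the result.
 * EINPROGRESS requires <errno.h>.
 *
 *	ssize_t n;
 *
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;
 *	if ((n = aio_return(&cb)) == -1)
 *		err(1, "aio_return");
 *
 * Busy-waiting is only for brevity; aio_suspend() (below) or an
 * aio_sigevent notification avoids it.
 */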

/*
 * Cancel I/O
 */
int	aio_cancel(int, struct aiocb *);
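
/*
 * Illustrative sketch: attempting to cancel the request from the earlier
 * sketch.  If it could not be cancelled (AIO_NOTCANCELED) the caller
 * waits for it to finish; the control block is then reaped exactly once
 * with aio_return().
 *
 *	int r;
 *
 *	if ((r = aio_cancel(cb.aio_fildes, &cb)) == -1)
 *		err(1, "aio_cancel");
 *	if (r == AIO_NOTCANCELED)
 *		while (aio_error(&cb) == EINPROGRESS)
 *			;
 *	(void)aio_return(&cb);
 */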

/*
 * Suspend the calling thread until at least one of the specified I/O
 * requests completes, a signal interrupts the call, or the timeout
 * expires.
 */
int	aio_suspend(const struct aiocb * const[], int, const struct timespec *);
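
/*
 * Illustrative sketch: waiting up to one second for the request from the
 * earlier sketch instead of busy-polling aio_error().  errno requires
 * <errno.h>; warnx() requires <err.h>.
 *
 *	const struct aiocb *wait_list[1] = { &cb };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (aio_suspend(wait_list, 1, &ts) == -1 && errno == EAGAIN)
 *		warnx("request still pending after 1 second");
 */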

/*
 * Asynchronous mlock
 */
int	aio_mlock(struct aiocb *);

#if __BSD_VISIBLE
ssize_t	aio_waitcomplete(struct aiocb **, struct timespec *);
#endif

int	aio_fsync(int op, struct aiocb *aiocbp);
__END_DECLS

#endif /* !_KERNEL */

#endif /* !_SYS_AIO_H_ */