/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD$
 */

#ifndef _SYS_AIO_H_
#define	_SYS_AIO_H_

#include <sys/types.h>
#include <sys/signal.h>
#ifdef _KERNEL
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#endif

/*
 * Returned by aio_cancel:
 */
#define	AIO_CANCELED		0x1
#define	AIO_NOTCANCELED		0x2
#define	AIO_ALLDONE		0x3

/*
 * LIO opcodes
 */
#define	LIO_NOP			0x0
#define	LIO_WRITE		0x1
#define	LIO_READ		0x2
#if __BSD_VISIBLE
#define	LIO_VECTORED		0x4
#define	LIO_WRITEV		(LIO_WRITE | LIO_VECTORED)
#define	LIO_READV		(LIO_READ | LIO_VECTORED)
#endif
#if defined(_KERNEL) || defined(_WANT_ALL_LIO_OPCODES)
#define	LIO_SYNC		0x8
#define	LIO_DSYNC		(0x10 | LIO_SYNC)
#define	LIO_MLOCK		0x20
#endif

/*
 * LIO modes
 */
#define	LIO_NOWAIT		0x0
#define	LIO_WAIT		0x1

/*
 * Maximum number of operations in a single lio_listio call
 */
#define	AIO_LISTIO_MAX		16

#ifdef	_KERNEL

/* Default values of tunables for the AIO worker pool. */

#ifndef	MAX_AIO_PROCS
#define	MAX_AIO_PROCS		32
#endif

#ifndef	TARGET_AIO_PROCS
#define	TARGET_AIO_PROCS	4
#endif

#ifndef	AIOD_LIFETIME_DEFAULT
#define	AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

#endif

/*
 * Private members for aiocb -- don't access
 * directly.
 */
struct __aiocb_private {
	long	status;
	long	error;
	void	*kernelinfo;
};

/*
 * I/O control block
 */
typedef struct aiocb {
	int	aio_fildes;		/* File descriptor */
	off_t	aio_offset;		/* File offset for I/O */
	volatile void *aio_buf;		/* I/O buffer in process space */
	size_t	aio_nbytes;		/* Number of bytes for I/O */
	int	__spare__[2];
	void	*__spare2__;
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private	_aiocb_private;
	struct	sigevent aio_sigevent;	/* Signal to deliver */
} aiocb_t;

#define	aio_iov		aio_buf		/* I/O scatter/gather list */
#define	aio_iovcnt	aio_nbytes	/* Length of aio_iov */
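
/*
 * Example (an illustrative sketch, not part of this header): typical
 * polling-free use of the aiocb interface with aio_read(), aio_suspend(),
 * aio_error(), and aio_return().  The "read_async" function, "path"
 * argument, and buffer size are assumptions for illustration; error
 * handling and cleanup of an in-flight request are abbreviated.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static char buf[4096];
 *
 *	ssize_t
 *	read_async(const char *path)
 *	{
 *		struct aiocb cb;
 *		const struct aiocb *list[1];
 *		ssize_t n;
 *		int fd;
 *
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return (-1);
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = fd;
 *		cb.aio_offset = 0;
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = sizeof(buf);
 *		cb.aio_sigevent.sigev_notify = SIGEV_NONE;
 *		if (aio_read(&cb) != 0) {
 *			close(fd);
 *			return (-1);
 *		}
 *		list[0] = &cb;
 *		if (aio_suspend(list, 1, NULL) != 0 || aio_error(&cb) != 0)
 *			n = -1;
 *		else
 *			n = aio_return(&cb);
 *		close(fd);
 *		return (n);
 *	}
 */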

#ifdef _KERNEL

typedef void aio_cancel_fn_t(struct kaiocb *);
typedef void aio_handle_fn_t(struct kaiocb *);

/*
 * Kernel version of an I/O control block.
 *
 * Locking key:
 * * - need not be protected
 * a - locked by kaioinfo lock
 * b - locked by backend lock
 * c - locked by aio_job_mtx
 */
struct kaiocb {
	TAILQ_ENTRY(kaiocb) list;	/* (b) backend-specific list of jobs */
	TAILQ_ENTRY(kaiocb) plist;	/* (a) lists of pending / done jobs */
	TAILQ_ENTRY(kaiocb) allist;	/* (a) list of all jobs in proc */
	int	jobflags;		/* (a) job flags */
	int	inblock;		/* (*) input blocks */
	int	outblock;		/* (*) output blocks */
	int	msgsnd;			/* (*) messages sent */
	int	msgrcv;			/* (*) messages received */
	struct	proc *userproc;		/* (*) user process */
	struct	ucred *cred;		/* (*) active credential when created */
	struct	file *fd_file;		/* (*) pointer to file structure */
	struct	aioliojob *lio;		/* (*) optional lio job */
	struct	aiocb *ujob;		/* (*) pointer in userspace of aiocb */
	struct	knlist klist;		/* (a) list of knotes */
	struct	aiocb uaiocb;		/* (*) copy of user I/O control block */
	struct	uio uio;		/* (*) storage for non-vectored uio */
	struct	iovec iov[1];		/* (*) storage for non-vectored uio */
	struct	uio *uiop;		/* (*) possibly malloced uio */
	ksiginfo_t ksi;			/* (a) realtime signal info */
	uint64_t seqno;			/* (*) job number */
	aio_cancel_fn_t *cancel_fn;	/* (a) backend cancel function */
	aio_handle_fn_t *handle_fn;	/* (c) backend handle function */
	union {				/* Backend-specific data fields */
		struct {		/* BIO backend */
			volatile u_int nbio; /* Number of remaining bios */
			int	error;	/* Worst error of all bios */
			long	nbytes;	/* Bytes completed so far */
		};
		struct {		/* fsync() requests */
			int	pending; /* (a) number of pending I/O */
		};
		struct {		/* socket backend */
			void	*backend1;
			long	backend3;
			int	backend4;
		};
	};
};

struct socket;
struct sockbuf;

/*
 * AIO backends should permit cancellation of queued requests waiting to
 * be serviced by installing a cancel routine while the request is
 * queued.  The cancel routine should dequeue the request if necessary
 * and cancel it.  Care must be taken to handle races between queueing
 * and dequeueing requests and cancellation.
 *
 * When queueing a request somewhere such that it can be cancelled, the
 * caller should:
 *
 *  1) Acquire the lock that protects the associated queue.
 *  2) Call aio_set_cancel_function() to install the cancel routine.
 *  3) If that fails, the request has a pending cancel and should be
 *     cancelled via aio_cancel().
 *  4) Queue the request.
 *
 * When dequeueing a request to service it or hand it off elsewhere,
 * the caller should:
 *
 *  1) Acquire the lock that protects the associated queue.
 *  2) Dequeue the request.
 *  3) Call aio_clear_cancel_function() to clear the cancel routine.
 *  4) If that fails, the cancel routine is about to be called.  The
 *     caller should ignore the request.
 *
 * The cancel routine should:
 *
 *  1) Acquire the lock that protects the associated queue.
 *  2) Call aio_cancel_cleared() to determine if the request is already
 *     dequeued due to a race with the dequeueing thread.
 *  3) If that fails, dequeue the request.
 *  4) Cancel the request via aio_cancel().
 */
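
/*
 * Illustrative sketch of the above protocol for a hypothetical backend
 * (the "example_" names are assumptions, not part of this header, and
 * mutex initialization is omitted; compare the socket backend in
 * sys/kern/sys_socket.c for a real implementation):
 *
 *	static struct mtx example_lock;
 *	static TAILQ_HEAD(, kaiocb) example_jobs =
 *	    TAILQ_HEAD_INITIALIZER(example_jobs);
 *
 *	static void
 *	example_cancel(struct kaiocb *job)
 *	{
 *		mtx_lock(&example_lock);
 *		if (!aio_cancel_cleared(job))
 *			TAILQ_REMOVE(&example_jobs, job, list);
 *		mtx_unlock(&example_lock);
 *		aio_cancel(job);
 *	}
 *
 *	static void
 *	example_queue(struct kaiocb *job)
 *	{
 *		mtx_lock(&example_lock);
 *		if (!aio_set_cancel_function(job, example_cancel)) {
 *			mtx_unlock(&example_lock);
 *			aio_cancel(job);
 *			return;
 *		}
 *		TAILQ_INSERT_TAIL(&example_jobs, job, list);
 *		mtx_unlock(&example_lock);
 *	}
 *
 *	static struct kaiocb *
 *	example_dequeue(void)
 *	{
 *		struct kaiocb *job;
 *
 *		mtx_lock(&example_lock);
 *		while ((job = TAILQ_FIRST(&example_jobs)) != NULL) {
 *			TAILQ_REMOVE(&example_jobs, job, list);
 *			if (aio_clear_cancel_function(job))
 *				break;
 *		}
 *		mtx_unlock(&example_lock);
 *		return (job);
 *	}
 */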

bool	aio_cancel_cleared(struct kaiocb *job);
void	aio_cancel(struct kaiocb *job);
bool	aio_clear_cancel_function(struct kaiocb *job);
void	aio_complete(struct kaiocb *job, long status, int error);
void	aio_schedule(struct kaiocb *job, aio_handle_fn_t *func);
bool	aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func);
void	aio_switch_vmspace(struct kaiocb *job);

#else /* !_KERNEL */

struct timespec;

__BEGIN_DECLS
/*
 * Asynchronously read from a file
 */
int	aio_read(struct aiocb *);
#if __BSD_VISIBLE
int	aio_readv(struct aiocb *);
#endif

/*
 * Asynchronously write to a file
 */
int	aio_write(struct aiocb *);
#if __BSD_VISIBLE
int	aio_writev(struct aiocb *);
#endif

/*
 * List I/O: asynchronously/synchronously read from/write to files.
 * The mode argument (LIO_WAIT or LIO_NOWAIT) specifies whether the
 * call waits for the I/O to complete.  The list is an array of I/O
 * control blocks.  When all I/Os are complete, the optional sigevent
 * is delivered.
 */
int	lio_listio(int, struct aiocb *__restrict const *__restrict, int,
    struct sigevent *);

/*
 * Get completion status: returns EINPROGRESS until the I/O is
 * complete.  This routine does not block.
 */
int	aio_error(const struct aiocb *);

/*
 * Finish up I/O, releasing I/O resources, and return the value that
 * would have been associated with a synchronous I/O request.  This
 * routine must be called once and only once for each I/O control
 * block that has had I/O associated with it.
 */
ssize_t	aio_return(struct aiocb *);

/*
 * Cancel I/O
 */
int	aio_cancel(int, struct aiocb *);

/*
 * Suspend until at least one of the specified I/O requests completes
 * or the timeout expires.
 */
int	aio_suspend(const struct aiocb * const[], int,
    const struct timespec *);

/*
 * Asynchronous mlock
 */
int	aio_mlock(struct aiocb *);

#if __BSD_VISIBLE
ssize_t	aio_waitcomplete(struct aiocb **, struct timespec *);
#endif

int	aio_fsync(int op, struct aiocb *aiocbp);
__END_DECLS

#endif /* !_KERNEL */

#endif /* !_SYS_AIO_H_ */
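
/*
 * Example (an illustrative sketch, not part of this header): submitting
 * a batch of reads with lio_listio().  The "batch_read" function and
 * buffer sizes are assumptions for illustration.  With LIO_WAIT the call
 * blocks until every request is complete; per-request status is still
 * collected with aio_error() and aio_return().  The list may hold at
 * most AIO_LISTIO_MAX entries.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	static char a[512], b[512];
 *
 *	int
 *	batch_read(int fd)
 *	{
 *		struct aiocb cb0, cb1;
 *		struct aiocb *list[2];
 *
 *		memset(&cb0, 0, sizeof(cb0));
 *		cb0.aio_fildes = fd;
 *		cb0.aio_offset = 0;
 *		cb0.aio_buf = a;
 *		cb0.aio_nbytes = sizeof(a);
 *		cb0.aio_lio_opcode = LIO_READ;
 *		cb1 = cb0;
 *		cb1.aio_offset = sizeof(a);
 *		cb1.aio_buf = b;
 *		cb1.aio_nbytes = sizeof(b);
 *		list[0] = &cb0;
 *		list[1] = &cb1;
 *		return (lio_listio(LIO_WAIT, list, 2, NULL));
 *	}
 */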