1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 1997 John S. Dyson. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. John S. Dyson's name may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * DISCLAIMER: This code isn't warranted to do anything useful. Anything 15 * bad that happens because of using this software isn't the responsibility 16 * of the author. This software is distributed AS-IS. 17 */ 18 19 /* 20 * This file contains support for the POSIX 1003.1B AIO/LIO facility. 21 */ 22 23 #include <sys/cdefs.h> 24 __FBSDID("$FreeBSD$"); 25 26 #include <sys/param.h> 27 #include <sys/systm.h> 28 #include <sys/malloc.h> 29 #include <sys/bio.h> 30 #include <sys/buf.h> 31 #include <sys/capsicum.h> 32 #include <sys/eventhandler.h> 33 #include <sys/sysproto.h> 34 #include <sys/filedesc.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #include <sys/kthread.h> 38 #include <sys/fcntl.h> 39 #include <sys/file.h> 40 #include <sys/limits.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/unistd.h> 44 #include <sys/posix4.h> 45 #include <sys/proc.h> 46 #include <sys/resourcevar.h> 47 #include <sys/signalvar.h> 48 #include <sys/syscallsubr.h> 49 #include <sys/protosw.h> 50 #include <sys/rwlock.h> 51 #include <sys/sema.h> 52 #include <sys/socket.h> 53 #include <sys/socketvar.h> 54 #include <sys/syscall.h> 55 #include <sys/sysent.h> 56 #include <sys/sysctl.h> 57 #include <sys/syslog.h> 58 #include <sys/sx.h> 59 #include <sys/taskqueue.h> 60 #include <sys/vnode.h> 61 #include <sys/conf.h> 62 #include <sys/event.h> 63 #include <sys/mount.h> 64 #include <geom/geom.h> 65 66 #include <machine/atomic.h> 67 68 #include <vm/vm.h> 69 #include <vm/vm_page.h> 70 #include <vm/vm_extern.h> 71 #include <vm/pmap.h> 72 #include <vm/vm_map.h> 73 #include <vm/vm_object.h> 74 #include <vm/uma.h> 75 #include <sys/aio.h> 76 77 /* 78 * Counter for allocating reference ids to new jobs. Wrapped to 1 on 79 * overflow. (XXX will be removed soon.) 80 */ 81 static u_long jobrefid; 82 83 /* 84 * Counter for aio_fsync. 85 */ 86 static uint64_t jobseqno; 87 88 #ifndef MAX_AIO_PER_PROC 89 #define MAX_AIO_PER_PROC 32 90 #endif 91 92 #ifndef MAX_AIO_QUEUE_PER_PROC 93 #define MAX_AIO_QUEUE_PER_PROC 256 94 #endif 95 96 #ifndef MAX_AIO_QUEUE 97 #define MAX_AIO_QUEUE 1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */ 98 #endif 99 100 #ifndef MAX_BUF_AIO 101 #define MAX_BUF_AIO 16 102 #endif 103 104 FEATURE(aio, "Asynchronous I/O"); 105 SYSCTL_DECL(_p1003_1b); 106 107 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list"); 108 static MALLOC_DEFINE(M_AIOS, "aios", "aio_suspend aio control block list"); 109 110 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 111 "Async IO management"); 112 113 static int enable_aio_unsafe = 0; 114 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0, 115 "Permit asynchronous IO on all file types, not just known-safe types"); 116 117 static unsigned int unsafe_warningcnt = 1; 118 SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW, 119 &unsafe_warningcnt, 0, 120 "Warnings that will be triggered upon failed IO requests on unsafe files"); 121 122 static int max_aio_procs = MAX_AIO_PROCS; 123 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0, 124 "Maximum number of kernel processes to use for handling async IO "); 125 126 static int num_aio_procs = 0; 127 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0, 128 "Number of presently active kernel processes for async IO"); 129 130 /* 131 * The code will adjust the actual number of AIO processes towards this 132 * number when it gets a chance. 133 */ 134 static int target_aio_procs = TARGET_AIO_PROCS; 135 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs, 136 0, 137 "Preferred number of ready kernel processes for async IO"); 138 139 static int max_queue_count = MAX_AIO_QUEUE; 140 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0, 141 "Maximum number of aio requests to queue, globally"); 142 143 static int num_queue_count = 0; 144 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0, 145 "Number of queued aio requests"); 146 147 static int num_buf_aio = 0; 148 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0, 149 "Number of aio requests presently handled by the buf subsystem"); 150 151 static int num_unmapped_aio = 0; 152 SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio, 153 0, 154 "Number of aio requests presently handled by unmapped I/O buffers"); 155 156 /* Number of async I/O processes in the process of being started */ 157 /* XXX This should be local to aio_aqueue() */ 158 static int num_aio_resv_start = 0; 159 160 static int aiod_lifetime; 161 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0, 162 "Maximum lifetime for idle aiod"); 163 164 static int max_aio_per_proc = MAX_AIO_PER_PROC; 165 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc, 166 0, 167 "Maximum active aio requests per process"); 168 169 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC; 170 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW, 171 &max_aio_queue_per_proc, 0, 172 "Maximum queued aio requests per process"); 173 174 static int max_buf_aio = MAX_BUF_AIO; 175 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0, 176 "Maximum buf aio requests per process"); 177 178 /* 179 * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires 180 * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with 181 * vfs.aio.aio_listio_max. 182 */ 183 SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max, 184 CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc, 185 0, "Maximum aio requests for a single lio_listio call"); 186 187 #ifdef COMPAT_FREEBSD6 188 typedef struct oaiocb { 189 int aio_fildes; /* File descriptor */ 190 off_t aio_offset; /* File offset for I/O */ 191 volatile void *aio_buf; /* I/O buffer in process space */ 192 size_t aio_nbytes; /* Number of bytes for I/O */ 193 struct osigevent aio_sigevent; /* Signal to deliver */ 194 int aio_lio_opcode; /* LIO opcode */ 195 int aio_reqprio; /* Request priority -- ignored */ 196 struct __aiocb_private _aiocb_private; 197 } oaiocb_t; 198 #endif 199 200 /* 201 * Below is a key of locks used to protect each member of struct kaiocb 202 * aioliojob and kaioinfo and any backends. 203 * 204 * * - need not protected 205 * a - locked by kaioinfo lock 206 * b - locked by backend lock, the backend lock can be null in some cases, 207 * for example, BIO belongs to this type, in this case, proc lock is 208 * reused. 209 * c - locked by aio_job_mtx, the lock for the generic file I/O backend. 210 */ 211 212 /* 213 * If the routine that services an AIO request blocks while running in an 214 * AIO kernel process it can starve other I/O requests. BIO requests 215 * queued via aio_qbio() complete asynchronously and do not use AIO kernel 216 * processes at all. Socket I/O requests use a separate pool of 217 * kprocs and also force non-blocking I/O. Other file I/O requests 218 * use the generic fo_read/fo_write operations which can block. The 219 * fsync and mlock operations can also block while executing. Ideally 220 * none of these requests would block while executing. 221 * 222 * Note that the service routines cannot toggle O_NONBLOCK in the file 223 * structure directly while handling a request due to races with 224 * userland threads. 225 */ 226 227 /* jobflags */ 228 #define KAIOCB_QUEUEING 0x01 229 #define KAIOCB_CANCELLED 0x02 230 #define KAIOCB_CANCELLING 0x04 231 #define KAIOCB_CHECKSYNC 0x08 232 #define KAIOCB_CLEARED 0x10 233 #define KAIOCB_FINISHED 0x20 234 235 /* 236 * AIO process info 237 */ 238 #define AIOP_FREE 0x1 /* proc on free queue */ 239 240 struct aioproc { 241 int aioprocflags; /* (c) AIO proc flags */ 242 TAILQ_ENTRY(aioproc) list; /* (c) list of processes */ 243 struct proc *aioproc; /* (*) the AIO proc */ 244 }; 245 246 /* 247 * data-structure for lio signal management 248 */ 249 struct aioliojob { 250 int lioj_flags; /* (a) listio flags */ 251 int lioj_count; /* (a) count of jobs */ 252 int lioj_finished_count; /* (a) count of finished jobs */ 253 struct sigevent lioj_signal; /* (a) signal on all I/O done */ 254 TAILQ_ENTRY(aioliojob) lioj_list; /* (a) lio list */ 255 struct knlist klist; /* (a) list of knotes */ 256 ksiginfo_t lioj_ksi; /* (a) Realtime signal info */ 257 }; 258 259 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */ 260 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */ 261 #define LIOJ_KEVENT_POSTED 0x4 /* kevent triggered */ 262 263 /* 264 * per process aio data structure 265 */ 266 struct kaioinfo { 267 struct mtx kaio_mtx; /* the lock to protect this struct */ 268 int kaio_flags; /* (a) per process kaio flags */ 269 int kaio_active_count; /* (c) number of currently used AIOs */ 270 int kaio_count; /* (a) size of AIO queue */ 271 int kaio_buffer_count; /* (a) number of bio buffers */ 272 TAILQ_HEAD(,kaiocb) kaio_all; /* (a) all AIOs in a process */ 273 TAILQ_HEAD(,kaiocb) kaio_done; /* (a) done queue for process */ 274 TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */ 275 TAILQ_HEAD(,kaiocb) kaio_jobqueue; /* (a) job queue for process */ 276 TAILQ_HEAD(,kaiocb) kaio_syncqueue; /* (a) queue for aio_fsync */ 277 TAILQ_HEAD(,kaiocb) kaio_syncready; /* (a) second q for aio_fsync */ 278 struct task kaio_task; /* (*) task to kick aio processes */ 279 struct task kaio_sync_task; /* (*) task to schedule fsync jobs */ 280 }; 281 282 #define AIO_LOCK(ki) mtx_lock(&(ki)->kaio_mtx) 283 #define AIO_UNLOCK(ki) mtx_unlock(&(ki)->kaio_mtx) 284 #define AIO_LOCK_ASSERT(ki, f) mtx_assert(&(ki)->kaio_mtx, (f)) 285 #define AIO_MTX(ki) (&(ki)->kaio_mtx) 286 287 #define KAIO_RUNDOWN 0x1 /* process is being run down */ 288 #define KAIO_WAKEUP 0x2 /* wakeup process when AIO completes */ 289 290 /* 291 * Operations used to interact with userland aio control blocks. 292 * Different ABIs provide their own operations. 293 */ 294 struct aiocb_ops { 295 int (*aio_copyin)(struct aiocb *ujob, struct kaiocb *kjob, int ty); 296 long (*fetch_status)(struct aiocb *ujob); 297 long (*fetch_error)(struct aiocb *ujob); 298 int (*store_status)(struct aiocb *ujob, long status); 299 int (*store_error)(struct aiocb *ujob, long error); 300 int (*store_kernelinfo)(struct aiocb *ujob, long jobref); 301 int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob); 302 }; 303 304 static TAILQ_HEAD(,aioproc) aio_freeproc; /* (c) Idle daemons */ 305 static struct sema aio_newproc_sem; 306 static struct mtx aio_job_mtx; 307 static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */ 308 static struct unrhdr *aiod_unr; 309 310 static void aio_biocleanup(struct bio *bp); 311 void aio_init_aioinfo(struct proc *p); 312 static int aio_onceonly(void); 313 static int aio_free_entry(struct kaiocb *job); 314 static void aio_process_rw(struct kaiocb *job); 315 static void aio_process_sync(struct kaiocb *job); 316 static void aio_process_mlock(struct kaiocb *job); 317 static void aio_schedule_fsync(void *context, int pending); 318 static int aio_newproc(int *); 319 int aio_aqueue(struct thread *td, struct aiocb *ujob, 320 struct aioliojob *lio, int type, struct aiocb_ops *ops); 321 static int aio_queue_file(struct file *fp, struct kaiocb *job); 322 static void aio_biowakeup(struct bio *bp); 323 static void aio_proc_rundown(void *arg, struct proc *p); 324 static void aio_proc_rundown_exec(void *arg, struct proc *p, 325 struct image_params *imgp); 326 static int aio_qbio(struct proc *p, struct kaiocb *job); 327 static void aio_daemon(void *param); 328 static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job); 329 static bool aio_clear_cancel_function_locked(struct kaiocb *job); 330 static int aio_kick(struct proc *userp); 331 static void aio_kick_nowait(struct proc *userp); 332 static void aio_kick_helper(void *context, int pending); 333 static int filt_aioattach(struct knote *kn); 334 static void filt_aiodetach(struct knote *kn); 335 static int filt_aio(struct knote *kn, long hint); 336 static int filt_lioattach(struct knote *kn); 337 static void filt_liodetach(struct knote *kn); 338 static int filt_lio(struct knote *kn, long hint); 339 340 /* 341 * Zones for: 342 * kaio Per process async io info 343 * aiop async io process data 344 * aiocb async io jobs 345 * aiolio list io jobs 346 */ 347 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiolio_zone; 348 349 /* kqueue filters for aio */ 350 static struct filterops aio_filtops = { 351 .f_isfd = 0, 352 .f_attach = filt_aioattach, 353 .f_detach = filt_aiodetach, 354 .f_event = filt_aio, 355 }; 356 static struct filterops lio_filtops = { 357 .f_isfd = 0, 358 .f_attach = filt_lioattach, 359 .f_detach = filt_liodetach, 360 .f_event = filt_lio 361 }; 362 363 static eventhandler_tag exit_tag, exec_tag; 364 365 TASKQUEUE_DEFINE_THREAD(aiod_kick); 366 367 /* 368 * Main operations function for use as a kernel module. 369 */ 370 static int 371 aio_modload(struct module *module, int cmd, void *arg) 372 { 373 int error = 0; 374 375 switch (cmd) { 376 case MOD_LOAD: 377 aio_onceonly(); 378 break; 379 case MOD_SHUTDOWN: 380 break; 381 default: 382 error = EOPNOTSUPP; 383 break; 384 } 385 return (error); 386 } 387 388 static moduledata_t aio_mod = { 389 "aio", 390 &aio_modload, 391 NULL 392 }; 393 394 DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY); 395 MODULE_VERSION(aio, 1); 396 397 /* 398 * Startup initialization 399 */ 400 static int 401 aio_onceonly(void) 402 { 403 404 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL, 405 EVENTHANDLER_PRI_ANY); 406 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, 407 NULL, EVENTHANDLER_PRI_ANY); 408 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops); 409 kqueue_add_filteropts(EVFILT_LIO, &lio_filtops); 410 TAILQ_INIT(&aio_freeproc); 411 sema_init(&aio_newproc_sem, 0, "aio_new_proc"); 412 mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF); 413 TAILQ_INIT(&aio_jobs); 414 aiod_unr = new_unrhdr(1, INT_MAX, NULL); 415 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL, 416 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 417 aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL, 418 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 419 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL, 420 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 421 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL, 422 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 423 aiod_lifetime = AIOD_LIFETIME_DEFAULT; 424 jobrefid = 1; 425 p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO); 426 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE); 427 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0); 428 429 return (0); 430 } 431 432 /* 433 * Init the per-process aioinfo structure. The aioinfo limits are set 434 * per-process for user limit (resource) management. 435 */ 436 void 437 aio_init_aioinfo(struct proc *p) 438 { 439 struct kaioinfo *ki; 440 441 ki = uma_zalloc(kaio_zone, M_WAITOK); 442 mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW); 443 ki->kaio_flags = 0; 444 ki->kaio_active_count = 0; 445 ki->kaio_count = 0; 446 ki->kaio_buffer_count = 0; 447 TAILQ_INIT(&ki->kaio_all); 448 TAILQ_INIT(&ki->kaio_done); 449 TAILQ_INIT(&ki->kaio_jobqueue); 450 TAILQ_INIT(&ki->kaio_liojoblist); 451 TAILQ_INIT(&ki->kaio_syncqueue); 452 TAILQ_INIT(&ki->kaio_syncready); 453 TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p); 454 TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki); 455 PROC_LOCK(p); 456 if (p->p_aioinfo == NULL) { 457 p->p_aioinfo = ki; 458 PROC_UNLOCK(p); 459 } else { 460 PROC_UNLOCK(p); 461 mtx_destroy(&ki->kaio_mtx); 462 uma_zfree(kaio_zone, ki); 463 } 464 465 while (num_aio_procs < MIN(target_aio_procs, max_aio_procs)) 466 aio_newproc(NULL); 467 } 468 469 static int 470 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi, bool ext) 471 { 472 struct thread *td; 473 int error; 474 475 error = sigev_findtd(p, sigev, &td); 476 if (error) 477 return (error); 478 if (!KSI_ONQ(ksi)) { 479 ksiginfo_set_sigev(ksi, sigev); 480 ksi->ksi_code = SI_ASYNCIO; 481 ksi->ksi_flags |= ext ? (KSI_EXT | KSI_INS) : 0; 482 tdsendsignal(p, td, ksi->ksi_signo, ksi); 483 } 484 PROC_UNLOCK(p); 485 return (error); 486 } 487 488 /* 489 * Free a job entry. Wait for completion if it is currently active, but don't 490 * delay forever. If we delay, we return a flag that says that we have to 491 * restart the queue scan. 492 */ 493 static int 494 aio_free_entry(struct kaiocb *job) 495 { 496 struct kaioinfo *ki; 497 struct aioliojob *lj; 498 struct proc *p; 499 500 p = job->userproc; 501 MPASS(curproc == p); 502 ki = p->p_aioinfo; 503 MPASS(ki != NULL); 504 505 AIO_LOCK_ASSERT(ki, MA_OWNED); 506 MPASS(job->jobflags & KAIOCB_FINISHED); 507 508 atomic_subtract_int(&num_queue_count, 1); 509 510 ki->kaio_count--; 511 MPASS(ki->kaio_count >= 0); 512 513 TAILQ_REMOVE(&ki->kaio_done, job, plist); 514 TAILQ_REMOVE(&ki->kaio_all, job, allist); 515 516 lj = job->lio; 517 if (lj) { 518 lj->lioj_count--; 519 lj->lioj_finished_count--; 520 521 if (lj->lioj_count == 0) { 522 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 523 /* lio is going away, we need to destroy any knotes */ 524 knlist_delete(&lj->klist, curthread, 1); 525 PROC_LOCK(p); 526 sigqueue_take(&lj->lioj_ksi); 527 PROC_UNLOCK(p); 528 uma_zfree(aiolio_zone, lj); 529 } 530 } 531 532 /* job is going away, we need to destroy any knotes */ 533 knlist_delete(&job->klist, curthread, 1); 534 PROC_LOCK(p); 535 sigqueue_take(&job->ksi); 536 PROC_UNLOCK(p); 537 538 AIO_UNLOCK(ki); 539 540 /* 541 * The thread argument here is used to find the owning process 542 * and is also passed to fo_close() which may pass it to various 543 * places such as devsw close() routines. Because of that, we 544 * need a thread pointer from the process owning the job that is 545 * persistent and won't disappear out from under us or move to 546 * another process. 547 * 548 * Currently, all the callers of this function call it to remove 549 * a kaiocb from the current process' job list either via a 550 * syscall or due to the current process calling exit() or 551 * execve(). Thus, we know that p == curproc. We also know that 552 * curthread can't exit since we are curthread. 553 * 554 * Therefore, we use curthread as the thread to pass to 555 * knlist_delete(). This does mean that it is possible for the 556 * thread pointer at close time to differ from the thread pointer 557 * at open time, but this is already true of file descriptors in 558 * a multithreaded process. 559 */ 560 if (job->fd_file) 561 fdrop(job->fd_file, curthread); 562 crfree(job->cred); 563 if (job->uiop != &job->uio) 564 free(job->uiop, M_IOV); 565 uma_zfree(aiocb_zone, job); 566 AIO_LOCK(ki); 567 568 return (0); 569 } 570 571 static void 572 aio_proc_rundown_exec(void *arg, struct proc *p, 573 struct image_params *imgp __unused) 574 { 575 aio_proc_rundown(arg, p); 576 } 577 578 static int 579 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job) 580 { 581 aio_cancel_fn_t *func; 582 int cancelled; 583 584 AIO_LOCK_ASSERT(ki, MA_OWNED); 585 if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED)) 586 return (0); 587 MPASS((job->jobflags & KAIOCB_CANCELLING) == 0); 588 job->jobflags |= KAIOCB_CANCELLED; 589 590 func = job->cancel_fn; 591 592 /* 593 * If there is no cancel routine, just leave the job marked as 594 * cancelled. The job should be in active use by a caller who 595 * should complete it normally or when it fails to install a 596 * cancel routine. 597 */ 598 if (func == NULL) 599 return (0); 600 601 /* 602 * Set the CANCELLING flag so that aio_complete() will defer 603 * completions of this job. This prevents the job from being 604 * freed out from under the cancel callback. After the 605 * callback any deferred completion (whether from the callback 606 * or any other source) will be completed. 607 */ 608 job->jobflags |= KAIOCB_CANCELLING; 609 AIO_UNLOCK(ki); 610 func(job); 611 AIO_LOCK(ki); 612 job->jobflags &= ~KAIOCB_CANCELLING; 613 if (job->jobflags & KAIOCB_FINISHED) { 614 cancelled = job->uaiocb._aiocb_private.error == ECANCELED; 615 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); 616 aio_bio_done_notify(p, job); 617 } else { 618 /* 619 * The cancel callback might have scheduled an 620 * operation to cancel this request, but it is 621 * only counted as cancelled if the request is 622 * cancelled when the callback returns. 623 */ 624 cancelled = 0; 625 } 626 return (cancelled); 627 } 628 629 /* 630 * Rundown the jobs for a given process. 631 */ 632 static void 633 aio_proc_rundown(void *arg, struct proc *p) 634 { 635 struct kaioinfo *ki; 636 struct aioliojob *lj; 637 struct kaiocb *job, *jobn; 638 639 KASSERT(curthread->td_proc == p, 640 ("%s: called on non-curproc", __func__)); 641 ki = p->p_aioinfo; 642 if (ki == NULL) 643 return; 644 645 AIO_LOCK(ki); 646 ki->kaio_flags |= KAIO_RUNDOWN; 647 648 restart: 649 650 /* 651 * Try to cancel all pending requests. This code simulates 652 * aio_cancel on all pending I/O requests. 653 */ 654 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { 655 aio_cancel_job(p, ki, job); 656 } 657 658 /* Wait for all running I/O to be finished */ 659 if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) { 660 ki->kaio_flags |= KAIO_WAKEUP; 661 msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz); 662 goto restart; 663 } 664 665 /* Free all completed I/O requests. */ 666 while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL) 667 aio_free_entry(job); 668 669 while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) { 670 if (lj->lioj_count == 0) { 671 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 672 knlist_delete(&lj->klist, curthread, 1); 673 PROC_LOCK(p); 674 sigqueue_take(&lj->lioj_ksi); 675 PROC_UNLOCK(p); 676 uma_zfree(aiolio_zone, lj); 677 } else { 678 panic("LIO job not cleaned up: C:%d, FC:%d\n", 679 lj->lioj_count, lj->lioj_finished_count); 680 } 681 } 682 AIO_UNLOCK(ki); 683 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task); 684 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task); 685 mtx_destroy(&ki->kaio_mtx); 686 uma_zfree(kaio_zone, ki); 687 p->p_aioinfo = NULL; 688 } 689 690 /* 691 * Select a job to run (called by an AIO daemon). 692 */ 693 static struct kaiocb * 694 aio_selectjob(struct aioproc *aiop) 695 { 696 struct kaiocb *job; 697 struct kaioinfo *ki; 698 struct proc *userp; 699 700 mtx_assert(&aio_job_mtx, MA_OWNED); 701 restart: 702 TAILQ_FOREACH(job, &aio_jobs, list) { 703 userp = job->userproc; 704 ki = userp->p_aioinfo; 705 706 if (ki->kaio_active_count < max_aio_per_proc) { 707 TAILQ_REMOVE(&aio_jobs, job, list); 708 if (!aio_clear_cancel_function(job)) 709 goto restart; 710 711 /* Account for currently active jobs. */ 712 ki->kaio_active_count++; 713 break; 714 } 715 } 716 return (job); 717 } 718 719 /* 720 * Move all data to a permanent storage device. This code 721 * simulates the fsync and fdatasync syscalls. 722 */ 723 static int 724 aio_fsync_vnode(struct thread *td, struct vnode *vp, int op) 725 { 726 struct mount *mp; 727 int error; 728 729 if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) 730 goto drop; 731 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 732 if (vp->v_object != NULL) { 733 VM_OBJECT_WLOCK(vp->v_object); 734 vm_object_page_clean(vp->v_object, 0, 0, 0); 735 VM_OBJECT_WUNLOCK(vp->v_object); 736 } 737 if (op == LIO_DSYNC) 738 error = VOP_FDATASYNC(vp, td); 739 else 740 error = VOP_FSYNC(vp, MNT_WAIT, td); 741 742 VOP_UNLOCK(vp); 743 vn_finished_write(mp); 744 drop: 745 return (error); 746 } 747 748 /* 749 * The AIO processing activity for LIO_READ/LIO_WRITE. This is the code that 750 * does the I/O request for the non-bio version of the operations. The normal 751 * vn operations are used, and this code should work in all instances for every 752 * type of file, including pipes, sockets, fifos, and regular files. 753 * 754 * XXX I don't think it works well for socket, pipe, and fifo. 755 */ 756 static void 757 aio_process_rw(struct kaiocb *job) 758 { 759 struct ucred *td_savedcred; 760 struct thread *td; 761 struct aiocb *cb; 762 struct file *fp; 763 ssize_t cnt; 764 long msgsnd_st, msgsnd_end; 765 long msgrcv_st, msgrcv_end; 766 long oublock_st, oublock_end; 767 long inblock_st, inblock_end; 768 int error, opcode; 769 770 KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ || 771 job->uaiocb.aio_lio_opcode == LIO_READV || 772 job->uaiocb.aio_lio_opcode == LIO_WRITE || 773 job->uaiocb.aio_lio_opcode == LIO_WRITEV, 774 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); 775 776 aio_switch_vmspace(job); 777 td = curthread; 778 td_savedcred = td->td_ucred; 779 td->td_ucred = job->cred; 780 job->uiop->uio_td = td; 781 cb = &job->uaiocb; 782 fp = job->fd_file; 783 784 opcode = job->uaiocb.aio_lio_opcode; 785 cnt = job->uiop->uio_resid; 786 787 msgrcv_st = td->td_ru.ru_msgrcv; 788 msgsnd_st = td->td_ru.ru_msgsnd; 789 inblock_st = td->td_ru.ru_inblock; 790 oublock_st = td->td_ru.ru_oublock; 791 792 /* 793 * aio_aqueue() acquires a reference to the file that is 794 * released in aio_free_entry(). 795 */ 796 if (opcode == LIO_READ || opcode == LIO_READV) { 797 if (job->uiop->uio_resid == 0) 798 error = 0; 799 else 800 error = fo_read(fp, job->uiop, fp->f_cred, FOF_OFFSET, 801 td); 802 } else { 803 if (fp->f_type == DTYPE_VNODE) 804 bwillwrite(); 805 error = fo_write(fp, job->uiop, fp->f_cred, FOF_OFFSET, td); 806 } 807 msgrcv_end = td->td_ru.ru_msgrcv; 808 msgsnd_end = td->td_ru.ru_msgsnd; 809 inblock_end = td->td_ru.ru_inblock; 810 oublock_end = td->td_ru.ru_oublock; 811 812 job->msgrcv = msgrcv_end - msgrcv_st; 813 job->msgsnd = msgsnd_end - msgsnd_st; 814 job->inblock = inblock_end - inblock_st; 815 job->outblock = oublock_end - oublock_st; 816 817 if (error != 0 && job->uiop->uio_resid != cnt) { 818 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) 819 error = 0; 820 if (error == EPIPE && 821 (opcode == LIO_WRITE || opcode == LIO_WRITEV)) { 822 PROC_LOCK(job->userproc); 823 kern_psignal(job->userproc, SIGPIPE); 824 PROC_UNLOCK(job->userproc); 825 } 826 } 827 828 cnt -= job->uiop->uio_resid; 829 td->td_ucred = td_savedcred; 830 if (error) 831 aio_complete(job, -1, error); 832 else 833 aio_complete(job, cnt, 0); 834 } 835 836 static void 837 aio_process_sync(struct kaiocb *job) 838 { 839 struct thread *td = curthread; 840 struct ucred *td_savedcred = td->td_ucred; 841 struct file *fp = job->fd_file; 842 int error = 0; 843 844 KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC || 845 job->uaiocb.aio_lio_opcode == LIO_DSYNC, 846 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); 847 848 td->td_ucred = job->cred; 849 if (fp->f_vnode != NULL) { 850 error = aio_fsync_vnode(td, fp->f_vnode, 851 job->uaiocb.aio_lio_opcode); 852 } 853 td->td_ucred = td_savedcred; 854 if (error) 855 aio_complete(job, -1, error); 856 else 857 aio_complete(job, 0, 0); 858 } 859 860 static void 861 aio_process_mlock(struct kaiocb *job) 862 { 863 struct aiocb *cb = &job->uaiocb; 864 int error; 865 866 KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK, 867 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); 868 869 aio_switch_vmspace(job); 870 error = kern_mlock(job->userproc, job->cred, 871 __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes); 872 aio_complete(job, error != 0 ? -1 : 0, error); 873 } 874 875 static void 876 aio_bio_done_notify(struct proc *userp, struct kaiocb *job) 877 { 878 struct aioliojob *lj; 879 struct kaioinfo *ki; 880 struct kaiocb *sjob, *sjobn; 881 int lj_done; 882 bool schedule_fsync; 883 884 ki = userp->p_aioinfo; 885 AIO_LOCK_ASSERT(ki, MA_OWNED); 886 lj = job->lio; 887 lj_done = 0; 888 if (lj) { 889 lj->lioj_finished_count++; 890 if (lj->lioj_count == lj->lioj_finished_count) 891 lj_done = 1; 892 } 893 TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist); 894 MPASS(job->jobflags & KAIOCB_FINISHED); 895 896 if (ki->kaio_flags & KAIO_RUNDOWN) 897 goto notification_done; 898 899 if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || 900 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) 901 aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi, true); 902 903 KNOTE_LOCKED(&job->klist, 1); 904 905 if (lj_done) { 906 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 907 lj->lioj_flags |= LIOJ_KEVENT_POSTED; 908 KNOTE_LOCKED(&lj->klist, 1); 909 } 910 if ((lj->lioj_flags & (LIOJ_SIGNAL | LIOJ_SIGNAL_POSTED)) 911 == LIOJ_SIGNAL && 912 (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 913 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { 914 aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi, 915 true); 916 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 917 } 918 } 919 920 notification_done: 921 if (job->jobflags & KAIOCB_CHECKSYNC) { 922 schedule_fsync = false; 923 TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) { 924 if (job->fd_file != sjob->fd_file || 925 job->seqno >= sjob->seqno) 926 continue; 927 if (--sjob->pending > 0) 928 continue; 929 TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list); 930 if (!aio_clear_cancel_function_locked(sjob)) 931 continue; 932 TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list); 933 schedule_fsync = true; 934 } 935 if (schedule_fsync) 936 taskqueue_enqueue(taskqueue_aiod_kick, 937 &ki->kaio_sync_task); 938 } 939 if (ki->kaio_flags & KAIO_WAKEUP) { 940 ki->kaio_flags &= ~KAIO_WAKEUP; 941 wakeup(&userp->p_aioinfo); 942 } 943 } 944 945 static void 946 aio_schedule_fsync(void *context, int pending) 947 { 948 struct kaioinfo *ki; 949 struct kaiocb *job; 950 951 ki = context; 952 AIO_LOCK(ki); 953 while (!TAILQ_EMPTY(&ki->kaio_syncready)) { 954 job = TAILQ_FIRST(&ki->kaio_syncready); 955 TAILQ_REMOVE(&ki->kaio_syncready, job, list); 956 AIO_UNLOCK(ki); 957 aio_schedule(job, aio_process_sync); 958 AIO_LOCK(ki); 959 } 960 AIO_UNLOCK(ki); 961 } 962 963 bool 964 aio_cancel_cleared(struct kaiocb *job) 965 { 966 967 /* 968 * The caller should hold the same queue lock held when 969 * aio_clear_cancel_function() was called and set this flag 970 * ensuring this check sees an up-to-date value. However, 971 * there is no way to assert that. 972 */ 973 return ((job->jobflags & KAIOCB_CLEARED) != 0); 974 } 975 976 static bool 977 aio_clear_cancel_function_locked(struct kaiocb *job) 978 { 979 980 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); 981 MPASS(job->cancel_fn != NULL); 982 if (job->jobflags & KAIOCB_CANCELLING) { 983 job->jobflags |= KAIOCB_CLEARED; 984 return (false); 985 } 986 job->cancel_fn = NULL; 987 return (true); 988 } 989 990 bool 991 aio_clear_cancel_function(struct kaiocb *job) 992 { 993 struct kaioinfo *ki; 994 bool ret; 995 996 ki = job->userproc->p_aioinfo; 997 AIO_LOCK(ki); 998 ret = aio_clear_cancel_function_locked(job); 999 AIO_UNLOCK(ki); 1000 return (ret); 1001 } 1002 1003 static bool 1004 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func) 1005 { 1006 1007 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); 1008 if (job->jobflags & KAIOCB_CANCELLED) 1009 return (false); 1010 job->cancel_fn = func; 1011 return (true); 1012 } 1013 1014 bool 1015 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func) 1016 { 1017 struct kaioinfo *ki; 1018 bool ret; 1019 1020 ki = job->userproc->p_aioinfo; 1021 AIO_LOCK(ki); 1022 ret = aio_set_cancel_function_locked(job, func); 1023 AIO_UNLOCK(ki); 1024 return (ret); 1025 } 1026 1027 void 1028 aio_complete(struct kaiocb *job, long status, int error) 1029 { 1030 struct kaioinfo *ki; 1031 struct proc *userp; 1032 1033 job->uaiocb._aiocb_private.error = error; 1034 job->uaiocb._aiocb_private.status = status; 1035 1036 userp = job->userproc; 1037 ki = userp->p_aioinfo; 1038 1039 AIO_LOCK(ki); 1040 KASSERT(!(job->jobflags & KAIOCB_FINISHED), 1041 ("duplicate aio_complete")); 1042 job->jobflags |= KAIOCB_FINISHED; 1043 if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) { 1044 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); 1045 aio_bio_done_notify(userp, job); 1046 } 1047 AIO_UNLOCK(ki); 1048 } 1049 1050 void 1051 aio_cancel(struct kaiocb *job) 1052 { 1053 1054 aio_complete(job, -1, ECANCELED); 1055 } 1056 1057 void 1058 aio_switch_vmspace(struct kaiocb *job) 1059 { 1060 1061 vmspace_switch_aio(job->userproc->p_vmspace); 1062 } 1063 1064 /* 1065 * The AIO daemon, most of the actual work is done in aio_process_*, 1066 * but the setup (and address space mgmt) is done in this routine. 1067 */ 1068 static void 1069 aio_daemon(void *_id) 1070 { 1071 struct kaiocb *job; 1072 struct aioproc *aiop; 1073 struct kaioinfo *ki; 1074 struct proc *p; 1075 struct vmspace *myvm; 1076 struct thread *td = curthread; 1077 int id = (intptr_t)_id; 1078 1079 /* 1080 * Grab an extra reference on the daemon's vmspace so that it 1081 * doesn't get freed by jobs that switch to a different 1082 * vmspace. 1083 */ 1084 p = td->td_proc; 1085 myvm = vmspace_acquire_ref(p); 1086 1087 KASSERT(p->p_textvp == NULL, ("kthread has a textvp")); 1088 1089 /* 1090 * Allocate and ready the aio control info. There is one aiop structure 1091 * per daemon. 1092 */ 1093 aiop = uma_zalloc(aiop_zone, M_WAITOK); 1094 aiop->aioproc = p; 1095 aiop->aioprocflags = 0; 1096 1097 /* 1098 * Wakeup parent process. (Parent sleeps to keep from blasting away 1099 * and creating too many daemons.) 1100 */ 1101 sema_post(&aio_newproc_sem); 1102 1103 mtx_lock(&aio_job_mtx); 1104 for (;;) { 1105 /* 1106 * Take daemon off of free queue 1107 */ 1108 if (aiop->aioprocflags & AIOP_FREE) { 1109 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1110 aiop->aioprocflags &= ~AIOP_FREE; 1111 } 1112 1113 /* 1114 * Check for jobs. 1115 */ 1116 while ((job = aio_selectjob(aiop)) != NULL) { 1117 mtx_unlock(&aio_job_mtx); 1118 1119 ki = job->userproc->p_aioinfo; 1120 job->handle_fn(job); 1121 1122 mtx_lock(&aio_job_mtx); 1123 /* Decrement the active job count. */ 1124 ki->kaio_active_count--; 1125 } 1126 1127 /* 1128 * Disconnect from user address space. 1129 */ 1130 if (p->p_vmspace != myvm) { 1131 mtx_unlock(&aio_job_mtx); 1132 vmspace_switch_aio(myvm); 1133 mtx_lock(&aio_job_mtx); 1134 /* 1135 * We have to restart to avoid race, we only sleep if 1136 * no job can be selected. 1137 */ 1138 continue; 1139 } 1140 1141 mtx_assert(&aio_job_mtx, MA_OWNED); 1142 1143 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 1144 aiop->aioprocflags |= AIOP_FREE; 1145 1146 /* 1147 * If daemon is inactive for a long time, allow it to exit, 1148 * thereby freeing resources. 1149 */ 1150 if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy", 1151 aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) && 1152 (aiop->aioprocflags & AIOP_FREE) && 1153 num_aio_procs > target_aio_procs) 1154 break; 1155 } 1156 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1157 num_aio_procs--; 1158 mtx_unlock(&aio_job_mtx); 1159 uma_zfree(aiop_zone, aiop); 1160 free_unr(aiod_unr, id); 1161 vmspace_free(myvm); 1162 1163 KASSERT(p->p_vmspace == myvm, 1164 ("AIOD: bad vmspace for exiting daemon")); 1165 KASSERT(refcount_load(&myvm->vm_refcnt) > 1, 1166 ("AIOD: bad vm refcnt for exiting daemon: %d", 1167 refcount_load(&myvm->vm_refcnt))); 1168 kproc_exit(0); 1169 } 1170 1171 /* 1172 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The 1173 * AIO daemon modifies its environment itself. 1174 */ 1175 static int 1176 aio_newproc(int *start) 1177 { 1178 int error; 1179 struct proc *p; 1180 int id; 1181 1182 id = alloc_unr(aiod_unr); 1183 error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p, 1184 RFNOWAIT, 0, "aiod%d", id); 1185 if (error == 0) { 1186 /* 1187 * Wait until daemon is started. 1188 */ 1189 sema_wait(&aio_newproc_sem); 1190 mtx_lock(&aio_job_mtx); 1191 num_aio_procs++; 1192 if (start != NULL) 1193 (*start)--; 1194 mtx_unlock(&aio_job_mtx); 1195 } else { 1196 free_unr(aiod_unr, id); 1197 } 1198 return (error); 1199 } 1200 1201 /* 1202 * Try the high-performance, low-overhead bio method for eligible 1203 * VCHR devices. This method doesn't use an aio helper thread, and 1204 * thus has very low overhead. 1205 * 1206 * Assumes that the caller, aio_aqueue(), has incremented the file 1207 * structure's reference count, preventing its deallocation for the 1208 * duration of this call. 1209 */ 1210 static int 1211 aio_qbio(struct proc *p, struct kaiocb *job) 1212 { 1213 struct aiocb *cb; 1214 struct file *fp; 1215 struct buf *pbuf; 1216 struct vnode *vp; 1217 struct cdevsw *csw; 1218 struct cdev *dev; 1219 struct kaioinfo *ki; 1220 struct bio **bios = NULL; 1221 off_t offset; 1222 int bio_cmd, error, i, iovcnt, opcode, poff, ref; 1223 vm_prot_t prot; 1224 bool use_unmapped; 1225 1226 cb = &job->uaiocb; 1227 fp = job->fd_file; 1228 opcode = cb->aio_lio_opcode; 1229 1230 if (!(opcode == LIO_WRITE || opcode == LIO_WRITEV || 1231 opcode == LIO_READ || opcode == LIO_READV)) 1232 return (-1); 1233 if (fp == NULL || fp->f_type != DTYPE_VNODE) 1234 return (-1); 1235 1236 vp = fp->f_vnode; 1237 if (vp->v_type != VCHR) 1238 return (-1); 1239 if (vp->v_bufobj.bo_bsize == 0) 1240 return (-1); 1241 1242 bio_cmd = opcode == LIO_WRITE || opcode == LIO_WRITEV ? BIO_WRITE : 1243 BIO_READ; 1244 iovcnt = job->uiop->uio_iovcnt; 1245 if (iovcnt > max_buf_aio) 1246 return (-1); 1247 for (i = 0; i < iovcnt; i++) { 1248 if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0) 1249 return (-1); 1250 if (job->uiop->uio_iov[i].iov_len > maxphys) { 1251 error = -1; 1252 return (-1); 1253 } 1254 } 1255 offset = cb->aio_offset; 1256 1257 ref = 0; 1258 csw = devvn_refthread(vp, &dev, &ref); 1259 if (csw == NULL) 1260 return (ENXIO); 1261 1262 if ((csw->d_flags & D_DISK) == 0) { 1263 error = -1; 1264 goto unref; 1265 } 1266 if (job->uiop->uio_resid > dev->si_iosize_max) { 1267 error = -1; 1268 goto unref; 1269 } 1270 1271 ki = p->p_aioinfo; 1272 job->error = 0; 1273 1274 use_unmapped = (dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed; 1275 if (!use_unmapped) { 1276 AIO_LOCK(ki); 1277 if (ki->kaio_buffer_count + iovcnt > max_buf_aio) { 1278 AIO_UNLOCK(ki); 1279 error = EAGAIN; 1280 goto unref; 1281 } 1282 ki->kaio_buffer_count += iovcnt; 1283 AIO_UNLOCK(ki); 1284 } 1285 1286 bios = malloc(sizeof(struct bio *) * iovcnt, M_TEMP, M_WAITOK); 1287 atomic_store_int(&job->nbio, iovcnt); 1288 for (i = 0; i < iovcnt; i++) { 1289 struct vm_page** pages; 1290 struct bio *bp; 1291 void *buf; 1292 size_t nbytes; 1293 int npages; 1294 1295 buf = job->uiop->uio_iov[i].iov_base; 1296 nbytes = job->uiop->uio_iov[i].iov_len; 1297 1298 bios[i] = g_alloc_bio(); 1299 bp = bios[i]; 1300 1301 poff = (vm_offset_t)buf & PAGE_MASK; 1302 if (use_unmapped) { 1303 pbuf = NULL; 1304 pages = malloc(sizeof(vm_page_t) * (atop(round_page( 1305 nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO); 1306 } else { 1307 pbuf = uma_zalloc(pbuf_zone, M_WAITOK); 1308 BUF_KERNPROC(pbuf); 1309 pages = pbuf->b_pages; 1310 } 1311 1312 bp->bio_length = nbytes; 1313 bp->bio_bcount = nbytes; 1314 bp->bio_done = aio_biowakeup; 1315 bp->bio_offset = offset; 1316 bp->bio_cmd = bio_cmd; 1317 bp->bio_dev = dev; 1318 bp->bio_caller1 = job; 1319 bp->bio_caller2 = pbuf; 1320 1321 prot = VM_PROT_READ; 1322 if (opcode == LIO_READ || opcode == LIO_READV) 1323 prot |= VM_PROT_WRITE; /* Less backwards than it looks */ 1324 npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, 1325 (vm_offset_t)buf, bp->bio_length, prot, pages, 1326 atop(maxphys) + 1); 1327 if (npages < 0) { 1328 if (pbuf != NULL) 1329 uma_zfree(pbuf_zone, pbuf); 1330 else 1331 free(pages, M_TEMP); 1332 error = EFAULT; 1333 g_destroy_bio(bp); 1334 i--; 1335 goto destroy_bios; 1336 } 1337 if (pbuf != NULL) { 1338 pmap_qenter((vm_offset_t)pbuf->b_data, pages, npages); 1339 bp->bio_data = pbuf->b_data + poff; 1340 pbuf->b_npages = npages; 1341 atomic_add_int(&num_buf_aio, 1); 1342 } else { 1343 bp->bio_ma = pages; 1344 bp->bio_ma_n = npages; 1345 bp->bio_ma_offset = poff; 1346 bp->bio_data = unmapped_buf; 1347 bp->bio_flags |= BIO_UNMAPPED; 1348 atomic_add_int(&num_unmapped_aio, 1); 1349 } 1350 1351 offset += nbytes; 1352 } 1353 1354 /* Perform transfer. */ 1355 for (i = 0; i < iovcnt; i++) 1356 csw->d_strategy(bios[i]); 1357 free(bios, M_TEMP); 1358 1359 dev_relthread(dev, ref); 1360 return (0); 1361 1362 destroy_bios: 1363 for (; i >= 0; i--) 1364 aio_biocleanup(bios[i]); 1365 free(bios, M_TEMP); 1366 unref: 1367 dev_relthread(dev, ref); 1368 return (error); 1369 } 1370 1371 #ifdef COMPAT_FREEBSD6 1372 static int 1373 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig) 1374 { 1375 1376 /* 1377 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are 1378 * supported by AIO with the old sigevent structure. 1379 */ 1380 nsig->sigev_notify = osig->sigev_notify; 1381 switch (nsig->sigev_notify) { 1382 case SIGEV_NONE: 1383 break; 1384 case SIGEV_SIGNAL: 1385 nsig->sigev_signo = osig->__sigev_u.__sigev_signo; 1386 break; 1387 case SIGEV_KEVENT: 1388 nsig->sigev_notify_kqueue = 1389 osig->__sigev_u.__sigev_notify_kqueue; 1390 nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr; 1391 break; 1392 default: 1393 return (EINVAL); 1394 } 1395 return (0); 1396 } 1397 1398 static int 1399 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob, 1400 int type __unused) 1401 { 1402 struct oaiocb *ojob; 1403 struct aiocb *kcb = &kjob->uaiocb; 1404 int error; 1405 1406 bzero(kcb, sizeof(struct aiocb)); 1407 error = copyin(ujob, kcb, sizeof(struct oaiocb)); 1408 if (error) 1409 return (error); 1410 /* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */ 1411 ojob = (struct oaiocb *)kcb; 1412 return (convert_old_sigevent(&ojob->aio_sigevent, &kcb->aio_sigevent)); 1413 } 1414 #endif 1415 1416 static int 1417 aiocb_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type) 1418 { 1419 struct aiocb *kcb = &kjob->uaiocb; 1420 int error; 1421 1422 error = copyin(ujob, kcb, sizeof(struct aiocb)); 1423 if (error) 1424 return (error); 1425 if (type == LIO_READV || type == LIO_WRITEV) { 1426 /* malloc a uio and copy in the iovec */ 1427 error = copyinuio(__DEVOLATILE(struct iovec*, kcb->aio_iov), 1428 kcb->aio_iovcnt, &kjob->uiop); 1429 } 1430 1431 return (error); 1432 } 1433 1434 static long 1435 aiocb_fetch_status(struct aiocb *ujob) 1436 { 1437 1438 return (fuword(&ujob->_aiocb_private.status)); 1439 } 1440 1441 static long 1442 aiocb_fetch_error(struct aiocb *ujob) 1443 { 1444 1445 return (fuword(&ujob->_aiocb_private.error)); 1446 } 1447 1448 static int 1449 aiocb_store_status(struct aiocb *ujob, long status) 1450 { 1451 1452 return (suword(&ujob->_aiocb_private.status, status)); 1453 } 1454 1455 static int 1456 aiocb_store_error(struct aiocb *ujob, long error) 1457 { 1458 1459 return (suword(&ujob->_aiocb_private.error, error)); 1460 } 1461 1462 static int 1463 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref) 1464 { 1465 1466 return (suword(&ujob->_aiocb_private.kernelinfo, jobref)); 1467 } 1468 1469 static int 1470 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob) 1471 { 1472 1473 return (suword(ujobp, (long)ujob)); 1474 } 1475 1476 static struct aiocb_ops aiocb_ops = { 1477 .aio_copyin = aiocb_copyin, 1478 .fetch_status = aiocb_fetch_status, 1479 .fetch_error = aiocb_fetch_error, 1480 .store_status = aiocb_store_status, 1481 .store_error = aiocb_store_error, 1482 .store_kernelinfo = aiocb_store_kernelinfo, 1483 .store_aiocb = aiocb_store_aiocb, 1484 }; 1485 1486 #ifdef COMPAT_FREEBSD6 1487 static struct aiocb_ops aiocb_ops_osigevent = { 1488 .aio_copyin = aiocb_copyin_old_sigevent, 1489 .fetch_status = aiocb_fetch_status, 1490 .fetch_error = aiocb_fetch_error, 1491 .store_status = aiocb_store_status, 1492 .store_error = aiocb_store_error, 1493 .store_kernelinfo = aiocb_store_kernelinfo, 1494 .store_aiocb = aiocb_store_aiocb, 1495 }; 1496 #endif 1497 1498 /* 1499 * Queue a new AIO request. Choosing either the threaded or direct bio VCHR 1500 * technique is done in this code. 1501 */ 1502 int 1503 aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj, 1504 int type, struct aiocb_ops *ops) 1505 { 1506 struct proc *p = td->td_proc; 1507 struct file *fp = NULL; 1508 struct kaiocb *job; 1509 struct kaioinfo *ki; 1510 struct kevent kev; 1511 int opcode; 1512 int error; 1513 int fd, kqfd; 1514 int jid; 1515 u_short evflags; 1516 1517 if (p->p_aioinfo == NULL) 1518 aio_init_aioinfo(p); 1519 1520 ki = p->p_aioinfo; 1521 1522 ops->store_status(ujob, -1); 1523 ops->store_error(ujob, 0); 1524 ops->store_kernelinfo(ujob, -1); 1525 1526 if (num_queue_count >= max_queue_count || 1527 ki->kaio_count >= max_aio_queue_per_proc) { 1528 error = EAGAIN; 1529 goto err1; 1530 } 1531 1532 job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO); 1533 knlist_init_mtx(&job->klist, AIO_MTX(ki)); 1534 1535 error = ops->aio_copyin(ujob, job, type); 1536 if (error) 1537 goto err2; 1538 1539 if (job->uaiocb.aio_nbytes > IOSIZE_MAX) { 1540 error = EINVAL; 1541 goto err2; 1542 } 1543 1544 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT && 1545 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL && 1546 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID && 1547 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) { 1548 error = EINVAL; 1549 goto err2; 1550 } 1551 1552 if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || 1553 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) && 1554 !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) { 1555 error = EINVAL; 1556 goto err2; 1557 } 1558 1559 ksiginfo_init(&job->ksi); 1560 1561 /* Save userspace address of the job info. */ 1562 job->ujob = ujob; 1563 1564 /* Get the opcode. */ 1565 if (type != LIO_NOP) 1566 job->uaiocb.aio_lio_opcode = type; 1567 opcode = job->uaiocb.aio_lio_opcode; 1568 1569 /* 1570 * Validate the opcode and fetch the file object for the specified 1571 * file descriptor. 1572 * 1573 * XXXRW: Moved the opcode validation up here so that we don't 1574 * retrieve a file descriptor without knowing what the capabiltity 1575 * should be. 1576 */ 1577 fd = job->uaiocb.aio_fildes; 1578 switch (opcode) { 1579 case LIO_WRITE: 1580 case LIO_WRITEV: 1581 error = fget_write(td, fd, &cap_pwrite_rights, &fp); 1582 break; 1583 case LIO_READ: 1584 case LIO_READV: 1585 error = fget_read(td, fd, &cap_pread_rights, &fp); 1586 break; 1587 case LIO_SYNC: 1588 case LIO_DSYNC: 1589 error = fget(td, fd, &cap_fsync_rights, &fp); 1590 break; 1591 case LIO_MLOCK: 1592 break; 1593 case LIO_NOP: 1594 error = fget(td, fd, &cap_no_rights, &fp); 1595 break; 1596 default: 1597 error = EINVAL; 1598 } 1599 if (error) 1600 goto err3; 1601 1602 if ((opcode == LIO_SYNC || opcode == LIO_DSYNC) && fp->f_vnode == NULL) { 1603 error = EINVAL; 1604 goto err3; 1605 } 1606 1607 if ((opcode == LIO_READ || opcode == LIO_READV || 1608 opcode == LIO_WRITE || opcode == LIO_WRITEV) && 1609 job->uaiocb.aio_offset < 0 && 1610 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) { 1611 error = EINVAL; 1612 goto err3; 1613 } 1614 1615 job->fd_file = fp; 1616 1617 mtx_lock(&aio_job_mtx); 1618 jid = jobrefid++; 1619 job->seqno = jobseqno++; 1620 mtx_unlock(&aio_job_mtx); 1621 error = ops->store_kernelinfo(ujob, jid); 1622 if (error) { 1623 error = EINVAL; 1624 goto err3; 1625 } 1626 job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid; 1627 1628 if (opcode == LIO_NOP) { 1629 fdrop(fp, td); 1630 MPASS(job->uiop == &job->uio || job->uiop == NULL); 1631 uma_zfree(aiocb_zone, job); 1632 return (0); 1633 } 1634 1635 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT) 1636 goto no_kqueue; 1637 evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags; 1638 if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) { 1639 error = EINVAL; 1640 goto err3; 1641 } 1642 kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue; 1643 memset(&kev, 0, sizeof(kev)); 1644 kev.ident = (uintptr_t)job->ujob; 1645 kev.filter = EVFILT_AIO; 1646 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags; 1647 kev.data = (intptr_t)job; 1648 kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr; 1649 error = kqfd_register(kqfd, &kev, td, M_WAITOK); 1650 if (error) 1651 goto err3; 1652 1653 no_kqueue: 1654 1655 ops->store_error(ujob, EINPROGRESS); 1656 job->uaiocb._aiocb_private.error = EINPROGRESS; 1657 job->userproc = p; 1658 job->cred = crhold(td->td_ucred); 1659 job->jobflags = KAIOCB_QUEUEING; 1660 job->lio = lj; 1661 1662 switch (opcode) { 1663 case LIO_READV: 1664 case LIO_WRITEV: 1665 /* Use the uio copied in by aio_copyin */ 1666 MPASS(job->uiop != &job->uio && job->uiop != NULL); 1667 break; 1668 case LIO_READ: 1669 case LIO_WRITE: 1670 /* Setup the inline uio */ 1671 job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf; 1672 job->iov[0].iov_len = job->uaiocb.aio_nbytes; 1673 job->uio.uio_iov = job->iov; 1674 job->uio.uio_iovcnt = 1; 1675 job->uio.uio_resid = job->uaiocb.aio_nbytes; 1676 job->uio.uio_segflg = UIO_USERSPACE; 1677 /* FALLTHROUGH */ 1678 default: 1679 job->uiop = &job->uio; 1680 break; 1681 } 1682 switch (opcode) { 1683 case LIO_READ: 1684 case LIO_READV: 1685 job->uiop->uio_rw = UIO_READ; 1686 break; 1687 case LIO_WRITE: 1688 case LIO_WRITEV: 1689 job->uiop->uio_rw = UIO_WRITE; 1690 break; 1691 } 1692 job->uiop->uio_offset = job->uaiocb.aio_offset; 1693 job->uiop->uio_td = td; 1694 1695 if (opcode == LIO_MLOCK) { 1696 aio_schedule(job, aio_process_mlock); 1697 error = 0; 1698 } else if (fp->f_ops->fo_aio_queue == NULL) 1699 error = aio_queue_file(fp, job); 1700 else 1701 error = fo_aio_queue(fp, job); 1702 if (error) 1703 goto err3; 1704 1705 AIO_LOCK(ki); 1706 job->jobflags &= ~KAIOCB_QUEUEING; 1707 TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist); 1708 ki->kaio_count++; 1709 if (lj) 1710 lj->lioj_count++; 1711 atomic_add_int(&num_queue_count, 1); 1712 if (job->jobflags & KAIOCB_FINISHED) { 1713 /* 1714 * The queue callback completed the request synchronously. 1715 * The bulk of the completion is deferred in that case 1716 * until this point. 1717 */ 1718 aio_bio_done_notify(p, job); 1719 } else 1720 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist); 1721 AIO_UNLOCK(ki); 1722 return (0); 1723 1724 err3: 1725 if (fp) 1726 fdrop(fp, td); 1727 knlist_delete(&job->klist, curthread, 0); 1728 err2: 1729 if (job->uiop != &job->uio) 1730 free(job->uiop, M_IOV); 1731 uma_zfree(aiocb_zone, job); 1732 err1: 1733 ops->store_error(ujob, error); 1734 return (error); 1735 } 1736 1737 static void 1738 aio_cancel_daemon_job(struct kaiocb *job) 1739 { 1740 1741 mtx_lock(&aio_job_mtx); 1742 if (!aio_cancel_cleared(job)) 1743 TAILQ_REMOVE(&aio_jobs, job, list); 1744 mtx_unlock(&aio_job_mtx); 1745 aio_cancel(job); 1746 } 1747 1748 void 1749 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func) 1750 { 1751 1752 mtx_lock(&aio_job_mtx); 1753 if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) { 1754 mtx_unlock(&aio_job_mtx); 1755 aio_cancel(job); 1756 return; 1757 } 1758 job->handle_fn = func; 1759 TAILQ_INSERT_TAIL(&aio_jobs, job, list); 1760 aio_kick_nowait(job->userproc); 1761 mtx_unlock(&aio_job_mtx); 1762 } 1763 1764 static void 1765 aio_cancel_sync(struct kaiocb *job) 1766 { 1767 struct kaioinfo *ki; 1768 1769 ki = job->userproc->p_aioinfo; 1770 AIO_LOCK(ki); 1771 if (!aio_cancel_cleared(job)) 1772 TAILQ_REMOVE(&ki->kaio_syncqueue, job, list); 1773 AIO_UNLOCK(ki); 1774 aio_cancel(job); 1775 } 1776 1777 int 1778 aio_queue_file(struct file *fp, struct kaiocb *job) 1779 { 1780 struct kaioinfo *ki; 1781 struct kaiocb *job2; 1782 struct vnode *vp; 1783 struct mount *mp; 1784 int error; 1785 bool safe; 1786 1787 ki = job->userproc->p_aioinfo; 1788 error = aio_qbio(job->userproc, job); 1789 if (error >= 0) 1790 return (error); 1791 safe = false; 1792 if (fp->f_type == DTYPE_VNODE) { 1793 vp = fp->f_vnode; 1794 if (vp->v_type == VREG || vp->v_type == VDIR) { 1795 mp = fp->f_vnode->v_mount; 1796 if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0) 1797 safe = true; 1798 } 1799 } 1800 if (!(safe || enable_aio_unsafe)) { 1801 counted_warning(&unsafe_warningcnt, 1802 "is attempting to use unsafe AIO requests"); 1803 return (EOPNOTSUPP); 1804 } 1805 1806 switch (job->uaiocb.aio_lio_opcode) { 1807 case LIO_READ: 1808 case LIO_READV: 1809 case LIO_WRITE: 1810 case LIO_WRITEV: 1811 aio_schedule(job, aio_process_rw); 1812 error = 0; 1813 break; 1814 case LIO_SYNC: 1815 case LIO_DSYNC: 1816 AIO_LOCK(ki); 1817 TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) { 1818 if (job2->fd_file == job->fd_file && 1819 job2->uaiocb.aio_lio_opcode != LIO_SYNC && 1820 job2->uaiocb.aio_lio_opcode != LIO_DSYNC && 1821 job2->seqno < job->seqno) { 1822 job2->jobflags |= KAIOCB_CHECKSYNC; 1823 job->pending++; 1824 } 1825 } 1826 if (job->pending != 0) { 1827 if (!aio_set_cancel_function_locked(job, 1828 aio_cancel_sync)) { 1829 AIO_UNLOCK(ki); 1830 aio_cancel(job); 1831 return (0); 1832 } 1833 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list); 1834 AIO_UNLOCK(ki); 1835 return (0); 1836 } 1837 AIO_UNLOCK(ki); 1838 aio_schedule(job, aio_process_sync); 1839 error = 0; 1840 break; 1841 default: 1842 error = EINVAL; 1843 } 1844 return (error); 1845 } 1846 1847 static void 1848 aio_kick_nowait(struct proc *userp) 1849 { 1850 struct kaioinfo *ki = userp->p_aioinfo; 1851 struct aioproc *aiop; 1852 1853 mtx_assert(&aio_job_mtx, MA_OWNED); 1854 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1855 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1856 aiop->aioprocflags &= ~AIOP_FREE; 1857 wakeup(aiop->aioproc); 1858 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs && 1859 ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) { 1860 taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task); 1861 } 1862 } 1863 1864 static int 1865 aio_kick(struct proc *userp) 1866 { 1867 struct kaioinfo *ki = userp->p_aioinfo; 1868 struct aioproc *aiop; 1869 int error, ret = 0; 1870 1871 mtx_assert(&aio_job_mtx, MA_OWNED); 1872 retryproc: 1873 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1874 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1875 aiop->aioprocflags &= ~AIOP_FREE; 1876 wakeup(aiop->aioproc); 1877 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs && 1878 ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) { 1879 num_aio_resv_start++; 1880 mtx_unlock(&aio_job_mtx); 1881 error = aio_newproc(&num_aio_resv_start); 1882 mtx_lock(&aio_job_mtx); 1883 if (error) { 1884 num_aio_resv_start--; 1885 goto retryproc; 1886 } 1887 } else { 1888 ret = -1; 1889 } 1890 return (ret); 1891 } 1892 1893 static void 1894 aio_kick_helper(void *context, int pending) 1895 { 1896 struct proc *userp = context; 1897 1898 mtx_lock(&aio_job_mtx); 1899 while (--pending >= 0) { 1900 if (aio_kick(userp)) 1901 break; 1902 } 1903 mtx_unlock(&aio_job_mtx); 1904 } 1905 1906 /* 1907 * Support the aio_return system call, as a side-effect, kernel resources are 1908 * released. 1909 */ 1910 static int 1911 kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops) 1912 { 1913 struct proc *p = td->td_proc; 1914 struct kaiocb *job; 1915 struct kaioinfo *ki; 1916 long status, error; 1917 1918 ki = p->p_aioinfo; 1919 if (ki == NULL) 1920 return (EINVAL); 1921 AIO_LOCK(ki); 1922 TAILQ_FOREACH(job, &ki->kaio_done, plist) { 1923 if (job->ujob == ujob) 1924 break; 1925 } 1926 if (job != NULL) { 1927 MPASS(job->jobflags & KAIOCB_FINISHED); 1928 status = job->uaiocb._aiocb_private.status; 1929 error = job->uaiocb._aiocb_private.error; 1930 td->td_retval[0] = status; 1931 td->td_ru.ru_oublock += job->outblock; 1932 td->td_ru.ru_inblock += job->inblock; 1933 td->td_ru.ru_msgsnd += job->msgsnd; 1934 td->td_ru.ru_msgrcv += job->msgrcv; 1935 aio_free_entry(job); 1936 AIO_UNLOCK(ki); 1937 ops->store_error(ujob, error); 1938 ops->store_status(ujob, status); 1939 } else { 1940 error = EINVAL; 1941 AIO_UNLOCK(ki); 1942 } 1943 return (error); 1944 } 1945 1946 int 1947 sys_aio_return(struct thread *td, struct aio_return_args *uap) 1948 { 1949 1950 return (kern_aio_return(td, uap->aiocbp, &aiocb_ops)); 1951 } 1952 1953 /* 1954 * Allow a process to wakeup when any of the I/O requests are completed. 1955 */ 1956 static int 1957 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist, 1958 struct timespec *ts) 1959 { 1960 struct proc *p = td->td_proc; 1961 struct timeval atv; 1962 struct kaioinfo *ki; 1963 struct kaiocb *firstjob, *job; 1964 int error, i, timo; 1965 1966 timo = 0; 1967 if (ts) { 1968 if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000) 1969 return (EINVAL); 1970 1971 TIMESPEC_TO_TIMEVAL(&atv, ts); 1972 if (itimerfix(&atv)) 1973 return (EINVAL); 1974 timo = tvtohz(&atv); 1975 } 1976 1977 ki = p->p_aioinfo; 1978 if (ki == NULL) 1979 return (EAGAIN); 1980 1981 if (njoblist == 0) 1982 return (0); 1983 1984 AIO_LOCK(ki); 1985 for (;;) { 1986 firstjob = NULL; 1987 error = 0; 1988 TAILQ_FOREACH(job, &ki->kaio_all, allist) { 1989 for (i = 0; i < njoblist; i++) { 1990 if (job->ujob == ujoblist[i]) { 1991 if (firstjob == NULL) 1992 firstjob = job; 1993 if (job->jobflags & KAIOCB_FINISHED) 1994 goto RETURN; 1995 } 1996 } 1997 } 1998 /* All tasks were finished. */ 1999 if (firstjob == NULL) 2000 break; 2001 2002 ki->kaio_flags |= KAIO_WAKEUP; 2003 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH, 2004 "aiospn", timo); 2005 if (error == ERESTART) 2006 error = EINTR; 2007 if (error) 2008 break; 2009 } 2010 RETURN: 2011 AIO_UNLOCK(ki); 2012 return (error); 2013 } 2014 2015 int 2016 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap) 2017 { 2018 struct timespec ts, *tsp; 2019 struct aiocb **ujoblist; 2020 int error; 2021 2022 if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc) 2023 return (EINVAL); 2024 2025 if (uap->timeout) { 2026 /* Get timespec struct. */ 2027 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) 2028 return (error); 2029 tsp = &ts; 2030 } else 2031 tsp = NULL; 2032 2033 ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK); 2034 error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0])); 2035 if (error == 0) 2036 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp); 2037 free(ujoblist, M_AIOS); 2038 return (error); 2039 } 2040 2041 /* 2042 * aio_cancel cancels any non-bio aio operations not currently in progress. 2043 */ 2044 int 2045 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap) 2046 { 2047 struct proc *p = td->td_proc; 2048 struct kaioinfo *ki; 2049 struct kaiocb *job, *jobn; 2050 struct file *fp; 2051 int error; 2052 int cancelled = 0; 2053 int notcancelled = 0; 2054 struct vnode *vp; 2055 2056 /* Lookup file object. */ 2057 error = fget(td, uap->fd, &cap_no_rights, &fp); 2058 if (error) 2059 return (error); 2060 2061 ki = p->p_aioinfo; 2062 if (ki == NULL) 2063 goto done; 2064 2065 if (fp->f_type == DTYPE_VNODE) { 2066 vp = fp->f_vnode; 2067 if (vn_isdisk(vp)) { 2068 fdrop(fp, td); 2069 td->td_retval[0] = AIO_NOTCANCELED; 2070 return (0); 2071 } 2072 } 2073 2074 AIO_LOCK(ki); 2075 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { 2076 if ((uap->fd == job->uaiocb.aio_fildes) && 2077 ((uap->aiocbp == NULL) || 2078 (uap->aiocbp == job->ujob))) { 2079 if (aio_cancel_job(p, ki, job)) { 2080 cancelled++; 2081 } else { 2082 notcancelled++; 2083 } 2084 if (uap->aiocbp != NULL) 2085 break; 2086 } 2087 } 2088 AIO_UNLOCK(ki); 2089 2090 done: 2091 fdrop(fp, td); 2092 2093 if (uap->aiocbp != NULL) { 2094 if (cancelled) { 2095 td->td_retval[0] = AIO_CANCELED; 2096 return (0); 2097 } 2098 } 2099 2100 if (notcancelled) { 2101 td->td_retval[0] = AIO_NOTCANCELED; 2102 return (0); 2103 } 2104 2105 if (cancelled) { 2106 td->td_retval[0] = AIO_CANCELED; 2107 return (0); 2108 } 2109 2110 td->td_retval[0] = AIO_ALLDONE; 2111 2112 return (0); 2113 } 2114 2115 /* 2116 * aio_error is implemented in the kernel level for compatibility purposes 2117 * only. For a user mode async implementation, it would be best to do it in 2118 * a userland subroutine. 2119 */ 2120 static int 2121 kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops) 2122 { 2123 struct proc *p = td->td_proc; 2124 struct kaiocb *job; 2125 struct kaioinfo *ki; 2126 int status; 2127 2128 ki = p->p_aioinfo; 2129 if (ki == NULL) { 2130 td->td_retval[0] = EINVAL; 2131 return (0); 2132 } 2133 2134 AIO_LOCK(ki); 2135 TAILQ_FOREACH(job, &ki->kaio_all, allist) { 2136 if (job->ujob == ujob) { 2137 if (job->jobflags & KAIOCB_FINISHED) 2138 td->td_retval[0] = 2139 job->uaiocb._aiocb_private.error; 2140 else 2141 td->td_retval[0] = EINPROGRESS; 2142 AIO_UNLOCK(ki); 2143 return (0); 2144 } 2145 } 2146 AIO_UNLOCK(ki); 2147 2148 /* 2149 * Hack for failure of aio_aqueue. 2150 */ 2151 status = ops->fetch_status(ujob); 2152 if (status == -1) { 2153 td->td_retval[0] = ops->fetch_error(ujob); 2154 return (0); 2155 } 2156 2157 td->td_retval[0] = EINVAL; 2158 return (0); 2159 } 2160 2161 int 2162 sys_aio_error(struct thread *td, struct aio_error_args *uap) 2163 { 2164 2165 return (kern_aio_error(td, uap->aiocbp, &aiocb_ops)); 2166 } 2167 2168 /* syscall - asynchronous read from a file (REALTIME) */ 2169 #ifdef COMPAT_FREEBSD6 2170 int 2171 freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap) 2172 { 2173 2174 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 2175 &aiocb_ops_osigevent)); 2176 } 2177 #endif 2178 2179 int 2180 sys_aio_read(struct thread *td, struct aio_read_args *uap) 2181 { 2182 2183 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops)); 2184 } 2185 2186 int 2187 sys_aio_readv(struct thread *td, struct aio_readv_args *uap) 2188 { 2189 2190 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READV, &aiocb_ops)); 2191 } 2192 2193 /* syscall - asynchronous write to a file (REALTIME) */ 2194 #ifdef COMPAT_FREEBSD6 2195 int 2196 freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap) 2197 { 2198 2199 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 2200 &aiocb_ops_osigevent)); 2201 } 2202 #endif 2203 2204 int 2205 sys_aio_write(struct thread *td, struct aio_write_args *uap) 2206 { 2207 2208 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops)); 2209 } 2210 2211 int 2212 sys_aio_writev(struct thread *td, struct aio_writev_args *uap) 2213 { 2214 2215 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITEV, &aiocb_ops)); 2216 } 2217 2218 int 2219 sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap) 2220 { 2221 2222 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops)); 2223 } 2224 2225 static int 2226 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list, 2227 struct aiocb **acb_list, int nent, struct sigevent *sig, 2228 struct aiocb_ops *ops) 2229 { 2230 struct proc *p = td->td_proc; 2231 struct aiocb *job; 2232 struct kaioinfo *ki; 2233 struct aioliojob *lj; 2234 struct kevent kev; 2235 int error; 2236 int nagain, nerror; 2237 int i; 2238 2239 if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT)) 2240 return (EINVAL); 2241 2242 if (nent < 0 || nent > max_aio_queue_per_proc) 2243 return (EINVAL); 2244 2245 if (p->p_aioinfo == NULL) 2246 aio_init_aioinfo(p); 2247 2248 ki = p->p_aioinfo; 2249 2250 lj = uma_zalloc(aiolio_zone, M_WAITOK); 2251 lj->lioj_flags = 0; 2252 lj->lioj_count = 0; 2253 lj->lioj_finished_count = 0; 2254 knlist_init_mtx(&lj->klist, AIO_MTX(ki)); 2255 ksiginfo_init(&lj->lioj_ksi); 2256 2257 /* 2258 * Setup signal. 2259 */ 2260 if (sig && (mode == LIO_NOWAIT)) { 2261 bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal)); 2262 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 2263 /* Assume only new style KEVENT */ 2264 memset(&kev, 0, sizeof(kev)); 2265 kev.filter = EVFILT_LIO; 2266 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1; 2267 kev.ident = (uintptr_t)uacb_list; /* something unique */ 2268 kev.data = (intptr_t)lj; 2269 /* pass user defined sigval data */ 2270 kev.udata = lj->lioj_signal.sigev_value.sival_ptr; 2271 error = kqfd_register( 2272 lj->lioj_signal.sigev_notify_kqueue, &kev, td, 2273 M_WAITOK); 2274 if (error) { 2275 uma_zfree(aiolio_zone, lj); 2276 return (error); 2277 } 2278 } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) { 2279 ; 2280 } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 2281 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) { 2282 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) { 2283 uma_zfree(aiolio_zone, lj); 2284 return EINVAL; 2285 } 2286 lj->lioj_flags |= LIOJ_SIGNAL; 2287 } else { 2288 uma_zfree(aiolio_zone, lj); 2289 return EINVAL; 2290 } 2291 } 2292 2293 AIO_LOCK(ki); 2294 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list); 2295 /* 2296 * Add extra aiocb count to avoid the lio to be freed 2297 * by other threads doing aio_waitcomplete or aio_return, 2298 * and prevent event from being sent until we have queued 2299 * all tasks. 2300 */ 2301 lj->lioj_count = 1; 2302 AIO_UNLOCK(ki); 2303 2304 /* 2305 * Get pointers to the list of I/O requests. 2306 */ 2307 nagain = 0; 2308 nerror = 0; 2309 for (i = 0; i < nent; i++) { 2310 job = acb_list[i]; 2311 if (job != NULL) { 2312 error = aio_aqueue(td, job, lj, LIO_NOP, ops); 2313 if (error == EAGAIN) 2314 nagain++; 2315 else if (error != 0) 2316 nerror++; 2317 } 2318 } 2319 2320 error = 0; 2321 AIO_LOCK(ki); 2322 if (mode == LIO_WAIT) { 2323 while (lj->lioj_count - 1 != lj->lioj_finished_count) { 2324 ki->kaio_flags |= KAIO_WAKEUP; 2325 error = msleep(&p->p_aioinfo, AIO_MTX(ki), 2326 PRIBIO | PCATCH, "aiospn", 0); 2327 if (error == ERESTART) 2328 error = EINTR; 2329 if (error) 2330 break; 2331 } 2332 } else { 2333 if (lj->lioj_count - 1 == lj->lioj_finished_count) { 2334 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 2335 lj->lioj_flags |= LIOJ_KEVENT_POSTED; 2336 KNOTE_LOCKED(&lj->klist, 1); 2337 } 2338 if ((lj->lioj_flags & (LIOJ_SIGNAL | 2339 LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL && 2340 (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 2341 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { 2342 aio_sendsig(p, &lj->lioj_signal, &lj->lioj_ksi, 2343 lj->lioj_count != 1); 2344 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 2345 } 2346 } 2347 } 2348 lj->lioj_count--; 2349 if (lj->lioj_count == 0) { 2350 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 2351 knlist_delete(&lj->klist, curthread, 1); 2352 PROC_LOCK(p); 2353 sigqueue_take(&lj->lioj_ksi); 2354 PROC_UNLOCK(p); 2355 AIO_UNLOCK(ki); 2356 uma_zfree(aiolio_zone, lj); 2357 } else 2358 AIO_UNLOCK(ki); 2359 2360 if (nerror) 2361 return (EIO); 2362 else if (nagain) 2363 return (EAGAIN); 2364 else 2365 return (error); 2366 } 2367 2368 /* syscall - list directed I/O (REALTIME) */ 2369 #ifdef COMPAT_FREEBSD6 2370 int 2371 freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap) 2372 { 2373 struct aiocb **acb_list; 2374 struct sigevent *sigp, sig; 2375 struct osigevent osig; 2376 int error, nent; 2377 2378 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2379 return (EINVAL); 2380 2381 nent = uap->nent; 2382 if (nent < 0 || nent > max_aio_queue_per_proc) 2383 return (EINVAL); 2384 2385 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 2386 error = copyin(uap->sig, &osig, sizeof(osig)); 2387 if (error) 2388 return (error); 2389 error = convert_old_sigevent(&osig, &sig); 2390 if (error) 2391 return (error); 2392 sigp = &sig; 2393 } else 2394 sigp = NULL; 2395 2396 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 2397 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0])); 2398 if (error == 0) 2399 error = kern_lio_listio(td, uap->mode, 2400 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, 2401 &aiocb_ops_osigevent); 2402 free(acb_list, M_LIO); 2403 return (error); 2404 } 2405 #endif 2406 2407 /* syscall - list directed I/O (REALTIME) */ 2408 int 2409 sys_lio_listio(struct thread *td, struct lio_listio_args *uap) 2410 { 2411 struct aiocb **acb_list; 2412 struct sigevent *sigp, sig; 2413 int error, nent; 2414 2415 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2416 return (EINVAL); 2417 2418 nent = uap->nent; 2419 if (nent < 0 || nent > max_aio_queue_per_proc) 2420 return (EINVAL); 2421 2422 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 2423 error = copyin(uap->sig, &sig, sizeof(sig)); 2424 if (error) 2425 return (error); 2426 sigp = &sig; 2427 } else 2428 sigp = NULL; 2429 2430 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 2431 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0])); 2432 if (error == 0) 2433 error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list, 2434 nent, sigp, &aiocb_ops); 2435 free(acb_list, M_LIO); 2436 return (error); 2437 } 2438 2439 static void 2440 aio_biocleanup(struct bio *bp) 2441 { 2442 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; 2443 struct kaioinfo *ki; 2444 struct buf *pbuf = (struct buf *)bp->bio_caller2; 2445 2446 /* Release mapping into kernel space. */ 2447 if (pbuf != NULL) { 2448 MPASS(pbuf->b_npages <= atop(maxphys) + 1); 2449 pmap_qremove((vm_offset_t)pbuf->b_data, pbuf->b_npages); 2450 vm_page_unhold_pages(pbuf->b_pages, pbuf->b_npages); 2451 uma_zfree(pbuf_zone, pbuf); 2452 atomic_subtract_int(&num_buf_aio, 1); 2453 ki = job->userproc->p_aioinfo; 2454 AIO_LOCK(ki); 2455 ki->kaio_buffer_count--; 2456 AIO_UNLOCK(ki); 2457 } else { 2458 MPASS(bp->bio_ma_n <= atop(maxphys) + 1); 2459 vm_page_unhold_pages(bp->bio_ma, bp->bio_ma_n); 2460 free(bp->bio_ma, M_TEMP); 2461 atomic_subtract_int(&num_unmapped_aio, 1); 2462 } 2463 g_destroy_bio(bp); 2464 } 2465 2466 static void 2467 aio_biowakeup(struct bio *bp) 2468 { 2469 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; 2470 size_t nbytes; 2471 long bcount = bp->bio_bcount; 2472 long resid = bp->bio_resid; 2473 int error, opcode, nblks; 2474 int bio_error = bp->bio_error; 2475 uint16_t flags = bp->bio_flags; 2476 2477 opcode = job->uaiocb.aio_lio_opcode; 2478 2479 aio_biocleanup(bp); 2480 2481 nbytes =bcount - resid; 2482 atomic_add_acq_long(&job->nbytes, nbytes); 2483 nblks = btodb(nbytes); 2484 error = 0; 2485 /* 2486 * If multiple bios experienced an error, the job will reflect the 2487 * error of whichever failed bio completed last. 2488 */ 2489 if (flags & BIO_ERROR) 2490 atomic_set_int(&job->error, bio_error); 2491 if (opcode == LIO_WRITE || opcode == LIO_WRITEV) 2492 atomic_add_int(&job->outblock, nblks); 2493 else 2494 atomic_add_int(&job->inblock, nblks); 2495 atomic_subtract_int(&job->nbio, 1); 2496 2497 2498 if (atomic_load_int(&job->nbio) == 0) { 2499 if (atomic_load_int(&job->error)) 2500 aio_complete(job, -1, job->error); 2501 else 2502 aio_complete(job, atomic_load_long(&job->nbytes), 0); 2503 } 2504 } 2505 2506 /* syscall - wait for the next completion of an aio request */ 2507 static int 2508 kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp, 2509 struct timespec *ts, struct aiocb_ops *ops) 2510 { 2511 struct proc *p = td->td_proc; 2512 struct timeval atv; 2513 struct kaioinfo *ki; 2514 struct kaiocb *job; 2515 struct aiocb *ujob; 2516 long error, status; 2517 int timo; 2518 2519 ops->store_aiocb(ujobp, NULL); 2520 2521 if (ts == NULL) { 2522 timo = 0; 2523 } else if (ts->tv_sec == 0 && ts->tv_nsec == 0) { 2524 timo = -1; 2525 } else { 2526 if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000)) 2527 return (EINVAL); 2528 2529 TIMESPEC_TO_TIMEVAL(&atv, ts); 2530 if (itimerfix(&atv)) 2531 return (EINVAL); 2532 timo = tvtohz(&atv); 2533 } 2534 2535 if (p->p_aioinfo == NULL) 2536 aio_init_aioinfo(p); 2537 ki = p->p_aioinfo; 2538 2539 error = 0; 2540 job = NULL; 2541 AIO_LOCK(ki); 2542 while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) { 2543 if (timo == -1) { 2544 error = EWOULDBLOCK; 2545 break; 2546 } 2547 ki->kaio_flags |= KAIO_WAKEUP; 2548 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH, 2549 "aiowc", timo); 2550 if (timo && error == ERESTART) 2551 error = EINTR; 2552 if (error) 2553 break; 2554 } 2555 2556 if (job != NULL) { 2557 MPASS(job->jobflags & KAIOCB_FINISHED); 2558 ujob = job->ujob; 2559 status = job->uaiocb._aiocb_private.status; 2560 error = job->uaiocb._aiocb_private.error; 2561 td->td_retval[0] = status; 2562 td->td_ru.ru_oublock += job->outblock; 2563 td->td_ru.ru_inblock += job->inblock; 2564 td->td_ru.ru_msgsnd += job->msgsnd; 2565 td->td_ru.ru_msgrcv += job->msgrcv; 2566 aio_free_entry(job); 2567 AIO_UNLOCK(ki); 2568 ops->store_aiocb(ujobp, ujob); 2569 ops->store_error(ujob, error); 2570 ops->store_status(ujob, status); 2571 } else 2572 AIO_UNLOCK(ki); 2573 2574 return (error); 2575 } 2576 2577 int 2578 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap) 2579 { 2580 struct timespec ts, *tsp; 2581 int error; 2582 2583 if (uap->timeout) { 2584 /* Get timespec struct. */ 2585 error = copyin(uap->timeout, &ts, sizeof(ts)); 2586 if (error) 2587 return (error); 2588 tsp = &ts; 2589 } else 2590 tsp = NULL; 2591 2592 return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops)); 2593 } 2594 2595 static int 2596 kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob, 2597 struct aiocb_ops *ops) 2598 { 2599 int listop; 2600 2601 switch (op) { 2602 case O_SYNC: 2603 listop = LIO_SYNC; 2604 break; 2605 case O_DSYNC: 2606 listop = LIO_DSYNC; 2607 break; 2608 default: 2609 return (EINVAL); 2610 } 2611 2612 return (aio_aqueue(td, ujob, NULL, listop, ops)); 2613 } 2614 2615 int 2616 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap) 2617 { 2618 2619 return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops)); 2620 } 2621 2622 /* kqueue attach function */ 2623 static int 2624 filt_aioattach(struct knote *kn) 2625 { 2626 struct kaiocb *job; 2627 2628 job = (struct kaiocb *)(uintptr_t)kn->kn_sdata; 2629 2630 /* 2631 * The job pointer must be validated before using it, so 2632 * registration is restricted to the kernel; the user cannot 2633 * set EV_FLAG1. 2634 */ 2635 if ((kn->kn_flags & EV_FLAG1) == 0) 2636 return (EPERM); 2637 kn->kn_ptr.p_aio = job; 2638 kn->kn_flags &= ~EV_FLAG1; 2639 2640 knlist_add(&job->klist, kn, 0); 2641 2642 return (0); 2643 } 2644 2645 /* kqueue detach function */ 2646 static void 2647 filt_aiodetach(struct knote *kn) 2648 { 2649 struct knlist *knl; 2650 2651 knl = &kn->kn_ptr.p_aio->klist; 2652 knl->kl_lock(knl->kl_lockarg); 2653 if (!knlist_empty(knl)) 2654 knlist_remove(knl, kn, 1); 2655 knl->kl_unlock(knl->kl_lockarg); 2656 } 2657 2658 /* kqueue filter function */ 2659 /*ARGSUSED*/ 2660 static int 2661 filt_aio(struct knote *kn, long hint) 2662 { 2663 struct kaiocb *job = kn->kn_ptr.p_aio; 2664 2665 kn->kn_data = job->uaiocb._aiocb_private.error; 2666 if (!(job->jobflags & KAIOCB_FINISHED)) 2667 return (0); 2668 kn->kn_flags |= EV_EOF; 2669 return (1); 2670 } 2671 2672 /* kqueue attach function */ 2673 static int 2674 filt_lioattach(struct knote *kn) 2675 { 2676 struct aioliojob *lj; 2677 2678 lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata; 2679 2680 /* 2681 * The aioliojob pointer must be validated before using it, so 2682 * registration is restricted to the kernel; the user cannot 2683 * set EV_FLAG1. 2684 */ 2685 if ((kn->kn_flags & EV_FLAG1) == 0) 2686 return (EPERM); 2687 kn->kn_ptr.p_lio = lj; 2688 kn->kn_flags &= ~EV_FLAG1; 2689 2690 knlist_add(&lj->klist, kn, 0); 2691 2692 return (0); 2693 } 2694 2695 /* kqueue detach function */ 2696 static void 2697 filt_liodetach(struct knote *kn) 2698 { 2699 struct knlist *knl; 2700 2701 knl = &kn->kn_ptr.p_lio->klist; 2702 knl->kl_lock(knl->kl_lockarg); 2703 if (!knlist_empty(knl)) 2704 knlist_remove(knl, kn, 1); 2705 knl->kl_unlock(knl->kl_lockarg); 2706 } 2707 2708 /* kqueue filter function */ 2709 /*ARGSUSED*/ 2710 static int 2711 filt_lio(struct knote *kn, long hint) 2712 { 2713 struct aioliojob * lj = kn->kn_ptr.p_lio; 2714 2715 return (lj->lioj_flags & LIOJ_KEVENT_POSTED); 2716 } 2717 2718 #ifdef COMPAT_FREEBSD32 2719 #include <sys/mount.h> 2720 #include <sys/socket.h> 2721 #include <compat/freebsd32/freebsd32.h> 2722 #include <compat/freebsd32/freebsd32_proto.h> 2723 #include <compat/freebsd32/freebsd32_signal.h> 2724 #include <compat/freebsd32/freebsd32_syscall.h> 2725 #include <compat/freebsd32/freebsd32_util.h> 2726 2727 struct __aiocb_private32 { 2728 int32_t status; 2729 int32_t error; 2730 uint32_t kernelinfo; 2731 }; 2732 2733 #ifdef COMPAT_FREEBSD6 2734 typedef struct oaiocb32 { 2735 int aio_fildes; /* File descriptor */ 2736 uint64_t aio_offset __packed; /* File offset for I/O */ 2737 uint32_t aio_buf; /* I/O buffer in process space */ 2738 uint32_t aio_nbytes; /* Number of bytes for I/O */ 2739 struct osigevent32 aio_sigevent; /* Signal to deliver */ 2740 int aio_lio_opcode; /* LIO opcode */ 2741 int aio_reqprio; /* Request priority -- ignored */ 2742 struct __aiocb_private32 _aiocb_private; 2743 } oaiocb32_t; 2744 #endif 2745 2746 typedef struct aiocb32 { 2747 int32_t aio_fildes; /* File descriptor */ 2748 uint64_t aio_offset __packed; /* File offset for I/O */ 2749 uint32_t aio_buf; /* I/O buffer in process space */ 2750 uint32_t aio_nbytes; /* Number of bytes for I/O */ 2751 int __spare__[2]; 2752 uint32_t __spare2__; 2753 int aio_lio_opcode; /* LIO opcode */ 2754 int aio_reqprio; /* Request priority -- ignored */ 2755 struct __aiocb_private32 _aiocb_private; 2756 struct sigevent32 aio_sigevent; /* Signal to deliver */ 2757 } aiocb32_t; 2758 2759 #ifdef COMPAT_FREEBSD6 2760 static int 2761 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig) 2762 { 2763 2764 /* 2765 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are 2766 * supported by AIO with the old sigevent structure. 2767 */ 2768 CP(*osig, *nsig, sigev_notify); 2769 switch (nsig->sigev_notify) { 2770 case SIGEV_NONE: 2771 break; 2772 case SIGEV_SIGNAL: 2773 nsig->sigev_signo = osig->__sigev_u.__sigev_signo; 2774 break; 2775 case SIGEV_KEVENT: 2776 nsig->sigev_notify_kqueue = 2777 osig->__sigev_u.__sigev_notify_kqueue; 2778 PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr); 2779 break; 2780 default: 2781 return (EINVAL); 2782 } 2783 return (0); 2784 } 2785 2786 static int 2787 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct kaiocb *kjob, 2788 int type __unused) 2789 { 2790 struct oaiocb32 job32; 2791 struct aiocb *kcb = &kjob->uaiocb; 2792 int error; 2793 2794 bzero(kcb, sizeof(struct aiocb)); 2795 error = copyin(ujob, &job32, sizeof(job32)); 2796 if (error) 2797 return (error); 2798 2799 /* No need to copyin aio_iov, because it did not exist in FreeBSD 6 */ 2800 2801 CP(job32, *kcb, aio_fildes); 2802 CP(job32, *kcb, aio_offset); 2803 PTRIN_CP(job32, *kcb, aio_buf); 2804 CP(job32, *kcb, aio_nbytes); 2805 CP(job32, *kcb, aio_lio_opcode); 2806 CP(job32, *kcb, aio_reqprio); 2807 CP(job32, *kcb, _aiocb_private.status); 2808 CP(job32, *kcb, _aiocb_private.error); 2809 PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo); 2810 return (convert_old_sigevent32(&job32.aio_sigevent, 2811 &kcb->aio_sigevent)); 2812 } 2813 #endif 2814 2815 static int 2816 aiocb32_copyin(struct aiocb *ujob, struct kaiocb *kjob, int type) 2817 { 2818 struct aiocb32 job32; 2819 struct aiocb *kcb = &kjob->uaiocb; 2820 struct iovec32 *iov32; 2821 int error; 2822 2823 error = copyin(ujob, &job32, sizeof(job32)); 2824 if (error) 2825 return (error); 2826 CP(job32, *kcb, aio_fildes); 2827 CP(job32, *kcb, aio_offset); 2828 CP(job32, *kcb, aio_lio_opcode); 2829 if (type == LIO_READV || type == LIO_WRITEV) { 2830 iov32 = PTRIN(job32.aio_iov); 2831 CP(job32, *kcb, aio_iovcnt); 2832 /* malloc a uio and copy in the iovec */ 2833 error = freebsd32_copyinuio(iov32, 2834 kcb->aio_iovcnt, &kjob->uiop); 2835 if (error) 2836 return (error); 2837 } else { 2838 PTRIN_CP(job32, *kcb, aio_buf); 2839 CP(job32, *kcb, aio_nbytes); 2840 } 2841 CP(job32, *kcb, aio_reqprio); 2842 CP(job32, *kcb, _aiocb_private.status); 2843 CP(job32, *kcb, _aiocb_private.error); 2844 PTRIN_CP(job32, *kcb, _aiocb_private.kernelinfo); 2845 error = convert_sigevent32(&job32.aio_sigevent, &kcb->aio_sigevent); 2846 2847 return (error); 2848 } 2849 2850 static long 2851 aiocb32_fetch_status(struct aiocb *ujob) 2852 { 2853 struct aiocb32 *ujob32; 2854 2855 ujob32 = (struct aiocb32 *)ujob; 2856 return (fuword32(&ujob32->_aiocb_private.status)); 2857 } 2858 2859 static long 2860 aiocb32_fetch_error(struct aiocb *ujob) 2861 { 2862 struct aiocb32 *ujob32; 2863 2864 ujob32 = (struct aiocb32 *)ujob; 2865 return (fuword32(&ujob32->_aiocb_private.error)); 2866 } 2867 2868 static int 2869 aiocb32_store_status(struct aiocb *ujob, long status) 2870 { 2871 struct aiocb32 *ujob32; 2872 2873 ujob32 = (struct aiocb32 *)ujob; 2874 return (suword32(&ujob32->_aiocb_private.status, status)); 2875 } 2876 2877 static int 2878 aiocb32_store_error(struct aiocb *ujob, long error) 2879 { 2880 struct aiocb32 *ujob32; 2881 2882 ujob32 = (struct aiocb32 *)ujob; 2883 return (suword32(&ujob32->_aiocb_private.error, error)); 2884 } 2885 2886 static int 2887 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref) 2888 { 2889 struct aiocb32 *ujob32; 2890 2891 ujob32 = (struct aiocb32 *)ujob; 2892 return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref)); 2893 } 2894 2895 static int 2896 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob) 2897 { 2898 2899 return (suword32(ujobp, (long)ujob)); 2900 } 2901 2902 static struct aiocb_ops aiocb32_ops = { 2903 .aio_copyin = aiocb32_copyin, 2904 .fetch_status = aiocb32_fetch_status, 2905 .fetch_error = aiocb32_fetch_error, 2906 .store_status = aiocb32_store_status, 2907 .store_error = aiocb32_store_error, 2908 .store_kernelinfo = aiocb32_store_kernelinfo, 2909 .store_aiocb = aiocb32_store_aiocb, 2910 }; 2911 2912 #ifdef COMPAT_FREEBSD6 2913 static struct aiocb_ops aiocb32_ops_osigevent = { 2914 .aio_copyin = aiocb32_copyin_old_sigevent, 2915 .fetch_status = aiocb32_fetch_status, 2916 .fetch_error = aiocb32_fetch_error, 2917 .store_status = aiocb32_store_status, 2918 .store_error = aiocb32_store_error, 2919 .store_kernelinfo = aiocb32_store_kernelinfo, 2920 .store_aiocb = aiocb32_store_aiocb, 2921 }; 2922 #endif 2923 2924 int 2925 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap) 2926 { 2927 2928 return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); 2929 } 2930 2931 int 2932 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap) 2933 { 2934 struct timespec32 ts32; 2935 struct timespec ts, *tsp; 2936 struct aiocb **ujoblist; 2937 uint32_t *ujoblist32; 2938 int error, i; 2939 2940 if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc) 2941 return (EINVAL); 2942 2943 if (uap->timeout) { 2944 /* Get timespec struct. */ 2945 if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0) 2946 return (error); 2947 CP(ts32, ts, tv_sec); 2948 CP(ts32, ts, tv_nsec); 2949 tsp = &ts; 2950 } else 2951 tsp = NULL; 2952 2953 ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK); 2954 ujoblist32 = (uint32_t *)ujoblist; 2955 error = copyin(uap->aiocbp, ujoblist32, uap->nent * 2956 sizeof(ujoblist32[0])); 2957 if (error == 0) { 2958 for (i = uap->nent - 1; i >= 0; i--) 2959 ujoblist[i] = PTRIN(ujoblist32[i]); 2960 2961 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp); 2962 } 2963 free(ujoblist, M_AIOS); 2964 return (error); 2965 } 2966 2967 int 2968 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap) 2969 { 2970 2971 return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); 2972 } 2973 2974 #ifdef COMPAT_FREEBSD6 2975 int 2976 freebsd6_freebsd32_aio_read(struct thread *td, 2977 struct freebsd6_freebsd32_aio_read_args *uap) 2978 { 2979 2980 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 2981 &aiocb32_ops_osigevent)); 2982 } 2983 #endif 2984 2985 int 2986 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap) 2987 { 2988 2989 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 2990 &aiocb32_ops)); 2991 } 2992 2993 int 2994 freebsd32_aio_readv(struct thread *td, struct freebsd32_aio_readv_args *uap) 2995 { 2996 2997 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READV, 2998 &aiocb32_ops)); 2999 } 3000 3001 #ifdef COMPAT_FREEBSD6 3002 int 3003 freebsd6_freebsd32_aio_write(struct thread *td, 3004 struct freebsd6_freebsd32_aio_write_args *uap) 3005 { 3006 3007 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 3008 &aiocb32_ops_osigevent)); 3009 } 3010 #endif 3011 3012 int 3013 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap) 3014 { 3015 3016 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 3017 &aiocb32_ops)); 3018 } 3019 3020 int 3021 freebsd32_aio_writev(struct thread *td, struct freebsd32_aio_writev_args *uap) 3022 { 3023 3024 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITEV, 3025 &aiocb32_ops)); 3026 } 3027 3028 int 3029 freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap) 3030 { 3031 3032 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK, 3033 &aiocb32_ops)); 3034 } 3035 3036 int 3037 freebsd32_aio_waitcomplete(struct thread *td, 3038 struct freebsd32_aio_waitcomplete_args *uap) 3039 { 3040 struct timespec32 ts32; 3041 struct timespec ts, *tsp; 3042 int error; 3043 3044 if (uap->timeout) { 3045 /* Get timespec struct. */ 3046 error = copyin(uap->timeout, &ts32, sizeof(ts32)); 3047 if (error) 3048 return (error); 3049 CP(ts32, ts, tv_sec); 3050 CP(ts32, ts, tv_nsec); 3051 tsp = &ts; 3052 } else 3053 tsp = NULL; 3054 3055 return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp, 3056 &aiocb32_ops)); 3057 } 3058 3059 int 3060 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap) 3061 { 3062 3063 return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp, 3064 &aiocb32_ops)); 3065 } 3066 3067 #ifdef COMPAT_FREEBSD6 3068 int 3069 freebsd6_freebsd32_lio_listio(struct thread *td, 3070 struct freebsd6_freebsd32_lio_listio_args *uap) 3071 { 3072 struct aiocb **acb_list; 3073 struct sigevent *sigp, sig; 3074 struct osigevent32 osig; 3075 uint32_t *acb_list32; 3076 int error, i, nent; 3077 3078 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 3079 return (EINVAL); 3080 3081 nent = uap->nent; 3082 if (nent < 0 || nent > max_aio_queue_per_proc) 3083 return (EINVAL); 3084 3085 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 3086 error = copyin(uap->sig, &osig, sizeof(osig)); 3087 if (error) 3088 return (error); 3089 error = convert_old_sigevent32(&osig, &sig); 3090 if (error) 3091 return (error); 3092 sigp = &sig; 3093 } else 3094 sigp = NULL; 3095 3096 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK); 3097 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t)); 3098 if (error) { 3099 free(acb_list32, M_LIO); 3100 return (error); 3101 } 3102 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 3103 for (i = 0; i < nent; i++) 3104 acb_list[i] = PTRIN(acb_list32[i]); 3105 free(acb_list32, M_LIO); 3106 3107 error = kern_lio_listio(td, uap->mode, 3108 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, 3109 &aiocb32_ops_osigevent); 3110 free(acb_list, M_LIO); 3111 return (error); 3112 } 3113 #endif 3114 3115 int 3116 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap) 3117 { 3118 struct aiocb **acb_list; 3119 struct sigevent *sigp, sig; 3120 struct sigevent32 sig32; 3121 uint32_t *acb_list32; 3122 int error, i, nent; 3123 3124 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 3125 return (EINVAL); 3126 3127 nent = uap->nent; 3128 if (nent < 0 || nent > max_aio_queue_per_proc) 3129 return (EINVAL); 3130 3131 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 3132 error = copyin(uap->sig, &sig32, sizeof(sig32)); 3133 if (error) 3134 return (error); 3135 error = convert_sigevent32(&sig32, &sig); 3136 if (error) 3137 return (error); 3138 sigp = &sig; 3139 } else 3140 sigp = NULL; 3141 3142 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK); 3143 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t)); 3144 if (error) { 3145 free(acb_list32, M_LIO); 3146 return (error); 3147 } 3148 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 3149 for (i = 0; i < nent; i++) 3150 acb_list[i] = PTRIN(acb_list32[i]); 3151 free(acb_list32, M_LIO); 3152 3153 error = kern_lio_listio(td, uap->mode, 3154 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, 3155 &aiocb32_ops); 3156 free(acb_list, M_LIO); 3157 return (error); 3158 } 3159 3160 #endif 3161