/*-
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
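/*
 * Illustrative userland sketch (added for orientation; the descriptor
 * and buffer names are made up): the life of one request through the
 * POSIX interface this file implements.
 *
 *	struct aiocb acb;
 *	memset(&acb, 0, sizeof(acb));
 *	acb.aio_fildes = fd;			(an open descriptor)
 *	acb.aio_offset = 0;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	if (aio_read(&acb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&acb) == EINPROGRESS)
 *		;				(poll; aio_suspend() also works)
 *	ssize_t done = aio_return(&acb);	(bytes transferred)
 *
 * aio_read() enters aio_aqueue() below; aio_error() and aio_return()
 * map to kern_aio_error() and kern_aio_return().
 */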
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/eventhandler.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/posix4.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sema.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/mount.h>
#include <geom/geom.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>
#include <sys/aio.h>

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.  (XXX will be removed soon.)
 */
static u_long jobrefid;

/*
 * Counter for aio_fsync.
 */
static uint64_t jobseqno;

#ifndef MAX_AIO_PER_PROC
#define	MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define	MAX_AIO_QUEUE_PER_PROC	256	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_QUEUE
#define	MAX_AIO_QUEUE		1024	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_BUF_AIO
#define	MAX_BUF_AIO		16
#endif

FEATURE(aio, "Asynchronous I/O");

static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list");

static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0,
    "Async IO management");

static int enable_aio_unsafe = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
    "Permit asynchronous IO on all file types, not just known-safe types");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
    "Maximum number of kernel processes to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
    "Number of presently active kernel processes for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
    0,
    "Preferred number of ready kernel processes for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
    "Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
    "Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
    "Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O processes in the process of being started */
/* XXX This should be local to aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
    "Maximum lifetime for idle aiod");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
    0,
    "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
    &max_aio_queue_per_proc, 0,
    "Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
    "Maximum buf aio requests per process (stored in the process)");

#ifdef COMPAT_FREEBSD6
typedef struct oaiocb {
	int	aio_fildes;		/* File descriptor */
	off_t	aio_offset;		/* File offset for I/O */
	volatile void *aio_buf;		/* I/O buffer in process space */
	size_t	aio_nbytes;		/* Number of bytes for I/O */
	struct	osigevent aio_sigevent;	/* Signal to deliver */
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private	_aiocb_private;
} oaiocb_t;
#endif
/*
 * Below is a key of locks used to protect each member of struct kaiocb,
 * aioliojob and kaioinfo, and any backends.
 *
 * * - need not be protected
 * a - locked by kaioinfo lock
 * b - locked by backend lock; the backend lock can be null in some cases,
 *     for example, BIO belongs to this type, in which case the proc lock
 *     is reused.
 * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
 */

/*
 * If the routine that services an AIO request blocks while running in an
 * AIO kernel process it can starve other I/O requests.  BIO requests
 * queued via aio_qphysio() complete in GEOM and do not use AIO kernel
 * processes at all.  Socket I/O requests use a separate pool of
 * kprocs and also force non-blocking I/O.  Other file I/O requests
 * use the generic fo_read/fo_write operations which can block.  The
 * fsync and mlock operations can also block while executing.  Ideally
 * none of these requests would block while executing.
 *
 * Note that the service routines cannot toggle O_NONBLOCK in the file
 * structure directly while handling a request due to races with
 * userland threads.
 */

/* jobflags */
#define	KAIOCB_QUEUEING		0x01
#define	KAIOCB_CANCELLED	0x02
#define	KAIOCB_CANCELLING	0x04
#define	KAIOCB_CHECKSYNC	0x08
#define	KAIOCB_CLEARED		0x10
#define	KAIOCB_FINISHED		0x20

/*
 * AIO process info
 */
#define	AIOP_FREE	0x1			/* proc on free queue */

struct aioproc {
	int	aioprocflags;			/* (c) AIO proc flags */
	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
	struct	proc *aioproc;			/* (*) the AIO proc */
};

/*
 * data-structure for lio signal management
 */
struct aioliojob {
	int	lioj_flags;			/* (a) listio flags */
	int	lioj_count;			/* (a) count of jobs in this lio */
	int	lioj_finished_count;		/* (a) count of finished jobs */
	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
	struct	knlist klist;			/* (a) list of knotes */
	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
};

#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
#define	LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */

/*
 * per process aio data structure
 */
struct kaioinfo {
	struct	mtx kaio_mtx;		/* the lock to protect this struct */
	int	kaio_flags;		/* (a) per process kaio flags */
	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
	int	kaio_active_count;	/* (c) number of currently used AIOs */
	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
	int	kaio_count;		/* (a) size of AIO queue */
	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
	int	kaio_buffer_count;	/* (a) number of physio buffers */
	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
	TAILQ_HEAD(,kaiocb) kaio_syncready;	/* (a) second q for aio_fsync */
	struct	task kaio_task;		/* (*) task to kick aio processes */
	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
};
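/*
 * Summary of how the structures above relate (descriptive only): each
 * user process that issues AIO gets one kaioinfo hung off its struct
 * proc.  Every request is a kaiocb, linked into kaio_all for its
 * lifetime and moved between kaio_jobqueue and kaio_done as it runs
 * and completes.  A lio_listio() call allocates one aioliojob that the
 * member kaiocbs point back to via job->lio, so the final completion
 * can post the list-wide signal or kevent.
 */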
#define	AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
#define	AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
#define	AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
#define	AIO_MTX(ki)		(&(ki)->kaio_mtx)

#define	KAIO_RUNDOWN	0x1	/* process is being run down */
#define	KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */

/*
 * Operations used to interact with userland aio control blocks.
 * Different ABIs provide their own operations.
 */
struct aiocb_ops {
	int	(*copyin)(struct aiocb *ujob, struct aiocb *kjob);
	long	(*fetch_status)(struct aiocb *ujob);
	long	(*fetch_error)(struct aiocb *ujob);
	int	(*store_status)(struct aiocb *ujob, long status);
	int	(*store_error)(struct aiocb *ujob, long error);
	int	(*store_kernelinfo)(struct aiocb *ujob, long jobref);
	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
};

static TAILQ_HEAD(,aioproc) aio_freeproc;	/* (c) Idle daemons */
static struct sema aio_newproc_sem;
static struct mtx aio_job_mtx;
static TAILQ_HEAD(,kaiocb) aio_jobs;		/* (c) Async job list */
static struct unrhdr *aiod_unr;

void		aio_init_aioinfo(struct proc *p);
static int	aio_onceonly(void);
static int	aio_free_entry(struct kaiocb *job);
static void	aio_process_rw(struct kaiocb *job);
static void	aio_process_sync(struct kaiocb *job);
static void	aio_process_mlock(struct kaiocb *job);
static void	aio_schedule_fsync(void *context, int pending);
static int	aio_newproc(int *);
int		aio_aqueue(struct thread *td, struct aiocb *ujob,
		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
static int	aio_queue_file(struct file *fp, struct kaiocb *job);
static void	aio_physwakeup(struct bio *bp);
static void	aio_proc_rundown(void *arg, struct proc *p);
static void	aio_proc_rundown_exec(void *arg, struct proc *p,
		    struct image_params *imgp);
static int	aio_qphysio(struct proc *p, struct kaiocb *job);
static void	aio_daemon(void *param);
static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
static int	aio_kick(struct proc *userp);
static void	aio_kick_nowait(struct proc *userp);
static void	aio_kick_helper(void *context, int pending);
static int	filt_aioattach(struct knote *kn);
static void	filt_aiodetach(struct knote *kn);
static int	filt_aio(struct knote *kn, long hint);
static int	filt_lioattach(struct knote *kn);
static void	filt_liodetach(struct knote *kn);
static int	filt_lio(struct knote *kn, long hint);

/*
 * Zones for:
 * 	kaio	Per process async io info
 *	aiop	async io process data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;

/* kqueue filters for aio */
static struct filterops aio_filtops = {
	.f_isfd = 0,
	.f_attach = filt_aioattach,
	.f_detach = filt_aiodetach,
	.f_event = filt_aio,
};
static struct filterops lio_filtops = {
	.f_isfd = 0,
	.f_attach = filt_lioattach,
	.f_detach = filt_liodetach,
	.f_event = filt_lio
};

static eventhandler_tag exit_tag, exec_tag;

TASKQUEUE_DEFINE_THREAD(aiod_kick);
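/*
 * Illustrative userland sketch of the kevent-based completion path the
 * filterops above implement (names other than the system calls are
 * hypothetical).  The aiocb is registered with EVFILT_AIO implicitly
 * when SIGEV_KEVENT is requested, so no separate EV_ADD kevent() call
 * is needed:
 *
 *	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	acb.aio_sigevent.sigev_notify_kqueue = kq;	(from kqueue())
 *	acb.aio_sigevent.sigev_value.sival_ptr = &acb;
 *	aio_write(&acb);
 *	struct kevent ev;
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 *	assert(ev.filter == EVFILT_AIO);
 *
 * ev.ident is the userland aiocb pointer and ev.udata the sival_ptr;
 * see the kqfd_register() call in aio_aqueue() below.
 */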
/*
 * Main operations function for use as a kernel module.
 */
static int
aio_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		aio_onceonly();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

static moduledata_t aio_mod = {
	"aio",
	&aio_modload,
	NULL
};

DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY);
MODULE_VERSION(aio, 1);

/*
 * Startup initialization
 */
static int
aio_onceonly(void)
{

	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
	    NULL, EVENTHANDLER_PRI_ANY);
	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
	TAILQ_INIT(&aio_freeproc);
	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
	TAILQ_INIT(&aio_jobs);
	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX * sizeof(intptr_t), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
	p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO);
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);

	return (0);
}
/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	ki = uma_zalloc(kaio_zone, M_WAITOK);
	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW);
	ki->kaio_flags = 0;
	ki->kaio_maxactive_count = max_aio_per_proc;
	ki->kaio_active_count = 0;
	ki->kaio_qallowed_count = max_aio_queue_per_proc;
	ki->kaio_count = 0;
	ki->kaio_ballowed_count = max_buf_aio;
	ki->kaio_buffer_count = 0;
	TAILQ_INIT(&ki->kaio_all);
	TAILQ_INIT(&ki->kaio_done);
	TAILQ_INIT(&ki->kaio_jobqueue);
	TAILQ_INIT(&ki->kaio_liojoblist);
	TAILQ_INIT(&ki->kaio_syncqueue);
	TAILQ_INIT(&ki->kaio_syncready);
	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
	TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki);
	PROC_LOCK(p);
	if (p->p_aioinfo == NULL) {
		p->p_aioinfo = ki;
		PROC_UNLOCK(p);
	} else {
		PROC_UNLOCK(p);
		mtx_destroy(&ki->kaio_mtx);
		uma_zfree(kaio_zone, ki);
	}

	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
		aio_newproc(NULL);
}

static int
aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
{
	struct thread *td;
	int error;

	error = sigev_findtd(p, sigev, &td);
	if (error)
		return (error);
	if (!KSI_ONQ(ksi)) {
		ksiginfo_set_sigev(ksi, sigev);
		ksi->ksi_code = SI_ASYNCIO;
		ksi->ksi_flags |= KSI_EXT | KSI_INS;
		tdsendsignal(p, td, ksi->ksi_signo, ksi);
	}
	PROC_UNLOCK(p);
	return (error);
}
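/*
 * Illustrative userland counterpart to aio_sendsig() (hypothetical
 * names): completion may be delivered as a queued realtime signal
 * carrying the sigevent value, e.g.
 *
 *	acb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *	acb.aio_sigevent.sigev_signo = SIGUSR1;
 *	acb.aio_sigevent.sigev_value.sival_ptr = &acb;
 *
 * with a SIGUSR1 handler installed via sigaction() using SA_SIGINFO.
 * aio_sendsig() tags the ksiginfo with SI_ASYNCIO before queueing it,
 * so the handler can distinguish AIO completions in si_code.
 */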
/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct kaiocb *job)
{
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct proc *p;

	p = job->userproc;
	MPASS(curproc == p);
	ki = p->p_aioinfo;
	MPASS(ki != NULL);

	AIO_LOCK_ASSERT(ki, MA_OWNED);
	MPASS(job->jobflags & KAIOCB_FINISHED);

	atomic_subtract_int(&num_queue_count, 1);

	ki->kaio_count--;
	MPASS(ki->kaio_count >= 0);

	TAILQ_REMOVE(&ki->kaio_done, job, plist);
	TAILQ_REMOVE(&ki->kaio_all, job, allist);

	lj = job->lio;
	if (lj) {
		lj->lioj_count--;
		lj->lioj_finished_count--;

		if (lj->lioj_count == 0) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			/* lio is going away, we need to destroy any knotes */
			knlist_delete(&lj->klist, curthread, 1);
			PROC_LOCK(p);
			sigqueue_take(&lj->lioj_ksi);
			PROC_UNLOCK(p);
			uma_zfree(aiolio_zone, lj);
		}
	}

	/* job is going away, we need to destroy any knotes */
	knlist_delete(&job->klist, curthread, 1);
	PROC_LOCK(p);
	sigqueue_take(&job->ksi);
	PROC_UNLOCK(p);

	AIO_UNLOCK(ki);

	/*
	 * The thread argument here is used to find the owning process
	 * and is also passed to fo_close() which may pass it to various
	 * places such as devsw close() routines.  Because of that, we
	 * need a thread pointer from the process owning the job that is
	 * persistent and won't disappear out from under us or move to
	 * another process.
	 *
	 * Currently, all the callers of this function call it to remove
	 * a kaiocb from the current process' job list either via a
	 * syscall or due to the current process calling exit() or
	 * execve().  Thus, we know that p == curproc.  We also know that
	 * curthread can't exit since we are curthread.
	 *
	 * Therefore, we use curthread as the thread to pass to
	 * knlist_delete().  This does mean that it is possible for the
	 * thread pointer at close time to differ from the thread pointer
	 * at open time, but this is already true of file descriptors in
	 * a multithreaded process.
	 */
	if (job->fd_file)
		fdrop(job->fd_file, curthread);
	crfree(job->cred);
	uma_zfree(aiocb_zone, job);
	AIO_LOCK(ki);

	return (0);
}

static void
aio_proc_rundown_exec(void *arg, struct proc *p,
    struct image_params *imgp __unused)
{
	aio_proc_rundown(arg, p);
}

static int
aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
{
	aio_cancel_fn_t *func;
	int cancelled;

	AIO_LOCK_ASSERT(ki, MA_OWNED);
	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
		return (0);
	MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
	job->jobflags |= KAIOCB_CANCELLED;

	func = job->cancel_fn;

	/*
	 * If there is no cancel routine, just leave the job marked as
	 * cancelled.  The job should be in active use by a caller who
	 * should complete it normally or when it fails to install a
	 * cancel routine.
	 */
	if (func == NULL)
		return (0);

	/*
	 * Set the CANCELLING flag so that aio_complete() will defer
	 * completions of this job.  This prevents the job from being
	 * freed out from under the cancel callback.  After the
	 * callback any deferred completion (whether from the callback
	 * or any other source) will be completed.
	 */
	job->jobflags |= KAIOCB_CANCELLING;
	AIO_UNLOCK(ki);
	func(job);
	AIO_LOCK(ki);
	job->jobflags &= ~KAIOCB_CANCELLING;
	if (job->jobflags & KAIOCB_FINISHED) {
		cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
		aio_bio_done_notify(p, job);
	} else {
		/*
		 * The cancel callback might have scheduled an
		 * operation to cancel this request, but it is
		 * only counted as cancelled if the request is
		 * cancelled when the callback returns.
		 */
		cancelled = 0;
	}
	return (cancelled);
}

/*
 * Rundown the jobs for a given process.
 */
static void
aio_proc_rundown(void *arg, struct proc *p)
{
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct kaiocb *job, *jobn;

	KASSERT(curthread->td_proc == p,
	    ("%s: called on non-curproc", __func__));
	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	AIO_LOCK(ki);
	ki->kaio_flags |= KAIO_RUNDOWN;

restart:

	/*
	 * Try to cancel all pending requests.  This code simulates
	 * aio_cancel on all pending I/O requests.
	 */
	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
		aio_cancel_job(p, ki, job);
	}

	/* Wait for all running I/O to be finished */
	if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) {
		ki->kaio_flags |= KAIO_WAKEUP;
		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
		goto restart;
	}
	/* Free all completed I/O requests. */
	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
		aio_free_entry(job);

	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
		if (lj->lioj_count == 0) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			knlist_delete(&lj->klist, curthread, 1);
			PROC_LOCK(p);
			sigqueue_take(&lj->lioj_ksi);
			PROC_UNLOCK(p);
			uma_zfree(aiolio_zone, lj);
		} else {
			panic("LIO job not cleaned up: C:%d, FC:%d\n",
			    lj->lioj_count, lj->lioj_finished_count);
		}
	}
	AIO_UNLOCK(ki);
	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task);
	taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task);
	mtx_destroy(&ki->kaio_mtx);
	uma_zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon).
 */
static struct kaiocb *
aio_selectjob(struct aioproc *aiop)
{
	struct kaiocb *job;
	struct kaioinfo *ki;
	struct proc *userp;

	mtx_assert(&aio_job_mtx, MA_OWNED);
restart:
	TAILQ_FOREACH(job, &aio_jobs, list) {
		userp = job->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, job, list);
			if (!aio_clear_cancel_function(job))
				goto restart;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;
			break;
		}
	}
	return (job);
}

/*
 * Move all data to a permanent storage device.  This code
 * simulates the fsync syscall.
 */
static int
aio_fsync_vnode(struct thread *td, struct vnode *vp)
{
	struct mount *mp;
	int error;

	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto drop;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_object);
		vm_object_page_clean(vp->v_object, 0, 0, 0);
		VM_OBJECT_WUNLOCK(vp->v_object);
	}
	error = VOP_FSYNC(vp, MNT_WAIT, td);

	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
drop:
	return (error);
}

/*
 * The AIO processing activity for LIO_READ/LIO_WRITE.  This is the code that
 * does the I/O request for the non-physio version of the operations.  The
 * normal vn operations are used, and this code should work in all instances
 * for every type of file, including pipes, sockets, fifos, and regular files.
 *
 * XXX I don't think it works well for sockets, pipes, and fifos.
 */
static void
aio_process_rw(struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	ssize_t cnt;
	long msgsnd_st, msgsnd_end;
	long msgrcv_st, msgrcv_end;
	long oublock_st, oublock_end;
	long inblock_st, inblock_end;
	int error;

	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));

	aio_switch_vmspace(job);
	td = curthread;
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;
	cb = &job->uaiocb;
	fp = job->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	msgrcv_st = td->td_ru.ru_msgrcv;
	msgsnd_st = td->td_ru.ru_msgsnd;
	inblock_st = td->td_ru.ru_inblock;
	oublock_st = td->td_ru.ru_oublock;

	/*
	 * aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		if (auio.uio_resid == 0)
			error = 0;
		else
			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	} else {
		if (fp->f_type == DTYPE_VNODE)
			bwillwrite();
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	}
	msgrcv_end = td->td_ru.ru_msgrcv;
	msgsnd_end = td->td_ru.ru_msgsnd;
	inblock_end = td->td_ru.ru_inblock;
	oublock_end = td->td_ru.ru_oublock;

	job->msgrcv = msgrcv_end - msgrcv_st;
	job->msgsnd = msgsnd_end - msgsnd_st;
	job->inblock = inblock_end - inblock_st;
	job->outblock = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	cnt -= auio.uio_resid;
	td->td_ucred = td_savedcred;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, cnt, 0);
}

static void
aio_process_sync(struct kaiocb *job)
{
	struct thread *td = curthread;
	struct ucred *td_savedcred = td->td_ucred;
	struct file *fp = job->fd_file;
	int error = 0;

	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));

	td->td_ucred = job->cred;
	if (fp->f_vnode != NULL)
		error = aio_fsync_vnode(td, fp->f_vnode);
	td->td_ucred = td_savedcred;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, 0, 0);
}

static void
aio_process_mlock(struct kaiocb *job)
{
	struct aiocb *cb = &job->uaiocb;
	int error;

	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));

	aio_switch_vmspace(job);
	error = vm_mlock(job->userproc, job->cred,
	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, 0, 0);
}
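/*
 * Illustrative use of the LIO_MLOCK path above (userland sketch with a
 * hypothetical region): aio_mlock(2) queues the wiring of a range in an
 * AIO daemon instead of blocking the caller the way mlock(2) would:
 *
 *	acb.aio_buf = region;
 *	acb.aio_nbytes = region_len;
 *	aio_mlock(&acb);
 *	...
 *	(a later aio_return() reports status 0 on success, per the
 *	 aio_complete(job, 0, 0) above)
 */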
static void
aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
{
	struct aioliojob *lj;
	struct kaioinfo *ki;
	struct kaiocb *sjob, *sjobn;
	int lj_done;
	bool schedule_fsync;

	ki = userp->p_aioinfo;
	AIO_LOCK_ASSERT(ki, MA_OWNED);
	lj = job->lio;
	lj_done = 0;
	if (lj) {
		lj->lioj_finished_count++;
		if (lj->lioj_count == lj->lioj_finished_count)
			lj_done = 1;
	}
	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
	MPASS(job->jobflags & KAIOCB_FINISHED);

	if (ki->kaio_flags & KAIO_RUNDOWN)
		goto notification_done;

	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);

	KNOTE_LOCKED(&job->klist, 1);

	if (lj_done) {
		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
			KNOTE_LOCKED(&lj->klist, 1);
		}
		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
		    == LIOJ_SIGNAL
		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
		    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
		}
	}

notification_done:
	if (job->jobflags & KAIOCB_CHECKSYNC) {
		schedule_fsync = false;
		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
			if (job->fd_file == sjob->fd_file &&
			    job->seqno < sjob->seqno) {
				if (--sjob->pending == 0) {
					TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
					    list);
					if (!aio_clear_cancel_function(sjob))
						continue;
					TAILQ_INSERT_TAIL(&ki->kaio_syncready,
					    sjob, list);
					schedule_fsync = true;
				}
			}
		}
		if (schedule_fsync)
			taskqueue_enqueue(taskqueue_aiod_kick,
			    &ki->kaio_sync_task);
	}
	if (ki->kaio_flags & KAIO_WAKEUP) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(&userp->p_aioinfo);
	}
}

static void
aio_schedule_fsync(void *context, int pending)
{
	struct kaioinfo *ki;
	struct kaiocb *job;

	ki = context;
	AIO_LOCK(ki);
	while (!TAILQ_EMPTY(&ki->kaio_syncready)) {
		job = TAILQ_FIRST(&ki->kaio_syncready);
		TAILQ_REMOVE(&ki->kaio_syncready, job, list);
		AIO_UNLOCK(ki);
		aio_schedule(job, aio_process_sync);
		AIO_LOCK(ki);
	}
	AIO_UNLOCK(ki);
}
bool
aio_cancel_cleared(struct kaiocb *job)
{
	struct kaioinfo *ki;

	/*
	 * The caller should hold the same queue lock held when
	 * aio_clear_cancel_function() was called and set this flag
	 * ensuring this check sees an up-to-date value.  However,
	 * there is no way to assert that.
	 */
	ki = job->userproc->p_aioinfo;
	return ((job->jobflags & KAIOCB_CLEARED) != 0);
}

bool
aio_clear_cancel_function(struct kaiocb *job)
{
	struct kaioinfo *ki;

	ki = job->userproc->p_aioinfo;
	AIO_LOCK(ki);
	MPASS(job->cancel_fn != NULL);
	if (job->jobflags & KAIOCB_CANCELLING) {
		job->jobflags |= KAIOCB_CLEARED;
		AIO_UNLOCK(ki);
		return (false);
	}
	job->cancel_fn = NULL;
	AIO_UNLOCK(ki);
	return (true);
}

bool
aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
{
	struct kaioinfo *ki;

	ki = job->userproc->p_aioinfo;
	AIO_LOCK(ki);
	if (job->jobflags & KAIOCB_CANCELLED) {
		AIO_UNLOCK(ki);
		return (false);
	}
	job->cancel_fn = func;
	AIO_UNLOCK(ki);
	return (true);
}

void
aio_complete(struct kaiocb *job, long status, int error)
{
	struct kaioinfo *ki;
	struct proc *userp;

	job->uaiocb._aiocb_private.error = error;
	job->uaiocb._aiocb_private.status = status;

	userp = job->userproc;
	ki = userp->p_aioinfo;

	AIO_LOCK(ki);
	KASSERT(!(job->jobflags & KAIOCB_FINISHED),
	    ("duplicate aio_complete"));
	job->jobflags |= KAIOCB_FINISHED;
	if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
		TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
		aio_bio_done_notify(userp, job);
	}
	AIO_UNLOCK(ki);
}

void
aio_cancel(struct kaiocb *job)
{

	aio_complete(job, -1, ECANCELED);
}

void
aio_switch_vmspace(struct kaiocb *job)
{

	vmspace_switch_aio(job->userproc->p_vmspace);
}

/*
 * The AIO daemon, most of the actual work is done in aio_process_*,
 * but the setup (and address space mgmt) is done in this routine.
 */
static void
aio_daemon(void *_id)
{
	struct kaiocb *job;
	struct aioproc *aiop;
	struct kaioinfo *ki;
	struct proc *p;
	struct vmspace *myvm;
	struct thread *td = curthread;
	int id = (intptr_t)_id;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = td->td_proc;
	myvm = vmspace_acquire_ref(p);

	KASSERT(p->p_textvp == NULL, ("kthread has a textvp"));

	/*
	 * Allocate and ready the aio control info.  There is one aiop
	 * structure per daemon.
	 */
	aiop = uma_zalloc(aiop_zone, M_WAITOK);
	aiop->aioproc = p;
	aiop->aioprocflags = 0;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	sema_post(&aio_newproc_sem);

	mtx_lock(&aio_job_mtx);
	for (;;) {
		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
		}

		/*
		 * Check for jobs.
		 */
		while ((job = aio_selectjob(aiop)) != NULL) {
			mtx_unlock(&aio_job_mtx);

			ki = job->userproc->p_aioinfo;
			job->handle_fn(job);

			mtx_lock(&aio_job_mtx);
			/* Decrement the active job count. */
			ki->kaio_active_count--;
		}
		/*
		 * Disconnect from user address space.
		 */
		if (p->p_vmspace != myvm) {
			mtx_unlock(&aio_job_mtx);
			vmspace_switch_aio(myvm);
			mtx_lock(&aio_job_mtx);
			/*
			 * We have to restart to avoid races; we only
			 * sleep if no job can be selected.
			 */
			continue;
		}

		mtx_assert(&aio_job_mtx, MA_OWNED);

		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
		    (aiop->aioprocflags & AIOP_FREE) &&
		    num_aio_procs > target_aio_procs)
			break;
	}
	TAILQ_REMOVE(&aio_freeproc, aiop, list);
	num_aio_procs--;
	mtx_unlock(&aio_job_mtx);
	uma_zfree(aiop_zone, aiop);
	free_unr(aiod_unr, id);
	vmspace_free(myvm);

	KASSERT(p->p_vmspace == myvm,
	    ("AIOD: bad vmspace for exiting daemon"));
	KASSERT(myvm->vm_refcnt > 1,
	    ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt));
	kproc_exit(0);
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(int *start)
{
	int error;
	struct proc *p;
	int id;

	id = alloc_unr(aiod_unr);
	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
	    RFNOWAIT, 0, "aiod%d", id);
	if (error == 0) {
		/*
		 * Wait until daemon is started.
		 */
		sema_wait(&aio_newproc_sem);
		mtx_lock(&aio_job_mtx);
		num_aio_procs++;
		if (start != NULL)
			(*start)--;
		mtx_unlock(&aio_job_mtx);
	} else {
		free_unr(aiod_unr, id);
	}
	return (error);
}
/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct kaiocb *job)
{
	struct aiocb *cb;
	struct file *fp;
	struct bio *bp;
	struct buf *pbuf;
	struct vnode *vp;
	struct cdevsw *csw;
	struct cdev *dev;
	struct kaioinfo *ki;
	int error, ref, poff;
	vm_prot_t prot;

	cb = &job->uaiocb;
	fp = job->fd_file;

	if (fp == NULL || fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = fp->f_vnode;
	if (vp->v_type != VCHR)
		return (-1);
	if (vp->v_bufobj.bo_bsize == 0)
		return (-1);
	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
		return (-1);

	ref = 0;
	csw = devvn_refthread(vp, &dev, &ref);
	if (csw == NULL)
		return (ENXIO);

	if ((csw->d_flags & D_DISK) == 0) {
		error = -1;
		goto unref;
	}
	if (cb->aio_nbytes > dev->si_iosize_max) {
		error = -1;
		goto unref;
	}

	ki = p->p_aioinfo;
	poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
	if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		if (cb->aio_nbytes > MAXPHYS) {
			error = -1;
			goto unref;
		}

		pbuf = NULL;
	} else {
		if (cb->aio_nbytes > MAXPHYS - poff) {
			error = -1;
			goto unref;
		}
		if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
			error = -1;
			goto unref;
		}

		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
		BUF_KERNPROC(pbuf);
		AIO_LOCK(ki);
		ki->kaio_buffer_count++;
		AIO_UNLOCK(ki);
	}
	job->bp = bp = g_alloc_bio();

	bp->bio_length = cb->aio_nbytes;
	bp->bio_bcount = cb->aio_nbytes;
	bp->bio_done = aio_physwakeup;
	bp->bio_data = (void *)(uintptr_t)cb->aio_buf;
	bp->bio_offset = cb->aio_offset;
	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
	bp->bio_dev = dev;
	bp->bio_caller1 = (void *)job;

	prot = VM_PROT_READ;
	if (cb->aio_lio_opcode == LIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
	    nitems(job->pages));
	if (job->npages < 0) {
		error = EFAULT;
		goto doerror;
	}
	if (pbuf != NULL) {
		pmap_qenter((vm_offset_t)pbuf->b_data,
		    job->pages, job->npages);
		bp->bio_data = pbuf->b_data + poff;
		atomic_add_int(&num_buf_aio, 1);
	} else {
		bp->bio_ma = job->pages;
		bp->bio_ma_n = job->npages;
		bp->bio_ma_offset = poff;
		bp->bio_data = unmapped_buf;
		bp->bio_flags |= BIO_UNMAPPED;
	}

	/* Perform transfer. */
	csw->d_strategy(bp);
	dev_relthread(dev, ref);
	return (0);

doerror:
	if (pbuf != NULL) {
		AIO_LOCK(ki);
		ki->kaio_buffer_count--;
		AIO_UNLOCK(ki);
		relpbuf(pbuf, NULL);
		job->pbuf = NULL;
	}
	g_destroy_bio(bp);
	job->bp = NULL;
unref:
	dev_relthread(dev, ref);
	return (error);
}
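/*
 * Descriptive note (added): the fast path above bypasses the AIO
 * daemons entirely.  Eligibility is checked first (character device,
 * D_DISK, size and alignment limits); the user pages are then wired
 * with vm_fault_quick_hold_pages(), attached to a struct bio either
 * mapped through a pbuf or left unmapped when the device allows it,
 * and the request completes in aio_physwakeup() from the device's
 * bio_done callback rather than in a kernel process.
 */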
#ifdef COMPAT_FREEBSD6
static int
convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig)
{

	/*
	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
	 * supported by AIO with the old sigevent structure.
	 */
	nsig->sigev_notify = osig->sigev_notify;
	switch (nsig->sigev_notify) {
	case SIGEV_NONE:
		break;
	case SIGEV_SIGNAL:
		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
		break;
	case SIGEV_KEVENT:
		nsig->sigev_notify_kqueue =
		    osig->__sigev_u.__sigev_notify_kqueue;
		nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
{
	struct oaiocb *ojob;
	int error;

	bzero(kjob, sizeof(struct aiocb));
	error = copyin(ujob, kjob, sizeof(struct oaiocb));
	if (error)
		return (error);
	ojob = (struct oaiocb *)kjob;
	return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent));
}
#endif

static int
aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob)
{

	return (copyin(ujob, kjob, sizeof(struct aiocb)));
}

static long
aiocb_fetch_status(struct aiocb *ujob)
{

	return (fuword(&ujob->_aiocb_private.status));
}

static long
aiocb_fetch_error(struct aiocb *ujob)
{

	return (fuword(&ujob->_aiocb_private.error));
}

static int
aiocb_store_status(struct aiocb *ujob, long status)
{

	return (suword(&ujob->_aiocb_private.status, status));
}

static int
aiocb_store_error(struct aiocb *ujob, long error)
{

	return (suword(&ujob->_aiocb_private.error, error));
}

static int
aiocb_store_kernelinfo(struct aiocb *ujob, long jobref)
{

	return (suword(&ujob->_aiocb_private.kernelinfo, jobref));
}

static int
aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
{

	return (suword(ujobp, (long)ujob));
}

static struct aiocb_ops aiocb_ops = {
	.copyin = aiocb_copyin,
	.fetch_status = aiocb_fetch_status,
	.fetch_error = aiocb_fetch_error,
	.store_status = aiocb_store_status,
	.store_error = aiocb_store_error,
	.store_kernelinfo = aiocb_store_kernelinfo,
	.store_aiocb = aiocb_store_aiocb,
};

#ifdef COMPAT_FREEBSD6
static struct aiocb_ops aiocb_ops_osigevent = {
	.copyin = aiocb_copyin_old_sigevent,
	.fetch_status = aiocb_fetch_status,
	.fetch_error = aiocb_fetch_error,
	.store_status = aiocb_store_status,
	.store_error = aiocb_store_error,
	.store_kernelinfo = aiocb_store_kernelinfo,
	.store_aiocb = aiocb_store_aiocb,
};
#endif
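/*
 * Illustrative sketch (hypothetical, not compiled here): another ABI,
 * e.g. a 32-bit compat layer, would supply its own table whose copyin
 * and store routines translate its aiocb layout, then reuse the common
 * queueing path unchanged:
 *
 *	static struct aiocb_ops aiocb32_ops = {
 *		.copyin = aiocb32_copyin,
 *		.fetch_status = aiocb32_fetch_status,
 *		(remaining members filled in the same way)
 *	};
 *	error = aio_aqueue(td, ujob, NULL, LIO_READ, &aiocb32_ops);
 *
 * This is exactly how aiocb_ops_osigevent above adapts the old
 * FreeBSD 6 sigevent layout.
 */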
/*
 * Queue a new AIO request.  The choice between the threaded and the direct
 * physio (VCHR) technique is made here.
 */
int
aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
    int type, struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	cap_rights_t rights;
	struct file *fp;
	struct kaiocb *job;
	struct kaioinfo *ki;
	struct kevent kev;
	int opcode;
	int error;
	int fd, kqfd;
	int jid;
	u_short evflags;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	ki = p->p_aioinfo;

	ops->store_status(ujob, -1);
	ops->store_error(ujob, 0);
	ops->store_kernelinfo(ujob, -1);

	if (num_queue_count >= max_queue_count ||
	    ki->kaio_count >= ki->kaio_qallowed_count) {
		ops->store_error(ujob, EAGAIN);
		return (EAGAIN);
	}

	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
	knlist_init_mtx(&job->klist, AIO_MTX(ki));

	error = ops->copyin(ujob, &job->uaiocb);
	if (error) {
		ops->store_error(ujob, error);
		uma_zfree(aiocb_zone, job);
		return (error);
	}

	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
		uma_zfree(aiocb_zone, job);
		return (EINVAL);
	}

	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
		ops->store_error(ujob, EINVAL);
		uma_zfree(aiocb_zone, job);
		return (EINVAL);
	}

	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
	    !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
		uma_zfree(aiocb_zone, job);
		return (EINVAL);
	}

	ksiginfo_init(&job->ksi);

	/* Save userspace address of the job info. */
	job->ujob = ujob;

	/* Get the opcode. */
	if (type != LIO_NOP)
		job->uaiocb.aio_lio_opcode = type;
	opcode = job->uaiocb.aio_lio_opcode;

	/*
	 * Validate the opcode and fetch the file object for the specified
	 * file descriptor.
	 *
	 * XXXRW: Moved the opcode validation up here so that we don't
	 * retrieve a file descriptor without knowing what the capability
	 * should be.
	 */
	fd = job->uaiocb.aio_fildes;
	switch (opcode) {
	case LIO_WRITE:
		error = fget_write(td, fd,
		    cap_rights_init(&rights, CAP_PWRITE), &fp);
		break;
	case LIO_READ:
		error = fget_read(td, fd,
		    cap_rights_init(&rights, CAP_PREAD), &fp);
		break;
	case LIO_SYNC:
		error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp);
		break;
	case LIO_MLOCK:
		fp = NULL;
		break;
	case LIO_NOP:
		error = fget(td, fd, cap_rights_init(&rights), &fp);
		break;
	default:
		error = EINVAL;
	}
	if (error) {
		uma_zfree(aiocb_zone, job);
		ops->store_error(ujob, error);
		return (error);
	}

	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
		error = EINVAL;
		goto aqueue_fail;
	}

	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}

	job->fd_file = fp;

	mtx_lock(&aio_job_mtx);
	jid = jobrefid++;
	job->seqno = jobseqno++;
	mtx_unlock(&aio_job_mtx);
	error = ops->store_kernelinfo(ujob, jid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;

	if (opcode == LIO_NOP) {
		fdrop(fp, td);
		uma_zfree(aiocb_zone, job);
		return (0);
	}

	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
		goto no_kqueue;
	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
		error = EINVAL;
		goto aqueue_fail;
	}
	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
	kev.ident = (uintptr_t)job->ujob;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
	kev.data = (intptr_t)job;
	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
	error = kqfd_register(kqfd, &kev, td, 1);
	if (error)
		goto aqueue_fail;

no_kqueue:

	ops->store_error(ujob, EINPROGRESS);
	job->uaiocb._aiocb_private.error = EINPROGRESS;
	job->userproc = p;
	job->cred = crhold(td->td_ucred);
	job->jobflags = KAIOCB_QUEUEING;
	job->lio = lj;

	if (opcode == LIO_MLOCK) {
		aio_schedule(job, aio_process_mlock);
		error = 0;
	} else if (fp->f_ops->fo_aio_queue == NULL)
		error = aio_queue_file(fp, job);
	else
		error = fo_aio_queue(fp, job);
	if (error)
		goto aqueue_fail;

	AIO_LOCK(ki);
	job->jobflags &= ~KAIOCB_QUEUEING;
	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
	ki->kaio_count++;
	if (lj)
		lj->lioj_count++;
	atomic_add_int(&num_queue_count, 1);
	if (job->jobflags & KAIOCB_FINISHED) {
		/*
		 * The queue callback completed the request synchronously.
		 * The bulk of the completion is deferred in that case
		 * until this point.
		 */
		aio_bio_done_notify(p, job);
	} else
		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
	AIO_UNLOCK(ki);
	return (0);

aqueue_fail:
	knlist_delete(&job->klist, curthread, 0);
	if (fp)
		fdrop(fp, td);
	uma_zfree(aiocb_zone, job);
	ops->store_error(ujob, error);
	return (error);
}

static void
aio_cancel_daemon_job(struct kaiocb *job)
{

	mtx_lock(&aio_job_mtx);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&aio_jobs, job, list);
	mtx_unlock(&aio_job_mtx);
	aio_cancel(job);
}

void
aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
{

	mtx_lock(&aio_job_mtx);
	if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
		mtx_unlock(&aio_job_mtx);
		aio_cancel(job);
		return;
	}
	job->handle_fn = func;
	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
	aio_kick_nowait(job->userproc);
	mtx_unlock(&aio_job_mtx);
}

static void
aio_cancel_sync(struct kaiocb *job)
{
	struct kaioinfo *ki;

	ki = job->userproc->p_aioinfo;
	mtx_lock(&aio_job_mtx);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
	mtx_unlock(&aio_job_mtx);
	aio_cancel(job);
}

int
aio_queue_file(struct file *fp, struct kaiocb *job)
{
	struct aioliojob *lj;
	struct kaioinfo *ki;
	struct kaiocb *job2;
	int error, opcode;

	lj = job->lio;
	ki = job->userproc->p_aioinfo;
	opcode = job->uaiocb.aio_lio_opcode;
	if (opcode == LIO_SYNC)
		goto queueit;

	if ((error = aio_qphysio(job->userproc, job)) == 0)
		goto done;
#if 0
	/*
	 * XXX: This means qphysio() failed with EFAULT.  The current
	 * behavior is to retry the operation via fo_read/fo_write.
	 * Wouldn't it be better to just complete the request with an
	 * error here?
	 */
	if (error > 0)
		goto done;
#endif
queueit:
	if (!enable_aio_unsafe)
		return (EOPNOTSUPP);

	if (opcode == LIO_SYNC) {
		AIO_LOCK(ki);
		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
			if (job2->fd_file == job->fd_file &&
			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
			    job2->seqno < job->seqno) {
				job2->jobflags |= KAIOCB_CHECKSYNC;
				job->pending++;
			}
		}
		if (job->pending != 0) {
			if (!aio_set_cancel_function(job, aio_cancel_sync)) {
				AIO_UNLOCK(ki);
				aio_cancel(job);
				return (0);
			}
			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
			AIO_UNLOCK(ki);
			return (0);
		}
		AIO_UNLOCK(ki);
	}

	switch (opcode) {
	case LIO_READ:
	case LIO_WRITE:
		aio_schedule(job, aio_process_rw);
		error = 0;
		break;
	case LIO_SYNC:
		aio_schedule(job, aio_process_sync);
		error = 0;
		break;
	default:
		error = EINVAL;
	}
done:
	return (error);
}

static void
aio_kick_nowait(struct proc *userp)
{
	struct kaioinfo *ki = userp->p_aioinfo;
	struct aioproc *aiop;

	mtx_assert(&aio_job_mtx, MA_OWNED);
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
	    ki->kaio_active_count + num_aio_resv_start <
	    ki->kaio_maxactive_count) {
		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
	}
}

static int
aio_kick(struct proc *userp)
{
	struct kaioinfo *ki = userp->p_aioinfo;
	struct aioproc *aiop;
	int error, ret = 0;

	mtx_assert(&aio_job_mtx, MA_OWNED);
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
	    ki->kaio_active_count + num_aio_resv_start <
	    ki->kaio_maxactive_count) {
		num_aio_resv_start++;
		mtx_unlock(&aio_job_mtx);
		error = aio_newproc(&num_aio_resv_start);
		mtx_lock(&aio_job_mtx);
		if (error) {
			num_aio_resv_start--;
			goto retryproc;
		}
	} else {
		ret = -1;
	}
	return (ret);
}

static void
aio_kick_helper(void *context, int pending)
{
	struct proc *userp = context;

	mtx_lock(&aio_job_mtx);
	while (--pending >= 0) {
		if (aio_kick(userp))
			break;
	}
	mtx_unlock(&aio_job_mtx);
}
/*
 * Support the aio_return system call; as a side effect, kernel resources
 * are released.
 */
static int
kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct kaiocb *job;
	struct kaioinfo *ki;
	long status, error;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EINVAL);
	AIO_LOCK(ki);
	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
		if (job->ujob == ujob)
			break;
	}
	if (job != NULL) {
		MPASS(job->jobflags & KAIOCB_FINISHED);
		status = job->uaiocb._aiocb_private.status;
		error = job->uaiocb._aiocb_private.error;
		td->td_retval[0] = status;
		td->td_ru.ru_oublock += job->outblock;
		td->td_ru.ru_inblock += job->inblock;
		td->td_ru.ru_msgsnd += job->msgsnd;
		td->td_ru.ru_msgrcv += job->msgrcv;
		aio_free_entry(job);
		AIO_UNLOCK(ki);
		ops->store_error(ujob, error);
		ops->store_status(ujob, status);
	} else {
		error = EINVAL;
		AIO_UNLOCK(ki);
	}
	return (error);
}

int
sys_aio_return(struct thread *td, struct aio_return_args *uap)
{

	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
}

/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
static int
kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
    struct timespec *ts)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct kaioinfo *ki;
	struct kaiocb *firstjob, *job;
	int error, i, timo;

	timo = 0;
	if (ts) {
		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EAGAIN);

	if (njoblist == 0)
		return (0);

	AIO_LOCK(ki);
	for (;;) {
		firstjob = NULL;
		error = 0;
		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
			for (i = 0; i < njoblist; i++) {
				if (job->ujob == ujoblist[i]) {
					if (firstjob == NULL)
						firstjob = job;
					if (job->jobflags & KAIOCB_FINISHED)
						goto RETURN;
				}
			}
		}
		/* All tasks were finished. */
		if (firstjob == NULL)
			break;

		ki->kaio_flags |= KAIO_WAKEUP;
		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
		    "aiospn", timo);
		if (error == ERESTART)
			error = EINTR;
		if (error)
			break;
	}
RETURN:
	AIO_UNLOCK(ki);
	return (error);
}

int
sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
{
	struct timespec ts, *tsp;
	struct aiocb **ujoblist;
	int error;

	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
	if (error == 0)
		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
	uma_zfree(aiol_zone, ujoblist);
	return (error);
}
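/*
 * Illustrative userland use of aio_suspend() (the control block names
 * are hypothetical): wait up to one second for any of several
 * outstanding requests to finish:
 *
 *	const struct aiocb *list[2] = { &acb0, &acb1 };
 *	struct timespec ts = { 1, 0 };
 *	if (aio_suspend(list, 2, &ts) == -1 && errno == EAGAIN)
 *		;	(timed out; requests still in flight)
 *
 * kern_aio_suspend() above sleeps on the kaioinfo until one listed job
 * reaches KAIOCB_FINISHED or the timeout converted by tvtohz() fires.
 */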
/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;
	struct kaiocb *job, *jobn;
	struct file *fp;
	cap_rights_t rights;
	int error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	/* Lookup file object. */
	error = fget(td, uap->fd, cap_rights_init(&rights), &fp);
	if (error)
		return (error);

	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done;

	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;
		if (vn_isdisk(vp, &error)) {
			fdrop(fp, td);
			td->td_retval[0] = AIO_NOTCANCELED;
			return (0);
		}
	}

	AIO_LOCK(ki);
	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
		if ((uap->fd == job->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		     (uap->aiocbp == job->ujob))) {
			if (aio_cancel_job(p, ki, job)) {
				cancelled++;
			} else {
				notcancelled++;
			}
			if (uap->aiocbp != NULL)
				break;
		}
	}
	AIO_UNLOCK(ki);

done:
	fdrop(fp, td);

	if (uap->aiocbp != NULL) {
		if (cancelled) {
			td->td_retval[0] = AIO_CANCELED;
			return (0);
		}
	}

	if (notcancelled) {
		td->td_retval[0] = AIO_NOTCANCELED;
		return (0);
	}

	if (cancelled) {
		td->td_retval[0] = AIO_CANCELED;
		return (0);
	}

	td->td_retval[0] = AIO_ALLDONE;

	return (0);
}

/*
 * aio_error is implemented at the kernel level for compatibility purposes
 * only.  For a user mode async implementation, it would be best to do it in
 * a userland subroutine.
 */
static int
kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct kaiocb *job;
	struct kaioinfo *ki;
	int status;

	ki = p->p_aioinfo;
	if (ki == NULL) {
		td->td_retval[0] = EINVAL;
		return (0);
	}

	AIO_LOCK(ki);
	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
		if (job->ujob == ujob) {
			if (job->jobflags & KAIOCB_FINISHED)
				td->td_retval[0] =
				    job->uaiocb._aiocb_private.error;
			else
				td->td_retval[0] = EINPROGRESS;
			AIO_UNLOCK(ki);
			return (0);
		}
	}
	AIO_UNLOCK(ki);

	/*
	 * Hack for failure of aio_aqueue.
	 */
	 */
	status = ops->fetch_status(ujob);
	if (status == -1) {
		td->td_retval[0] = ops->fetch_error(ujob);
		return (0);
	}

	td->td_retval[0] = EINVAL;
	return (0);
}

int
sys_aio_error(struct thread *td, struct aio_error_args *uap)
{

	return (kern_aio_error(td, uap->aiocbp, &aiocb_ops));
}

/* syscall - asynchronous read from a file (REALTIME) */
#ifdef COMPAT_FREEBSD6
int
freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
	    &aiocb_ops_osigevent));
}
#endif

int
sys_aio_read(struct thread *td, struct aio_read_args *uap)
{

	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops));
}

/* syscall - asynchronous write to a file (REALTIME) */
#ifdef COMPAT_FREEBSD6
int
freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
	    &aiocb_ops_osigevent));
}
#endif

int
sys_aio_write(struct thread *td, struct aio_write_args *uap)
{

	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops));
}

int
sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap)
{

	return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops));
}

static int
kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list,
    struct aiocb **acb_list, int nent, struct sigevent *sig,
    struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct aiocb *job;
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct kevent kev;
	int error;
	int nerror;
	int i;

	if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT))
		return (EINVAL);

	if (nent < 0 || nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	ki = p->p_aioinfo;

	lj = uma_zalloc(aiolio_zone, M_WAITOK);
	lj->lioj_flags = 0;
	lj->lioj_count = 0;
	lj->lioj_finished_count = 0;
	knlist_init_mtx(&lj->klist, AIO_MTX(ki));
	ksiginfo_init(&lj->lioj_ksi);

	/*
	 * Set up the completion notification.
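	 * For LIO_NOWAIT the caller may request notification for the list
	 * as a whole: as a kevent posted to an existing kqueue
	 * (SIGEV_KEVENT), as a signal delivered to the process or to a
	 * specific thread (SIGEV_SIGNAL / SIGEV_THREAD_ID), or no
	 * notification at all (SIGEV_NONE).  Anything else is rejected
	 * with EINVAL below.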
	 */
	if (sig && (mode == LIO_NOWAIT)) {
		bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal));
		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
			/* Assume only new style KEVENT */
			kev.filter = EVFILT_LIO;
			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
			kev.ident = (uintptr_t)uacb_list; /* something unique */
			kev.data = (intptr_t)lj;
			/* pass user defined sigval data */
			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
			error = kqfd_register(
			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
			if (error) {
				uma_zfree(aiolio_zone, lj);
				return (error);
			}
		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
			;
		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
		    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
			if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
				uma_zfree(aiolio_zone, lj);
				return (EINVAL);
			}
			lj->lioj_flags |= LIOJ_SIGNAL;
		} else {
			uma_zfree(aiolio_zone, lj);
			return (EINVAL);
		}
	}

	AIO_LOCK(ki);
	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
	/*
	 * Take an extra reference on the lio so that it cannot be freed
	 * by another thread running aio_waitcomplete() or aio_return(),
	 * and so that no completion event is sent until all tasks have
	 * been queued.
	 */
	lj->lioj_count = 1;
	AIO_UNLOCK(ki);

	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	for (i = 0; i < nent; i++) {
		job = acb_list[i];
		if (job != NULL) {
			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
			if (error != 0)
				nerror++;
		}
	}

	error = 0;
	AIO_LOCK(ki);
	if (mode == LIO_WAIT) {
		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
			ki->kaio_flags |= KAIO_WAKEUP;
			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
			    PRIBIO | PCATCH, "aiospn", 0);
			if (error == ERESTART)
				error = EINTR;
			if (error)
				break;
		}
	} else {
		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
				KNOTE_LOCKED(&lj->klist, 1);
			}
			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
			    == LIOJ_SIGNAL &&
			    (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
				aio_sendsig(p, &lj->lioj_signal,
				    &lj->lioj_ksi);
				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
			}
		}
	}
	lj->lioj_count--;
	if (lj->lioj_count == 0) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		knlist_delete(&lj->klist, curthread, 1);
		PROC_LOCK(p);
		sigqueue_take(&lj->lioj_ksi);
		PROC_UNLOCK(p);
		AIO_UNLOCK(ki);
		uma_zfree(aiolio_zone, lj);
	} else
		AIO_UNLOCK(ki);

	if (nerror)
		return (EIO);
	return (error);
}

/* syscall - list directed I/O (REALTIME) */
#ifdef COMPAT_FREEBSD6
int
freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap)
{
	struct aiocb **acb_list;
	struct sigevent *sigp, sig;
	struct osigevent osig;
	int error, nent;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return (EINVAL);

	nent = uap->nent;
	if (nent < 0 || nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &osig, sizeof(osig));
		if (error)
			return (error);
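		/* Translate the legacy osigevent into a native sigevent. */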
		error = convert_old_sigevent(&osig, &sig);
		if (error)
			return (error);
		sigp = &sig;
	} else
		sigp = NULL;

	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
	if (error == 0)
		error = kern_lio_listio(td, uap->mode,
		    (struct aiocb * const *)uap->acb_list, acb_list, nent,
		    sigp, &aiocb_ops_osigevent);
	free(acb_list, M_LIO);
	return (error);
}
#endif

/* syscall - list directed I/O (REALTIME) */
int
sys_lio_listio(struct thread *td, struct lio_listio_args *uap)
{
	struct aiocb **acb_list;
	struct sigevent *sigp, sig;
	int error, nent;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return (EINVAL);

	nent = uap->nent;
	if (nent < 0 || nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &sig, sizeof(sig));
		if (error)
			return (error);
		sigp = &sig;
	} else
		sigp = NULL;

	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
	error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0]));
	if (error == 0)
		error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list,
		    nent, sigp, &aiocb_ops);
	free(acb_list, M_LIO);
	return (error);
}

static void
aio_physwakeup(struct bio *bp)
{
	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
	struct proc *userp;
	struct kaioinfo *ki;
	size_t nbytes;
	int error, nblks;

	/* Release mapping into kernel space. */
	userp = job->userproc;
	ki = userp->p_aioinfo;
	if (job->pbuf) {
		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
		relpbuf(job->pbuf, NULL);
		job->pbuf = NULL;
		atomic_subtract_int(&num_buf_aio, 1);
		AIO_LOCK(ki);
		ki->kaio_buffer_count--;
		AIO_UNLOCK(ki);
	}
	vm_page_unhold_pages(job->pages, job->npages);

	bp = job->bp;
	job->bp = NULL;
	nbytes = job->uaiocb.aio_nbytes - bp->bio_resid;
	error = 0;
	if (bp->bio_flags & BIO_ERROR)
		error = bp->bio_error;
	nblks = btodb(nbytes);
	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
		job->outblock += nblks;
	else
		job->inblock += nblks;

	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, nbytes, 0);

	g_destroy_bio(bp);
}

/* syscall - wait for the next completion of an aio request */
static int
kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
    struct timespec *ts, struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct kaioinfo *ki;
	struct kaiocb *job;
	struct aiocb *ujob;
	long error, status;
	int timo;

	ops->store_aiocb(ujobp, NULL);

	if (ts == NULL) {
		timo = 0;
	} else if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
		timo = -1;
	} else {
		if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);
	ki = p->p_aioinfo;

	error = 0;
	job = NULL;
	AIO_LOCK(ki);
	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
		if (timo == -1) {
			/* A zero timeout requested a poll; don't sleep. */
			error = EWOULDBLOCK;
			break;
		}
		ki->kaio_flags |= KAIO_WAKEUP;
		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
		    "aiowc", timo);
		if (timo && error == ERESTART)
			error = EINTR;
		if (error)
			break;
	}

	if (job != NULL) {
		MPASS(job->jobflags & KAIOCB_FINISHED);
		ujob = job->ujob;
		status = job->uaiocb._aiocb_private.status;
		error = job->uaiocb._aiocb_private.error;
		td->td_retval[0] = status;
		td->td_ru.ru_oublock += job->outblock;
		td->td_ru.ru_inblock += job->inblock;
		td->td_ru.ru_msgsnd += job->msgsnd;
		td->td_ru.ru_msgrcv += job->msgrcv;
		aio_free_entry(job);
		AIO_UNLOCK(ki);
		ops->store_aiocb(ujobp, ujob);
		ops->store_error(ujob, error);
		ops->store_status(ujob, status);
	} else
		AIO_UNLOCK(ki);

	return (error);
}

int
sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
{
	struct timespec ts, *tsp;
	int error;

	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops));
}

static int
kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
    struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;

	if (op != O_SYNC) /* XXX lack of O_DSYNC */
		return (EINVAL);
	ki = p->p_aioinfo;
	if (ki == NULL)
		aio_init_aioinfo(p);
	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
}

int
sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap)
{

	return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops));
}

/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct kaiocb *job = (struct kaiocb *)kn->kn_sdata;

	/*
	 * The job pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_ptr.p_aio = job;
	kn->kn_flags &= ~EV_FLAG1;

	knlist_add(&job->klist, kn, 0);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct knlist *knl;

	knl = &kn->kn_ptr.p_aio->klist;
	knl->kl_lock(knl->kl_lockarg);
	if (!knlist_empty(knl))
		knlist_remove(knl, kn, 1);
	knl->kl_unlock(knl->kl_lockarg);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
	struct kaiocb *job = kn->kn_ptr.p_aio;

	kn->kn_data = job->uaiocb._aiocb_private.error;
	if (!(job->jobflags & KAIOCB_FINISHED))
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

/* kqueue attach function */
static int
filt_lioattach(struct knote *kn)
{
	struct aioliojob *lj = (struct aioliojob *)kn->kn_sdata;

	/*
	 * The aioliojob pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
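	 * kern_lio_listio() is the only path that registers these knotes;
	 * it sets EV_FLAG1 in the kevent it submits, and the flag is
	 * cleared again below once the pointer has been accepted.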
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_ptr.p_lio = lj;
	kn->kn_flags &= ~EV_FLAG1;

	knlist_add(&lj->klist, kn, 0);

	return (0);
}

/* kqueue detach function */
static void
filt_liodetach(struct knote *kn)
{
	struct knlist *knl;

	knl = &kn->kn_ptr.p_lio->klist;
	knl->kl_lock(knl->kl_lockarg);
	if (!knlist_empty(knl))
		knlist_remove(knl, kn, 1);
	knl->kl_unlock(knl->kl_lockarg);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_lio(struct knote *kn, long hint)
{
	struct aioliojob *lj = kn->kn_ptr.p_lio;

	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
}

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/socket.h>
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_proto.h>
#include <compat/freebsd32/freebsd32_signal.h>
#include <compat/freebsd32/freebsd32_syscall.h>
#include <compat/freebsd32/freebsd32_util.h>

struct __aiocb_private32 {
	int32_t	status;
	int32_t	error;
	uint32_t kernelinfo;
};

#ifdef COMPAT_FREEBSD6
typedef struct oaiocb32 {
	int	aio_fildes;		/* File descriptor */
	uint64_t aio_offset __packed;	/* File offset for I/O */
	uint32_t aio_buf;		/* I/O buffer in process space */
	uint32_t aio_nbytes;		/* Number of bytes for I/O */
	struct	osigevent32 aio_sigevent; /* Signal to deliver */
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private32 _aiocb_private;
} oaiocb32_t;
#endif

typedef struct aiocb32 {
	int32_t aio_fildes;		/* File descriptor */
	uint64_t aio_offset __packed;	/* File offset for I/O */
	uint32_t aio_buf;		/* I/O buffer in process space */
	uint32_t aio_nbytes;		/* Number of bytes for I/O */
	int	__spare__[2];
	uint32_t __spare2__;
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private32 _aiocb_private;
	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
} aiocb32_t;

#ifdef COMPAT_FREEBSD6
static int
convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig)
{

	/*
	 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are
	 * supported by AIO with the old sigevent structure.
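	 * Any other notification type is rejected with EINVAL.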
	 */
	CP(*osig, *nsig, sigev_notify);
	switch (nsig->sigev_notify) {
	case SIGEV_NONE:
		break;
	case SIGEV_SIGNAL:
		nsig->sigev_signo = osig->__sigev_u.__sigev_signo;
		break;
	case SIGEV_KEVENT:
		nsig->sigev_notify_kqueue =
		    osig->__sigev_u.__sigev_notify_kqueue;
		PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob)
{
	struct oaiocb32 job32;
	int error;

	bzero(kjob, sizeof(struct aiocb));
	error = copyin(ujob, &job32, sizeof(job32));
	if (error)
		return (error);

	CP(job32, *kjob, aio_fildes);
	CP(job32, *kjob, aio_offset);
	PTRIN_CP(job32, *kjob, aio_buf);
	CP(job32, *kjob, aio_nbytes);
	CP(job32, *kjob, aio_lio_opcode);
	CP(job32, *kjob, aio_reqprio);
	CP(job32, *kjob, _aiocb_private.status);
	CP(job32, *kjob, _aiocb_private.error);
	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
	return (convert_old_sigevent32(&job32.aio_sigevent,
	    &kjob->aio_sigevent));
}
#endif

static int
aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob)
{
	struct aiocb32 job32;
	int error;

	error = copyin(ujob, &job32, sizeof(job32));
	if (error)
		return (error);
	CP(job32, *kjob, aio_fildes);
	CP(job32, *kjob, aio_offset);
	PTRIN_CP(job32, *kjob, aio_buf);
	CP(job32, *kjob, aio_nbytes);
	CP(job32, *kjob, aio_lio_opcode);
	CP(job32, *kjob, aio_reqprio);
	CP(job32, *kjob, _aiocb_private.status);
	CP(job32, *kjob, _aiocb_private.error);
	PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo);
	return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent));
}

static long
aiocb32_fetch_status(struct aiocb *ujob)
{
	struct aiocb32 *ujob32;

	ujob32 = (struct aiocb32 *)ujob;
	return (fuword32(&ujob32->_aiocb_private.status));
}

static long
aiocb32_fetch_error(struct aiocb *ujob)
{
	struct aiocb32 *ujob32;

	ujob32 = (struct aiocb32 *)ujob;
	return (fuword32(&ujob32->_aiocb_private.error));
}

static int
aiocb32_store_status(struct aiocb *ujob, long status)
{
	struct aiocb32 *ujob32;

	ujob32 = (struct aiocb32 *)ujob;
	return (suword32(&ujob32->_aiocb_private.status, status));
}

static int
aiocb32_store_error(struct aiocb *ujob, long error)
{
	struct aiocb32 *ujob32;

	ujob32 = (struct aiocb32 *)ujob;
	return (suword32(&ujob32->_aiocb_private.error, error));
}

static int
aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref)
{
	struct aiocb32 *ujob32;

	ujob32 = (struct aiocb32 *)ujob;
	return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref));
}

static int
aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob)
{

	return (suword32(ujobp, (long)ujob));
}

static struct aiocb_ops aiocb32_ops = {
	.copyin = aiocb32_copyin,
	.fetch_status = aiocb32_fetch_status,
	.fetch_error = aiocb32_fetch_error,
	.store_status = aiocb32_store_status,
	.store_error = aiocb32_store_error,
	.store_kernelinfo = aiocb32_store_kernelinfo,
	.store_aiocb = aiocb32_store_aiocb,
};

#ifdef COMPAT_FREEBSD6
static struct aiocb_ops aiocb32_ops_osigevent
= {
	.copyin = aiocb32_copyin_old_sigevent,
	.fetch_status = aiocb32_fetch_status,
	.fetch_error = aiocb32_fetch_error,
	.store_status = aiocb32_store_status,
	.store_error = aiocb32_store_error,
	.store_kernelinfo = aiocb32_store_kernelinfo,
	.store_aiocb = aiocb32_store_aiocb,
};
#endif

int
freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap)
{

	return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
}

int
freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap)
{
	struct timespec32 ts32;
	struct timespec ts, *tsp;
	struct aiocb **ujoblist;
	uint32_t *ujoblist32;
	int error, i;

	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0)
			return (error);
		CP(ts32, ts, tv_sec);
		CP(ts32, ts, tv_nsec);
		tsp = &ts;
	} else
		tsp = NULL;

	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
	ujoblist32 = (uint32_t *)ujoblist;
	error = copyin(uap->aiocbp, ujoblist32, uap->nent *
	    sizeof(ujoblist32[0]));
	if (error == 0) {
		/*
		 * The 32-bit pointers are widened in place, so walk the
		 * array from the top down; walking up would overwrite
		 * entries before they are read.
		 */
		for (i = uap->nent - 1; i >= 0; i--)
			ujoblist[i] = PTRIN(ujoblist32[i]);

		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
	}
	uma_zfree(aiol_zone, ujoblist);
	return (error);
}

int
freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap)
{

	return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops));
}

#ifdef COMPAT_FREEBSD6
int
freebsd6_freebsd32_aio_read(struct thread *td,
    struct freebsd6_freebsd32_aio_read_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
	    &aiocb32_ops_osigevent));
}
#endif

int
freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ,
	    &aiocb32_ops));
}

#ifdef COMPAT_FREEBSD6
int
freebsd6_freebsd32_aio_write(struct thread *td,
    struct freebsd6_freebsd32_aio_write_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
	    &aiocb32_ops_osigevent));
}
#endif

int
freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE,
	    &aiocb32_ops));
}

int
freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap)
{

	return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK,
	    &aiocb32_ops));
}

int
freebsd32_aio_waitcomplete(struct thread *td,
    struct freebsd32_aio_waitcomplete_args *uap)
{
	struct timespec32 ts32;
	struct timespec ts, *tsp;
	int error;

	if (uap->timeout) {
		/* Get timespec struct.
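		 * The 32-bit fields are widened one at a time into a
		 * native struct timespec before use.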
		 */
		error = copyin(uap->timeout, &ts32, sizeof(ts32));
		if (error)
			return (error);
		CP(ts32, ts, tv_sec);
		CP(ts32, ts, tv_nsec);
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp,
	    &aiocb32_ops));
}

int
freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap)
{

	return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp,
	    &aiocb32_ops));
}

#ifdef COMPAT_FREEBSD6
int
freebsd6_freebsd32_lio_listio(struct thread *td,
    struct freebsd6_freebsd32_lio_listio_args *uap)
{
	struct aiocb **acb_list;
	struct sigevent *sigp, sig;
	struct osigevent32 osig;
	uint32_t *acb_list32;
	int error, i, nent;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return (EINVAL);

	nent = uap->nent;
	if (nent < 0 || nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &osig, sizeof(osig));
		if (error)
			return (error);
		error = convert_old_sigevent32(&osig, &sig);
		if (error)
			return (error);
		sigp = &sig;
	} else
		sigp = NULL;

	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
	if (error) {
		free(acb_list32, M_LIO);
		return (error);
	}
	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
	for (i = 0; i < nent; i++)
		acb_list[i] = PTRIN(acb_list32[i]);
	free(acb_list32, M_LIO);

	error = kern_lio_listio(td, uap->mode,
	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
	    &aiocb32_ops_osigevent);
	free(acb_list, M_LIO);
	return (error);
}
#endif

int
freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap)
{
	struct aiocb **acb_list;
	struct sigevent *sigp, sig;
	struct sigevent32 sig32;
	uint32_t *acb_list32;
	int error, i, nent;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return (EINVAL);

	nent = uap->nent;
	if (nent < 0 || nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &sig32, sizeof(sig32));
		if (error)
			return (error);
		error = convert_sigevent32(&sig32, &sig);
		if (error)
			return (error);
		sigp = &sig;
	} else
		sigp = NULL;

	acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK);
	error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t));
	if (error) {
		free(acb_list32, M_LIO);
		return (error);
	}
	acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK);
	for (i = 0; i < nent; i++)
		acb_list[i] = PTRIN(acb_list32[i]);
	free(acb_list32, M_LIO);

	error = kern_lio_listio(td, uap->mode,
	    (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp,
	    &aiocb32_ops);
	free(acb_list, M_LIO);
	return (error);
}

#endif
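
/*
 * Illustrative userland usage of this facility (a sketch only, not part
 * of this file): submit a batch of reads with lio_listio() and reap the
 * completions with the FreeBSD-specific aio_waitcomplete().  Error
 * handling is abbreviated, and fd, buf, N, and BUFSIZE are assumed to
 * be set up by the caller.
 *
 *	struct aiocb cbs[N], *list[N], *done;
 *	int i;
 *
 *	for (i = 0; i < N; i++) {
 *		memset(&cbs[i], 0, sizeof(cbs[i]));
 *		cbs[i].aio_fildes = fd;
 *		cbs[i].aio_buf = buf[i];
 *		cbs[i].aio_nbytes = BUFSIZE;
 *		cbs[i].aio_offset = (off_t)i * BUFSIZE;
 *		cbs[i].aio_lio_opcode = LIO_READ;
 *		list[i] = &cbs[i];
 *	}
 *	if (lio_listio(LIO_NOWAIT, list, N, NULL) == -1)
 *		err(1, "lio_listio");
 *	for (i = 0; i < N; i++) {
 *		if (aio_waitcomplete(&done, NULL) == -1)
 *			err(1, "aio_waitcomplete");
 *		// done now points at one of the cbs[] entries
 *	}
 */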