/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
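
/*
 * For reference, a minimal userland sketch of the interface implemented
 * below (illustrative only, not part of the kernel build; error handling
 * omitted):
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	struct aiocb cb = { 0 };
 *	char buf[512];
 *
 *	cb.aio_fildes = fd;		// an open file descriptor
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	aio_read(&cb);			// queue the request
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;			// poll (or use aio_suspend())
 *	ssize_t n = aio_return(&cb);	// reap status, free kernel state
 */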

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/event.h>

#include <posix4/posix4.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <sys/aio.h>

#include "opt_vfs_aio.h"

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.
 */
static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
    "Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
    "Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
    "Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
    "Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
    "Maximum lifetime for idle aiod");

static int unloadable = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
    "Allow unload of aio (not recommended)");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
    0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
    &max_aio_queue_per_proc, 0,
    "Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
    "Maximum buf aio requests per process (stored in the process)");
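
/*
 * The tunables above appear under the vfs.aio sysctl node (names taken
 * from the SYSCTL_INT() declarations above).  As a sketch, an
 * administrator could adjust the pool at runtime with sysctl(8), e.g.:
 *
 *	sysctl vfs.aio.target_aio_procs=8
 *	sysctl vfs.aio.max_aio_queue=2048
 */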

struct aiocblist {
	TAILQ_ENTRY(aiocblist) list;	/* List of jobs */
	TAILQ_ENTRY(aiocblist) plist;	/* List of jobs for proc */
	int	jobflags;
	int	jobstate;
	int	inputcharge;
	int	outputcharge;
	struct	callout_handle timeouthandle;
	struct	buf *bp;		/* Buffer pointer */
	struct	proc *userproc;		/* User process */ /* Not td! */
	struct	ucred *cred;		/* Active credential when created */
	struct	file *fd_file;		/* Pointer to file structure */
	struct	aio_liojob *lio;	/* Optional lio job */
	struct	aiocb *uuaiocb;		/* Pointer in userspace of aiocb */
	struct	klist klist;		/* list of knotes */
	struct	aiocb uaiocb;		/* Kernel I/O control block */
};

/* jobflags */
#define AIOCBLIST_RUNDOWN	0x4
#define AIOCBLIST_DONE		0x10

/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aiothreadlist {
	int aiothreadflags;		/* AIO proc flags */
	TAILQ_ENTRY(aiothreadlist) list; /* List of processes */
	struct thread *aiothread;	/* The AIO thread */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
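
/*
 * Job lifecycle, as implemented below: a queued daemon-path job moves
 * JOBST_JOBQGLOBAL -> JOBST_JOBRUNNING -> JOBST_JOBFINISHED, while a
 * physio-path job moves JOBST_JOBQBUF -> JOBST_JOBBFINISHED; in either
 * case aio_free_entry() resets the state to JOBST_NULL when the job is
 * reaped.
 */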

static TAILQ_HEAD(,aiothreadlist) aio_activeproc;	/* Active daemons */
static TAILQ_HEAD(,aiothreadlist) aio_freeproc;		/* Idle daemons */
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct thread *td, struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static void	aio_proc_rundown(void *arg, struct proc *p);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);
static void	aio_swake_cb(struct socket *, struct sockbuf *);
static int	aio_unload(void);
static void	process_signal(void *aioj);
static int	filt_aioattach(struct knote *kn);
static void	filt_aiodetach(struct knote *kn);
static int	filt_aio(struct knote *kn, long hint);

/*
 * Zones for:
 * 	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;

/* kqueue filters for aio */
static struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };

static eventhandler_tag exit_tag, exec_tag;

/*
 * Main operations function for use as a kernel module.
 */
static int
aio_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		aio_onceonly();
		break;
	case MOD_UNLOAD:
		error = aio_unload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t aio_mod = {
	"aio",
	&aio_modload,
	NULL
};

SYSCALL_MODULE_HELPER(aio_return);
SYSCALL_MODULE_HELPER(aio_suspend);
SYSCALL_MODULE_HELPER(aio_cancel);
SYSCALL_MODULE_HELPER(aio_error);
SYSCALL_MODULE_HELPER(aio_read);
SYSCALL_MODULE_HELPER(aio_write);
SYSCALL_MODULE_HELPER(aio_waitcomplete);
SYSCALL_MODULE_HELPER(lio_listio);

DECLARE_MODULE(aio, aio_mod,
	SI_SUB_VFS, SI_ORDER_ANY);
MODULE_VERSION(aio, 1);

/*
 * Startup initialization
 */
static void
aio_onceonly(void)
{

	/* XXX: should probably just use so->callback */
	aio_swake = &aio_swake_cb;
	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aio_liojob), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
	async_io_version = _POSIX_VERSION;
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
}
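
/*
 * When built as a module, this facility can be loaded at runtime and,
 * if the vfs.aio.unloadable sysctl is set, unloaded again, e.g.:
 *
 *	kldload aio
 *	sysctl vfs.aio.unloadable=1 && kldunload aio
 */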

/*
 * Callback for unload of AIO when used as a module.
 */
static int
aio_unload(void)
{

	/*
	 * XXX: no unloads by default, it's too dangerous.
	 * perhaps we could do it if we locked out callers and then
	 * did an aio_proc_rundown() on each process.
	 */
	if (!unloadable)
		return (EOPNOTSUPP);

	async_io_version = 0;
	aio_swake = NULL;
	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
	EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
	kqueue_del_filteropts(EVFILT_AIO);
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
	return (0);
}

/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = uma_zalloc(kaio_zone, M_WAITOK);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}

/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	struct proc *p;
	int error;
	int s;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, PRIBIO, "jobwai", 0);
	}
	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	/*
	 * XXXKSE Note the thread here is used to eventually find the
	 * owning process again, but it is also used to do a fo_close
	 * and that requires the thread.  (but does it require the
	 * OWNING thread?  (or maybe the running thread?)
	 * There is a semantic problem here...
	 */
	knote_remove(FIRST_THREAD_IN_PROC(p), &aiocbe->klist); /* XXXKSE */

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(aiocbe)) != 0)
			return (error);
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		s = splnet();
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
		splx(s);
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED)
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		uma_zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
	fdrop(aiocbe->fd_file, curthread);
	crfree(aiocbe->cred);
	uma_zfree(aiocb_zone, aiocbe);
	return (0);
}

/*
 * Rundown the jobs for a given process.
 */
static void
aio_proc_rundown(void *arg, struct proc *p)
{
	int s;
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	s = splnet();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = aiocbe->fd_file;
		if (fp != NULL) {
			so = fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
			if (TAILQ_EMPTY(&so->so_aiojobq)) {
				so->so_snd.sb_flags &= ~SB_AIO;
				so->so_rcv.sb_flags &= ~SB_AIO;
			}
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}
	splx(s);

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	/*
	 * Note the use of lots of splbio here, trying to avoid splbio for long
	 * chains of I/O.  Probably unnecessary.
	 */
restart3:
	s = splbio();
	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, PRIBIO, "aioprn", 0);
		splx(s);
		goto restart3;
	}
	splx(s);

restart4:
	s = splbio();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			splx(s);
			goto restart4;
		}
	}
	splx(s);

	/*
	 * If we've slept, jobs might have moved from one queue to another.
	 * Retry rundown if we didn't manage to empty the queues.
	 */
	if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
		goto restart1;

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
		    0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			uma_zfree(aiolio_zone, lj);
		} else {
#ifdef DIAGNOSTIC
			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
#endif
		}
	}

	uma_zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aiothreadlist *aiop)
{
	int s;
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	s = splnet();
	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
	    TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			splx(s);
			return (aiocbe);
		}
	}
	splx(s);

	return (NULL);
}

/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct proc *mycp;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	td = curthread;
	td_savedcred = td->td_ucred;
	td->td_ucred = aiocbe->cred;
	mycp = td->td_proc;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	inblock_st = mycp->p_stats->p_ru.ru_inblock;
	oublock_st = mycp->p_stats->p_ru.ru_oublock;
	/*
	 * _aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	}
	inblock_end = mycp->p_stats->p_ru.ru_inblock;
	oublock_end = mycp->p_stats->p_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
			PROC_LOCK(aiocbe->userproc);
			psignal(aiocbe->userproc, SIGPIPE);
			PROC_UNLOCK(aiocbe->userproc);
		}
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
	td->td_ucred = td_savedcred;
}
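
/*
 * To summarize the above: aio_process() runs in an aio daemon thread but
 * with the job owner's credential swapped in; completion status and error
 * land in the job's _aiocb_private fields, from which aio_error() and
 * aio_return() later retrieve them.
 */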

/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 */
static void
aio_daemon(void *uproc)
{
	int s;
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aiothreadlist *aiop;
	struct kaioinfo *ki;
	struct proc *curcp, *mycp, *userp;
	struct vmspace *myvm, *tmpvm;
	struct thread *td = curthread;
	struct pgrp *newpgrp;
	struct session *newsess;

	mtx_lock(&Giant);
	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = td->td_proc;
	myvm = mycp->p_vmspace;

	KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = uma_zalloc(aiop_zone, M_WAITOK);
	aiop->aiothread = td;
	aiop->aiothreadflags |= AIOP_FREE;

	s = splnet();

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	splx(s);

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 */
	fdfree(td);

	mtx_unlock(&Giant);
	/* The daemon resides in its own pgrp. */
	MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
	    M_WAITOK | M_ZERO);
	MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION,
	    M_WAITOK | M_ZERO);

	sx_xlock(&proctree_lock);
	enterpgrp(mycp, mycp->p_pid, newpgrp, newsess);
	sx_xunlock(&proctree_lock);
	mtx_lock(&Giant);

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);

	for (;;) {
		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aiothreadflags & AIOP_FREE) {
			s = splnet();
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aiothreadflags &= ~AIOP_FREE;
			splx(s);
		}
		aiop->aiothreadflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (userp != curcp) {
				/*
				 * Save the current address space that we are
				 * connected to.
				 */
				tmpvm = mycp->p_vmspace;

				/*
				 * Point to the new user address space, and
				 * refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;

				/* Activate the new mapping. */
				pmap_activate(FIRST_THREAD_IN_PROC(mycp));

				/*
				 * If the old address space wasn't the daemon's
				 * own address space, then we need to remove the
				 * daemon's reference from the other process
				 * that it was acting on behalf of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			s = splbio();
			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					PROC_LOCK(userp);
					psignal(userp,
					    lj->lioj_signal.sigev_signo);
					PROC_UNLOCK(userp);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			splx(s);

			aiocbe->jobstate = JOBST_JOBFINISHED;

			s = splnet();
			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
			splx(s);
			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				PROC_LOCK(userp);
				psignal(userp, cb->aio_sigevent.sigev_signo);
				PROC_UNLOCK(userp);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curcp != mycp) {
			/* Get the user address space to disconnect from. */
			tmpvm = mycp->p_vmspace;

			/* Get original address space for daemon. */
			mycp->p_vmspace = myvm;

			/* Activate the daemon's address space. */
			pmap_activate(FIRST_THREAD_IN_PROC(mycp));
#ifdef DIAGNOSTIC
			if (tmpvm == myvm) {
				printf("AIOD: vmspace problem -- %d\n",
				    mycp->p_pid);
			}
#endif
			/* Remove our vmspace reference. */
			vmspace_free(tmpvm);

			curcp = mycp;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		s = splnet();
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aiothreadflags |= AIOP_FREE;
		splx(s);

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if ((aiop->aiothreadflags & AIOP_SCHED) == 0 &&
		    tsleep(aiop->aiothread, PRIBIO, "aiordy", aiod_lifetime)) {
			s = splnet();
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aiothreadflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					splx(s);
					uma_zfree(aiop_zone, aiop);
					num_aio_procs--;
#ifdef DIAGNOSTIC
					if (mycp->p_vmspace->vm_refcnt <= 1) {
						printf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
					}
#endif
					kthread_exit(0);
				}
			}
			splx(s);
		}
	}
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(void)
{
	int error;
	struct proc *p;

	error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
	    num_aio_procs);
	if (error)
		return (error);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(p, PZERO, "aiosta", aiod_timeout);

	num_aio_procs++;

	return (error);
}

/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, _aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	int error;
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int s;
	int notify;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = fp->f_vnode;

	/*
	 * If it's not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = (struct buf *)getpbuf(NULL);
	BUF_KERNPROC(bp);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_caller1 = p;
	bp->b_dev = vp->v_rdev;
	error = 0;

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_flags = B_PHYS;
	bp->b_iodone = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(uintptr_t)cb->aio_buf;
	bp->b_blkno = btodb(cb->aio_offset);
	bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;

	/*
	 * Bring buffer into kernel space.
	 */
	if (vmapbuf(bp) < 0) {
		error = EFAULT;
		goto doerror;
	}

	s = splbio();
	aiocbe->bp = bp;
	bp->b_caller2 = (void *)aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;
	bp->b_error = 0;

	splx(s);

	/* Perform transfer. */
	DEV_STRATEGY(bp);

	notify = 0;
	s = splbio();

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
			notify = 1;
		}
	}
	splx(s);
	if (notify)
		KNOTE(&aiocbe->klist, 0);
	return (0);

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return (error);
}
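
/*
 * Summary of the fast path above: a job is eligible for direct physio
 * only when the descriptor is a disk VCHR device, the transfer is a
 * multiple of the device's physical block size, it fits within MAXPHYS,
 * and the process is under its kaio_ballowed_count buffer quota;
 * anything else falls back to the daemon (thread) path.
 */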

/*
 * This waits/tests physio completion.
 */
static int
aio_fphysio(struct aiocblist *iocb)
{
	int s;
	struct buf *bp;
	int error;

	bp = iocb->bp;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				splx(s);
				return (EINPROGRESS);
			} else
				break;
		}
	}
	splx(s);

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = 0;

	error = 0;

	/* Check for an error. */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}

/*
 * Wake up aio requests that may be serviceable now.
 */
static void
aio_swake_cb(struct socket *so, struct sockbuf *sb)
{
	struct aiocblist *cb, *cbn;
	struct proc *p;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aiothreadlist *aiop;

	if (sb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.sb_flags &= ~SB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.sb_flags &= ~SB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			p = cb->userproc;
			ki = p->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			wakecount++;
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aiothreadflags &= ~AIOP_FREE;
			wakeup(aiop->aiothread);
		}
	}
}

/*
 * Queue a new AIO request.  The choice between the threaded and the direct
 * physio (VCHR) technique is made here.
 */
static int
_aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	unsigned int fd;
	struct socket *so;
	int s;
	int error;
	int opcode, user_opcode;
	struct aiocblist *aiocbe;
	struct aiothreadlist *aiop;
	struct kaioinfo *ki;
	struct kevent kev;
	struct kqueue *kq;
	struct file *kq_fp;

	aiocbe = uma_zalloc(aiocb_zone, M_WAITOK);
	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	callout_handle_init(&aiocbe->timeouthandle);
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
	if (error) {
		suword(&job->_aiocb_private.error, error);
		uma_zfree(aiocb_zone, aiocbe);
		return (error);
	}
	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		uma_zfree(aiocb_zone, aiocbe);
		return (EINVAL);
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	user_opcode = aiocbe->uaiocb.aio_lio_opcode;
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	FILEDESC_LOCK(fdp);
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		FILEDESC_UNLOCK(fdp);
		uma_zfree(aiocb_zone, aiocbe);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return (EBADF);
	}

	fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
	    0))) {
		FILEDESC_UNLOCK(fdp);
		uma_zfree(aiocb_zone, aiocbe);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return (EBADF);
	}
	fhold(fp);
	FILEDESC_UNLOCK(fdp);

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}
	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		fdrop(fp, td);
		uma_zfree(aiocb_zone, aiocbe);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return (0);
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		if (type == 0)
			suword(&job->_aiocb_private.status, 0);
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
	} else {
		/*
		 * This method for requesting kevent-based notification won't
		 * work on the alpha, since we're passing in a pointer
		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
		 * based method instead.
		 */
		if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
		    user_opcode == LIO_WRITE)
			goto no_kqueue;

		error = copyin((struct kevent *)(uintptr_t)user_opcode,
		    &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	if ((u_int)kev.ident >= fdp->fd_nfiles ||
	    (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
	    (kq_fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto aqueue_fail;
	}
	kq = kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	error = kqueue_register(kq, &kev, td);
aqueue_fail:
	if (error) {
		fdrop(fp, td);
		uma_zfree(aiocb_zone, aiocbe);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->cred = crhold(td->td_ucred);
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when sbnotify()
		 * happens.
		 */
		so = fp->f_data;
		s = splnet();
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.sb_flags |= SB_AIO;
			else
				so->so_snd.sb_flags |= SB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			splx(s);
			error = 0;
			goto done;
		}
		splx(s);
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	s = splnet();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	splx(s);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota, then
	 * start one.  Otherwise, depend on the subsequent I/O completions to
	 * pick-up this job.  If we don't successfully create the new process
	 * (thread) due to resource issues, we return an error for now (EAGAIN),
	 * which is likely not the correct thing to do.
	 */
	s = splnet();
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aiothreadflags &= ~AIOP_FREE;
		wakeup(aiop->aiothread);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	splx(s);
done:
	return (error);
}
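
/*
 * To recap the queueing decision made above: a socket that is not yet
 * ready parks the job on so_aiojobq until aio_swake_cb() moves it to the
 * global queue; an eligible disk device goes through aio_qphysio(); and
 * everything else lands on the global aio_jobs queue for an aio daemon,
 * with a new daemon spawned if none is free and the quotas allow it.
 */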

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct thread *td, struct aiocb *job, int type)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return (EAGAIN);

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return (EAGAIN);

	return _aio_aqueue(td, job, NULL, type);
}

/*
 * Support the aio_return system call; as a side effect, kernel resources are
 * released.
 */
int
aio_return(struct thread *td, struct aio_return_args *uap)
{
	struct proc *p = td->td_proc;
	int s;
	long jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ujob = uap->aiocbp;
	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return (EINVAL);

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EINVAL);
	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			goto done;
		}
	}
	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
		    == jobref) {
			break;
		}
	}
	splx(s);
done:
	if (cb != NULL) {
		if (ujob == cb->uuaiocb) {
			td->td_retval[0] =
			    cb->uaiocb._aiocb_private.status;
		} else
			td->td_retval[0] = EFAULT;
		aio_free_entry(cb);
		return (0);
	}
	return (EINVAL);
}

/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
aio_suspend(struct thread *td, struct aio_suspend_args *uap)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, s, timo;
	long *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
		return (EINVAL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return (error);

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EAGAIN);

	njoblist = 0;
	ijoblist = uma_zalloc(aiol_zone, M_WAITOK);
	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		uma_zfree(aiol_zone, ijoblist);
		uma_zfree(aiol_zone, ujoblist);
		return (0);
	}

	error = 0;
	for (;;) {
		TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					uma_zfree(aiol_zone, ijoblist);
					uma_zfree(aiol_zone, ujoblist);
					return (error);
				}
			}
		}

		s = splbio();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					splx(s);
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					uma_zfree(aiol_zone, ijoblist);
					uma_zfree(aiol_zone, ujoblist);
					return (error);
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
		splx(s);

		if (error == ERESTART || error == EINTR) {
			uma_zfree(aiol_zone, ijoblist);
			uma_zfree(aiol_zone, ujoblist);
			return (EINTR);
		} else if (error == EWOULDBLOCK) {
			uma_zfree(aiol_zone, ijoblist);
			uma_zfree(aiol_zone, ujoblist);
			return (EAGAIN);
		}
	}

	/* NOTREACHED */
	return (EINVAL);
}
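
/*
 * Illustrative userland use of aio_suspend() (sketch only; error
 * handling omitted):
 *
 *	const struct aiocb *list[1] = { &cb };
 *	struct timespec ts = { 1, 0 };		// wait at most one second
 *
 *	if (aio_suspend(list, 1, &ts) == 0)
 *		result = aio_return(&cb);	// at least one job completed
 */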

/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
aio_cancel(struct thread *td, struct aio_cancel_args *uap)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct proc *po;
	int s, error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	fdp = p->p_fd;
	if ((u_int)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
		return (EBADF);

	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;

		if (vn_isdisk(vp, &error)) {
			td->td_retval[0] = AIO_NOTCANCELED;
			return (0);
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = fp->f_data;

		s = splnet();

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb)) {
				po = cbe->userproc;
				ki = po->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(po);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL) {
					PROC_LOCK(cbe->userproc);
					psignal(cbe->userproc,
					    cbe->uaiocb.aio_sigevent.sigev_signo);
					PROC_UNLOCK(cbe->userproc);
				}
				if (uap->aiocbp)
					break;
			}
		}
		splx(s);

		if ((cancelled) && (uap->aiocbp)) {
			td->td_retval[0] = AIO_CANCELED;
			return (0);
		}
	}
	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done;
	s = splnet();

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {

			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL) {
					PROC_LOCK(cbe->userproc);
					psignal(cbe->userproc,
					    cbe->uaiocb.aio_sigevent.sigev_signo);
					PROC_UNLOCK(cbe->userproc);
				}
			} else {
				notcancelled++;
			}
		}
	}
	splx(s);
done:
	if (notcancelled) {
		td->td_retval[0] = AIO_NOTCANCELED;
		return (0);
	}
	if (cancelled) {
		td->td_retval[0] = AIO_CANCELED;
		return (0);
	}
	td->td_retval[0] = AIO_ALLDONE;

	return (0);
}

/*
 * aio_error is implemented in the kernel for compatibility purposes only.  For
 * a user-mode async implementation, it would be best to do it in a userland
 * subroutine.
 */
int
aio_error(struct thread *td, struct aio_error_args *uap)
{
	struct proc *p = td->td_proc;
	int s;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	long jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EINVAL);

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return (EINVAL);

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			td->td_retval[0] = cb->uaiocb._aiocb_private.error;
			return (0);
		}
	}

	s = splnet();

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			td->td_retval[0] = EINPROGRESS;
			splx(s);
			return (0);
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			td->td_retval[0] = EINPROGRESS;
			splx(s);
			return (0);
		}
	}
	splx(s);

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			td->td_retval[0] = cb->uaiocb._aiocb_private.error;
			splx(s);
			return (0);
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			td->td_retval[0] = EINPROGRESS;
			splx(s);
			return (0);
		}
	}
	splx(s);

#if (0)
	/*
	 * Hack for lio.
	 */
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
#endif
	return (EINVAL);
}

/* syscall - asynchronous read from a file (REALTIME) */
int
aio_read(struct thread *td, struct aio_read_args *uap)
{

	return aio_aqueue(td, uap->aiocbp, LIO_READ);
}

/* syscall - asynchronous write to a file (REALTIME) */
int
aio_write(struct thread *td, struct aio_write_args *uap)
{

	return aio_aqueue(td, uap->aiocbp, LIO_WRITE);
}
/*
 * This is a weird hack so that we can post a signal.  It is safe to do
 * so from a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		PROC_LOCK(lj->lioj_ki->kaio_p);
		psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		PROC_UNLOCK(lj->lioj_ki->kaio_p);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
		PROC_LOCK(aiocbe->userproc);
		psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
		PROC_UNLOCK(aiocbe->userproc);
	}
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups,
 * and signals.
 */
static void
aio_physwakeup(struct buf *bp)
{
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	wakeup(bp);

	aiocbe = (struct aiocblist *)bp->b_caller2;
	if (aiocbe) {
		p = bp->b_caller1;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_ioflags & BIO_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * Wakeup/signal if all of the interrupt jobs are
			 * done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					aiocbe->timeouthandle =
					    timeout(process_signal,
					    aiocbe, 0);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
			aiocbe->timeouthandle =
			    timeout(process_signal, aiocbe, 0);
	}
}
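/*
 * Illustrative userland sketch (not part of this file) of the
 * FreeBSD-specific reaping interface implemented below.  The kernel
 * first stores NULL through the iocbp pointer, then fills it in with
 * the address of the completed user aiocb; a NULL timeout sleeps
 * until something completes.  The return value is the completed
 * request's aio_return() status:
 *
 *	struct aiocb *donep;
 *	ssize_t n;
 *
 *	n = aio_waitcomplete(&donep, NULL);
 *	if (n == -1)
 *		err(1, "aio_waitcomplete");
 */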
/* syscall - wait for the next completion of an aio request */
int
aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, s, timo;

	suword(uap->aiocbp, (long)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EAGAIN);

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != NULL) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			td->td_retval[0] = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock +=
				    cb->inputcharge;
				cb->inputcharge = 0;
			}
			/* Fetch the error before the entry is freed. */
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return (error);
		}

		s = splbio();
		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != NULL) {
			splx(s);
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			td->td_retval[0] = cb->uaiocb._aiocb_private.status;
			/* Fetch the error before the entry is freed. */
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return (error);
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
		splx(s);

		if (error == ERESTART)
			return (EINTR);
		else if (error < 0)
			return (error);
		else if (error == EINTR)
			return (EINTR);
		else if (error == EWOULDBLOCK)
			return (EAGAIN);
	}
}

/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}
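/*
 * A note on the kqueue hookup above: EVFILT_AIO knotes are attached
 * by the kernel itself at job submission time, with the aiocblist
 * pointer passed in kn_sdata and EV_FLAG1 marking the registration
 * as kernel-initiated.  A direct EV_ADD from userland is rejected
 * with EPERM in filt_aioattach(), since an arbitrary user-supplied
 * kn_sdata pointer could not be trusted.  Completion paths call
 * KNOTE() on the job's klist (see aio_physwakeup() above), after
 * which filt_aio() reports the event with EV_EOF set and the job's
 * error status in kn_data.
 */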