1 /* 2 * Copyright (c) 1997 John S. Dyson. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. John S. Dyson's name may not be used to endorse or promote products 10 * derived from this software without specific prior written permission. 11 * 12 * DISCLAIMER: This code isn't warranted to do anything useful. Anything 13 * bad that happens because of using this software isn't the responsibility 14 * of the author. This software is distributed AS-IS. 15 * 16 * $FreeBSD$ 17 */ 18 19 /* 20 * This file contains support for the POSIX 1003.1B AIO/LIO facility. 21 */ 22 23 #include <sys/param.h> 24 #include <sys/systm.h> 25 #include <sys/malloc.h> 26 #include <sys/bio.h> 27 #include <sys/buf.h> 28 #include <sys/sysproto.h> 29 #include <sys/filedesc.h> 30 #include <sys/kernel.h> 31 #include <sys/kthread.h> 32 #include <sys/fcntl.h> 33 #include <sys/file.h> 34 #include <sys/lock.h> 35 #include <sys/mutex.h> 36 #include <sys/unistd.h> 37 #include <sys/proc.h> 38 #include <sys/resourcevar.h> 39 #include <sys/signalvar.h> 40 #include <sys/protosw.h> 41 #include <sys/socketvar.h> 42 #include <sys/syscall.h> 43 #include <sys/sysent.h> 44 #include <sys/sysctl.h> 45 #include <sys/vnode.h> 46 #include <sys/conf.h> 47 #include <sys/event.h> 48 49 #include <vm/vm.h> 50 #include <vm/vm_extern.h> 51 #include <vm/pmap.h> 52 #include <vm/vm_map.h> 53 #include <vm/uma.h> 54 #include <sys/aio.h> 55 56 #include <machine/limits.h> 57 58 #include "opt_vfs_aio.h" 59 60 /* 61 * Counter for allocating reference ids to new jobs. Wrapped to 1 on 62 * overflow. 63 */ 64 static long jobrefid; 65 66 #define JOBST_NULL 0x0 67 #define JOBST_JOBQGLOBAL 0x2 68 #define JOBST_JOBRUNNING 0x3 69 #define JOBST_JOBFINISHED 0x4 70 #define JOBST_JOBQBUF 0x5 71 #define JOBST_JOBBFINISHED 0x6 72 73 #ifndef MAX_AIO_PER_PROC 74 #define MAX_AIO_PER_PROC 32 75 #endif 76 77 #ifndef MAX_AIO_QUEUE_PER_PROC 78 #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */ 79 #endif 80 81 #ifndef MAX_AIO_PROCS 82 #define MAX_AIO_PROCS 32 83 #endif 84 85 #ifndef MAX_AIO_QUEUE 86 #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */ 87 #endif 88 89 #ifndef TARGET_AIO_PROCS 90 #define TARGET_AIO_PROCS 4 91 #endif 92 93 #ifndef MAX_BUF_AIO 94 #define MAX_BUF_AIO 16 95 #endif 96 97 #ifndef AIOD_TIMEOUT_DEFAULT 98 #define AIOD_TIMEOUT_DEFAULT (10 * hz) 99 #endif 100 101 #ifndef AIOD_LIFETIME_DEFAULT 102 #define AIOD_LIFETIME_DEFAULT (30 * hz) 103 #endif 104 105 SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management"); 106 107 static int max_aio_procs = MAX_AIO_PROCS; 108 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, 109 CTLFLAG_RW, &max_aio_procs, 0, 110 "Maximum number of kernel threads to use for handling async IO "); 111 112 static int num_aio_procs = 0; 113 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, 114 CTLFLAG_RD, &num_aio_procs, 0, 115 "Number of presently active kernel threads for async IO"); 116 117 /* 118 * The code will adjust the actual number of AIO processes towards this 119 * number when it gets a chance. 
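 *
 * These knobs are exported as vfs.aio.* sysctls (via the SYSCTL_INT()
 * definitions in this file), so they can be inspected or tuned from
 * userland.  A rough illustration (not part of this file; the value 8 is
 * an arbitrary example):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int n, target = 8;
 *	size_t len = sizeof(n);
 *	sysctlbyname("vfs.aio.num_aio_procs", &n, &len, NULL, 0);
 *	sysctlbyname("vfs.aio.target_aio_procs", NULL, NULL,
 *	    &target, sizeof(target));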
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

static int unloadable = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
	"Allow unload of aio (not recommended)");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");

struct aiocblist {
	TAILQ_ENTRY(aiocblist) list;	/* List of jobs */
	TAILQ_ENTRY(aiocblist) plist;	/* List of jobs for proc */
	int	jobflags;
	int	jobstate;
	int	inputcharge;
	int	outputcharge;
	struct	callout_handle timeouthandle;
	struct	buf *bp;		/* Buffer pointer */
	struct	proc *userproc;		/* User process */ /* Not td! */
	struct	file *fd_file;		/* Pointer to file structure */
	struct	aiothreadlist *jobaiothread;	/* AIO process descriptor */
	struct	aio_liojob *lio;	/* Optional lio job */
	struct	aiocb *uuaiocb;		/* Pointer in userspace of aiocb */
	struct	klist klist;		/* list of knotes */
	struct	aiocb uaiocb;		/* Kernel I/O control block */
};

/* jobflags */
#define AIOCBLIST_RUNDOWN	0x4
#define AIOCBLIST_ASYNCFREE	0x8
#define AIOCBLIST_DONE		0x10

/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aiothreadlist {
	int	aiothreadflags;			/* AIO proc flags */
	TAILQ_ENTRY(aiothreadlist) list;	/* List of processes */
	struct	thread *aiothread;		/* The AIO thread */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD(,aiothreadlist) aio_activeproc;	/* Active daemons */
static TAILQ_HEAD(,aiothreadlist) aio_freeproc;		/* Idle daemons */
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct thread *td, struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static void	aio_proc_rundown(struct proc *p);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);
static void	aio_swake_cb(struct socket *, struct sockbuf *);
static int	aio_unload(void);
static void process_signal(void *aioj); 262 static int filt_aioattach(struct knote *kn); 263 static void filt_aiodetach(struct knote *kn); 264 static int filt_aio(struct knote *kn, long hint); 265 266 /* 267 * Zones for: 268 * kaio Per process async io info 269 * aiop async io thread data 270 * aiocb async io jobs 271 * aiol list io job pointer - internal to aio_suspend XXX 272 * aiolio list io jobs 273 */ 274 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone; 275 276 /* kqueue filters for aio */ 277 static struct filterops aio_filtops = 278 { 0, filt_aioattach, filt_aiodetach, filt_aio }; 279 280 /* 281 * Main operations function for use as a kernel module. 282 */ 283 static int 284 aio_modload(struct module *module, int cmd, void *arg) 285 { 286 int error = 0; 287 288 switch (cmd) { 289 case MOD_LOAD: 290 aio_onceonly(); 291 break; 292 case MOD_UNLOAD: 293 error = aio_unload(); 294 break; 295 case MOD_SHUTDOWN: 296 break; 297 default: 298 error = EINVAL; 299 break; 300 } 301 return (error); 302 } 303 304 static moduledata_t aio_mod = { 305 "aio", 306 &aio_modload, 307 NULL 308 }; 309 310 SYSCALL_MODULE_HELPER(aio_return); 311 SYSCALL_MODULE_HELPER(aio_suspend); 312 SYSCALL_MODULE_HELPER(aio_cancel); 313 SYSCALL_MODULE_HELPER(aio_error); 314 SYSCALL_MODULE_HELPER(aio_read); 315 SYSCALL_MODULE_HELPER(aio_write); 316 SYSCALL_MODULE_HELPER(aio_waitcomplete); 317 SYSCALL_MODULE_HELPER(lio_listio); 318 319 DECLARE_MODULE(aio, aio_mod, 320 SI_SUB_VFS, SI_ORDER_ANY); 321 MODULE_VERSION(aio, 1); 322 323 /* 324 * Startup initialization 325 */ 326 static void 327 aio_onceonly(void) 328 { 329 330 /* XXX: should probably just use so->callback */ 331 aio_swake = &aio_swake_cb; 332 at_exit(aio_proc_rundown); 333 at_exec(aio_proc_rundown); 334 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops); 335 TAILQ_INIT(&aio_freeproc); 336 TAILQ_INIT(&aio_activeproc); 337 TAILQ_INIT(&aio_jobs); 338 TAILQ_INIT(&aio_bufjobs); 339 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL, 340 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 341 aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL, 342 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 343 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL, 344 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 345 aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL, 346 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 347 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aio_liojob), NULL, 348 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 349 aiod_timeout = AIOD_TIMEOUT_DEFAULT; 350 aiod_lifetime = AIOD_LIFETIME_DEFAULT; 351 jobrefid = 1; 352 } 353 354 /* 355 * Callback for unload of AIO when used as a module. 356 */ 357 static int 358 aio_unload(void) 359 { 360 361 /* 362 * XXX: no unloads by default, it's too dangerous. 363 * perhaps we could do it if locked out callers and then 364 * did an aio_proc_rundown() on each process. 365 */ 366 if (!unloadable) 367 return (EOPNOTSUPP); 368 369 aio_swake = NULL; 370 rm_at_exit(aio_proc_rundown); 371 rm_at_exec(aio_proc_rundown); 372 kqueue_del_filteropts(EVFILT_AIO); 373 return (0); 374 } 375 376 /* 377 * Init the per-process aioinfo structure. The aioinfo limits are set 378 * per-process for user limit (resource) management. 
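 *
 * When a process bumps into these limits, submission fails with EAGAIN
 * (see aio_aqueue() below).  A hypothetical userland wrapper (illustrative
 * only, not part of this file) would simply back off and retry:
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	static int
 *	submit_read(struct aiocb *cb)
 *	{
 *		while (aio_read(cb) == -1) {
 *			if (errno != EAGAIN)
 *				return (-1);
 *			usleep(1000);
 *		}
 *		return (0);
 *	}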
379 */ 380 static void 381 aio_init_aioinfo(struct proc *p) 382 { 383 struct kaioinfo *ki; 384 if (p->p_aioinfo == NULL) { 385 ki = uma_zalloc(kaio_zone, M_WAITOK); 386 p->p_aioinfo = ki; 387 ki->kaio_flags = 0; 388 ki->kaio_maxactive_count = max_aio_per_proc; 389 ki->kaio_active_count = 0; 390 ki->kaio_qallowed_count = max_aio_queue_per_proc; 391 ki->kaio_queue_count = 0; 392 ki->kaio_ballowed_count = max_buf_aio; 393 ki->kaio_buffer_count = 0; 394 ki->kaio_buffer_finished_count = 0; 395 ki->kaio_p = p; 396 TAILQ_INIT(&ki->kaio_jobdone); 397 TAILQ_INIT(&ki->kaio_jobqueue); 398 TAILQ_INIT(&ki->kaio_bufdone); 399 TAILQ_INIT(&ki->kaio_bufqueue); 400 TAILQ_INIT(&ki->kaio_liojoblist); 401 TAILQ_INIT(&ki->kaio_sockqueue); 402 } 403 404 while (num_aio_procs < target_aio_procs) 405 aio_newproc(); 406 } 407 408 /* 409 * Free a job entry. Wait for completion if it is currently active, but don't 410 * delay forever. If we delay, we return a flag that says that we have to 411 * restart the queue scan. 412 */ 413 static int 414 aio_free_entry(struct aiocblist *aiocbe) 415 { 416 struct kaioinfo *ki; 417 struct aio_liojob *lj; 418 struct proc *p; 419 int error; 420 int s; 421 422 if (aiocbe->jobstate == JOBST_NULL) 423 panic("aio_free_entry: freeing already free job"); 424 425 p = aiocbe->userproc; 426 ki = p->p_aioinfo; 427 lj = aiocbe->lio; 428 if (ki == NULL) 429 panic("aio_free_entry: missing p->p_aioinfo"); 430 431 while (aiocbe->jobstate == JOBST_JOBRUNNING) { 432 if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) 433 return 0; 434 aiocbe->jobflags |= AIOCBLIST_RUNDOWN; 435 tsleep(aiocbe, PRIBIO, "jobwai", 0); 436 } 437 aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE; 438 439 if (aiocbe->bp == NULL) { 440 if (ki->kaio_queue_count <= 0) 441 panic("aio_free_entry: process queue size <= 0"); 442 if (num_queue_count <= 0) 443 panic("aio_free_entry: system wide queue size <= 0"); 444 445 if (lj) { 446 lj->lioj_queue_count--; 447 if (aiocbe->jobflags & AIOCBLIST_DONE) 448 lj->lioj_queue_finished_count--; 449 } 450 ki->kaio_queue_count--; 451 if (aiocbe->jobflags & AIOCBLIST_DONE) 452 ki->kaio_queue_finished_count--; 453 num_queue_count--; 454 } else { 455 if (lj) { 456 lj->lioj_buffer_count--; 457 if (aiocbe->jobflags & AIOCBLIST_DONE) 458 lj->lioj_buffer_finished_count--; 459 } 460 if (aiocbe->jobflags & AIOCBLIST_DONE) 461 ki->kaio_buffer_finished_count--; 462 ki->kaio_buffer_count--; 463 num_buf_aio--; 464 } 465 466 /* aiocbe is going away, we need to destroy any knotes */ 467 /* XXXKSE Note the thread here is used to eventually find the 468 * owning process again, but it is also used to do a fo_close 469 * and that requires the thread. (but does it require the 470 * OWNING thread? (or maybe the running thread?) 471 * There is a semantic problem here... 
472 */ 473 knote_remove(FIRST_THREAD_IN_PROC(p), &aiocbe->klist); /* XXXKSE */ 474 475 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN) 476 && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) { 477 ki->kaio_flags &= ~KAIO_WAKEUP; 478 wakeup(p); 479 } 480 481 if (aiocbe->jobstate == JOBST_JOBQBUF) { 482 if ((error = aio_fphysio(aiocbe)) != 0) 483 return error; 484 if (aiocbe->jobstate != JOBST_JOBBFINISHED) 485 panic("aio_free_entry: invalid physio finish-up state"); 486 s = splbio(); 487 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist); 488 splx(s); 489 } else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) { 490 s = splnet(); 491 TAILQ_REMOVE(&aio_jobs, aiocbe, list); 492 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist); 493 splx(s); 494 } else if (aiocbe->jobstate == JOBST_JOBFINISHED) 495 TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist); 496 else if (aiocbe->jobstate == JOBST_JOBBFINISHED) { 497 s = splbio(); 498 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist); 499 splx(s); 500 if (aiocbe->bp) { 501 vunmapbuf(aiocbe->bp); 502 relpbuf(aiocbe->bp, NULL); 503 aiocbe->bp = NULL; 504 } 505 } 506 if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) { 507 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 508 uma_zfree(aiolio_zone, lj); 509 } 510 aiocbe->jobstate = JOBST_NULL; 511 untimeout(process_signal, aiocbe, aiocbe->timeouthandle); 512 uma_zfree(aiocb_zone, aiocbe); 513 return 0; 514 } 515 516 /* 517 * Rundown the jobs for a given process. 518 */ 519 static void 520 aio_proc_rundown(struct proc *p) 521 { 522 int s; 523 struct kaioinfo *ki; 524 struct aio_liojob *lj, *ljn; 525 struct aiocblist *aiocbe, *aiocbn; 526 struct file *fp; 527 struct filedesc *fdp; 528 struct socket *so; 529 530 ki = p->p_aioinfo; 531 if (ki == NULL) 532 return; 533 534 ki->kaio_flags |= LIOJ_SIGNAL_POSTED; 535 while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count > 536 ki->kaio_buffer_finished_count)) { 537 ki->kaio_flags |= KAIO_RUNDOWN; 538 if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout)) 539 break; 540 } 541 542 /* 543 * Move any aio ops that are waiting on socket I/O to the normal job 544 * queues so they are cleaned up with any others. 545 */ 546 fdp = p->p_fd; 547 548 s = splnet(); 549 for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe = 550 aiocbn) { 551 aiocbn = TAILQ_NEXT(aiocbe, plist); 552 fp = fdp->fd_ofiles[aiocbe->uaiocb.aio_fildes]; 553 554 /* 555 * Under some circumstances, the aio_fildes and the file 556 * structure don't match. This would leave aiocbe's in the 557 * TAILQ associated with the socket and cause a panic later. 558 * 559 * Detect and fix. 
560 */ 561 if ((fp == NULL) || (fp != aiocbe->fd_file)) 562 fp = aiocbe->fd_file; 563 if (fp) { 564 so = (struct socket *)fp->f_data; 565 TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list); 566 if (TAILQ_EMPTY(&so->so_aiojobq)) { 567 so->so_snd.sb_flags &= ~SB_AIO; 568 so->so_rcv.sb_flags &= ~SB_AIO; 569 } 570 } 571 TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist); 572 TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list); 573 TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist); 574 } 575 splx(s); 576 577 restart1: 578 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) { 579 aiocbn = TAILQ_NEXT(aiocbe, plist); 580 if (aio_free_entry(aiocbe)) 581 goto restart1; 582 } 583 584 restart2: 585 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe = 586 aiocbn) { 587 aiocbn = TAILQ_NEXT(aiocbe, plist); 588 if (aio_free_entry(aiocbe)) 589 goto restart2; 590 } 591 592 /* 593 * Note the use of lots of splbio here, trying to avoid splbio for long chains 594 * of I/O. Probably unnecessary. 595 */ 596 restart3: 597 s = splbio(); 598 while (TAILQ_FIRST(&ki->kaio_bufqueue)) { 599 ki->kaio_flags |= KAIO_WAKEUP; 600 tsleep(p, PRIBIO, "aioprn", 0); 601 splx(s); 602 goto restart3; 603 } 604 splx(s); 605 606 restart4: 607 s = splbio(); 608 for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) { 609 aiocbn = TAILQ_NEXT(aiocbe, plist); 610 if (aio_free_entry(aiocbe)) { 611 splx(s); 612 goto restart4; 613 } 614 } 615 splx(s); 616 617 /* 618 * If we've slept, jobs might have moved from one queue to another. 619 * Retry rundown if we didn't manage to empty the queues. 620 */ 621 if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL || 622 TAILQ_FIRST(&ki->kaio_jobqueue) != NULL || 623 TAILQ_FIRST(&ki->kaio_bufqueue) != NULL || 624 TAILQ_FIRST(&ki->kaio_bufdone) != NULL) 625 goto restart1; 626 627 for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) { 628 ljn = TAILQ_NEXT(lj, lioj_list); 629 if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 630 0)) { 631 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 632 uma_zfree(aiolio_zone, lj); 633 } else { 634 #ifdef DIAGNOSTIC 635 printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, " 636 "QF:%d\n", lj->lioj_buffer_count, 637 lj->lioj_buffer_finished_count, 638 lj->lioj_queue_count, 639 lj->lioj_queue_finished_count); 640 #endif 641 } 642 } 643 644 uma_zfree(kaio_zone, ki); 645 p->p_aioinfo = NULL; 646 } 647 648 /* 649 * Select a job to run (called by an AIO daemon). 650 */ 651 static struct aiocblist * 652 aio_selectjob(struct aiothreadlist *aiop) 653 { 654 int s; 655 struct aiocblist *aiocbe; 656 struct kaioinfo *ki; 657 struct proc *userp; 658 659 s = splnet(); 660 for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe = 661 TAILQ_NEXT(aiocbe, list)) { 662 userp = aiocbe->userproc; 663 ki = userp->p_aioinfo; 664 665 if (ki->kaio_active_count < ki->kaio_maxactive_count) { 666 TAILQ_REMOVE(&aio_jobs, aiocbe, list); 667 splx(s); 668 return aiocbe; 669 } 670 } 671 splx(s); 672 673 return NULL; 674 } 675 676 /* 677 * The AIO processing activity. This is the code that does the I/O request for 678 * the non-physio version of the operations. The normal vn operations are used, 679 * and this code should work in all instances for every type of file, including 680 * pipes, sockets, fifos, and regular files. 
681 */ 682 static void 683 aio_process(struct aiocblist *aiocbe) 684 { 685 struct filedesc *fdp; 686 struct thread *td; 687 struct proc *userp; 688 struct proc *mycp; 689 struct aiocb *cb; 690 struct file *fp; 691 struct uio auio; 692 struct iovec aiov; 693 unsigned int fd; 694 int cnt; 695 int error; 696 off_t offset; 697 int oublock_st, oublock_end; 698 int inblock_st, inblock_end; 699 700 userp = aiocbe->userproc; 701 td = curthread; 702 mycp = td->td_proc; 703 cb = &aiocbe->uaiocb; 704 705 fdp = mycp->p_fd; 706 fd = cb->aio_fildes; 707 fp = fdp->fd_ofiles[fd]; 708 709 if ((fp == NULL) || (fp != aiocbe->fd_file)) { 710 cb->_aiocb_private.error = EBADF; 711 cb->_aiocb_private.status = -1; 712 return; 713 } 714 715 aiov.iov_base = (void *)(uintptr_t)cb->aio_buf; 716 aiov.iov_len = cb->aio_nbytes; 717 718 auio.uio_iov = &aiov; 719 auio.uio_iovcnt = 1; 720 auio.uio_offset = offset = cb->aio_offset; 721 auio.uio_resid = cb->aio_nbytes; 722 cnt = cb->aio_nbytes; 723 auio.uio_segflg = UIO_USERSPACE; 724 auio.uio_td = td; 725 726 inblock_st = mycp->p_stats->p_ru.ru_inblock; 727 oublock_st = mycp->p_stats->p_ru.ru_oublock; 728 /* 729 * Temporarily bump the ref count while reading to avoid the 730 * descriptor being ripped out from under us. 731 */ 732 fhold(fp); 733 if (cb->aio_lio_opcode == LIO_READ) { 734 auio.uio_rw = UIO_READ; 735 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td); 736 } else { 737 auio.uio_rw = UIO_WRITE; 738 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td); 739 } 740 fdrop(fp, td); 741 inblock_end = mycp->p_stats->p_ru.ru_inblock; 742 oublock_end = mycp->p_stats->p_ru.ru_oublock; 743 744 aiocbe->inputcharge = inblock_end - inblock_st; 745 aiocbe->outputcharge = oublock_end - oublock_st; 746 747 if ((error) && (auio.uio_resid != cnt)) { 748 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) 749 error = 0; 750 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) { 751 PROC_LOCK(userp); 752 psignal(userp, SIGPIPE); 753 PROC_UNLOCK(userp); 754 } 755 } 756 757 cnt -= auio.uio_resid; 758 cb->_aiocb_private.error = error; 759 cb->_aiocb_private.status = cnt; 760 } 761 762 /* 763 * The AIO daemon, most of the actual work is done in aio_process, 764 * but the setup (and address space mgmt) is done in this routine. 765 */ 766 static void 767 aio_daemon(void *uproc) 768 { 769 int s; 770 struct aio_liojob *lj; 771 struct aiocb *cb; 772 struct aiocblist *aiocbe; 773 struct aiothreadlist *aiop; 774 struct kaioinfo *ki; 775 struct proc *curcp, *mycp, *userp; 776 struct vmspace *myvm, *tmpvm; 777 struct thread *td = curthread; 778 struct pgrp *newpgrp; 779 struct session *newsess; 780 781 mtx_lock(&Giant); 782 /* 783 * Local copies of curproc (cp) and vmspace (myvm) 784 */ 785 mycp = td->td_proc; 786 myvm = mycp->p_vmspace; 787 788 if (mycp->p_textvp) { 789 vrele(mycp->p_textvp); 790 mycp->p_textvp = NULL; 791 } 792 793 /* 794 * Allocate and ready the aio control info. There is one aiop structure 795 * per daemon. 796 */ 797 aiop = uma_zalloc(aiop_zone, M_WAITOK); 798 aiop->aiothread = td; 799 aiop->aiothreadflags |= AIOP_FREE; 800 801 s = splnet(); 802 803 /* 804 * Place thread (lightweight process) onto the AIO free thread list. 805 */ 806 if (TAILQ_EMPTY(&aio_freeproc)) 807 wakeup(&aio_freeproc); 808 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 809 810 splx(s); 811 812 /* 813 * Get rid of our current filedescriptors. AIOD's don't need any 814 * filedescriptors, except as temporarily inherited from the client. 
815 */ 816 fdfree(td); 817 mycp->p_fd = NULL; 818 819 /* The daemon resides in its own pgrp. */ 820 MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP, 821 M_WAITOK | M_ZERO); 822 MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION, 823 M_WAITOK | M_ZERO); 824 825 PGRPSESS_XLOCK(); 826 enterpgrp(mycp, mycp->p_pid, newpgrp, newsess); 827 PGRPSESS_XUNLOCK(); 828 829 /* Mark special process type. */ 830 mycp->p_flag |= P_SYSTEM; 831 832 /* 833 * Wakeup parent process. (Parent sleeps to keep from blasting away 834 * and creating too many daemons.) 835 */ 836 wakeup(mycp); 837 838 for (;;) { 839 /* 840 * curcp is the current daemon process context. 841 * userp is the current user process context. 842 */ 843 curcp = mycp; 844 845 /* 846 * Take daemon off of free queue 847 */ 848 if (aiop->aiothreadflags & AIOP_FREE) { 849 s = splnet(); 850 TAILQ_REMOVE(&aio_freeproc, aiop, list); 851 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 852 aiop->aiothreadflags &= ~AIOP_FREE; 853 splx(s); 854 } 855 aiop->aiothreadflags &= ~AIOP_SCHED; 856 857 /* 858 * Check for jobs. 859 */ 860 while ((aiocbe = aio_selectjob(aiop)) != NULL) { 861 cb = &aiocbe->uaiocb; 862 userp = aiocbe->userproc; 863 864 aiocbe->jobstate = JOBST_JOBRUNNING; 865 866 /* 867 * Connect to process address space for user program. 868 */ 869 if (userp != curcp) { 870 /* 871 * Save the current address space that we are 872 * connected to. 873 */ 874 tmpvm = mycp->p_vmspace; 875 876 /* 877 * Point to the new user address space, and 878 * refer to it. 879 */ 880 mycp->p_vmspace = userp->p_vmspace; 881 mycp->p_vmspace->vm_refcnt++; 882 883 /* Activate the new mapping. */ 884 pmap_activate(FIRST_THREAD_IN_PROC(mycp)); 885 886 /* 887 * If the old address space wasn't the daemons 888 * own address space, then we need to remove the 889 * daemon's reference from the other process 890 * that it was acting on behalf of. 891 */ 892 if (tmpvm != myvm) { 893 vmspace_free(tmpvm); 894 } 895 896 /* 897 * Disassociate from previous clients file 898 * descriptors, and associate to the new clients 899 * descriptors. Note that the daemon doesn't 900 * need to worry about its orginal descriptors, 901 * because they were originally freed. 902 */ 903 if (mycp->p_fd) 904 fdfree(td); 905 mycp->p_fd = fdshare(userp); 906 curcp = userp; 907 } 908 909 ki = userp->p_aioinfo; 910 lj = aiocbe->lio; 911 912 /* Account for currently active jobs. */ 913 ki->kaio_active_count++; 914 915 /* Do the I/O function. */ 916 aiocbe->jobaiothread = aiop; 917 aio_process(aiocbe); 918 919 /* Decrement the active job count. */ 920 ki->kaio_active_count--; 921 922 /* 923 * Increment the completion count for wakeup/signal 924 * comparisons. 
925 */ 926 aiocbe->jobflags |= AIOCBLIST_DONE; 927 ki->kaio_queue_finished_count++; 928 if (lj) 929 lj->lioj_queue_finished_count++; 930 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags 931 & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) { 932 ki->kaio_flags &= ~KAIO_WAKEUP; 933 wakeup(userp); 934 } 935 936 s = splbio(); 937 if (lj && (lj->lioj_flags & 938 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) { 939 if ((lj->lioj_queue_finished_count == 940 lj->lioj_queue_count) && 941 (lj->lioj_buffer_finished_count == 942 lj->lioj_buffer_count)) { 943 PROC_LOCK(userp); 944 psignal(userp, 945 lj->lioj_signal.sigev_signo); 946 PROC_UNLOCK(userp); 947 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 948 } 949 } 950 splx(s); 951 952 aiocbe->jobstate = JOBST_JOBFINISHED; 953 954 /* 955 * If the I/O request should be automatically rundown, 956 * do the needed cleanup. Otherwise, place the queue 957 * entry for the just finished I/O request into the done 958 * queue for the associated client. 959 */ 960 s = splnet(); 961 if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) { 962 aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE; 963 uma_zfree(aiocb_zone, aiocbe); 964 } else { 965 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist); 966 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, 967 plist); 968 } 969 splx(s); 970 KNOTE(&aiocbe->klist, 0); 971 972 if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) { 973 wakeup(aiocbe); 974 aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN; 975 } 976 977 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) { 978 PROC_LOCK(userp); 979 psignal(userp, cb->aio_sigevent.sigev_signo); 980 PROC_UNLOCK(userp); 981 } 982 } 983 984 /* 985 * Disconnect from user address space. 986 */ 987 if (curcp != mycp) { 988 /* Get the user address space to disconnect from. */ 989 tmpvm = mycp->p_vmspace; 990 991 /* Get original address space for daemon. */ 992 mycp->p_vmspace = myvm; 993 994 /* Activate the daemon's address space. */ 995 pmap_activate(FIRST_THREAD_IN_PROC(mycp)); 996 #ifdef DIAGNOSTIC 997 if (tmpvm == myvm) { 998 printf("AIOD: vmspace problem -- %d\n", 999 mycp->p_pid); 1000 } 1001 #endif 1002 /* Remove our vmspace reference. */ 1003 vmspace_free(tmpvm); 1004 1005 /* 1006 * Disassociate from the user process's file 1007 * descriptors. 1008 */ 1009 if (mycp->p_fd) 1010 fdfree(td); 1011 mycp->p_fd = NULL; 1012 curcp = mycp; 1013 } 1014 1015 /* 1016 * If we are the first to be put onto the free queue, wakeup 1017 * anyone waiting for a daemon. 1018 */ 1019 s = splnet(); 1020 TAILQ_REMOVE(&aio_activeproc, aiop, list); 1021 if (TAILQ_EMPTY(&aio_freeproc)) 1022 wakeup(&aio_freeproc); 1023 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 1024 aiop->aiothreadflags |= AIOP_FREE; 1025 splx(s); 1026 1027 /* 1028 * If daemon is inactive for a long time, allow it to exit, 1029 * thereby freeing resources. 1030 */ 1031 if ((aiop->aiothreadflags & AIOP_SCHED) == 0 && 1032 tsleep(aiop->aiothread, PRIBIO, "aiordy", aiod_lifetime)) { 1033 s = splnet(); 1034 if (TAILQ_EMPTY(&aio_jobs)) { 1035 if ((aiop->aiothreadflags & AIOP_FREE) && 1036 (num_aio_procs > target_aio_procs)) { 1037 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1038 splx(s); 1039 uma_zfree(aiop_zone, aiop); 1040 num_aio_procs--; 1041 #ifdef DIAGNOSTIC 1042 if (mycp->p_vmspace->vm_refcnt <= 1) { 1043 printf("AIOD: bad vm refcnt for" 1044 " exiting daemon: %d\n", 1045 mycp->p_vmspace->vm_refcnt); 1046 } 1047 #endif 1048 kthread_exit(0); 1049 } 1050 } 1051 splx(s); 1052 } 1053 } 1054 } 1055 1056 /* 1057 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. 
The 1058 * AIO daemon modifies its environment itself. 1059 */ 1060 static int 1061 aio_newproc() 1062 { 1063 int error; 1064 struct proc *p; 1065 1066 error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, "aiod%d", 1067 num_aio_procs); 1068 if (error) 1069 return error; 1070 1071 /* 1072 * Wait until daemon is started, but continue on just in case to 1073 * handle error conditions. 1074 */ 1075 error = tsleep(p, PZERO, "aiosta", aiod_timeout); 1076 1077 num_aio_procs++; 1078 1079 return error; 1080 } 1081 1082 /* 1083 * Try the high-performance, low-overhead physio method for eligible 1084 * VCHR devices. This method doesn't use an aio helper thread, and 1085 * thus has very low overhead. 1086 * 1087 * Assumes that the caller, _aio_aqueue(), has incremented the file 1088 * structure's reference count, preventing its deallocation for the 1089 * duration of this call. 1090 */ 1091 static int 1092 aio_qphysio(struct proc *p, struct aiocblist *aiocbe) 1093 { 1094 int error; 1095 struct aiocb *cb; 1096 struct file *fp; 1097 struct buf *bp; 1098 struct vnode *vp; 1099 struct kaioinfo *ki; 1100 struct aio_liojob *lj; 1101 int s; 1102 int notify; 1103 1104 cb = &aiocbe->uaiocb; 1105 fp = aiocbe->fd_file; 1106 1107 if (fp->f_type != DTYPE_VNODE) 1108 return (-1); 1109 1110 vp = (struct vnode *)fp->f_data; 1111 1112 /* 1113 * If its not a disk, we don't want to return a positive error. 1114 * It causes the aio code to not fall through to try the thread 1115 * way when you're talking to a regular file. 1116 */ 1117 if (!vn_isdisk(vp, &error)) { 1118 if (error == ENOTBLK) 1119 return (-1); 1120 else 1121 return (error); 1122 } 1123 1124 if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys) 1125 return (-1); 1126 1127 if (cb->aio_nbytes > 1128 MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK)) 1129 return (-1); 1130 1131 ki = p->p_aioinfo; 1132 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) 1133 return (-1); 1134 1135 ki->kaio_buffer_count++; 1136 1137 lj = aiocbe->lio; 1138 if (lj) 1139 lj->lioj_buffer_count++; 1140 1141 /* Create and build a buffer header for a transfer. */ 1142 bp = (struct buf *)getpbuf(NULL); 1143 BUF_KERNPROC(bp); 1144 1145 /* 1146 * Get a copy of the kva from the physical buffer. 1147 */ 1148 bp->b_caller1 = p; 1149 bp->b_dev = vp->v_rdev; 1150 error = bp->b_error = 0; 1151 1152 bp->b_bcount = cb->aio_nbytes; 1153 bp->b_bufsize = cb->aio_nbytes; 1154 bp->b_flags = B_PHYS; 1155 bp->b_iodone = aio_physwakeup; 1156 bp->b_saveaddr = bp->b_data; 1157 bp->b_data = (void *)(uintptr_t)cb->aio_buf; 1158 bp->b_blkno = btodb(cb->aio_offset); 1159 1160 if (cb->aio_lio_opcode == LIO_WRITE) { 1161 bp->b_iocmd = BIO_WRITE; 1162 if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) { 1163 error = EFAULT; 1164 goto doerror; 1165 } 1166 } else { 1167 bp->b_iocmd = BIO_READ; 1168 if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) { 1169 error = EFAULT; 1170 goto doerror; 1171 } 1172 } 1173 1174 /* Bring buffer into kernel space. */ 1175 vmapbuf(bp); 1176 1177 s = splbio(); 1178 aiocbe->bp = bp; 1179 bp->b_spc = (void *)aiocbe; 1180 TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list); 1181 TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist); 1182 aiocbe->jobstate = JOBST_JOBQBUF; 1183 cb->_aiocb_private.status = cb->aio_nbytes; 1184 num_buf_aio++; 1185 bp->b_error = 0; 1186 1187 splx(s); 1188 1189 /* Perform transfer. 
*/ 1190 DEV_STRATEGY(bp, 0); 1191 1192 notify = 0; 1193 s = splbio(); 1194 1195 /* 1196 * If we had an error invoking the request, or an error in processing 1197 * the request before we have returned, we process it as an error in 1198 * transfer. Note that such an I/O error is not indicated immediately, 1199 * but is returned using the aio_error mechanism. In this case, 1200 * aio_suspend will return immediately. 1201 */ 1202 if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) { 1203 struct aiocb *job = aiocbe->uuaiocb; 1204 1205 aiocbe->uaiocb._aiocb_private.status = 0; 1206 suword(&job->_aiocb_private.status, 0); 1207 aiocbe->uaiocb._aiocb_private.error = bp->b_error; 1208 suword(&job->_aiocb_private.error, bp->b_error); 1209 1210 ki->kaio_buffer_finished_count++; 1211 1212 if (aiocbe->jobstate != JOBST_JOBBFINISHED) { 1213 aiocbe->jobstate = JOBST_JOBBFINISHED; 1214 aiocbe->jobflags |= AIOCBLIST_DONE; 1215 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list); 1216 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist); 1217 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist); 1218 notify = 1; 1219 } 1220 } 1221 splx(s); 1222 if (notify) 1223 KNOTE(&aiocbe->klist, 0); 1224 return 0; 1225 1226 doerror: 1227 ki->kaio_buffer_count--; 1228 if (lj) 1229 lj->lioj_buffer_count--; 1230 aiocbe->bp = NULL; 1231 relpbuf(bp, NULL); 1232 return error; 1233 } 1234 1235 /* 1236 * This waits/tests physio completion. 1237 */ 1238 static int 1239 aio_fphysio(struct aiocblist *iocb) 1240 { 1241 int s; 1242 struct buf *bp; 1243 int error; 1244 1245 bp = iocb->bp; 1246 1247 s = splbio(); 1248 while ((bp->b_flags & B_DONE) == 0) { 1249 if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) { 1250 if ((bp->b_flags & B_DONE) == 0) { 1251 splx(s); 1252 return EINPROGRESS; 1253 } else 1254 break; 1255 } 1256 } 1257 splx(s); 1258 1259 /* Release mapping into kernel space. */ 1260 vunmapbuf(bp); 1261 iocb->bp = 0; 1262 1263 error = 0; 1264 1265 /* Check for an error. */ 1266 if (bp->b_ioflags & BIO_ERROR) 1267 error = bp->b_error; 1268 1269 relpbuf(bp, NULL); 1270 return (error); 1271 } 1272 1273 /* 1274 * Wake up aio requests that may be serviceable now. 1275 */ 1276 static void 1277 aio_swake_cb(struct socket *so, struct sockbuf *sb) 1278 { 1279 struct aiocblist *cb,*cbn; 1280 struct proc *p; 1281 struct kaioinfo *ki = NULL; 1282 int opcode, wakecount = 0; 1283 struct aiothreadlist *aiop; 1284 1285 if (sb == &so->so_snd) { 1286 opcode = LIO_WRITE; 1287 so->so_snd.sb_flags &= ~SB_AIO; 1288 } else { 1289 opcode = LIO_READ; 1290 so->so_rcv.sb_flags &= ~SB_AIO; 1291 } 1292 1293 for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) { 1294 cbn = TAILQ_NEXT(cb, list); 1295 if (opcode == cb->uaiocb.aio_lio_opcode) { 1296 p = cb->userproc; 1297 ki = p->p_aioinfo; 1298 TAILQ_REMOVE(&so->so_aiojobq, cb, list); 1299 TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist); 1300 TAILQ_INSERT_TAIL(&aio_jobs, cb, list); 1301 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist); 1302 wakecount++; 1303 if (cb->jobstate != JOBST_JOBQGLOBAL) 1304 panic("invalid queue value"); 1305 } 1306 } 1307 1308 while (wakecount--) { 1309 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) { 1310 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1311 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 1312 aiop->aiothreadflags &= ~AIOP_FREE; 1313 wakeup(aiop->aiothread); 1314 } 1315 } 1316 } 1317 1318 /* 1319 * Queue a new AIO request. Choosing either the threaded or direct physio VCHR 1320 * technique is done in this code. 
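 *
 * Loosely illustrated from userland (not part of this file): an aio_read()
 * against a character disk device can take the direct physio path when the
 * transfer is a multiple of the device's physical block size, no larger
 * than MAXPHYS, and within the per-process buffer quota; anything else
 * falls through to the daemon path.  The device name and sizes below are
 * arbitrary examples.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	int fd = open("/dev/da0", O_RDONLY);
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_nbytes = 65536;
 *	cb.aio_buf = malloc(cb.aio_nbytes);
 *	cb.aio_offset = 0;
 *	aio_read(&cb);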
1321 */ 1322 static int 1323 _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type) 1324 { 1325 struct proc *p = td->td_proc; 1326 struct filedesc *fdp; 1327 struct file *fp; 1328 unsigned int fd; 1329 struct socket *so; 1330 int s; 1331 int error; 1332 int opcode; 1333 struct aiocblist *aiocbe; 1334 struct aiothreadlist *aiop; 1335 struct kaioinfo *ki; 1336 struct kevent kev; 1337 struct kqueue *kq; 1338 struct file *kq_fp; 1339 1340 aiocbe = uma_zalloc(aiocb_zone, M_WAITOK); 1341 aiocbe->inputcharge = 0; 1342 aiocbe->outputcharge = 0; 1343 callout_handle_init(&aiocbe->timeouthandle); 1344 SLIST_INIT(&aiocbe->klist); 1345 1346 suword(&job->_aiocb_private.status, -1); 1347 suword(&job->_aiocb_private.error, 0); 1348 suword(&job->_aiocb_private.kernelinfo, -1); 1349 1350 error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb)); 1351 if (error) { 1352 suword(&job->_aiocb_private.error, error); 1353 uma_zfree(aiocb_zone, aiocbe); 1354 return error; 1355 } 1356 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL && 1357 !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) { 1358 uma_zfree(aiocb_zone, aiocbe); 1359 return EINVAL; 1360 } 1361 1362 /* Save userspace address of the job info. */ 1363 aiocbe->uuaiocb = job; 1364 1365 /* Get the opcode. */ 1366 if (type != LIO_NOP) 1367 aiocbe->uaiocb.aio_lio_opcode = type; 1368 opcode = aiocbe->uaiocb.aio_lio_opcode; 1369 1370 /* Get the fd info for process. */ 1371 fdp = p->p_fd; 1372 1373 /* 1374 * Range check file descriptor. 1375 */ 1376 fd = aiocbe->uaiocb.aio_fildes; 1377 if (fd >= fdp->fd_nfiles) { 1378 uma_zfree(aiocb_zone, aiocbe); 1379 if (type == 0) 1380 suword(&job->_aiocb_private.error, EBADF); 1381 return EBADF; 1382 } 1383 1384 fp = aiocbe->fd_file = fdp->fd_ofiles[fd]; 1385 if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 1386 0))) { 1387 uma_zfree(aiocb_zone, aiocbe); 1388 if (type == 0) 1389 suword(&job->_aiocb_private.error, EBADF); 1390 return EBADF; 1391 } 1392 1393 if (aiocbe->uaiocb.aio_offset == -1LL) { 1394 uma_zfree(aiocb_zone, aiocbe); 1395 if (type == 0) 1396 suword(&job->_aiocb_private.error, EINVAL); 1397 return EINVAL; 1398 } 1399 1400 error = suword(&job->_aiocb_private.kernelinfo, jobrefid); 1401 if (error) { 1402 uma_zfree(aiocb_zone, aiocbe); 1403 if (type == 0) 1404 suword(&job->_aiocb_private.error, EINVAL); 1405 return error; 1406 } 1407 1408 aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid; 1409 if (jobrefid == LONG_MAX) 1410 jobrefid = 1; 1411 else 1412 jobrefid++; 1413 1414 if (opcode == LIO_NOP) { 1415 uma_zfree(aiocb_zone, aiocbe); 1416 if (type == 0) { 1417 suword(&job->_aiocb_private.error, 0); 1418 suword(&job->_aiocb_private.status, 0); 1419 suword(&job->_aiocb_private.kernelinfo, 0); 1420 } 1421 return 0; 1422 } 1423 1424 if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) { 1425 uma_zfree(aiocb_zone, aiocbe); 1426 if (type == 0) { 1427 suword(&job->_aiocb_private.status, 0); 1428 suword(&job->_aiocb_private.error, EINVAL); 1429 } 1430 return EINVAL; 1431 } 1432 1433 fhold(fp); 1434 1435 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) { 1436 kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue; 1437 kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr; 1438 } 1439 else { 1440 /* 1441 * This method for requesting kevent-based notification won't 1442 * work on the alpha, since we're passing in a pointer 1443 * via aio_lio_opcode, which is an int. 
		 * Use the SIGEV_KEVENT-based method instead.
		 */
		struct kevent *kevp;

		kevp = (struct kevent *)(uintptr_t)job->aio_lio_opcode;
		if (kevp == NULL)
			goto no_kqueue;

		error = copyin(kevp, &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	if ((u_int)kev.ident >= fdp->fd_nfiles ||
	    (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
	    (kq_fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto aqueue_fail;
	}
	kq = (struct kqueue *)kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	error = kqueue_register(kq, &kev, td);
aqueue_fail:
	if (error) {
		uma_zfree(aiocb_zone, aiocbe);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for I/O, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when sbnotify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		s = splnet();
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.sb_flags |= SB_AIO;
			else
				so->so_snd.sb_flags |= SB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			splx(s);
			error = 0;
			goto done;
		}
		splx(s);
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	s = splnet();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	splx(s);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota,
	 * then start one.  Otherwise, depend on the subsequent I/O completions
	 * to pick up this job.  If we don't successfully create the new
	 * process (thread) due to resource issues, we return an error for now
	 * (EAGAIN), which is likely not the correct thing to do.
1544 */ 1545 s = splnet(); 1546 retryproc: 1547 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1548 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1549 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 1550 aiop->aiothreadflags &= ~AIOP_FREE; 1551 wakeup(aiop->aiothread); 1552 } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) && 1553 ((ki->kaio_active_count + num_aio_resv_start) < 1554 ki->kaio_maxactive_count)) { 1555 num_aio_resv_start++; 1556 if ((error = aio_newproc()) == 0) { 1557 num_aio_resv_start--; 1558 goto retryproc; 1559 } 1560 num_aio_resv_start--; 1561 } 1562 splx(s); 1563 done: 1564 fdrop(fp, td); 1565 return error; 1566 } 1567 1568 /* 1569 * This routine queues an AIO request, checking for quotas. 1570 */ 1571 static int 1572 aio_aqueue(struct thread *td, struct aiocb *job, int type) 1573 { 1574 struct proc *p = td->td_proc; 1575 struct kaioinfo *ki; 1576 1577 if (p->p_aioinfo == NULL) 1578 aio_init_aioinfo(p); 1579 1580 if (num_queue_count >= max_queue_count) 1581 return EAGAIN; 1582 1583 ki = p->p_aioinfo; 1584 if (ki->kaio_queue_count >= ki->kaio_qallowed_count) 1585 return EAGAIN; 1586 1587 return _aio_aqueue(td, job, NULL, type); 1588 } 1589 1590 /* 1591 * Support the aio_return system call, as a side-effect, kernel resources are 1592 * released. 1593 */ 1594 int 1595 aio_return(struct thread *td, struct aio_return_args *uap) 1596 { 1597 struct proc *p = td->td_proc; 1598 int s; 1599 int jobref; 1600 struct aiocblist *cb, *ncb; 1601 struct aiocb *ujob; 1602 struct kaioinfo *ki; 1603 1604 ki = p->p_aioinfo; 1605 if (ki == NULL) 1606 return EINVAL; 1607 1608 ujob = uap->aiocbp; 1609 1610 jobref = fuword(&ujob->_aiocb_private.kernelinfo); 1611 if (jobref == -1 || jobref == 0) 1612 return EINVAL; 1613 1614 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) { 1615 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == 1616 jobref) { 1617 if (ujob == cb->uuaiocb) { 1618 td->td_retval[0] = 1619 cb->uaiocb._aiocb_private.status; 1620 } else 1621 td->td_retval[0] = EFAULT; 1622 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 1623 p->p_stats->p_ru.ru_oublock += 1624 cb->outputcharge; 1625 cb->outputcharge = 0; 1626 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 1627 p->p_stats->p_ru.ru_inblock += cb->inputcharge; 1628 cb->inputcharge = 0; 1629 } 1630 aio_free_entry(cb); 1631 return 0; 1632 } 1633 } 1634 s = splbio(); 1635 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) { 1636 ncb = TAILQ_NEXT(cb, plist); 1637 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) 1638 == jobref) { 1639 splx(s); 1640 if (ujob == cb->uuaiocb) { 1641 td->td_retval[0] = 1642 cb->uaiocb._aiocb_private.status; 1643 } else 1644 td->td_retval[0] = EFAULT; 1645 aio_free_entry(cb); 1646 return 0; 1647 } 1648 } 1649 splx(s); 1650 1651 return (EINVAL); 1652 } 1653 1654 /* 1655 * Allow a process to wakeup when any of the I/O requests are completed. 1656 */ 1657 int 1658 aio_suspend(struct thread *td, struct aio_suspend_args *uap) 1659 { 1660 struct proc *p = td->td_proc; 1661 struct timeval atv; 1662 struct timespec ts; 1663 struct aiocb *const *cbptr, *cbp; 1664 struct kaioinfo *ki; 1665 struct aiocblist *cb; 1666 int i; 1667 int njoblist; 1668 int error, s, timo; 1669 int *ijoblist; 1670 struct aiocb **ujoblist; 1671 1672 if (uap->nent > AIO_LISTIO_MAX) 1673 return EINVAL; 1674 1675 timo = 0; 1676 if (uap->timeout) { 1677 /* Get timespec struct. 
*/ 1678 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) 1679 return error; 1680 1681 if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000) 1682 return (EINVAL); 1683 1684 TIMESPEC_TO_TIMEVAL(&atv, &ts); 1685 if (itimerfix(&atv)) 1686 return (EINVAL); 1687 timo = tvtohz(&atv); 1688 } 1689 1690 ki = p->p_aioinfo; 1691 if (ki == NULL) 1692 return EAGAIN; 1693 1694 njoblist = 0; 1695 ijoblist = uma_zalloc(aiol_zone, M_WAITOK); 1696 ujoblist = uma_zalloc(aiol_zone, M_WAITOK); 1697 cbptr = uap->aiocbp; 1698 1699 for (i = 0; i < uap->nent; i++) { 1700 cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 1701 if (cbp == 0) 1702 continue; 1703 ujoblist[njoblist] = cbp; 1704 ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo); 1705 njoblist++; 1706 } 1707 1708 if (njoblist == 0) { 1709 uma_zfree(aiol_zone, ijoblist); 1710 uma_zfree(aiol_zone, ujoblist); 1711 return 0; 1712 } 1713 1714 error = 0; 1715 for (;;) { 1716 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) { 1717 for (i = 0; i < njoblist; i++) { 1718 if (((intptr_t) 1719 cb->uaiocb._aiocb_private.kernelinfo) == 1720 ijoblist[i]) { 1721 if (ujoblist[i] != cb->uuaiocb) 1722 error = EINVAL; 1723 uma_zfree(aiol_zone, ijoblist); 1724 uma_zfree(aiol_zone, ujoblist); 1725 return error; 1726 } 1727 } 1728 } 1729 1730 s = splbio(); 1731 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = 1732 TAILQ_NEXT(cb, plist)) { 1733 for (i = 0; i < njoblist; i++) { 1734 if (((intptr_t) 1735 cb->uaiocb._aiocb_private.kernelinfo) == 1736 ijoblist[i]) { 1737 splx(s); 1738 if (ujoblist[i] != cb->uuaiocb) 1739 error = EINVAL; 1740 uma_zfree(aiol_zone, ijoblist); 1741 uma_zfree(aiol_zone, ujoblist); 1742 return error; 1743 } 1744 } 1745 } 1746 1747 ki->kaio_flags |= KAIO_WAKEUP; 1748 error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo); 1749 splx(s); 1750 1751 if (error == ERESTART || error == EINTR) { 1752 uma_zfree(aiol_zone, ijoblist); 1753 uma_zfree(aiol_zone, ujoblist); 1754 return EINTR; 1755 } else if (error == EWOULDBLOCK) { 1756 uma_zfree(aiol_zone, ijoblist); 1757 uma_zfree(aiol_zone, ujoblist); 1758 return EAGAIN; 1759 } 1760 } 1761 1762 /* NOTREACHED */ 1763 return EINVAL; 1764 } 1765 1766 /* 1767 * aio_cancel cancels any non-physio aio operations not currently in 1768 * progress. 
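 *
 * From userland (illustrative sketch, not part of this file), the return
 * value distinguishes the three outcomes handled below: AIO_CANCELED means
 * every matching request was cancelled, AIO_NOTCANCELED means at least one
 * was already in progress, and AIO_ALLDONE means everything had already
 * completed.  fd and cb are the descriptor and control block submitted
 * earlier.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	const struct aiocb *const list[1] = { &cb };
 *
 *	if (aio_cancel(fd, &cb) == AIO_NOTCANCELED) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			aio_suspend(list, 1, NULL);
 *	}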
1769 */ 1770 int 1771 aio_cancel(struct thread *td, struct aio_cancel_args *uap) 1772 { 1773 struct proc *p = td->td_proc; 1774 struct kaioinfo *ki; 1775 struct aiocblist *cbe, *cbn; 1776 struct file *fp; 1777 struct filedesc *fdp; 1778 struct socket *so; 1779 struct proc *po; 1780 int s,error; 1781 int cancelled=0; 1782 int notcancelled=0; 1783 struct vnode *vp; 1784 1785 fdp = p->p_fd; 1786 if ((u_int)uap->fd >= fdp->fd_nfiles || 1787 (fp = fdp->fd_ofiles[uap->fd]) == NULL) 1788 return (EBADF); 1789 1790 if (fp->f_type == DTYPE_VNODE) { 1791 vp = (struct vnode *)fp->f_data; 1792 1793 if (vn_isdisk(vp,&error)) { 1794 td->td_retval[0] = AIO_NOTCANCELED; 1795 return 0; 1796 } 1797 } else if (fp->f_type == DTYPE_SOCKET) { 1798 so = (struct socket *)fp->f_data; 1799 1800 s = splnet(); 1801 1802 for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) { 1803 cbn = TAILQ_NEXT(cbe, list); 1804 if ((uap->aiocbp == NULL) || 1805 (uap->aiocbp == cbe->uuaiocb) ) { 1806 po = cbe->userproc; 1807 ki = po->p_aioinfo; 1808 TAILQ_REMOVE(&so->so_aiojobq, cbe, list); 1809 TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist); 1810 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist); 1811 if (ki->kaio_flags & KAIO_WAKEUP) { 1812 wakeup(po); 1813 } 1814 cbe->jobstate = JOBST_JOBFINISHED; 1815 cbe->uaiocb._aiocb_private.status=-1; 1816 cbe->uaiocb._aiocb_private.error=ECANCELED; 1817 cancelled++; 1818 /* XXX cancelled, knote? */ 1819 if (cbe->uaiocb.aio_sigevent.sigev_notify == 1820 SIGEV_SIGNAL) { 1821 PROC_LOCK(cbe->userproc); 1822 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo); 1823 PROC_UNLOCK(cbe->userproc); 1824 } 1825 if (uap->aiocbp) 1826 break; 1827 } 1828 } 1829 splx(s); 1830 1831 if ((cancelled) && (uap->aiocbp)) { 1832 td->td_retval[0] = AIO_CANCELED; 1833 return 0; 1834 } 1835 } 1836 ki=p->p_aioinfo; 1837 s = splnet(); 1838 1839 for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) { 1840 cbn = TAILQ_NEXT(cbe, plist); 1841 1842 if ((uap->fd == cbe->uaiocb.aio_fildes) && 1843 ((uap->aiocbp == NULL ) || 1844 (uap->aiocbp == cbe->uuaiocb))) { 1845 1846 if (cbe->jobstate == JOBST_JOBQGLOBAL) { 1847 TAILQ_REMOVE(&aio_jobs, cbe, list); 1848 TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist); 1849 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, 1850 plist); 1851 cancelled++; 1852 ki->kaio_queue_finished_count++; 1853 cbe->jobstate = JOBST_JOBFINISHED; 1854 cbe->uaiocb._aiocb_private.status = -1; 1855 cbe->uaiocb._aiocb_private.error = ECANCELED; 1856 /* XXX cancelled, knote? */ 1857 if (cbe->uaiocb.aio_sigevent.sigev_notify == 1858 SIGEV_SIGNAL) { 1859 PROC_LOCK(cbe->userproc); 1860 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo); 1861 PROC_UNLOCK(cbe->userproc); 1862 } 1863 } else { 1864 notcancelled++; 1865 } 1866 } 1867 } 1868 splx(s); 1869 1870 if (notcancelled) { 1871 td->td_retval[0] = AIO_NOTCANCELED; 1872 return 0; 1873 } 1874 if (cancelled) { 1875 td->td_retval[0] = AIO_CANCELED; 1876 return 0; 1877 } 1878 td->td_retval[0] = AIO_ALLDONE; 1879 1880 return 0; 1881 } 1882 1883 /* 1884 * aio_error is implemented in the kernel level for compatibility purposes only. 1885 * For a user mode async implementation, it would be best to do it in a userland 1886 * subroutine. 
1887 */ 1888 int 1889 aio_error(struct thread *td, struct aio_error_args *uap) 1890 { 1891 struct proc *p = td->td_proc; 1892 int s; 1893 struct aiocblist *cb; 1894 struct kaioinfo *ki; 1895 int jobref; 1896 1897 ki = p->p_aioinfo; 1898 if (ki == NULL) 1899 return EINVAL; 1900 1901 jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo); 1902 if ((jobref == -1) || (jobref == 0)) 1903 return EINVAL; 1904 1905 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) { 1906 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1907 jobref) { 1908 td->td_retval[0] = cb->uaiocb._aiocb_private.error; 1909 return 0; 1910 } 1911 } 1912 1913 s = splnet(); 1914 1915 for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb, 1916 plist)) { 1917 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1918 jobref) { 1919 td->td_retval[0] = EINPROGRESS; 1920 splx(s); 1921 return 0; 1922 } 1923 } 1924 1925 for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb, 1926 plist)) { 1927 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1928 jobref) { 1929 td->td_retval[0] = EINPROGRESS; 1930 splx(s); 1931 return 0; 1932 } 1933 } 1934 splx(s); 1935 1936 s = splbio(); 1937 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb, 1938 plist)) { 1939 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1940 jobref) { 1941 td->td_retval[0] = cb->uaiocb._aiocb_private.error; 1942 splx(s); 1943 return 0; 1944 } 1945 } 1946 1947 for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb, 1948 plist)) { 1949 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1950 jobref) { 1951 td->td_retval[0] = EINPROGRESS; 1952 splx(s); 1953 return 0; 1954 } 1955 } 1956 splx(s); 1957 1958 #if (0) 1959 /* 1960 * Hack for lio. 1961 */ 1962 status = fuword(&uap->aiocbp->_aiocb_private.status); 1963 if (status == -1) 1964 return fuword(&uap->aiocbp->_aiocb_private.error); 1965 #endif 1966 return EINVAL; 1967 } 1968 1969 /* syscall - asynchronous read from a file (REALTIME) */ 1970 int 1971 aio_read(struct thread *td, struct aio_read_args *uap) 1972 { 1973 1974 return aio_aqueue(td, uap->aiocbp, LIO_READ); 1975 } 1976 1977 /* syscall - asynchronous write to a file (REALTIME) */ 1978 int 1979 aio_write(struct thread *td, struct aio_write_args *uap) 1980 { 1981 1982 return aio_aqueue(td, uap->aiocbp, LIO_WRITE); 1983 } 1984 1985 /* syscall - XXX undocumented */ 1986 int 1987 lio_listio(struct thread *td, struct lio_listio_args *uap) 1988 { 1989 struct proc *p = td->td_proc; 1990 int nent, nentqueued; 1991 struct aiocb *iocb, * const *cbptr; 1992 struct aiocblist *cb; 1993 struct kaioinfo *ki; 1994 struct aio_liojob *lj; 1995 int error, runningcode; 1996 int nerror; 1997 int i; 1998 int s; 1999 2000 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2001 return EINVAL; 2002 2003 nent = uap->nent; 2004 if (nent > AIO_LISTIO_MAX) 2005 return EINVAL; 2006 2007 if (p->p_aioinfo == NULL) 2008 aio_init_aioinfo(p); 2009 2010 if ((nent + num_queue_count) > max_queue_count) 2011 return EAGAIN; 2012 2013 ki = p->p_aioinfo; 2014 if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count) 2015 return EAGAIN; 2016 2017 lj = uma_zalloc(aiolio_zone, M_WAITOK); 2018 if (!lj) 2019 return EAGAIN; 2020 2021 lj->lioj_flags = 0; 2022 lj->lioj_buffer_count = 0; 2023 lj->lioj_buffer_finished_count = 0; 2024 lj->lioj_queue_count = 0; 2025 lj->lioj_queue_finished_count = 0; 2026 lj->lioj_ki = ki; 2027 2028 /* 2029 * Setup signal. 

/* syscall - XXX undocumented */
int
lio_listio(struct thread *td, struct lio_listio_args *uap)
{
	struct proc *p = td->td_proc;
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;
	int s;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = uma_zalloc(aiolio_zone, M_WAITOK);
	if (!lj)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error) {
			uma_zfree(aiolio_zone, lj);
			return error;
		}
		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
			uma_zfree(aiolio_zone, lj);
			return EINVAL;
		}
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else
		lj->lioj_flags &= ~LIOJ_SIGNAL;

	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = _aio_aqueue(td, iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer
				 * in user space.
				 */
				iocb = (struct aiocb *)
				    (intptr_t)fuword((caddr_t)&cbptr[i]);
				if (((intptr_t)iocb == -1) ||
				    ((intptr_t)iocb == 0))
					continue;

				/*
				 * Fetch the associated command from user
				 * space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref =
				    fuword(&iocb->_aiocb_private.kernelinfo);

				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							p->p_stats->p_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							p->p_stats->p_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				s = splbio();
				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
				splx(s);
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued)
				return runningcode;

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);

			if (error == EINTR)
				return EINTR;
			else if (error == EWOULDBLOCK)
				return EAGAIN;
		}
	}

	return runningcode;
}
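
/*
 * Illustrative userland sketch (not part of this kernel file): queueing a
 * small batch of reads through lio_listio(2) with LIO_WAIT, which the
 * implementation above services by sleeping on "aiospn" until every queued
 * entry has completed.  The helper name, descriptor, buffers, and the
 * two-entry list are hypothetical.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	int
 *	read_pair(int fd, char *a, char *b, size_t len)
 *	{
 *		struct aiocb acb[2];
 *		struct aiocb *list[2] = { &acb[0], &acb[1] };
 *		int i;
 *
 *		memset(acb, 0, sizeof(acb));
 *		for (i = 0; i < 2; i++) {
 *			acb[i].aio_fildes = fd;
 *			acb[i].aio_nbytes = len;
 *			acb[i].aio_lio_opcode = LIO_READ;
 *		}
 *		acb[0].aio_buf = a;
 *		acb[0].aio_offset = 0;
 *		acb[1].aio_buf = b;
 *		acb[1].aio_offset = (off_t)len;
 *
 *		// Blocks until both requests are done; returns -1 with
 *		// errno set to EIO if any entry could not be queued.
 *		return (lio_listio(LIO_WAIT, list, 2, NULL));
 *	}
 */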

/*
 * This is a weird hack so that we can post a signal.  It is safe to do so
 * from a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		PROC_LOCK(lj->lioj_ki->kaio_p);
		psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		PROC_UNLOCK(lj->lioj_ki->kaio_p);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
		PROC_LOCK(aiocbe->userproc);
		psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
		PROC_UNLOCK(aiocbe->userproc);
	}
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct buf *bp)
{
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	wakeup(bp);

	aiocbe = (struct aiocblist *)bp->b_spc;
	if (aiocbe) {
		p = bp->b_caller1;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_ioflags & BIO_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are
			 * done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					aiocbe->timeouthandle =
					    timeout(process_signal,
						aiocbe, 0);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
			aiocbe->timeouthandle =
			    timeout(process_signal, aiocbe, 0);
	}
}
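
/*
 * Illustrative userland sketch (not part of this kernel file): asking for
 * completion notification by signal, which aio_physwakeup()/process_signal()
 * above ultimately deliver with psignal().  The choice of SIGUSR1, the
 * handler, and the helper name are hypothetical.
 *
 *	#include <aio.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static volatile sig_atomic_t aio_done;
 *
 *	static void
 *	on_aio_done(int sig)
 *	{
 *		(void)sig;
 *		aio_done = 1;
 *	}
 *
 *	int
 *	read_with_signal(int fd, void *buf, size_t len)
 *	{
 *		struct aiocb acb;
 *
 *		signal(SIGUSR1, on_aio_done);
 *		memset(&acb, 0, sizeof(acb));
 *		acb.aio_fildes = fd;
 *		acb.aio_buf = buf;
 *		acb.aio_nbytes = len;
 *		acb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *		acb.aio_sigevent.sigev_signo = SIGUSR1;
 *		return (aio_read(&acb));
 *	}
 */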

/* syscall - wait for the next completion of an aio request */
int
aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb **cbptr;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, s, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	cbptr = uap->aiocbp;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			td->td_retval[0] = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock +=
				    cb->inputcharge;
				cb->inputcharge = 0;
			}
			/* Fetch the error before the entry is freed. */
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return error;
		}

		s = splbio();
		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0) {
			splx(s);
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			td->td_retval[0] = cb->uaiocb._aiocb_private.status;
			/* Fetch the error before the entry is freed. */
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
		splx(s);

		if (error == ERESTART)
			return EINTR;
		else if (error < 0)
			return error;
		else if (error == EINTR)
			return EINTR;
		else if (error == EWOULDBLOCK)
			return EAGAIN;
	}
}

/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}
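
/*
 * Illustrative userland sketch (not part of this kernel file): draining
 * completions with the FreeBSD-specific aio_waitcomplete(2) implemented
 * above, which blocks (here for up to one second) and hands back a pointer
 * to whichever control block finished first.  The prototype and error
 * conventions follow the aio_waitcomplete(2) manual page; treat the exact
 * types and the helper name as assumptions.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	ssize_t
 *	reap_one(struct aiocb **which)
 *	{
 *		struct timespec ts;
 *
 *		ts.tv_sec = 1;
 *		ts.tv_nsec = 0;
 *		// On success, returns the completed request's status (as
 *		// aio_return(2) would) and stores its aiocb pointer in
 *		// *which; otherwise returns -1 with errno set (EAGAIN on
 *		// timeout, EINTR when interrupted by a signal).
 *		return (aio_waitcomplete(which, &ts));
 *	}
 */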