1 /* 2 * Copyright (c) 1997 John S. Dyson. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. John S. Dyson's name may not be used to endorse or promote products 10 * derived from this software without specific prior written permission. 11 * 12 * DISCLAIMER: This code isn't warranted to do anything useful. Anything 13 * bad that happens because of using this software isn't the responsibility 14 * of the author. This software is distributed AS-IS. 15 * 16 * $FreeBSD$ 17 */ 18 19 /* 20 * This file contains support for the POSIX 1003.1B AIO/LIO facility. 21 */ 22 23 #include <sys/param.h> 24 #include <sys/systm.h> 25 #include <sys/bio.h> 26 #include <sys/buf.h> 27 #include <sys/sysproto.h> 28 #include <sys/filedesc.h> 29 #include <sys/kernel.h> 30 #include <sys/kthread.h> 31 #include <sys/fcntl.h> 32 #include <sys/file.h> 33 #include <sys/lock.h> 34 #include <sys/mutex.h> 35 #include <sys/unistd.h> 36 #include <sys/proc.h> 37 #include <sys/resourcevar.h> 38 #include <sys/signalvar.h> 39 #include <sys/protosw.h> 40 #include <sys/socketvar.h> 41 #include <sys/sysctl.h> 42 #include <sys/vnode.h> 43 #include <sys/conf.h> 44 #include <sys/event.h> 45 46 #include <vm/vm.h> 47 #include <vm/vm_extern.h> 48 #include <vm/pmap.h> 49 #include <vm/vm_map.h> 50 #include <vm/vm_zone.h> 51 #include <sys/aio.h> 52 53 #include <machine/limits.h> 54 55 #include "opt_vfs_aio.h" 56 57 #ifdef VFS_AIO 58 59 static long jobrefid; 60 61 #define JOBST_NULL 0x0 62 #define JOBST_JOBQPROC 0x1 63 #define JOBST_JOBQGLOBAL 0x2 64 #define JOBST_JOBRUNNING 0x3 65 #define JOBST_JOBFINISHED 0x4 66 #define JOBST_JOBQBUF 0x5 67 #define JOBST_JOBBFINISHED 0x6 68 69 #ifndef MAX_AIO_PER_PROC 70 #define MAX_AIO_PER_PROC 32 71 #endif 72 73 #ifndef MAX_AIO_QUEUE_PER_PROC 74 #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */ 75 #endif 76 77 #ifndef MAX_AIO_PROCS 78 #define MAX_AIO_PROCS 32 79 #endif 80 81 #ifndef MAX_AIO_QUEUE 82 #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */ 83 #endif 84 85 #ifndef TARGET_AIO_PROCS 86 #define TARGET_AIO_PROCS 4 87 #endif 88 89 #ifndef MAX_BUF_AIO 90 #define MAX_BUF_AIO 16 91 #endif 92 93 #ifndef AIOD_TIMEOUT_DEFAULT 94 #define AIOD_TIMEOUT_DEFAULT (10 * hz) 95 #endif 96 97 #ifndef AIOD_LIFETIME_DEFAULT 98 #define AIOD_LIFETIME_DEFAULT (30 * hz) 99 #endif 100 101 static int max_aio_procs = MAX_AIO_PROCS; 102 static int num_aio_procs = 0; 103 static int target_aio_procs = TARGET_AIO_PROCS; 104 static int max_queue_count = MAX_AIO_QUEUE; 105 static int num_queue_count = 0; 106 static int num_buf_aio = 0; 107 static int num_aio_resv_start = 0; 108 static int aiod_timeout; 109 static int aiod_lifetime; 110 111 static int max_aio_per_proc = MAX_AIO_PER_PROC; 112 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC; 113 static int max_buf_aio = MAX_BUF_AIO; 114 115 SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt"); 116 117 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, 118 CTLFLAG_RW, &max_aio_per_proc, 0, ""); 119 120 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, 121 CTLFLAG_RW, &max_aio_queue_per_proc, 0, ""); 122 123 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, 124 CTLFLAG_RW, &max_aio_procs, 0, ""); 125 126 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, 127 CTLFLAG_RD, 
    &num_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
    CTLFLAG_RD, &num_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
    CTLFLAG_RW, &max_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
    CTLFLAG_RW, &target_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
    CTLFLAG_RW, &max_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
    CTLFLAG_RD, &num_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
    CTLFLAG_RW, &aiod_lifetime, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
    CTLFLAG_RW, &aiod_timeout, 0, "");

/*
 * AIO process info
 */
#define AIOP_FREE	0x1			/* proc on free queue */
#define AIOP_SCHED	0x2			/* proc explicitly scheduled */

struct aiothreadlist {
	int aiothreadflags;			/* AIO proc flags */
	TAILQ_ENTRY(aiothreadlist) list;	/* List of processes */
	struct thread *aiothread;		/* The AIO thread */
	TAILQ_HEAD(,aiocblist) jobtorun;	/* suggested job to run */
};

/*
 * data structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;		/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD(,aiothreadlist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct thread *td, struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static int	aio_fphysio(struct proc *p, struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p,
struct aiocblist *iocb); 217 static void aio_daemon(void *uproc); 218 static void process_signal(void *aioj); 219 220 SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL); 221 222 static vm_zone_t kaio_zone = 0, aiop_zone = 0, aiocb_zone = 0, aiol_zone = 0; 223 static vm_zone_t aiolio_zone = 0; 224 225 /* 226 * Startup initialization 227 */ 228 static void 229 aio_onceonly(void *na) 230 { 231 TAILQ_INIT(&aio_freeproc); 232 TAILQ_INIT(&aio_activeproc); 233 TAILQ_INIT(&aio_jobs); 234 TAILQ_INIT(&aio_bufjobs); 235 kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1); 236 aiop_zone = zinit("AIOP", sizeof (struct aiothreadlist), 0, 0, 1); 237 aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1); 238 aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1); 239 aiolio_zone = zinit("AIOLIO", AIO_LISTIO_MAX * sizeof (struct 240 aio_liojob), 0, 0, 1); 241 aiod_timeout = AIOD_TIMEOUT_DEFAULT; 242 aiod_lifetime = AIOD_LIFETIME_DEFAULT; 243 jobrefid = 1; 244 } 245 246 /* 247 * Init the per-process aioinfo structure. The aioinfo limits are set 248 * per-process for user limit (resource) management. 249 */ 250 static void 251 aio_init_aioinfo(struct proc *p) 252 { 253 struct kaioinfo *ki; 254 if (p->p_aioinfo == NULL) { 255 ki = zalloc(kaio_zone); 256 p->p_aioinfo = ki; 257 ki->kaio_flags = 0; 258 ki->kaio_maxactive_count = max_aio_per_proc; 259 ki->kaio_active_count = 0; 260 ki->kaio_qallowed_count = max_aio_queue_per_proc; 261 ki->kaio_queue_count = 0; 262 ki->kaio_ballowed_count = max_buf_aio; 263 ki->kaio_buffer_count = 0; 264 ki->kaio_buffer_finished_count = 0; 265 ki->kaio_p = p; 266 TAILQ_INIT(&ki->kaio_jobdone); 267 TAILQ_INIT(&ki->kaio_jobqueue); 268 TAILQ_INIT(&ki->kaio_bufdone); 269 TAILQ_INIT(&ki->kaio_bufqueue); 270 TAILQ_INIT(&ki->kaio_liojoblist); 271 TAILQ_INIT(&ki->kaio_sockqueue); 272 } 273 274 while (num_aio_procs < target_aio_procs) 275 aio_newproc(); 276 } 277 278 /* 279 * Free a job entry. Wait for completion if it is currently active, but don't 280 * delay forever. If we delay, we return a flag that says that we have to 281 * restart the queue scan. 
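 *
 * A caller that is walking one of the per-process queues is expected to
 * restart its scan whenever this function returns non-zero.  A minimal
 * sketch of that idiom (the same pattern aio_proc_rundown() uses below):
 *
 *	restart:
 *	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = next) {
 *		next = TAILQ_NEXT(aiocbe, plist);
 *		if (aio_free_entry(aiocbe))
 *			goto restart;
 *	}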
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aiothreadlist *aiop;
	struct aio_liojob *lj;
	struct proc *p;
	int error;
	int s;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
			return 0;
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, PRIBIO, "jobwai", 0);
	}
	aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;

	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	knote_remove(&p->p_thread, &aiocbe->klist);	/* XXXKSE */
	/*
	 * XXXKSE Note: the thread here is used to eventually find the
	 * owning process again, but it is also used to do a fo_close,
	 * and that requires the thread.  (But does it require the
	 * OWNING thread, or maybe the running thread?)
	 * There is a semantic problem here...
	 */

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(p, aiocbe)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
	} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
		aiop = aiocbe->jobaiothread;
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED)
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
	zfree(aiocb_zone, aiocbe);
	return 0;
}
#endif /* VFS_AIO */

/*
 * Rundown the jobs for a given process.
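 * This waits for active daemon jobs and outstanding physio to finish,
 * moves any socket-queued requests back onto the normal queues, frees
 * every queued and completed entry, and finally releases the per-process
 * kaioinfo block.  It is intended to run when the owning process is being
 * torn down, after which no further AIO activity from that process is
 * expected.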
391 */ 392 void 393 aio_proc_rundown(struct proc *p) 394 { 395 #ifndef VFS_AIO 396 return; 397 #else 398 int s; 399 struct kaioinfo *ki; 400 struct aio_liojob *lj, *ljn; 401 struct aiocblist *aiocbe, *aiocbn; 402 struct file *fp; 403 struct filedesc *fdp; 404 struct socket *so; 405 406 ki = p->p_aioinfo; 407 if (ki == NULL) 408 return; 409 410 ki->kaio_flags |= LIOJ_SIGNAL_POSTED; 411 while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count > 412 ki->kaio_buffer_finished_count)) { 413 ki->kaio_flags |= KAIO_RUNDOWN; 414 if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout)) 415 break; 416 } 417 418 /* 419 * Move any aio ops that are waiting on socket I/O to the normal job 420 * queues so they are cleaned up with any others. 421 */ 422 fdp = p->p_fd; 423 424 s = splnet(); 425 for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe = 426 aiocbn) { 427 aiocbn = TAILQ_NEXT(aiocbe, plist); 428 fp = fdp->fd_ofiles[aiocbe->uaiocb.aio_fildes]; 429 430 /* 431 * Under some circumstances, the aio_fildes and the file 432 * structure don't match. This would leave aiocbe's in the 433 * TAILQ associated with the socket and cause a panic later. 434 * 435 * Detect and fix. 436 */ 437 if ((fp == NULL) || (fp != aiocbe->fd_file)) 438 fp = aiocbe->fd_file; 439 if (fp) { 440 so = (struct socket *)fp->f_data; 441 TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list); 442 if (TAILQ_EMPTY(&so->so_aiojobq)) { 443 so->so_snd.sb_flags &= ~SB_AIO; 444 so->so_rcv.sb_flags &= ~SB_AIO; 445 } 446 } 447 TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist); 448 TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list); 449 TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist); 450 } 451 splx(s); 452 453 restart1: 454 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) { 455 aiocbn = TAILQ_NEXT(aiocbe, plist); 456 if (aio_free_entry(aiocbe)) 457 goto restart1; 458 } 459 460 restart2: 461 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe = 462 aiocbn) { 463 aiocbn = TAILQ_NEXT(aiocbe, plist); 464 if (aio_free_entry(aiocbe)) 465 goto restart2; 466 } 467 468 /* 469 * Note the use of lots of splbio here, trying to avoid splbio for long chains 470 * of I/O. Probably unnecessary. 471 */ 472 restart3: 473 s = splbio(); 474 while (TAILQ_FIRST(&ki->kaio_bufqueue)) { 475 ki->kaio_flags |= KAIO_WAKEUP; 476 tsleep(p, PRIBIO, "aioprn", 0); 477 splx(s); 478 goto restart3; 479 } 480 splx(s); 481 482 restart4: 483 s = splbio(); 484 for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) { 485 aiocbn = TAILQ_NEXT(aiocbe, plist); 486 if (aio_free_entry(aiocbe)) { 487 splx(s); 488 goto restart4; 489 } 490 } 491 splx(s); 492 493 /* 494 * If we've slept, jobs might have moved from one queue to another. 495 * Retry rundown if we didn't manage to empty the queues. 
496 */ 497 if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL || 498 TAILQ_FIRST(&ki->kaio_jobqueue) != NULL || 499 TAILQ_FIRST(&ki->kaio_bufqueue) != NULL || 500 TAILQ_FIRST(&ki->kaio_bufdone) != NULL) 501 goto restart1; 502 503 for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) { 504 ljn = TAILQ_NEXT(lj, lioj_list); 505 if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 506 0)) { 507 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 508 zfree(aiolio_zone, lj); 509 } else { 510 #ifdef DIAGNOSTIC 511 printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, " 512 "QF:%d\n", lj->lioj_buffer_count, 513 lj->lioj_buffer_finished_count, 514 lj->lioj_queue_count, 515 lj->lioj_queue_finished_count); 516 #endif 517 } 518 } 519 520 zfree(kaio_zone, ki); 521 p->p_aioinfo = NULL; 522 #endif /* VFS_AIO */ 523 } 524 525 #ifdef VFS_AIO 526 /* 527 * Select a job to run (called by an AIO daemon). 528 */ 529 static struct aiocblist * 530 aio_selectjob(struct aiothreadlist *aiop) 531 { 532 int s; 533 struct aiocblist *aiocbe; 534 struct kaioinfo *ki; 535 struct proc *userp; 536 537 aiocbe = TAILQ_FIRST(&aiop->jobtorun); 538 if (aiocbe) { 539 TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list); 540 return aiocbe; 541 } 542 543 s = splnet(); 544 for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe = 545 TAILQ_NEXT(aiocbe, list)) { 546 userp = aiocbe->userproc; 547 ki = userp->p_aioinfo; 548 549 if (ki->kaio_active_count < ki->kaio_maxactive_count) { 550 TAILQ_REMOVE(&aio_jobs, aiocbe, list); 551 splx(s); 552 return aiocbe; 553 } 554 } 555 splx(s); 556 557 return NULL; 558 } 559 560 /* 561 * The AIO processing activity. This is the code that does the I/O request for 562 * the non-physio version of the operations. The normal vn operations are used, 563 * and this code should work in all instances for every type of file, including 564 * pipes, sockets, fifos, and regular files. 565 */ 566 static void 567 aio_process(struct aiocblist *aiocbe) 568 { 569 struct filedesc *fdp; 570 struct thread *td; 571 struct proc *userp; 572 struct proc *mycp; 573 struct aiocb *cb; 574 struct file *fp; 575 struct uio auio; 576 struct iovec aiov; 577 unsigned int fd; 578 int cnt; 579 int error; 580 off_t offset; 581 int oublock_st, oublock_end; 582 int inblock_st, inblock_end; 583 584 userp = aiocbe->userproc; 585 td = curthread; 586 mycp = td->td_proc; 587 cb = &aiocbe->uaiocb; 588 589 fdp = mycp->p_fd; 590 fd = cb->aio_fildes; 591 fp = fdp->fd_ofiles[fd]; 592 593 if ((fp == NULL) || (fp != aiocbe->fd_file)) { 594 cb->_aiocb_private.error = EBADF; 595 cb->_aiocb_private.status = -1; 596 return; 597 } 598 599 aiov.iov_base = cb->aio_buf; 600 aiov.iov_len = cb->aio_nbytes; 601 602 auio.uio_iov = &aiov; 603 auio.uio_iovcnt = 1; 604 auio.uio_offset = offset = cb->aio_offset; 605 auio.uio_resid = cb->aio_nbytes; 606 cnt = cb->aio_nbytes; 607 auio.uio_segflg = UIO_USERSPACE; 608 auio.uio_td = td; 609 610 inblock_st = mycp->p_stats->p_ru.ru_inblock; 611 oublock_st = mycp->p_stats->p_ru.ru_oublock; 612 /* 613 * Temporarily bump the ref count while reading to avoid the 614 * descriptor being ripped out from under us. 
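	 * The reference is paired with the fdrop() that immediately follows
	 * the fo_read()/fo_write() call below.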
615 */ 616 fhold(fp); 617 if (cb->aio_lio_opcode == LIO_READ) { 618 auio.uio_rw = UIO_READ; 619 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td); 620 } else { 621 auio.uio_rw = UIO_WRITE; 622 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td); 623 } 624 fdrop(fp, td); 625 inblock_end = mycp->p_stats->p_ru.ru_inblock; 626 oublock_end = mycp->p_stats->p_ru.ru_oublock; 627 628 aiocbe->inputcharge = inblock_end - inblock_st; 629 aiocbe->outputcharge = oublock_end - oublock_st; 630 631 if ((error) && (auio.uio_resid != cnt)) { 632 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) 633 error = 0; 634 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) { 635 PROC_LOCK(userp); 636 psignal(userp, SIGPIPE); 637 PROC_UNLOCK(userp); 638 } 639 } 640 641 cnt -= auio.uio_resid; 642 cb->_aiocb_private.error = error; 643 cb->_aiocb_private.status = cnt; 644 } 645 646 /* 647 * The AIO daemon, most of the actual work is done in aio_process, 648 * but the setup (and address space mgmt) is done in this routine. 649 */ 650 static void 651 aio_daemon(void *uproc) 652 { 653 int s; 654 struct aio_liojob *lj; 655 struct aiocb *cb; 656 struct aiocblist *aiocbe; 657 struct aiothreadlist *aiop; 658 struct kaioinfo *ki; 659 struct proc *curcp, *mycp, *userp; 660 struct vmspace *myvm, *tmpvm; 661 struct thread *td = curthread; 662 663 mtx_lock(&Giant); 664 /* 665 * Local copies of curproc (cp) and vmspace (myvm) 666 */ 667 mycp = td->td_proc; 668 myvm = mycp->p_vmspace; 669 670 if (mycp->p_textvp) { 671 vrele(mycp->p_textvp); 672 mycp->p_textvp = NULL; 673 } 674 675 /* 676 * Allocate and ready the aio control info. There is one aiop structure 677 * per daemon. 678 */ 679 aiop = zalloc(aiop_zone); 680 aiop->aiothread = td; 681 aiop->aiothreadflags |= AIOP_FREE; 682 TAILQ_INIT(&aiop->jobtorun); 683 684 s = splnet(); 685 686 /* 687 * Place thread (lightweight process) onto the AIO free thread list. 688 */ 689 if (TAILQ_EMPTY(&aio_freeproc)) 690 wakeup(&aio_freeproc); 691 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 692 693 splx(s); 694 695 /* 696 * Get rid of our current filedescriptors. AIOD's don't need any 697 * filedescriptors, except as temporarily inherited from the client. 698 */ 699 fdfree(td); 700 mycp->p_fd = NULL; 701 702 /* The daemon resides in its own pgrp. */ 703 enterpgrp(mycp, mycp->p_pid, 1); 704 705 /* Mark special process type. */ 706 mycp->p_flag |= P_SYSTEM; 707 708 /* 709 * Wakeup parent process. (Parent sleeps to keep from blasting away 710 * and creating too many daemons.) 711 */ 712 wakeup(mycp); 713 714 for (;;) { 715 /* 716 * curcp is the current daemon process context. 717 * userp is the current user process context. 718 */ 719 curcp = mycp; 720 721 /* 722 * Take daemon off of free queue 723 */ 724 if (aiop->aiothreadflags & AIOP_FREE) { 725 s = splnet(); 726 TAILQ_REMOVE(&aio_freeproc, aiop, list); 727 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 728 aiop->aiothreadflags &= ~AIOP_FREE; 729 splx(s); 730 } 731 aiop->aiothreadflags &= ~AIOP_SCHED; 732 733 /* 734 * Check for jobs. 735 */ 736 while ((aiocbe = aio_selectjob(aiop)) != NULL) { 737 cb = &aiocbe->uaiocb; 738 userp = aiocbe->userproc; 739 740 aiocbe->jobstate = JOBST_JOBRUNNING; 741 742 /* 743 * Connect to process address space for user program. 744 */ 745 if (userp != curcp) { 746 /* 747 * Save the current address space that we are 748 * connected to. 749 */ 750 tmpvm = mycp->p_vmspace; 751 752 /* 753 * Point to the new user address space, and 754 * refer to it. 
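				 * Bumping vm_refcnt keeps the client's
				 * vmspace alive while the daemon is borrowing
				 * it; the reference is dropped again via
				 * vmspace_free() when the daemon switches
				 * away from it.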
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;

				/* Activate the new mapping. */
				pmap_activate(&mycp->p_thread);

				/*
				 * If the old address space wasn't the daemon's
				 * own address space, then we need to remove the
				 * daemon's reference from the other process
				 * that it was acting on behalf of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}

				/*
				 * Disassociate from the previous client's file
				 * descriptors, and associate to the new
				 * client's descriptors.  Note that the daemon
				 * doesn't need to worry about its original
				 * descriptors, because they were originally
				 * freed.
				 */
				if (mycp->p_fd)
					fdfree(td);
				mycp->p_fd = fdshare(userp);
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aiocbe->jobaiothread = aiop;
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			s = splbio();
			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					PROC_LOCK(userp);
					psignal(userp,
					    lj->lioj_signal.sigev_signo);
					PROC_UNLOCK(userp);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			splx(s);

			aiocbe->jobstate = JOBST_JOBFINISHED;

			/*
			 * If the I/O request should be automatically rundown,
			 * do the needed cleanup.  Otherwise, place the queue
			 * entry for the just finished I/O request into the done
			 * queue for the associated client.
			 */
			s = splnet();
			if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
				aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
				zfree(aiocb_zone, aiocbe);
			} else {
				TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe,
				    plist);
			}
			splx(s);
			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				PROC_LOCK(userp);
				psignal(userp, cb->aio_sigevent.sigev_signo);
				PROC_UNLOCK(userp);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curcp != mycp) {
			/* Get the user address space to disconnect from. */
			tmpvm = mycp->p_vmspace;

			/* Get original address space for daemon. */
			mycp->p_vmspace = myvm;

			/* Activate the daemon's address space. */
			pmap_activate(&mycp->p_thread);
#ifdef DIAGNOSTIC
			if (tmpvm == myvm) {
				printf("AIOD: vmspace problem -- %d\n",
				    mycp->p_pid);
			}
#endif
			/* Remove our vmspace reference. */
			vmspace_free(tmpvm);

			/*
			 * Disassociate from the user process's file
			 * descriptors.
884 */ 885 if (mycp->p_fd) 886 fdfree(td); 887 mycp->p_fd = NULL; 888 curcp = mycp; 889 } 890 891 /* 892 * If we are the first to be put onto the free queue, wakeup 893 * anyone waiting for a daemon. 894 */ 895 s = splnet(); 896 TAILQ_REMOVE(&aio_activeproc, aiop, list); 897 if (TAILQ_EMPTY(&aio_freeproc)) 898 wakeup(&aio_freeproc); 899 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 900 aiop->aiothreadflags |= AIOP_FREE; 901 splx(s); 902 903 /* 904 * If daemon is inactive for a long time, allow it to exit, 905 * thereby freeing resources. 906 */ 907 if (((aiop->aiothreadflags & AIOP_SCHED) == 0) && tsleep(mycp, 908 PRIBIO, "aiordy", aiod_lifetime)) { 909 s = splnet(); 910 if ((TAILQ_FIRST(&aio_jobs) == NULL) && 911 (TAILQ_FIRST(&aiop->jobtorun) == NULL)) { 912 if ((aiop->aiothreadflags & AIOP_FREE) && 913 (num_aio_procs > target_aio_procs)) { 914 TAILQ_REMOVE(&aio_freeproc, aiop, list); 915 splx(s); 916 zfree(aiop_zone, aiop); 917 num_aio_procs--; 918 #ifdef DIAGNOSTIC 919 if (mycp->p_vmspace->vm_refcnt <= 1) { 920 printf("AIOD: bad vm refcnt for" 921 " exiting daemon: %d\n", 922 mycp->p_vmspace->vm_refcnt); 923 } 924 #endif 925 kthread_exit(0); 926 } 927 } 928 splx(s); 929 } 930 } 931 } 932 933 /* 934 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The 935 * AIO daemon modifies its environment itself. 936 */ 937 static int 938 aio_newproc() 939 { 940 int error; 941 struct proc *p; 942 943 error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, "aiod%d", 944 num_aio_procs); 945 if (error) 946 return error; 947 948 /* 949 * Wait until daemon is started, but continue on just in case to 950 * handle error conditions. 951 */ 952 error = tsleep(p, PZERO, "aiosta", aiod_timeout); 953 954 num_aio_procs++; 955 956 return error; 957 } 958 959 /* 960 * Try the high-performance, low-overhead physio method for eligible 961 * VCHR devices. This method doesn't use an aio helper thread, and 962 * thus has very low overhead. 963 * 964 * Assumes that the caller, _aio_aqueue(), has incremented the file 965 * structure's reference count, preventing its deallocation for the 966 * duration of this call. 967 */ 968 static int 969 aio_qphysio(struct proc *p, struct aiocblist *aiocbe) 970 { 971 int error; 972 struct aiocb *cb; 973 struct file *fp; 974 struct buf *bp; 975 struct vnode *vp; 976 struct kaioinfo *ki; 977 struct filedesc *fdp; 978 struct aio_liojob *lj; 979 int fd; 980 int s; 981 int notify; 982 983 cb = &aiocbe->uaiocb; 984 fdp = p->p_fd; 985 fd = cb->aio_fildes; 986 fp = fdp->fd_ofiles[fd]; 987 988 if (fp->f_type != DTYPE_VNODE) 989 return (-1); 990 991 vp = (struct vnode *)fp->f_data; 992 993 /* 994 * If its not a disk, we don't want to return a positive error. 995 * It causes the aio code to not fall through to try the thread 996 * way when you're talking to a regular file. 997 */ 998 if (!vn_isdisk(vp, &error)) { 999 if (error == ENOTBLK) 1000 return (-1); 1001 else 1002 return (error); 1003 } 1004 1005 if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys) 1006 return (-1); 1007 1008 if (cb->aio_nbytes > 1009 MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK)) 1010 return (-1); 1011 1012 ki = p->p_aioinfo; 1013 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) 1014 return (-1); 1015 1016 ki->kaio_buffer_count++; 1017 1018 lj = aiocbe->lio; 1019 if (lj) 1020 lj->lioj_buffer_count++; 1021 1022 /* Create and build a buffer header for a transfer. */ 1023 bp = (struct buf *)getpbuf(NULL); 1024 BUF_KERNPROC(bp); 1025 1026 /* 1027 * Get a copy of the kva from the physical buffer. 
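	 * The fields set below describe the raw transfer directly in terms of
	 * the user's buffer: useracc() verifies access and vmapbuf() brings
	 * the pages into kernel space before the request is handed to the
	 * driver with DEV_STRATEGY().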
1028 */ 1029 bp->b_caller1 = p; 1030 bp->b_dev = vp->v_rdev; 1031 error = bp->b_error = 0; 1032 1033 bp->b_bcount = cb->aio_nbytes; 1034 bp->b_bufsize = cb->aio_nbytes; 1035 bp->b_flags = B_PHYS; 1036 bp->b_iodone = aio_physwakeup; 1037 bp->b_saveaddr = bp->b_data; 1038 bp->b_data = cb->aio_buf; 1039 bp->b_blkno = btodb(cb->aio_offset); 1040 1041 if (cb->aio_lio_opcode == LIO_WRITE) { 1042 bp->b_iocmd = BIO_WRITE; 1043 if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) { 1044 error = EFAULT; 1045 goto doerror; 1046 } 1047 } else { 1048 bp->b_iocmd = BIO_READ; 1049 if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) { 1050 error = EFAULT; 1051 goto doerror; 1052 } 1053 } 1054 1055 /* Bring buffer into kernel space. */ 1056 vmapbuf(bp); 1057 1058 s = splbio(); 1059 aiocbe->bp = bp; 1060 bp->b_spc = (void *)aiocbe; 1061 TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list); 1062 TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist); 1063 aiocbe->jobstate = JOBST_JOBQBUF; 1064 cb->_aiocb_private.status = cb->aio_nbytes; 1065 num_buf_aio++; 1066 bp->b_error = 0; 1067 1068 splx(s); 1069 1070 /* Perform transfer. */ 1071 DEV_STRATEGY(bp, 0); 1072 1073 notify = 0; 1074 s = splbio(); 1075 1076 /* 1077 * If we had an error invoking the request, or an error in processing 1078 * the request before we have returned, we process it as an error in 1079 * transfer. Note that such an I/O error is not indicated immediately, 1080 * but is returned using the aio_error mechanism. In this case, 1081 * aio_suspend will return immediately. 1082 */ 1083 if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) { 1084 struct aiocb *job = aiocbe->uuaiocb; 1085 1086 aiocbe->uaiocb._aiocb_private.status = 0; 1087 suword(&job->_aiocb_private.status, 0); 1088 aiocbe->uaiocb._aiocb_private.error = bp->b_error; 1089 suword(&job->_aiocb_private.error, bp->b_error); 1090 1091 ki->kaio_buffer_finished_count++; 1092 1093 if (aiocbe->jobstate != JOBST_JOBBFINISHED) { 1094 aiocbe->jobstate = JOBST_JOBBFINISHED; 1095 aiocbe->jobflags |= AIOCBLIST_DONE; 1096 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list); 1097 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist); 1098 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist); 1099 notify = 1; 1100 } 1101 } 1102 splx(s); 1103 if (notify) 1104 KNOTE(&aiocbe->klist, 0); 1105 return 0; 1106 1107 doerror: 1108 ki->kaio_buffer_count--; 1109 if (lj) 1110 lj->lioj_buffer_count--; 1111 aiocbe->bp = NULL; 1112 relpbuf(bp, NULL); 1113 return error; 1114 } 1115 1116 /* 1117 * This waits/tests physio completion. 1118 */ 1119 static int 1120 aio_fphysio(struct proc *p, struct aiocblist *iocb) 1121 { 1122 int s; 1123 struct buf *bp; 1124 int error; 1125 1126 bp = iocb->bp; 1127 1128 s = splbio(); 1129 while ((bp->b_flags & B_DONE) == 0) { 1130 if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) { 1131 if ((bp->b_flags & B_DONE) == 0) { 1132 splx(s); 1133 return EINPROGRESS; 1134 } else 1135 break; 1136 } 1137 } 1138 splx(s); 1139 1140 /* Release mapping into kernel space. */ 1141 vunmapbuf(bp); 1142 iocb->bp = 0; 1143 1144 error = 0; 1145 1146 /* Check for an error. */ 1147 if (bp->b_ioflags & BIO_ERROR) 1148 error = bp->b_error; 1149 1150 relpbuf(bp, NULL); 1151 return (error); 1152 } 1153 #endif /* VFS_AIO */ 1154 1155 /* 1156 * Wake up aio requests that may be serviceable now. 
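 *
 * Called from the socket code's wakeup path when a socket buffer that has
 * SB_AIO set becomes readable or writable.  Matching jobs are moved from
 * the socket queue back onto the global job queue, and one idle daemon is
 * woken per requeued job.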
1157 */ 1158 void 1159 aio_swake(struct socket *so, struct sockbuf *sb) 1160 { 1161 #ifndef VFS_AIO 1162 return; 1163 #else 1164 struct aiocblist *cb,*cbn; 1165 struct proc *p; 1166 struct kaioinfo *ki = NULL; 1167 int opcode, wakecount = 0; 1168 struct aiothreadlist *aiop; 1169 1170 if (sb == &so->so_snd) { 1171 opcode = LIO_WRITE; 1172 so->so_snd.sb_flags &= ~SB_AIO; 1173 } else { 1174 opcode = LIO_READ; 1175 so->so_rcv.sb_flags &= ~SB_AIO; 1176 } 1177 1178 for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) { 1179 cbn = TAILQ_NEXT(cb, list); 1180 if (opcode == cb->uaiocb.aio_lio_opcode) { 1181 p = cb->userproc; 1182 ki = p->p_aioinfo; 1183 TAILQ_REMOVE(&so->so_aiojobq, cb, list); 1184 TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist); 1185 TAILQ_INSERT_TAIL(&aio_jobs, cb, list); 1186 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist); 1187 wakecount++; 1188 if (cb->jobstate != JOBST_JOBQGLOBAL) 1189 panic("invalid queue value"); 1190 } 1191 } 1192 1193 while (wakecount--) { 1194 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) { 1195 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1196 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 1197 aiop->aiothreadflags &= ~AIOP_FREE; 1198 wakeup(aiop->aiothread); 1199 } 1200 } 1201 #endif /* VFS_AIO */ 1202 } 1203 1204 #ifdef VFS_AIO 1205 /* 1206 * Queue a new AIO request. Choosing either the threaded or direct physio VCHR 1207 * technique is done in this code. 1208 */ 1209 static int 1210 _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type) 1211 { 1212 struct proc *p = td->td_proc; 1213 struct filedesc *fdp; 1214 struct file *fp; 1215 unsigned int fd; 1216 struct socket *so; 1217 int s; 1218 int error; 1219 int opcode; 1220 struct aiocblist *aiocbe; 1221 struct aiothreadlist *aiop; 1222 struct kaioinfo *ki; 1223 struct kevent kev; 1224 struct kqueue *kq; 1225 struct file *kq_fp; 1226 1227 aiocbe = zalloc(aiocb_zone); 1228 aiocbe->inputcharge = 0; 1229 aiocbe->outputcharge = 0; 1230 callout_handle_init(&aiocbe->timeouthandle); 1231 SLIST_INIT(&aiocbe->klist); 1232 1233 suword(&job->_aiocb_private.status, -1); 1234 suword(&job->_aiocb_private.error, 0); 1235 suword(&job->_aiocb_private.kernelinfo, -1); 1236 1237 error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb)); 1238 if (error) { 1239 suword(&job->_aiocb_private.error, error); 1240 zfree(aiocb_zone, aiocbe); 1241 return error; 1242 } 1243 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL && 1244 !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) { 1245 zfree(aiocb_zone, aiocbe); 1246 return EINVAL; 1247 } 1248 1249 /* Save userspace address of the job info. */ 1250 aiocbe->uuaiocb = job; 1251 1252 /* Get the opcode. */ 1253 if (type != LIO_NOP) 1254 aiocbe->uaiocb.aio_lio_opcode = type; 1255 opcode = aiocbe->uaiocb.aio_lio_opcode; 1256 1257 /* Get the fd info for process. */ 1258 fdp = p->p_fd; 1259 1260 /* 1261 * Range check file descriptor. 
1262 */ 1263 fd = aiocbe->uaiocb.aio_fildes; 1264 if (fd >= fdp->fd_nfiles) { 1265 zfree(aiocb_zone, aiocbe); 1266 if (type == 0) 1267 suword(&job->_aiocb_private.error, EBADF); 1268 return EBADF; 1269 } 1270 1271 fp = aiocbe->fd_file = fdp->fd_ofiles[fd]; 1272 if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 1273 0))) { 1274 zfree(aiocb_zone, aiocbe); 1275 if (type == 0) 1276 suword(&job->_aiocb_private.error, EBADF); 1277 return EBADF; 1278 } 1279 1280 if (aiocbe->uaiocb.aio_offset == -1LL) { 1281 zfree(aiocb_zone, aiocbe); 1282 if (type == 0) 1283 suword(&job->_aiocb_private.error, EINVAL); 1284 return EINVAL; 1285 } 1286 1287 error = suword(&job->_aiocb_private.kernelinfo, jobrefid); 1288 if (error) { 1289 zfree(aiocb_zone, aiocbe); 1290 if (type == 0) 1291 suword(&job->_aiocb_private.error, EINVAL); 1292 return error; 1293 } 1294 1295 aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid; 1296 if (jobrefid == LONG_MAX) 1297 jobrefid = 1; 1298 else 1299 jobrefid++; 1300 1301 if (opcode == LIO_NOP) { 1302 zfree(aiocb_zone, aiocbe); 1303 if (type == 0) { 1304 suword(&job->_aiocb_private.error, 0); 1305 suword(&job->_aiocb_private.status, 0); 1306 suword(&job->_aiocb_private.kernelinfo, 0); 1307 } 1308 return 0; 1309 } 1310 1311 if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) { 1312 zfree(aiocb_zone, aiocbe); 1313 if (type == 0) { 1314 suword(&job->_aiocb_private.status, 0); 1315 suword(&job->_aiocb_private.error, EINVAL); 1316 } 1317 return EINVAL; 1318 } 1319 1320 fhold(fp); 1321 1322 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) { 1323 kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue; 1324 kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr; 1325 } 1326 else { 1327 /* 1328 * This method for requesting kevent-based notification won't 1329 * work on the alpha, since we're passing in a pointer 1330 * via aio_lio_opcode, which is an int. Use the SIGEV_KEVENT- 1331 * based method instead. 1332 */ 1333 struct kevent *kevp; 1334 1335 kevp = (struct kevent *)job->aio_lio_opcode; 1336 if (kevp == NULL) 1337 goto no_kqueue; 1338 1339 error = copyin(kevp, &kev, sizeof(kev)); 1340 if (error) 1341 goto aqueue_fail; 1342 } 1343 if ((u_int)kev.ident >= fdp->fd_nfiles || 1344 (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL || 1345 (kq_fp->f_type != DTYPE_KQUEUE)) { 1346 error = EBADF; 1347 goto aqueue_fail; 1348 } 1349 kq = (struct kqueue *)kq_fp->f_data; 1350 kev.ident = (uintptr_t)aiocbe; 1351 kev.filter = EVFILT_AIO; 1352 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1; 1353 error = kqueue_register(kq, &kev, td); 1354 aqueue_fail: 1355 if (error) { 1356 zfree(aiocb_zone, aiocbe); 1357 if (type == 0) 1358 suword(&job->_aiocb_private.error, error); 1359 goto done; 1360 } 1361 no_kqueue: 1362 1363 suword(&job->_aiocb_private.error, EINPROGRESS); 1364 aiocbe->uaiocb._aiocb_private.error = EINPROGRESS; 1365 aiocbe->userproc = p; 1366 aiocbe->jobflags = 0; 1367 aiocbe->lio = lj; 1368 ki = p->p_aioinfo; 1369 1370 if (fp->f_type == DTYPE_SOCKET) { 1371 /* 1372 * Alternate queueing for socket ops: Reach down into the 1373 * descriptor to get the socket data. Then check to see if the 1374 * socket is ready to be read or written (based on the requested 1375 * operation). 1376 * 1377 * If it is not ready for io, then queue the aiocbe on the 1378 * socket, and set the flags so we get a call when sbnotify() 1379 * happens. 
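		 * aio_swake() above is the routine that later pulls these
		 * entries back off the socket queue once the socket becomes
		 * ready.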
		 */
		so = (struct socket *)fp->f_data;
		s = splnet();
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.sb_flags |= SB_AIO;
			else
				so->so_snd.sb_flags |= SB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			splx(s);
			error = 0;
			goto done;
		}
		splx(s);
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	s = splnet();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	splx(s);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota,
	 * then start one.  Otherwise, depend on the subsequent I/O completions
	 * to pick up this job.  If we don't successfully create the new
	 * process (thread) due to resource issues, we return an error for now
	 * (EAGAIN), which is likely not the correct thing to do.
	 */
retryproc:
	s = splnet();
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aiothreadflags &= ~AIOP_FREE;
		wakeup(aiop->aiothread);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			td->td_retval[0] = 0;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	splx(s);
done:
	fdrop(fp, td);
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct thread *td, struct aiocb *job, int type)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(td, job, NULL, type);
}
#endif /* VFS_AIO */

/*
 * Support the aio_return system call; as a side effect, kernel resources are
 * released.
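 *
 * Typical userland usage, as an illustrative sketch only (this is the
 * libc-visible POSIX API, not kernel code; names such as `fd' and `buf'
 * are placeholders):
 *
 *	struct aiocb cb;
 *	ssize_t nread;
 *
 *	bzero(&cb, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == 0) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			;	// or block in aio_suspend()/aio_waitcomplete()
 *		nread = aio_return(&cb);	// also releases kernel state
 *	}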
1482 */ 1483 int 1484 aio_return(struct thread *td, struct aio_return_args *uap) 1485 { 1486 #ifndef VFS_AIO 1487 return ENOSYS; 1488 #else 1489 struct proc *p = td->td_proc; 1490 int s; 1491 int jobref; 1492 struct aiocblist *cb, *ncb; 1493 struct aiocb *ujob; 1494 struct kaioinfo *ki; 1495 1496 ki = p->p_aioinfo; 1497 if (ki == NULL) 1498 return EINVAL; 1499 1500 ujob = uap->aiocbp; 1501 1502 jobref = fuword(&ujob->_aiocb_private.kernelinfo); 1503 if (jobref == -1 || jobref == 0) 1504 return EINVAL; 1505 1506 s = splnet(); 1507 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb, 1508 plist)) { 1509 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == 1510 jobref) { 1511 splx(s); 1512 if (ujob == cb->uuaiocb) { 1513 td->td_retval[0] = 1514 cb->uaiocb._aiocb_private.status; 1515 } else 1516 td->td_retval[0] = EFAULT; 1517 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 1518 curproc->p_stats->p_ru.ru_oublock += 1519 cb->outputcharge; 1520 cb->outputcharge = 0; 1521 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 1522 curproc->p_stats->p_ru.ru_inblock += 1523 cb->inputcharge; 1524 cb->inputcharge = 0; 1525 } 1526 aio_free_entry(cb); 1527 return 0; 1528 } 1529 } 1530 splx(s); 1531 1532 s = splbio(); 1533 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) { 1534 ncb = TAILQ_NEXT(cb, plist); 1535 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) 1536 == jobref) { 1537 splx(s); 1538 if (ujob == cb->uuaiocb) { 1539 td->td_retval[0] = 1540 cb->uaiocb._aiocb_private.status; 1541 } else 1542 td->td_retval[0] = EFAULT; 1543 aio_free_entry(cb); 1544 return 0; 1545 } 1546 } 1547 splx(s); 1548 1549 return (EINVAL); 1550 #endif /* VFS_AIO */ 1551 } 1552 1553 /* 1554 * Allow a process to wakeup when any of the I/O requests are completed. 1555 */ 1556 int 1557 aio_suspend(struct thread *td, struct aio_suspend_args *uap) 1558 { 1559 #ifndef VFS_AIO 1560 return ENOSYS; 1561 #else 1562 struct proc *p = td->td_proc; 1563 struct timeval atv; 1564 struct timespec ts; 1565 struct aiocb *const *cbptr, *cbp; 1566 struct kaioinfo *ki; 1567 struct aiocblist *cb; 1568 int i; 1569 int njoblist; 1570 int error, s, timo; 1571 int *ijoblist; 1572 struct aiocb **ujoblist; 1573 1574 if (uap->nent >= AIO_LISTIO_MAX) 1575 return EINVAL; 1576 1577 timo = 0; 1578 if (uap->timeout) { 1579 /* Get timespec struct. 
*/ 1580 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) 1581 return error; 1582 1583 if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000) 1584 return (EINVAL); 1585 1586 TIMESPEC_TO_TIMEVAL(&atv, &ts); 1587 if (itimerfix(&atv)) 1588 return (EINVAL); 1589 timo = tvtohz(&atv); 1590 } 1591 1592 ki = p->p_aioinfo; 1593 if (ki == NULL) 1594 return EAGAIN; 1595 1596 njoblist = 0; 1597 ijoblist = zalloc(aiol_zone); 1598 ujoblist = zalloc(aiol_zone); 1599 cbptr = uap->aiocbp; 1600 1601 for (i = 0; i < uap->nent; i++) { 1602 cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 1603 if (cbp == 0) 1604 continue; 1605 ujoblist[njoblist] = cbp; 1606 ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo); 1607 njoblist++; 1608 } 1609 1610 if (njoblist == 0) { 1611 zfree(aiol_zone, ijoblist); 1612 zfree(aiol_zone, ujoblist); 1613 return 0; 1614 } 1615 1616 error = 0; 1617 for (;;) { 1618 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = 1619 TAILQ_NEXT(cb, plist)) { 1620 for (i = 0; i < njoblist; i++) { 1621 if (((intptr_t) 1622 cb->uaiocb._aiocb_private.kernelinfo) == 1623 ijoblist[i]) { 1624 if (ujoblist[i] != cb->uuaiocb) 1625 error = EINVAL; 1626 zfree(aiol_zone, ijoblist); 1627 zfree(aiol_zone, ujoblist); 1628 return error; 1629 } 1630 } 1631 } 1632 1633 s = splbio(); 1634 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = 1635 TAILQ_NEXT(cb, plist)) { 1636 for (i = 0; i < njoblist; i++) { 1637 if (((intptr_t) 1638 cb->uaiocb._aiocb_private.kernelinfo) == 1639 ijoblist[i]) { 1640 splx(s); 1641 if (ujoblist[i] != cb->uuaiocb) 1642 error = EINVAL; 1643 zfree(aiol_zone, ijoblist); 1644 zfree(aiol_zone, ujoblist); 1645 return error; 1646 } 1647 } 1648 } 1649 1650 ki->kaio_flags |= KAIO_WAKEUP; 1651 error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo); 1652 splx(s); 1653 1654 if (error == ERESTART || error == EINTR) { 1655 zfree(aiol_zone, ijoblist); 1656 zfree(aiol_zone, ujoblist); 1657 return EINTR; 1658 } else if (error == EWOULDBLOCK) { 1659 zfree(aiol_zone, ijoblist); 1660 zfree(aiol_zone, ujoblist); 1661 return EAGAIN; 1662 } 1663 } 1664 1665 /* NOTREACHED */ 1666 return EINVAL; 1667 #endif /* VFS_AIO */ 1668 } 1669 1670 /* 1671 * aio_cancel cancels any non-physio aio operations not currently in 1672 * progress. 
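 *
 * The value reported back to the caller is AIO_CANCELED if at least one
 * matching request was cancelled and none had to be left running,
 * AIO_NOTCANCELED if any matching request could not be cancelled (raw-device
 * physio, for which cancellation is not attempted at all, or a job already
 * being serviced), and AIO_ALLDONE when nothing matching remained
 * outstanding.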
1673 */ 1674 int 1675 aio_cancel(struct thread *td, struct aio_cancel_args *uap) 1676 { 1677 #ifndef VFS_AIO 1678 return ENOSYS; 1679 #else 1680 struct proc *p = td->td_proc; 1681 struct kaioinfo *ki; 1682 struct aiocblist *cbe, *cbn; 1683 struct file *fp; 1684 struct filedesc *fdp; 1685 struct socket *so; 1686 struct proc *po; 1687 int s,error; 1688 int cancelled=0; 1689 int notcancelled=0; 1690 struct vnode *vp; 1691 1692 fdp = p->p_fd; 1693 1694 fp = fdp->fd_ofiles[uap->fd]; 1695 1696 if (fp == NULL) { 1697 return EBADF; 1698 } 1699 1700 if (fp->f_type == DTYPE_VNODE) { 1701 vp = (struct vnode *)fp->f_data; 1702 1703 if (vn_isdisk(vp,&error)) { 1704 td->td_retval[0] = AIO_NOTCANCELED; 1705 return 0; 1706 } 1707 } else if (fp->f_type == DTYPE_SOCKET) { 1708 so = (struct socket *)fp->f_data; 1709 1710 s = splnet(); 1711 1712 for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) { 1713 cbn = TAILQ_NEXT(cbe, list); 1714 if ((uap->aiocbp == NULL) || 1715 (uap->aiocbp == cbe->uuaiocb) ) { 1716 po = cbe->userproc; 1717 ki = po->p_aioinfo; 1718 TAILQ_REMOVE(&so->so_aiojobq, cbe, list); 1719 TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist); 1720 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist); 1721 if (ki->kaio_flags & KAIO_WAKEUP) { 1722 wakeup(po); 1723 } 1724 cbe->jobstate = JOBST_JOBFINISHED; 1725 cbe->uaiocb._aiocb_private.status=-1; 1726 cbe->uaiocb._aiocb_private.error=ECANCELED; 1727 cancelled++; 1728 /* XXX cancelled, knote? */ 1729 if (cbe->uaiocb.aio_sigevent.sigev_notify == 1730 SIGEV_SIGNAL) { 1731 PROC_LOCK(cbe->userproc); 1732 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo); 1733 PROC_UNLOCK(cbe->userproc); 1734 } 1735 if (uap->aiocbp) 1736 break; 1737 } 1738 } 1739 1740 splx(s); 1741 1742 if ((cancelled) && (uap->aiocbp)) { 1743 td->td_retval[0] = AIO_CANCELED; 1744 return 0; 1745 } 1746 1747 } 1748 1749 ki=p->p_aioinfo; 1750 1751 s = splnet(); 1752 1753 for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) { 1754 cbn = TAILQ_NEXT(cbe, plist); 1755 1756 if ((uap->fd == cbe->uaiocb.aio_fildes) && 1757 ((uap->aiocbp == NULL ) || 1758 (uap->aiocbp == cbe->uuaiocb))) { 1759 1760 if (cbe->jobstate == JOBST_JOBQGLOBAL) { 1761 TAILQ_REMOVE(&aio_jobs, cbe, list); 1762 TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist); 1763 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, 1764 plist); 1765 cancelled++; 1766 ki->kaio_queue_finished_count++; 1767 cbe->jobstate = JOBST_JOBFINISHED; 1768 cbe->uaiocb._aiocb_private.status = -1; 1769 cbe->uaiocb._aiocb_private.error = ECANCELED; 1770 /* XXX cancelled, knote? */ 1771 if (cbe->uaiocb.aio_sigevent.sigev_notify == 1772 SIGEV_SIGNAL) { 1773 PROC_LOCK(cbe->userproc); 1774 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo); 1775 PROC_UNLOCK(cbe->userproc); 1776 } 1777 } else { 1778 notcancelled++; 1779 } 1780 } 1781 } 1782 1783 splx(s); 1784 1785 1786 if (notcancelled) { 1787 td->td_retval[0] = AIO_NOTCANCELED; 1788 return 0; 1789 } 1790 1791 if (cancelled) { 1792 td->td_retval[0] = AIO_CANCELED; 1793 return 0; 1794 } 1795 1796 td->td_retval[0] = AIO_ALLDONE; 1797 1798 return 0; 1799 #endif /* VFS_AIO */ 1800 } 1801 1802 /* 1803 * aio_error is implemented in the kernel level for compatibility purposes only. 1804 * For a user mode async implementation, it would be best to do it in a userland 1805 * subroutine. 
1806 */ 1807 int 1808 aio_error(struct thread *td, struct aio_error_args *uap) 1809 { 1810 #ifndef VFS_AIO 1811 return ENOSYS; 1812 #else 1813 struct proc *p = td->td_proc; 1814 int s; 1815 struct aiocblist *cb; 1816 struct kaioinfo *ki; 1817 int jobref; 1818 1819 ki = p->p_aioinfo; 1820 if (ki == NULL) 1821 return EINVAL; 1822 1823 jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo); 1824 if ((jobref == -1) || (jobref == 0)) 1825 return EINVAL; 1826 1827 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb, 1828 plist)) { 1829 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1830 jobref) { 1831 td->td_retval[0] = cb->uaiocb._aiocb_private.error; 1832 return 0; 1833 } 1834 } 1835 1836 s = splnet(); 1837 1838 for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb, 1839 plist)) { 1840 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1841 jobref) { 1842 td->td_retval[0] = EINPROGRESS; 1843 splx(s); 1844 return 0; 1845 } 1846 } 1847 1848 for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb, 1849 plist)) { 1850 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1851 jobref) { 1852 td->td_retval[0] = EINPROGRESS; 1853 splx(s); 1854 return 0; 1855 } 1856 } 1857 splx(s); 1858 1859 s = splbio(); 1860 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb, 1861 plist)) { 1862 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1863 jobref) { 1864 td->td_retval[0] = cb->uaiocb._aiocb_private.error; 1865 splx(s); 1866 return 0; 1867 } 1868 } 1869 1870 for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb, 1871 plist)) { 1872 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1873 jobref) { 1874 td->td_retval[0] = EINPROGRESS; 1875 splx(s); 1876 return 0; 1877 } 1878 } 1879 splx(s); 1880 1881 #if (0) 1882 /* 1883 * Hack for lio. 
1884 */ 1885 status = fuword(&uap->aiocbp->_aiocb_private.status); 1886 if (status == -1) 1887 return fuword(&uap->aiocbp->_aiocb_private.error); 1888 #endif 1889 return EINVAL; 1890 #endif /* VFS_AIO */ 1891 } 1892 1893 int 1894 aio_read(struct thread *td, struct aio_read_args *uap) 1895 { 1896 #ifndef VFS_AIO 1897 return ENOSYS; 1898 #else 1899 return aio_aqueue(td, uap->aiocbp, LIO_READ); 1900 #endif /* VFS_AIO */ 1901 } 1902 1903 int 1904 aio_write(struct thread *td, struct aio_write_args *uap) 1905 { 1906 #ifndef VFS_AIO 1907 return ENOSYS; 1908 #else 1909 return aio_aqueue(td, uap->aiocbp, LIO_WRITE); 1910 #endif /* VFS_AIO */ 1911 } 1912 1913 int 1914 lio_listio(struct thread *td, struct lio_listio_args *uap) 1915 { 1916 #ifndef VFS_AIO 1917 return ENOSYS; 1918 #else 1919 struct proc *p = td->td_proc; 1920 int nent, nentqueued; 1921 struct aiocb *iocb, * const *cbptr; 1922 struct aiocblist *cb; 1923 struct kaioinfo *ki; 1924 struct aio_liojob *lj; 1925 int error, runningcode; 1926 int nerror; 1927 int i; 1928 int s; 1929 1930 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 1931 return EINVAL; 1932 1933 nent = uap->nent; 1934 if (nent > AIO_LISTIO_MAX) 1935 return EINVAL; 1936 1937 if (p->p_aioinfo == NULL) 1938 aio_init_aioinfo(p); 1939 1940 if ((nent + num_queue_count) > max_queue_count) 1941 return EAGAIN; 1942 1943 ki = p->p_aioinfo; 1944 if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count) 1945 return EAGAIN; 1946 1947 lj = zalloc(aiolio_zone); 1948 if (!lj) 1949 return EAGAIN; 1950 1951 lj->lioj_flags = 0; 1952 lj->lioj_buffer_count = 0; 1953 lj->lioj_buffer_finished_count = 0; 1954 lj->lioj_queue_count = 0; 1955 lj->lioj_queue_finished_count = 0; 1956 lj->lioj_ki = ki; 1957 1958 /* 1959 * Setup signal. 1960 */ 1961 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 1962 error = copyin(uap->sig, &lj->lioj_signal, 1963 sizeof(lj->lioj_signal)); 1964 if (error) { 1965 zfree(aiolio_zone, lj); 1966 return error; 1967 } 1968 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) { 1969 zfree(aiolio_zone, lj); 1970 return EINVAL; 1971 } 1972 lj->lioj_flags |= LIOJ_SIGNAL; 1973 lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED; 1974 } else 1975 lj->lioj_flags &= ~LIOJ_SIGNAL; 1976 1977 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list); 1978 /* 1979 * Get pointers to the list of I/O requests. 1980 */ 1981 nerror = 0; 1982 nentqueued = 0; 1983 cbptr = uap->acb_list; 1984 for (i = 0; i < uap->nent; i++) { 1985 iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 1986 if (((intptr_t)iocb != -1) && ((intptr_t)iocb != NULL)) { 1987 error = _aio_aqueue(td, iocb, lj, 0); 1988 if (error == 0) 1989 nentqueued++; 1990 else 1991 nerror++; 1992 } 1993 } 1994 1995 /* 1996 * If we haven't queued any, then just return error. 1997 */ 1998 if (nentqueued == 0) 1999 return 0; 2000 2001 /* 2002 * Calculate the appropriate error return. 2003 */ 2004 runningcode = 0; 2005 if (nerror) 2006 runningcode = EIO; 2007 2008 if (uap->mode == LIO_WAIT) { 2009 int command, found, jobref; 2010 2011 for (;;) { 2012 found = 0; 2013 for (i = 0; i < uap->nent; i++) { 2014 /* 2015 * Fetch address of the control buf pointer in 2016 * user space. 2017 */ 2018 iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 2019 if (((intptr_t)iocb == -1) || ((intptr_t)iocb 2020 == 0)) 2021 continue; 2022 2023 /* 2024 * Fetch the associated command from user space. 
2025 */ 2026 command = fuword(&iocb->aio_lio_opcode); 2027 if (command == LIO_NOP) { 2028 found++; 2029 continue; 2030 } 2031 2032 jobref = fuword(&iocb->_aiocb_private.kernelinfo); 2033 2034 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) { 2035 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) 2036 == jobref) { 2037 if (cb->uaiocb.aio_lio_opcode 2038 == LIO_WRITE) { 2039 curproc->p_stats->p_ru.ru_oublock 2040 += 2041 cb->outputcharge; 2042 cb->outputcharge = 0; 2043 } else if (cb->uaiocb.aio_lio_opcode 2044 == LIO_READ) { 2045 curproc->p_stats->p_ru.ru_inblock 2046 += cb->inputcharge; 2047 cb->inputcharge = 0; 2048 } 2049 found++; 2050 break; 2051 } 2052 } 2053 2054 s = splbio(); 2055 TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) { 2056 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) 2057 == jobref) { 2058 found++; 2059 break; 2060 } 2061 } 2062 splx(s); 2063 } 2064 2065 /* 2066 * If all I/Os have been disposed of, then we can 2067 * return. 2068 */ 2069 if (found == nentqueued) 2070 return runningcode; 2071 2072 ki->kaio_flags |= KAIO_WAKEUP; 2073 error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0); 2074 2075 if (error == EINTR) 2076 return EINTR; 2077 else if (error == EWOULDBLOCK) 2078 return EAGAIN; 2079 } 2080 } 2081 2082 return runningcode; 2083 #endif /* VFS_AIO */ 2084 } 2085 2086 #ifdef VFS_AIO 2087 /* 2088 * This is a weird hack so that we can post a signal. It is safe to do so from 2089 * a timeout routine, but *not* from an interrupt routine. 2090 */ 2091 static void 2092 process_signal(void *aioj) 2093 { 2094 struct aiocblist *aiocbe = aioj; 2095 struct aio_liojob *lj = aiocbe->lio; 2096 struct aiocb *cb = &aiocbe->uaiocb; 2097 2098 if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) && 2099 (lj->lioj_queue_count == lj->lioj_queue_finished_count)) { 2100 PROC_LOCK(lj->lioj_ki->kaio_p); 2101 psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo); 2102 PROC_UNLOCK(lj->lioj_ki->kaio_p); 2103 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 2104 } 2105 2106 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) { 2107 PROC_LOCK(aiocbe->userproc); 2108 psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo); 2109 PROC_UNLOCK(aiocbe->userproc); 2110 } 2111 } 2112 2113 /* 2114 * Interrupt handler for physio, performs the necessary process wakeups, and 2115 * signals. 2116 */ 2117 static void 2118 aio_physwakeup(struct buf *bp) 2119 { 2120 struct aiocblist *aiocbe; 2121 struct proc *p; 2122 struct kaioinfo *ki; 2123 struct aio_liojob *lj; 2124 2125 wakeup(bp); 2126 2127 aiocbe = (struct aiocblist *)bp->b_spc; 2128 if (aiocbe) { 2129 p = bp->b_caller1; 2130 2131 aiocbe->jobstate = JOBST_JOBBFINISHED; 2132 aiocbe->uaiocb._aiocb_private.status -= bp->b_resid; 2133 aiocbe->uaiocb._aiocb_private.error = 0; 2134 aiocbe->jobflags |= AIOCBLIST_DONE; 2135 2136 if (bp->b_ioflags & BIO_ERROR) 2137 aiocbe->uaiocb._aiocb_private.error = bp->b_error; 2138 2139 lj = aiocbe->lio; 2140 if (lj) { 2141 lj->lioj_buffer_finished_count++; 2142 2143 /* 2144 * wakeup/signal if all of the interrupt jobs are done. 2145 */ 2146 if (lj->lioj_buffer_finished_count == 2147 lj->lioj_buffer_count) { 2148 /* 2149 * Post a signal if it is called for. 
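				 * The signal is not posted directly from this
				 * interrupt context; instead process_signal()
				 * is scheduled via timeout(), which is a safe
				 * place to call psignal() (see the comment
				 * above process_signal()).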
2150 */ 2151 if ((lj->lioj_flags & 2152 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == 2153 LIOJ_SIGNAL) { 2154 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 2155 aiocbe->timeouthandle = 2156 timeout(process_signal, 2157 aiocbe, 0); 2158 } 2159 } 2160 } 2161 2162 ki = p->p_aioinfo; 2163 if (ki) { 2164 ki->kaio_buffer_finished_count++; 2165 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list); 2166 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist); 2167 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist); 2168 2169 KNOTE(&aiocbe->klist, 0); 2170 /* Do the wakeup. */ 2171 if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) { 2172 ki->kaio_flags &= ~KAIO_WAKEUP; 2173 wakeup(p); 2174 } 2175 } 2176 2177 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) 2178 aiocbe->timeouthandle = 2179 timeout(process_signal, aiocbe, 0); 2180 } 2181 } 2182 #endif /* VFS_AIO */ 2183 2184 int 2185 aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap) 2186 { 2187 #ifndef VFS_AIO 2188 return ENOSYS; 2189 #else 2190 struct proc *p = td->td_proc; 2191 struct timeval atv; 2192 struct timespec ts; 2193 struct aiocb **cbptr; 2194 struct kaioinfo *ki; 2195 struct aiocblist *cb = NULL; 2196 int error, s, timo; 2197 2198 suword(uap->aiocbp, (int)NULL); 2199 2200 timo = 0; 2201 if (uap->timeout) { 2202 /* Get timespec struct. */ 2203 error = copyin(uap->timeout, &ts, sizeof(ts)); 2204 if (error) 2205 return error; 2206 2207 if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000)) 2208 return (EINVAL); 2209 2210 TIMESPEC_TO_TIMEVAL(&atv, &ts); 2211 if (itimerfix(&atv)) 2212 return (EINVAL); 2213 timo = tvtohz(&atv); 2214 } 2215 2216 ki = p->p_aioinfo; 2217 if (ki == NULL) 2218 return EAGAIN; 2219 2220 cbptr = uap->aiocbp; 2221 2222 for (;;) { 2223 if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) { 2224 suword(uap->aiocbp, (int)cb->uuaiocb); 2225 td->td_retval[0] = cb->uaiocb._aiocb_private.status; 2226 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 2227 curproc->p_stats->p_ru.ru_oublock += 2228 cb->outputcharge; 2229 cb->outputcharge = 0; 2230 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 2231 curproc->p_stats->p_ru.ru_inblock += 2232 cb->inputcharge; 2233 cb->inputcharge = 0; 2234 } 2235 aio_free_entry(cb); 2236 return cb->uaiocb._aiocb_private.error; 2237 } 2238 2239 s = splbio(); 2240 if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) { 2241 splx(s); 2242 suword(uap->aiocbp, (int)cb->uuaiocb); 2243 td->td_retval[0] = cb->uaiocb._aiocb_private.status; 2244 aio_free_entry(cb); 2245 return cb->uaiocb._aiocb_private.error; 2246 } 2247 2248 ki->kaio_flags |= KAIO_WAKEUP; 2249 error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo); 2250 splx(s); 2251 2252 if (error == ERESTART) 2253 return EINTR; 2254 else if (error < 0) 2255 return error; 2256 else if (error == EINTR) 2257 return EINTR; 2258 else if (error == EWOULDBLOCK) 2259 return EAGAIN; 2260 } 2261 #endif /* VFS_AIO */ 2262 } 2263 2264 2265 #ifndef VFS_AIO 2266 static int 2267 filt_aioattach(struct knote *kn) 2268 { 2269 2270 return (ENXIO); 2271 } 2272 2273 struct filterops aio_filtops = 2274 { 0, filt_aioattach, NULL, NULL }; 2275 2276 #else 2277 static int 2278 filt_aioattach(struct knote *kn) 2279 { 2280 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id; 2281 2282 /* 2283 * The aiocbe pointer must be validated before using it, so 2284 * registration is restricted to the kernel; the user cannot 2285 * set EV_FLAG1. 
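	 * The in-kernel registrant is _aio_aqueue(), which sets EV_FLAG1 on
	 * the kevent it passes to kqueue_register() for SIGEV_KEVENT
	 * notification.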
2286 */ 2287 if ((kn->kn_flags & EV_FLAG1) == 0) 2288 return (EPERM); 2289 kn->kn_flags &= ~EV_FLAG1; 2290 2291 SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext); 2292 2293 return (0); 2294 } 2295 2296 static void 2297 filt_aiodetach(struct knote *kn) 2298 { 2299 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id; 2300 int s = splhigh(); /* XXX no clue, so overkill */ 2301 2302 SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext); 2303 splx(s); 2304 } 2305 2306 /*ARGSUSED*/ 2307 static int 2308 filt_aio(struct knote *kn, long hint) 2309 { 2310 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id; 2311 2312 kn->kn_data = 0; /* XXX data returned? */ 2313 if (aiocbe->jobstate != JOBST_JOBFINISHED && 2314 aiocbe->jobstate != JOBST_JOBBFINISHED) 2315 return (0); 2316 kn->kn_flags |= EV_EOF; 2317 return (1); 2318 } 2319 2320 struct filterops aio_filtops = 2321 { 0, filt_aioattach, filt_aiodetach, filt_aio }; 2322 #endif /* VFS_AIO */ 2323