1 /* 2 * Copyright (c) 1997 John S. Dyson. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. John S. Dyson's name may not be used to endorse or promote products 10 * derived from this software without specific prior written permission. 11 * 12 * DISCLAIMER: This code isn't warranted to do anything useful. Anything 13 * bad that happens because of using this software isn't the responsibility 14 * of the author. This software is distributed AS-IS. 15 * 16 * $FreeBSD$ 17 */ 18 19 /* 20 * This file contains support for the POSIX 1003.1B AIO/LIO facility. 21 */ 22 23 #include <sys/param.h> 24 #include <sys/systm.h> 25 #include <sys/buf.h> 26 #include <sys/sysproto.h> 27 #include <sys/filedesc.h> 28 #include <sys/kernel.h> 29 #include <sys/fcntl.h> 30 #include <sys/file.h> 31 #include <sys/lock.h> 32 #include <sys/unistd.h> 33 #include <sys/proc.h> 34 #include <sys/resourcevar.h> 35 #include <sys/signalvar.h> 36 #include <sys/protosw.h> 37 #include <sys/socketvar.h> 38 #include <sys/sysctl.h> 39 #include <sys/vnode.h> 40 #include <sys/conf.h> 41 42 #include <vm/vm.h> 43 #include <vm/vm_extern.h> 44 #include <vm/pmap.h> 45 #include <vm/vm_map.h> 46 #include <vm/vm_zone.h> 47 #include <sys/aio.h> 48 49 #include <machine/limits.h> 50 51 static long jobrefid; 52 53 #define JOBST_NULL 0x0 54 #define JOBST_JOBQPROC 0x1 55 #define JOBST_JOBQGLOBAL 0x2 56 #define JOBST_JOBRUNNING 0x3 57 #define JOBST_JOBFINISHED 0x4 58 #define JOBST_JOBQBUF 0x5 59 #define JOBST_JOBBFINISHED 0x6 60 61 #ifndef MAX_AIO_PER_PROC 62 #define MAX_AIO_PER_PROC 32 63 #endif 64 65 #ifndef MAX_AIO_QUEUE_PER_PROC 66 #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */ 67 #endif 68 69 #ifndef MAX_AIO_PROCS 70 #define MAX_AIO_PROCS 32 71 #endif 72 73 #ifndef MAX_AIO_QUEUE 74 #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */ 75 #endif 76 77 #ifndef TARGET_AIO_PROCS 78 #define TARGET_AIO_PROCS 4 79 #endif 80 81 #ifndef MAX_BUF_AIO 82 #define MAX_BUF_AIO 16 83 #endif 84 85 #ifndef AIOD_TIMEOUT_DEFAULT 86 #define AIOD_TIMEOUT_DEFAULT (10 * hz) 87 #endif 88 89 #ifndef AIOD_LIFETIME_DEFAULT 90 #define AIOD_LIFETIME_DEFAULT (30 * hz) 91 #endif 92 93 static int max_aio_procs = MAX_AIO_PROCS; 94 static int num_aio_procs = 0; 95 static int target_aio_procs = TARGET_AIO_PROCS; 96 static int max_queue_count = MAX_AIO_QUEUE; 97 static int num_queue_count = 0; 98 static int num_buf_aio = 0; 99 static int num_aio_resv_start = 0; 100 static int aiod_timeout; 101 static int aiod_lifetime; 102 103 static int max_aio_per_proc = MAX_AIO_PER_PROC; 104 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC; 105 static int max_buf_aio = MAX_BUF_AIO; 106 107 SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt"); 108 109 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, 110 CTLFLAG_RW, &max_aio_per_proc, 0, ""); 111 112 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, 113 CTLFLAG_RW, &max_aio_queue_per_proc, 0, ""); 114 115 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, 116 CTLFLAG_RW, &max_aio_procs, 0, ""); 117 118 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, 119 CTLFLAG_RD, &num_aio_procs, 0, ""); 120 121 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, 122 CTLFLAG_RD, &num_queue_count, 0, ""); 123 124 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, 
125 CTLFLAG_RW, &max_queue_count, 0, ""); 126 127 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, 128 CTLFLAG_RW, &target_aio_procs, 0, ""); 129 130 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, 131 CTLFLAG_RW, &max_buf_aio, 0, ""); 132 133 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, 134 CTLFLAG_RD, &num_buf_aio, 0, ""); 135 136 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, 137 CTLFLAG_RW, &aiod_lifetime, 0, ""); 138 139 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, 140 CTLFLAG_RW, &aiod_timeout, 0, ""); 141 142 /* 143 * AIO process info 144 */ 145 #define AIOP_FREE 0x1 /* proc on free queue */ 146 #define AIOP_SCHED 0x2 /* proc explicitly scheduled */ 147 148 struct aioproclist { 149 int aioprocflags; /* AIO proc flags */ 150 TAILQ_ENTRY(aioproclist) list; /* List of processes */ 151 struct proc *aioproc; /* The AIO thread */ 152 TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */ 153 }; 154 155 /* 156 * data-structure for lio signal management 157 */ 158 struct aio_liojob { 159 int lioj_flags; 160 int lioj_buffer_count; 161 int lioj_buffer_finished_count; 162 int lioj_queue_count; 163 int lioj_queue_finished_count; 164 struct sigevent lioj_signal; /* signal on all I/O done */ 165 TAILQ_ENTRY (aio_liojob) lioj_list; 166 struct kaioinfo *lioj_ki; 167 }; 168 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */ 169 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */ 170 171 /* 172 * per process aio data structure 173 */ 174 struct kaioinfo { 175 int kaio_flags; /* per process kaio flags */ 176 int kaio_maxactive_count; /* maximum number of AIOs */ 177 int kaio_active_count; /* number of currently used AIOs */ 178 int kaio_qallowed_count; /* maxiumu size of AIO queue */ 179 int kaio_queue_count; /* size of AIO queue */ 180 int kaio_ballowed_count; /* maximum number of buffers */ 181 int kaio_queue_finished_count; /* number of daemon jobs finished */ 182 int kaio_buffer_count; /* number of physio buffers */ 183 int kaio_buffer_finished_count; /* count of I/O done */ 184 struct proc *kaio_p; /* process that uses this kaio block */ 185 TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */ 186 TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */ 187 TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */ 188 TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */ 189 TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */ 190 TAILQ_HEAD (,aiocblist) kaio_sockqueue; /* queue for aios waiting on sockets */ 191 }; 192 193 #define KAIO_RUNDOWN 0x1 /* process is being run down */ 194 #define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */ 195 196 static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc; 197 static TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */ 198 static TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */ 199 static TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */ 200 201 static void aio_init_aioinfo(struct proc *p); 202 static void aio_onceonly(void *); 203 static int aio_free_entry(struct aiocblist *aiocbe); 204 static void aio_process(struct aiocblist *aiocbe); 205 static int aio_newproc(void); 206 static int aio_aqueue(struct proc *p, struct aiocb *job, int type); 207 static void aio_physwakeup(struct buf *bp); 208 static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type); 209 static int aio_qphysio(struct proc *p, struct aiocblist *iocb); 210 static void aio_daemon(void *uproc); 211 212 SYSINIT(aio, SI_SUB_VFS, 
SI_ORDER_ANY, aio_onceonly, NULL); 213 214 static vm_zone_t kaio_zone = 0, aiop_zone = 0, aiocb_zone = 0, aiol_zone = 0; 215 static vm_zone_t aiolio_zone = 0; 216 217 /* 218 * Startup initialization 219 */ 220 void 221 aio_onceonly(void *na) 222 { 223 TAILQ_INIT(&aio_freeproc); 224 TAILQ_INIT(&aio_activeproc); 225 TAILQ_INIT(&aio_jobs); 226 TAILQ_INIT(&aio_bufjobs); 227 TAILQ_INIT(&aio_freejobs); 228 kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1); 229 aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1); 230 aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1); 231 aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1); 232 aiolio_zone = zinit("AIOLIO", AIO_LISTIO_MAX * sizeof (struct 233 aio_liojob), 0, 0, 1); 234 aiod_timeout = AIOD_TIMEOUT_DEFAULT; 235 aiod_lifetime = AIOD_LIFETIME_DEFAULT; 236 jobrefid = 1; 237 } 238 239 /* 240 * Init the per-process aioinfo structure. The aioinfo limits are set 241 * per-process for user limit (resource) management. 242 */ 243 void 244 aio_init_aioinfo(struct proc *p) 245 { 246 struct kaioinfo *ki; 247 if (p->p_aioinfo == NULL) { 248 ki = zalloc(kaio_zone); 249 p->p_aioinfo = ki; 250 ki->kaio_flags = 0; 251 ki->kaio_maxactive_count = max_aio_per_proc; 252 ki->kaio_active_count = 0; 253 ki->kaio_qallowed_count = max_aio_queue_per_proc; 254 ki->kaio_queue_count = 0; 255 ki->kaio_ballowed_count = max_buf_aio; 256 ki->kaio_buffer_count = 0; 257 ki->kaio_buffer_finished_count = 0; 258 ki->kaio_p = p; 259 TAILQ_INIT(&ki->kaio_jobdone); 260 TAILQ_INIT(&ki->kaio_jobqueue); 261 TAILQ_INIT(&ki->kaio_bufdone); 262 TAILQ_INIT(&ki->kaio_bufqueue); 263 TAILQ_INIT(&ki->kaio_liojoblist); 264 TAILQ_INIT(&ki->kaio_sockqueue); 265 } 266 267 while (num_aio_procs < target_aio_procs) 268 aio_newproc(); 269 } 270 271 /* 272 * Free a job entry. Wait for completion if it is currently active, but don't 273 * delay forever. If we delay, we return a flag that says that we have to 274 * restart the queue scan. 
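 *
 * For illustration only, a sketch of the caller pattern this implies (it
 * matches the restart loops in aio_proc_rundown() below); "queue" and
 * "next" are placeholders:
 *
 *	restart:
 *	for (aiocbe = TAILQ_FIRST(&queue); aiocbe; aiocbe = next) {
 *		next = TAILQ_NEXT(aiocbe, plist);
 *		if (aio_free_entry(aiocbe))
 *			goto restart;
 *	}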
275 */ 276 int 277 aio_free_entry(struct aiocblist *aiocbe) 278 { 279 struct kaioinfo *ki; 280 struct aioproclist *aiop; 281 struct aio_liojob *lj; 282 struct proc *p; 283 int error; 284 int s; 285 286 if (aiocbe->jobstate == JOBST_NULL) 287 panic("aio_free_entry: freeing already free job"); 288 289 p = aiocbe->userproc; 290 ki = p->p_aioinfo; 291 lj = aiocbe->lio; 292 if (ki == NULL) 293 panic("aio_free_entry: missing p->p_aioinfo"); 294 295 if (aiocbe->jobstate == JOBST_JOBRUNNING) { 296 if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) 297 return 0; 298 aiocbe->jobflags |= AIOCBLIST_RUNDOWN; 299 tsleep(aiocbe, PRIBIO|PCATCH, "jobwai", 0); 300 } 301 aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE; 302 303 if (aiocbe->bp == NULL) { 304 if (ki->kaio_queue_count <= 0) 305 panic("aio_free_entry: process queue size <= 0"); 306 if (num_queue_count <= 0) 307 panic("aio_free_entry: system wide queue size <= 0"); 308 309 if (lj) { 310 lj->lioj_queue_count--; 311 if (aiocbe->jobflags & AIOCBLIST_DONE) 312 lj->lioj_queue_finished_count--; 313 } 314 ki->kaio_queue_count--; 315 if (aiocbe->jobflags & AIOCBLIST_DONE) 316 ki->kaio_queue_finished_count--; 317 num_queue_count--; 318 } else { 319 if (lj) { 320 lj->lioj_buffer_count--; 321 if (aiocbe->jobflags & AIOCBLIST_DONE) 322 lj->lioj_buffer_finished_count--; 323 } 324 if (aiocbe->jobflags & AIOCBLIST_DONE) 325 ki->kaio_buffer_finished_count--; 326 ki->kaio_buffer_count--; 327 num_buf_aio--; 328 } 329 330 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN) 331 && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) { 332 ki->kaio_flags &= ~KAIO_WAKEUP; 333 wakeup(p); 334 } 335 336 if (aiocbe->jobstate == JOBST_JOBQBUF) { 337 if ((error = aio_fphysio(p, aiocbe, 1)) != 0) 338 return error; 339 if (aiocbe->jobstate != JOBST_JOBBFINISHED) 340 panic("aio_free_entry: invalid physio finish-up state"); 341 s = splbio(); 342 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist); 343 splx(s); 344 } else if (aiocbe->jobstate == JOBST_JOBQPROC) { 345 aiop = aiocbe->jobaioproc; 346 TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list); 347 } else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) 348 TAILQ_REMOVE(&aio_jobs, aiocbe, list); 349 else if (aiocbe->jobstate == JOBST_JOBFINISHED) 350 TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist); 351 else if (aiocbe->jobstate == JOBST_JOBBFINISHED) { 352 s = splbio(); 353 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist); 354 splx(s); 355 if (aiocbe->bp) { 356 vunmapbuf(aiocbe->bp); 357 relpbuf(aiocbe->bp, NULL); 358 aiocbe->bp = NULL; 359 } 360 } 361 if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) { 362 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 363 zfree(aiolio_zone, lj); 364 } 365 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 366 aiocbe->jobstate = JOBST_NULL; 367 return 0; 368 } 369 370 /* 371 * Rundown the jobs for a given process. 
372 */ 373 void 374 aio_proc_rundown(struct proc *p) 375 { 376 int s; 377 struct kaioinfo *ki; 378 struct aio_liojob *lj, *ljn; 379 struct aiocblist *aiocbe, *aiocbn; 380 struct file *fp; 381 struct filedesc *fdp; 382 struct socket *so; 383 384 ki = p->p_aioinfo; 385 if (ki == NULL) 386 return; 387 388 ki->kaio_flags |= LIOJ_SIGNAL_POSTED; 389 while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count > 390 ki->kaio_buffer_finished_count)) { 391 ki->kaio_flags |= KAIO_RUNDOWN; 392 if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout)) 393 break; 394 } 395 396 /* 397 * Move any aio ops that are waiting on socket I/O to the normal job 398 * queues so they are cleaned up with any others. 399 */ 400 fdp = p->p_fd; 401 402 s = splnet(); 403 for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe = 404 aiocbn) { 405 aiocbn = TAILQ_NEXT(aiocbe, plist); 406 fp = fdp->fd_ofiles[aiocbe->uaiocb.aio_fildes]; 407 408 /* 409 * Under some circumstances, the aio_fildes and the file 410 * structure don't match. This would leave aiocbe's in the 411 * TAILQ associated with the socket and cause a panic later. 412 * 413 * Detect and fix. 414 */ 415 if ((fp == NULL) || (fp != aiocbe->fd_file)) 416 fp = aiocbe->fd_file; 417 if (fp) { 418 so = (struct socket *)fp->f_data; 419 TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list); 420 if (TAILQ_EMPTY(&so->so_aiojobq)) { 421 so->so_snd.sb_flags &= ~SB_AIO; 422 so->so_rcv.sb_flags &= ~SB_AIO; 423 } 424 } 425 TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist); 426 TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list); 427 TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist); 428 } 429 splx(s); 430 431 restart1: 432 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) { 433 aiocbn = TAILQ_NEXT(aiocbe, plist); 434 if (aio_free_entry(aiocbe)) 435 goto restart1; 436 } 437 438 restart2: 439 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe = 440 aiocbn) { 441 aiocbn = TAILQ_NEXT(aiocbe, plist); 442 if (aio_free_entry(aiocbe)) 443 goto restart2; 444 } 445 446 /* 447 * Note the use of lots of splbio here, trying to avoid splbio for long chains 448 * of I/O. Probably unnecessary. 449 */ 450 restart3: 451 s = splbio(); 452 while (TAILQ_FIRST(&ki->kaio_bufqueue)) { 453 ki->kaio_flags |= KAIO_WAKEUP; 454 tsleep(p, PRIBIO, "aioprn", 0); 455 splx(s); 456 goto restart3; 457 } 458 splx(s); 459 460 restart4: 461 s = splbio(); 462 for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) { 463 aiocbn = TAILQ_NEXT(aiocbe, plist); 464 if (aio_free_entry(aiocbe)) { 465 splx(s); 466 goto restart4; 467 } 468 } 469 splx(s); 470 471 for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) { 472 ljn = TAILQ_NEXT(lj, lioj_list); 473 if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 474 0)) { 475 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 476 zfree(aiolio_zone, lj); 477 } else { 478 #ifdef DIAGNOSTIC 479 printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, " 480 "QF:%d\n", lj->lioj_buffer_count, 481 lj->lioj_buffer_finished_count, 482 lj->lioj_queue_count, 483 lj->lioj_queue_finished_count); 484 #endif 485 } 486 } 487 488 zfree(kaio_zone, ki); 489 p->p_aioinfo = NULL; 490 } 491 492 /* 493 * Select a job to run (called by an AIO daemon). 
494 */ 495 static struct aiocblist * 496 aio_selectjob(struct aioproclist *aiop) 497 { 498 int s; 499 struct aiocblist *aiocbe; 500 struct kaioinfo *ki; 501 struct proc *userp; 502 503 aiocbe = TAILQ_FIRST(&aiop->jobtorun); 504 if (aiocbe) { 505 TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list); 506 return aiocbe; 507 } 508 509 s = splnet(); 510 for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe = 511 TAILQ_NEXT(aiocbe, list)) { 512 userp = aiocbe->userproc; 513 ki = userp->p_aioinfo; 514 515 if (ki->kaio_active_count < ki->kaio_maxactive_count) { 516 TAILQ_REMOVE(&aio_jobs, aiocbe, list); 517 splx(s); 518 return aiocbe; 519 } 520 } 521 splx(s); 522 523 return NULL; 524 } 525 526 /* 527 * The AIO processing activity. This is the code that does the I/O request for 528 * the non-physio version of the operations. The normal vn operations are used, 529 * and this code should work in all instances for every type of file, including 530 * pipes, sockets, fifos, and regular files. 531 */ 532 void 533 aio_process(struct aiocblist *aiocbe) 534 { 535 struct filedesc *fdp; 536 struct proc *userp, *mycp; 537 struct aiocb *cb; 538 struct file *fp; 539 struct uio auio; 540 struct iovec aiov; 541 unsigned int fd; 542 int cnt; 543 int error; 544 off_t offset; 545 int oublock_st, oublock_end; 546 int inblock_st, inblock_end; 547 548 userp = aiocbe->userproc; 549 cb = &aiocbe->uaiocb; 550 551 mycp = curproc; 552 553 fdp = mycp->p_fd; 554 fd = cb->aio_fildes; 555 fp = fdp->fd_ofiles[fd]; 556 557 if ((fp == NULL) || (fp != aiocbe->fd_file)) { 558 cb->_aiocb_private.error = EBADF; 559 cb->_aiocb_private.status = -1; 560 return; 561 } 562 563 aiov.iov_base = (void *)cb->aio_buf; 564 aiov.iov_len = cb->aio_nbytes; 565 566 auio.uio_iov = &aiov; 567 auio.uio_iovcnt = 1; 568 auio.uio_offset = offset = cb->aio_offset; 569 auio.uio_resid = cb->aio_nbytes; 570 cnt = cb->aio_nbytes; 571 auio.uio_segflg = UIO_USERSPACE; 572 auio.uio_procp = mycp; 573 574 inblock_st = mycp->p_stats->p_ru.ru_inblock; 575 oublock_st = mycp->p_stats->p_ru.ru_oublock; 576 if (cb->aio_lio_opcode == LIO_READ) { 577 auio.uio_rw = UIO_READ; 578 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, mycp); 579 } else { 580 auio.uio_rw = UIO_WRITE; 581 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, mycp); 582 } 583 inblock_end = mycp->p_stats->p_ru.ru_inblock; 584 oublock_end = mycp->p_stats->p_ru.ru_oublock; 585 586 aiocbe->inputcharge = inblock_end - inblock_st; 587 aiocbe->outputcharge = oublock_end - oublock_st; 588 589 if ((error) && (auio.uio_resid != cnt)) { 590 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) 591 error = 0; 592 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) 593 psignal(userp, SIGPIPE); 594 } 595 596 cnt -= auio.uio_resid; 597 cb->_aiocb_private.error = error; 598 cb->_aiocb_private.status = cnt; 599 600 return; 601 } 602 603 /* 604 * The AIO daemon, most of the actual work is done in aio_process, 605 * but the setup (and address space mgmt) is done in this routine. 606 */ 607 static void 608 aio_daemon(void *uproc) 609 { 610 int s; 611 struct aio_liojob *lj; 612 struct aiocb *cb; 613 struct aiocblist *aiocbe; 614 struct aioproclist *aiop; 615 struct kaioinfo *ki; 616 struct proc *curcp, *mycp, *userp; 617 struct vmspace *myvm, *tmpvm; 618 619 /* 620 * Local copies of curproc (cp) and vmspace (myvm) 621 */ 622 mycp = curproc; 623 myvm = mycp->p_vmspace; 624 625 if (mycp->p_textvp) { 626 vrele(mycp->p_textvp); 627 mycp->p_textvp = NULL; 628 } 629 630 /* 631 * Allocate and ready the aio control info. 
There is one aiop structure 632 * per daemon. 633 */ 634 aiop = zalloc(aiop_zone); 635 aiop->aioproc = mycp; 636 aiop->aioprocflags |= AIOP_FREE; 637 TAILQ_INIT(&aiop->jobtorun); 638 639 s = splnet(); 640 641 /* 642 * Place thread (lightweight process) onto the AIO free thread list. 643 */ 644 if (TAILQ_EMPTY(&aio_freeproc)) 645 wakeup(&aio_freeproc); 646 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 647 648 splx(s); 649 650 /* Make up a name for the daemon. */ 651 strcpy(mycp->p_comm, "aiod"); 652 653 /* 654 * Get rid of our current file descriptors. AIODs don't need any 655 * file descriptors, except as temporarily inherited from the client. 656 * Credentials are also cloned, and made equivalent to "root". 657 */ 658 fdfree(mycp); 659 mycp->p_fd = NULL; 660 mycp->p_ucred = crcopy(mycp->p_ucred); 661 mycp->p_ucred->cr_uid = 0; 662 mycp->p_ucred->cr_ngroups = 1; 663 mycp->p_ucred->cr_groups[0] = 1; 664 665 /* The daemon resides in its own pgrp. */ 666 enterpgrp(mycp, mycp->p_pid, 1); 667 668 /* Mark special process type. */ 669 mycp->p_flag |= P_SYSTEM | P_KTHREADP; 670 671 /* 672 * Wake up the parent process. (The parent sleeps to keep from blasting 673 * away and creating too many daemons.) 674 */ 675 wakeup(mycp); 676 677 for (;;) { 678 /* 679 * curcp is the current daemon process context. 680 * userp is the current user process context. 681 */ 682 curcp = mycp; 683 684 /* 685 * Take the daemon off of the free queue. 686 */ 687 if (aiop->aioprocflags & AIOP_FREE) { 688 s = splnet(); 689 TAILQ_REMOVE(&aio_freeproc, aiop, list); 690 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 691 aiop->aioprocflags &= ~AIOP_FREE; 692 splx(s); 693 } 694 aiop->aioprocflags &= ~AIOP_SCHED; 695 696 /* 697 * Check for jobs. 698 */ 699 while ((aiocbe = aio_selectjob(aiop)) != NULL) { 700 cb = &aiocbe->uaiocb; 701 userp = aiocbe->userproc; 702 703 aiocbe->jobstate = JOBST_JOBRUNNING; 704 705 /* 706 * Connect to process address space for user program. 707 */ 708 if (userp != curcp) { 709 /* 710 * Save the current address space that we are 711 * connected to. 712 */ 713 tmpvm = mycp->p_vmspace; 714 715 /* 716 * Point to the new user address space, and 717 * refer to it. 718 */ 719 mycp->p_vmspace = userp->p_vmspace; 720 mycp->p_vmspace->vm_refcnt++; 721 722 /* Activate the new mapping. */ 723 pmap_activate(mycp); 724 725 /* 726 * If the old address space wasn't the daemon's 727 * own address space, then we need to remove the 728 * daemon's reference from the other process 729 * that it was acting on behalf of. 730 */ 731 if (tmpvm != myvm) { 732 vmspace_free(tmpvm); 733 } 734 735 /* 736 * Disassociate from the previous client's file 737 * descriptors, and associate with the new client's 738 * descriptors. Note that the daemon doesn't 739 * need to worry about its original descriptors, 740 * because they were already freed at startup. 741 */ 742 if (mycp->p_fd) 743 fdfree(mycp); 744 mycp->p_fd = fdshare(userp); 745 curcp = userp; 746 } 747 748 ki = userp->p_aioinfo; 749 lj = aiocbe->lio; 750 751 /* Account for currently active jobs. */ 752 ki->kaio_active_count++; 753 754 /* Do the I/O function. */ 755 aiocbe->jobaioproc = aiop; 756 aio_process(aiocbe); 757 758 /* Decrement the active job count. */ 759 ki->kaio_active_count--; 760 761 /* 762 * Increment the completion count for wakeup/signal 763 * comparisons.
764 */ 765 aiocbe->jobflags |= AIOCBLIST_DONE; 766 ki->kaio_queue_finished_count++; 767 if (lj) 768 lj->lioj_queue_finished_count++; 769 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags 770 & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) { 771 ki->kaio_flags &= ~KAIO_WAKEUP; 772 wakeup(userp); 773 } 774 775 s = splbio(); 776 if (lj && (lj->lioj_flags & 777 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) { 778 if ((lj->lioj_queue_finished_count == 779 lj->lioj_queue_count) && 780 (lj->lioj_buffer_finished_count == 781 lj->lioj_buffer_count)) { 782 psignal(userp, 783 lj->lioj_signal.sigev_signo); 784 lj->lioj_flags |= 785 LIOJ_SIGNAL_POSTED; 786 } 787 } 788 splx(s); 789 790 aiocbe->jobstate = JOBST_JOBFINISHED; 791 792 /* 793 * If the I/O request should be automatically rundown, 794 * do the needed cleanup. Otherwise, place the queue 795 * entry for the just finished I/O request into the done 796 * queue for the associated client. 797 */ 798 s = splnet(); 799 if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) { 800 aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE; 801 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 802 } else { 803 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist); 804 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, 805 plist); 806 } 807 splx(s); 808 809 if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) { 810 wakeup(aiocbe); 811 aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN; 812 } 813 814 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) { 815 psignal(userp, cb->aio_sigevent.sigev_signo); 816 } 817 } 818 819 /* 820 * Disconnect from user address space. 821 */ 822 if (curcp != mycp) { 823 /* Get the user address space to disconnect from. */ 824 tmpvm = mycp->p_vmspace; 825 826 /* Get original address space for daemon. */ 827 mycp->p_vmspace = myvm; 828 829 /* Activate the daemon's address space. */ 830 pmap_activate(mycp); 831 #ifdef DIAGNOSTIC 832 if (tmpvm == myvm) { 833 printf("AIOD: vmspace problem -- %d\n", 834 mycp->p_pid); 835 } 836 #endif 837 /* Remove our vmspace reference. */ 838 vmspace_free(tmpvm); 839 840 /* 841 * Disassociate from the user process's file 842 * descriptors. 843 */ 844 if (mycp->p_fd) 845 fdfree(mycp); 846 mycp->p_fd = NULL; 847 curcp = mycp; 848 } 849 850 /* 851 * If we are the first to be put onto the free queue, wakeup 852 * anyone waiting for a daemon. 853 */ 854 s = splnet(); 855 TAILQ_REMOVE(&aio_activeproc, aiop, list); 856 if (TAILQ_EMPTY(&aio_freeproc)) 857 wakeup(&aio_freeproc); 858 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 859 aiop->aioprocflags |= AIOP_FREE; 860 splx(s); 861 862 /* 863 * If daemon is inactive for a long time, allow it to exit, 864 * thereby freeing resources. 865 */ 866 if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp, 867 PRIBIO, "aiordy", aiod_lifetime)) { 868 s = splnet(); 869 if ((TAILQ_FIRST(&aio_jobs) == NULL) && 870 (TAILQ_FIRST(&aiop->jobtorun) == NULL)) { 871 if ((aiop->aioprocflags & AIOP_FREE) && 872 (num_aio_procs > target_aio_procs)) { 873 TAILQ_REMOVE(&aio_freeproc, aiop, list); 874 splx(s); 875 zfree(aiop_zone, aiop); 876 num_aio_procs--; 877 #ifdef DIAGNOSTIC 878 if (mycp->p_vmspace->vm_refcnt <= 1) { 879 printf("AIOD: bad vm refcnt for" 880 " exiting daemon: %d\n", 881 mycp->p_vmspace->vm_refcnt); 882 } 883 #endif 884 exit1(mycp, 0); 885 } 886 } 887 splx(s); 888 } 889 } 890 } 891 892 /* 893 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The 894 * AIO daemon modifies its environment itself. 
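 *
 * How many daemons may exist is bounded by the vfs.aio sysctls declared
 * above.  For example (illustrative values only), an administrator could
 * enlarge the pool with:
 *
 *	sysctl -w vfs.aio.target_aio_procs=8
 *	sysctl -w vfs.aio.max_aio_procs=64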
895 */ 896 static int 897 aio_newproc() 898 { 899 int error; 900 struct proc *p, *np; 901 902 p = &proc0; 903 error = fork1(p, RFPROC|RFMEM|RFNOWAIT, &np); 904 if (error) 905 return error; 906 cpu_set_fork_handler(np, aio_daemon, curproc); 907 908 /* 909 * Wait until the daemon is started, but continue on anyway so that 910 * error conditions are handled. 911 */ 912 error = tsleep(np, PZERO, "aiosta", aiod_timeout); 913 num_aio_procs++; 914 915 return error; 916 } 917 918 /* 919 * Try the high-performance physio method for eligible VCHR devices. This 920 * routine doesn't require the use of any additional threads, and thus has very low overhead. 921 */ 922 int 923 aio_qphysio(struct proc *p, struct aiocblist *aiocbe) 924 { 925 int error; 926 struct aiocb *cb; 927 struct file *fp; 928 struct buf *bp; 929 struct vnode *vp; 930 struct kaioinfo *ki; 931 struct filedesc *fdp; 932 struct aio_liojob *lj; 933 int fd; 934 int s; 935 int cnt; 936 937 cb = &aiocbe->uaiocb; 938 fdp = p->p_fd; 939 fd = cb->aio_fildes; 940 fp = fdp->fd_ofiles[fd]; 941 942 if (fp->f_type != DTYPE_VNODE) 943 return (-1); 944 945 vp = (struct vnode *)fp->f_data; 946 947 if (!vn_isdisk(vp, &error)) 948 return (error); 949 950 if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys) 951 return (-1); 952 953 if ((cb->aio_nbytes > MAXPHYS) && (num_buf_aio >= max_buf_aio)) 954 return (-1); 955 956 ki = p->p_aioinfo; 957 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) 958 return (-1); 959 960 cnt = cb->aio_nbytes; 961 if (cnt > MAXPHYS) 962 return (-1); 963 964 /* 965 * Physical I/O is charged directly to the process, so we don't have to 966 * fake it. 967 */ 968 aiocbe->inputcharge = 0; 969 aiocbe->outputcharge = 0; 970 971 ki->kaio_buffer_count++; 972 973 lj = aiocbe->lio; 974 if (lj) 975 lj->lioj_buffer_count++; 976 977 /* Create and build a buffer header for a transfer. */ 978 bp = (struct buf *)getpbuf(NULL); 979 980 /* 981 * Get a copy of the kva from the physical buffer. 982 */ 983 bp->b_caller1 = p; 984 bp->b_dev = vp->v_rdev; 985 error = bp->b_error = 0; 986 987 bp->b_bcount = cb->aio_nbytes; 988 bp->b_bufsize = cb->aio_nbytes; 989 bp->b_flags = B_PHYS | B_CALL; 990 bp->b_iodone = aio_physwakeup; 991 bp->b_saveaddr = bp->b_data; 992 bp->b_data = (void *)cb->aio_buf; 993 bp->b_blkno = btodb(cb->aio_offset); 994 995 if (cb->aio_lio_opcode == LIO_WRITE) { 996 bp->b_flags |= B_WRITE; 997 if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) { 998 error = EFAULT; 999 goto doerror; 1000 } 1001 } else { 1002 bp->b_flags |= B_READ; 1003 if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) { 1004 error = EFAULT; 1005 goto doerror; 1006 } 1007 } 1008 1009 /* Bring buffer into kernel space. */ 1010 vmapbuf(bp); 1011 1012 s = splbio(); 1013 aiocbe->bp = bp; 1014 bp->b_spc = (void *)aiocbe; 1015 TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list); 1016 TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist); 1017 aiocbe->jobstate = JOBST_JOBQBUF; 1018 cb->_aiocb_private.status = cb->aio_nbytes; 1019 num_buf_aio++; 1020 bp->b_error = 0; 1021 1022 splx(s); 1023 1024 /* Perform transfer. */ 1025 BUF_STRATEGY(bp, 0); 1026 1027 s = splbio(); 1028 1029 /* 1030 * If we had an error invoking the request, or an error in processing 1031 * the request before we have returned, we process it as an error in 1032 * transfer. Note that such an I/O error is not indicated immediately, 1033 * but is returned using the aio_error mechanism. In this case, 1034 * aio_suspend will return immediately.
1035 */ 1036 if (bp->b_error || (bp->b_flags & B_ERROR)) { 1037 struct aiocb *job = aiocbe->uuaiocb; 1038 1039 aiocbe->uaiocb._aiocb_private.status = 0; 1040 suword(&job->_aiocb_private.status, 0); 1041 aiocbe->uaiocb._aiocb_private.error = bp->b_error; 1042 suword(&job->_aiocb_private.error, bp->b_error); 1043 1044 ki->kaio_buffer_finished_count++; 1045 1046 if (aiocbe->jobstate != JOBST_JOBBFINISHED) { 1047 aiocbe->jobstate = JOBST_JOBBFINISHED; 1048 aiocbe->jobflags |= AIOCBLIST_DONE; 1049 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list); 1050 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist); 1051 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist); 1052 } 1053 } 1054 splx(s); 1055 return 0; 1056 1057 doerror: 1058 ki->kaio_buffer_count--; 1059 if (lj) 1060 lj->lioj_buffer_count--; 1061 aiocbe->bp = NULL; 1062 relpbuf(bp, NULL); 1063 return error; 1064 } 1065 1066 /* 1067 * This waits/tests physio completion. 1068 */ 1069 int 1070 aio_fphysio(struct proc *p, struct aiocblist *iocb, int flgwait) 1071 { 1072 int s; 1073 struct buf *bp; 1074 int error; 1075 1076 bp = iocb->bp; 1077 1078 s = splbio(); 1079 if (flgwait == 0) { 1080 if ((bp->b_flags & B_DONE) == 0) { 1081 splx(s); 1082 return EINPROGRESS; 1083 } 1084 } 1085 1086 while ((bp->b_flags & B_DONE) == 0) { 1087 if (tsleep((caddr_t)bp, PRIBIO, "physstr", aiod_timeout)) { 1088 if ((bp->b_flags & B_DONE) == 0) { 1089 splx(s); 1090 return EINPROGRESS; 1091 } else 1092 break; 1093 } 1094 } 1095 1096 /* Release mapping into kernel space. */ 1097 vunmapbuf(bp); 1098 iocb->bp = 0; 1099 1100 error = 0; 1101 1102 /* Check for an error. */ 1103 if (bp->b_flags & B_ERROR) 1104 error = bp->b_error; 1105 1106 relpbuf(bp, NULL); 1107 return (error); 1108 } 1109 1110 /* 1111 * Wake up aio requests that may be serviceable now. 1112 */ 1113 void 1114 aio_swake(struct socket *so, struct sockbuf *sb) 1115 { 1116 struct aiocblist *cb,*cbn; 1117 struct proc *p; 1118 struct kaioinfo *ki = NULL; 1119 int opcode, wakecount = 0; 1120 struct aioproclist *aiop; 1121 1122 if (sb == &so->so_snd) { 1123 opcode = LIO_WRITE; 1124 so->so_snd.sb_flags &= ~SB_AIO; 1125 } else { 1126 opcode = LIO_READ; 1127 so->so_rcv.sb_flags &= ~SB_AIO; 1128 } 1129 1130 for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) { 1131 cbn = TAILQ_NEXT(cb, list); 1132 if (opcode == cb->uaiocb.aio_lio_opcode) { 1133 p = cb->userproc; 1134 ki = p->p_aioinfo; 1135 TAILQ_REMOVE(&so->so_aiojobq, cb, list); 1136 TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist); 1137 TAILQ_INSERT_TAIL(&aio_jobs, cb, list); 1138 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist); 1139 wakecount++; 1140 if (cb->jobstate != JOBST_JOBQGLOBAL) 1141 panic("invalid queue value"); 1142 } 1143 } 1144 1145 while (wakecount--) { 1146 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) { 1147 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1148 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 1149 aiop->aioprocflags &= ~AIOP_FREE; 1150 wakeup(aiop->aioproc); 1151 } 1152 } 1153 } 1154 1155 /* 1156 * Queue a new AIO request. Choosing either the threaded or direct physio VCHR 1157 * technique is done in this code. 
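 *
 * In outline (descriptive sketch of the code below, not an interface):
 *
 *	if the descriptor is a socket that is not yet ready
 *		park the request on so_aiojobq until aio_swake() moves it back
 *	else if aio_qphysio() accepts it (raw disk, size/alignment/quota OK)
 *		issue it directly as physio; aio_physwakeup() completes it
 *	else
 *		place it on the global aio_jobs queue for an aio daemon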
1158 */ 1159 static int 1160 _aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type) 1161 { 1162 struct filedesc *fdp; 1163 struct file *fp; 1164 unsigned int fd; 1165 struct socket *so; 1166 int s; 1167 int error; 1168 int opcode; 1169 struct aiocblist *aiocbe; 1170 struct aioproclist *aiop; 1171 struct kaioinfo *ki; 1172 1173 if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL) 1174 TAILQ_REMOVE(&aio_freejobs, aiocbe, list); 1175 else 1176 aiocbe = zalloc (aiocb_zone); 1177 1178 aiocbe->inputcharge = 0; 1179 aiocbe->outputcharge = 0; 1180 1181 suword(&job->_aiocb_private.status, -1); 1182 suword(&job->_aiocb_private.error, 0); 1183 suword(&job->_aiocb_private.kernelinfo, -1); 1184 1185 error = copyin((caddr_t)job, (caddr_t) &aiocbe->uaiocb, sizeof 1186 aiocbe->uaiocb); 1187 if (error) { 1188 suword(&job->_aiocb_private.error, error); 1189 1190 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1191 return error; 1192 } 1193 1194 /* Save userspace address of the job info. */ 1195 aiocbe->uuaiocb = job; 1196 1197 /* Get the opcode. */ 1198 if (type != LIO_NOP) 1199 aiocbe->uaiocb.aio_lio_opcode = type; 1200 opcode = aiocbe->uaiocb.aio_lio_opcode; 1201 1202 /* Get the fd info for process. */ 1203 fdp = p->p_fd; 1204 1205 /* 1206 * Range check file descriptor. 1207 */ 1208 fd = aiocbe->uaiocb.aio_fildes; 1209 if (fd >= fdp->fd_nfiles) { 1210 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1211 if (type == 0) 1212 suword(&job->_aiocb_private.error, EBADF); 1213 return EBADF; 1214 } 1215 1216 fp = aiocbe->fd_file = fdp->fd_ofiles[fd]; 1217 if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 1218 0))) { 1219 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1220 if (type == 0) 1221 suword(&job->_aiocb_private.error, EBADF); 1222 return EBADF; 1223 } 1224 1225 if (aiocbe->uaiocb.aio_offset == -1LL) { 1226 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1227 if (type == 0) 1228 suword(&job->_aiocb_private.error, EINVAL); 1229 return EINVAL; 1230 } 1231 1232 error = suword(&job->_aiocb_private.kernelinfo, jobrefid); 1233 if (error) { 1234 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1235 if (type == 0) 1236 suword(&job->_aiocb_private.error, EINVAL); 1237 return error; 1238 } 1239 1240 aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid; 1241 if (jobrefid == LONG_MAX) 1242 jobrefid = 1; 1243 else 1244 jobrefid++; 1245 1246 if (opcode == LIO_NOP) { 1247 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1248 if (type == 0) { 1249 suword(&job->_aiocb_private.error, 0); 1250 suword(&job->_aiocb_private.status, 0); 1251 suword(&job->_aiocb_private.kernelinfo, 0); 1252 } 1253 return 0; 1254 } 1255 1256 if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) { 1257 TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list); 1258 if (type == 0) { 1259 suword(&job->_aiocb_private.status, 0); 1260 suword(&job->_aiocb_private.error, EINVAL); 1261 } 1262 return EINVAL; 1263 } 1264 1265 suword(&job->_aiocb_private.error, EINPROGRESS); 1266 aiocbe->uaiocb._aiocb_private.error = EINPROGRESS; 1267 aiocbe->userproc = p; 1268 aiocbe->jobflags = 0; 1269 aiocbe->lio = lj; 1270 ki = p->p_aioinfo; 1271 1272 if (fp->f_type == DTYPE_SOCKET) { 1273 /* 1274 * Alternate queueing for socket ops: Reach down into the 1275 * descriptor to get the socket data. Then check to see if the 1276 * socket is ready to be read or written (based on the requested 1277 * operation). 
1278 * 1279 * If it is not ready for io, then queue the aiocbe on the 1280 * socket, and set the flags so we get a call when sbnotify() 1281 * happens. 1282 */ 1283 so = (struct socket *)fp->f_data; 1284 s = splnet(); 1285 if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode == 1286 LIO_WRITE) && (!sowriteable(so)))) { 1287 TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list); 1288 TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist); 1289 if (opcode == LIO_READ) 1290 so->so_rcv.sb_flags |= SB_AIO; 1291 else 1292 so->so_snd.sb_flags |= SB_AIO; 1293 aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */ 1294 ki->kaio_queue_count++; 1295 num_queue_count++; 1296 splx(s); 1297 return 0; 1298 } 1299 splx(s); 1300 } 1301 1302 if ((error = aio_qphysio(p, aiocbe)) == 0) 1303 return 0; 1304 else if (error > 0) { 1305 suword(&job->_aiocb_private.status, 0); 1306 aiocbe->uaiocb._aiocb_private.error = error; 1307 suword(&job->_aiocb_private.error, error); 1308 return error; 1309 } 1310 1311 /* No buffer for daemon I/O. */ 1312 aiocbe->bp = NULL; 1313 1314 ki->kaio_queue_count++; 1315 if (lj) 1316 lj->lioj_queue_count++; 1317 s = splnet(); 1318 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist); 1319 TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list); 1320 splx(s); 1321 aiocbe->jobstate = JOBST_JOBQGLOBAL; 1322 1323 num_queue_count++; 1324 error = 0; 1325 1326 /* 1327 * If we don't have a free AIO process, and we are below our quota, then 1328 * start one. Otherwise, depend on the subsequent I/O completions to 1329 * pick-up this job. If we don't sucessfully create the new process 1330 * (thread) due to resource issues, we return an error for now (EAGAIN), 1331 * which is likely not the correct thing to do. 1332 */ 1333 retryproc: 1334 s = splnet(); 1335 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1336 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1337 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list); 1338 aiop->aioprocflags &= ~AIOP_FREE; 1339 wakeup(aiop->aioproc); 1340 } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) && 1341 ((ki->kaio_active_count + num_aio_resv_start) < 1342 ki->kaio_maxactive_count)) { 1343 num_aio_resv_start++; 1344 if ((error = aio_newproc()) == 0) { 1345 num_aio_resv_start--; 1346 p->p_retval[0] = 0; 1347 goto retryproc; 1348 } 1349 num_aio_resv_start--; 1350 } 1351 splx(s); 1352 return error; 1353 } 1354 1355 /* 1356 * This routine queues an AIO request, checking for quotas. 1357 */ 1358 static int 1359 aio_aqueue(struct proc *p, struct aiocb *job, int type) 1360 { 1361 struct kaioinfo *ki; 1362 1363 if (p->p_aioinfo == NULL) 1364 aio_init_aioinfo(p); 1365 1366 if (num_queue_count >= max_queue_count) 1367 return EAGAIN; 1368 1369 ki = p->p_aioinfo; 1370 if (ki->kaio_queue_count >= ki->kaio_qallowed_count) 1371 return EAGAIN; 1372 1373 return _aio_aqueue(p, job, NULL, type); 1374 } 1375 1376 /* 1377 * Support the aio_return system call, as a side-effect, kernel resources are 1378 * released. 
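 *
 * For illustration only, a userland sketch (not kernel code) of the
 * expected calling sequence; "fd" and "buf" are the application's values:
 *
 *	struct aiocb acb;
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_offset = 0;
 *	if (aio_read(&acb) == -1)
 *		err(1, "aio_read");
 *	while (aio_error(&acb) == EINPROGRESS)
 *		;			(poll, or block in aio_suspend())
 *	n = aio_return(&acb);		(collects status, frees the kernel entry)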
1379 */ 1380 int 1381 aio_return(struct proc *p, struct aio_return_args *uap) 1382 { 1383 int s; 1384 int jobref; 1385 struct aiocblist *cb, *ncb; 1386 struct aiocb *ujob; 1387 struct kaioinfo *ki; 1388 1389 ki = p->p_aioinfo; 1390 if (ki == NULL) 1391 return EINVAL; 1392 1393 ujob = uap->aiocbp; 1394 1395 jobref = fuword(&ujob->_aiocb_private.kernelinfo); 1396 if (jobref == -1 || jobref == 0) 1397 return EINVAL; 1398 1399 s = splnet(); 1400 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb, 1401 plist)) { 1402 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == 1403 jobref) { 1404 splx(s); 1405 if (ujob == cb->uuaiocb) { 1406 p->p_retval[0] = 1407 cb->uaiocb._aiocb_private.status; 1408 } else 1409 p->p_retval[0] = EFAULT; 1410 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 1411 curproc->p_stats->p_ru.ru_oublock += 1412 cb->outputcharge; 1413 cb->outputcharge = 0; 1414 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 1415 curproc->p_stats->p_ru.ru_inblock += 1416 cb->inputcharge; 1417 cb->inputcharge = 0; 1418 } 1419 aio_free_entry(cb); 1420 return 0; 1421 } 1422 } 1423 splx(s); 1424 1425 s = splbio(); 1426 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) { 1427 ncb = TAILQ_NEXT(cb, plist); 1428 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) 1429 == jobref) { 1430 splx(s); 1431 if (ujob == cb->uuaiocb) { 1432 p->p_retval[0] = 1433 cb->uaiocb._aiocb_private.status; 1434 } else 1435 p->p_retval[0] = EFAULT; 1436 aio_free_entry(cb); 1437 return 0; 1438 } 1439 } 1440 splx(s); 1441 1442 return (EINVAL); 1443 } 1444 1445 /* 1446 * Allow a process to wakeup when any of the I/O requests are completed. 1447 */ 1448 int 1449 aio_suspend(struct proc *p, struct aio_suspend_args *uap) 1450 { 1451 struct timeval atv; 1452 struct timespec ts; 1453 struct aiocb *const *cbptr, *cbp; 1454 struct kaioinfo *ki; 1455 struct aiocblist *cb; 1456 int i; 1457 int njoblist; 1458 int error, s, timo; 1459 int *ijoblist; 1460 struct aiocb **ujoblist; 1461 1462 if (uap->nent >= AIO_LISTIO_MAX) 1463 return EINVAL; 1464 1465 timo = 0; 1466 if (uap->timeout) { 1467 /* Get timespec struct. 
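 * The user timespec below is converted to a tsleep() tick count: for
 * example (illustrative, assuming the default hz=100), a 500ms timeout
 * becomes roughly timo = tvtohz(&atv) = 50 ticks, while a tv_nsec outside
 * [0, 1000000000) is rejected with EINVAL.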
*/ 1468 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) 1469 return error; 1470 1471 if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000) 1472 return (EINVAL); 1473 1474 TIMESPEC_TO_TIMEVAL(&atv, &ts); 1475 if (itimerfix(&atv)) 1476 return (EINVAL); 1477 timo = tvtohz(&atv); 1478 } 1479 1480 ki = p->p_aioinfo; 1481 if (ki == NULL) 1482 return EAGAIN; 1483 1484 njoblist = 0; 1485 ijoblist = zalloc(aiol_zone); 1486 ujoblist = zalloc(aiol_zone); 1487 cbptr = uap->aiocbp; 1488 1489 for (i = 0; i < uap->nent; i++) { 1490 cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 1491 if (cbp == 0) 1492 continue; 1493 ujoblist[njoblist] = cbp; 1494 ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo); 1495 njoblist++; 1496 } 1497 1498 if (njoblist == 0) { 1499 zfree(aiol_zone, ijoblist); 1500 zfree(aiol_zone, ujoblist); 1501 return 0; 1502 } 1503 1504 error = 0; 1505 for (;;) { 1506 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = 1507 TAILQ_NEXT(cb, plist)) { 1508 for (i = 0; i < njoblist; i++) { 1509 if (((intptr_t) 1510 cb->uaiocb._aiocb_private.kernelinfo) == 1511 ijoblist[i]) { 1512 if (ujoblist[i] != cb->uuaiocb) 1513 error = EINVAL; 1514 zfree(aiol_zone, ijoblist); 1515 zfree(aiol_zone, ujoblist); 1516 return error; 1517 } 1518 } 1519 } 1520 1521 s = splbio(); 1522 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = 1523 TAILQ_NEXT(cb, plist)) { 1524 for (i = 0; i < njoblist; i++) { 1525 if (((intptr_t) 1526 cb->uaiocb._aiocb_private.kernelinfo) == 1527 ijoblist[i]) { 1528 splx(s); 1529 if (ujoblist[i] != cb->uuaiocb) 1530 error = EINVAL; 1531 zfree(aiol_zone, ijoblist); 1532 zfree(aiol_zone, ujoblist); 1533 return error; 1534 } 1535 } 1536 } 1537 1538 ki->kaio_flags |= KAIO_WAKEUP; 1539 error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo); 1540 splx(s); 1541 1542 if (error == EINTR) { 1543 zfree(aiol_zone, ijoblist); 1544 zfree(aiol_zone, ujoblist); 1545 return EINTR; 1546 } else if (error == EWOULDBLOCK) { 1547 zfree(aiol_zone, ijoblist); 1548 zfree(aiol_zone, ujoblist); 1549 return EAGAIN; 1550 } 1551 } 1552 1553 /* NOTREACHED */ 1554 return EINVAL; 1555 } 1556 1557 /* 1558 * aio_cancel at the kernel level is a NOOP right now. It might be possible to 1559 * support it partially in user mode, or in kernel mode later on. 1560 */ 1561 int 1562 aio_cancel(struct proc *p, struct aio_cancel_args *uap) 1563 { 1564 return ENOSYS; 1565 } 1566 1567 /* 1568 * aio_error is implemented in the kernel level for compatibility purposes only. 1569 * For a user mode async implementation, it would be best to do it in a userland 1570 * subroutine. 
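 *
 * For illustration only, a userland sketch: rather than spinning on
 * aio_error(), an application can block in aio_suspend() until the request
 * completes ("acb" is the application's control block):
 *
 *	const struct aiocb *list[1];
 *	list[0] = &acb;
 *	while (aio_error(&acb) == EINPROGRESS)
 *		(void)aio_suspend(list, 1, NULL);	(NULL = no timeout)
 *	n = aio_return(&acb);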
1571 */ 1572 int 1573 aio_error(struct proc *p, struct aio_error_args *uap) 1574 { 1575 int s; 1576 struct aiocblist *cb; 1577 struct kaioinfo *ki; 1578 int jobref; 1579 1580 ki = p->p_aioinfo; 1581 if (ki == NULL) 1582 return EINVAL; 1583 1584 jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo); 1585 if ((jobref == -1) || (jobref == 0)) 1586 return EINVAL; 1587 1588 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb, 1589 plist)) { 1590 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1591 jobref) { 1592 p->p_retval[0] = cb->uaiocb._aiocb_private.error; 1593 return 0; 1594 } 1595 } 1596 1597 s = splnet(); 1598 1599 for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb, 1600 plist)) { 1601 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1602 jobref) { 1603 p->p_retval[0] = EINPROGRESS; 1604 splx(s); 1605 return 0; 1606 } 1607 } 1608 splx(s); 1609 1610 s = splbio(); 1611 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb, 1612 plist)) { 1613 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1614 jobref) { 1615 p->p_retval[0] = cb->uaiocb._aiocb_private.error; 1616 splx(s); 1617 return 0; 1618 } 1619 } 1620 1621 for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb, 1622 plist)) { 1623 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == 1624 jobref) { 1625 p->p_retval[0] = EINPROGRESS; 1626 splx(s); 1627 return 0; 1628 } 1629 } 1630 splx(s); 1631 1632 #if (0) 1633 /* 1634 * Hack for lio. 1635 */ 1636 status = fuword(&uap->aiocbp->_aiocb_private.status); 1637 if (status == -1) 1638 return fuword(&uap->aiocbp->_aiocb_private.error); 1639 #endif 1640 return EINVAL; 1641 } 1642 1643 int 1644 aio_read(struct proc *p, struct aio_read_args *uap) 1645 { 1646 struct filedesc *fdp; 1647 struct file *fp; 1648 struct uio auio; 1649 struct iovec aiov; 1650 unsigned int fd; 1651 int cnt; 1652 struct aiocb iocb; 1653 int error, pmodes; 1654 1655 pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes); 1656 if ((pmodes & AIO_PMODE_SYNC) == 0) 1657 return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ); 1658 1659 /* Get control block. */ 1660 if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb)) 1661 != 0) 1662 return error; 1663 1664 /* Get the fd info for process. */ 1665 fdp = p->p_fd; 1666 1667 /* 1668 * Range check file descriptor. 1669 */ 1670 fd = iocb.aio_fildes; 1671 if (fd >= fdp->fd_nfiles) 1672 return EBADF; 1673 fp = fdp->fd_ofiles[fd]; 1674 if ((fp == NULL) || ((fp->f_flag & FREAD) == 0)) 1675 return EBADF; 1676 if (iocb.aio_offset == -1LL) 1677 return EINVAL; 1678 1679 auio.uio_resid = iocb.aio_nbytes; 1680 if (auio.uio_resid < 0) 1681 return (EINVAL); 1682 1683 /* 1684 * Process sync simply -- queue async request. 
1685 */ 1686 if ((iocb._aiocb_private.privatemodes & AIO_PMODE_SYNC) == 0) 1687 return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ); 1688 1689 aiov.iov_base = (void *)iocb.aio_buf; 1690 aiov.iov_len = iocb.aio_nbytes; 1691 1692 auio.uio_iov = &aiov; 1693 auio.uio_iovcnt = 1; 1694 auio.uio_offset = iocb.aio_offset; 1695 auio.uio_rw = UIO_READ; 1696 auio.uio_segflg = UIO_USERSPACE; 1697 auio.uio_procp = p; 1698 1699 cnt = iocb.aio_nbytes; 1700 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, p); 1701 if (error && (auio.uio_resid != cnt) && (error == ERESTART || error == 1702 EINTR || error == EWOULDBLOCK)) 1703 error = 0; 1704 cnt -= auio.uio_resid; 1705 p->p_retval[0] = cnt; 1706 return error; 1707 } 1708 1709 int 1710 aio_write(struct proc *p, struct aio_write_args *uap) 1711 { 1712 struct filedesc *fdp; 1713 struct file *fp; 1714 struct uio auio; 1715 struct iovec aiov; 1716 unsigned int fd; 1717 int cnt; 1718 struct aiocb iocb; 1719 int error; 1720 int pmodes; 1721 1722 /* 1723 * Process sync simply -- queue async request. 1724 */ 1725 pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes); 1726 if ((pmodes & AIO_PMODE_SYNC) == 0) 1727 return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_WRITE); 1728 1729 if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb)) 1730 != 0) 1731 return error; 1732 1733 /* Get the fd info for process. */ 1734 fdp = p->p_fd; 1735 1736 /* 1737 * Range check file descriptor. 1738 */ 1739 fd = iocb.aio_fildes; 1740 if (fd >= fdp->fd_nfiles) 1741 return EBADF; 1742 fp = fdp->fd_ofiles[fd]; 1743 if ((fp == NULL) || ((fp->f_flag & FWRITE) == 0)) 1744 return EBADF; 1745 if (iocb.aio_offset == -1LL) 1746 return EINVAL; 1747 1748 aiov.iov_base = (void *)iocb.aio_buf; 1749 aiov.iov_len = iocb.aio_nbytes; 1750 auio.uio_iov = &aiov; 1751 auio.uio_iovcnt = 1; 1752 auio.uio_offset = iocb.aio_offset; 1753 1754 auio.uio_resid = iocb.aio_nbytes; 1755 if (auio.uio_resid < 0) 1756 return (EINVAL); 1757 1758 auio.uio_rw = UIO_WRITE; 1759 auio.uio_segflg = UIO_USERSPACE; 1760 auio.uio_procp = p; 1761 1762 cnt = iocb.aio_nbytes; 1763 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, p); 1764 if (error) { 1765 if (auio.uio_resid != cnt) { 1766 if (error == ERESTART || error == EINTR || error == 1767 EWOULDBLOCK) 1768 error = 0; 1769 if (error == EPIPE) 1770 psignal(p, SIGPIPE); 1771 } 1772 } 1773 cnt -= auio.uio_resid; 1774 p->p_retval[0] = cnt; 1775 return error; 1776 } 1777 1778 int 1779 lio_listio(struct proc *p, struct lio_listio_args *uap) 1780 { 1781 int nent, nentqueued; 1782 struct aiocb *iocb, * const *cbptr; 1783 struct aiocblist *cb; 1784 struct kaioinfo *ki; 1785 struct aio_liojob *lj; 1786 int error, runningcode; 1787 int nerror; 1788 int i; 1789 int s; 1790 1791 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 1792 return EINVAL; 1793 1794 nent = uap->nent; 1795 if (nent > AIO_LISTIO_MAX) 1796 return EINVAL; 1797 1798 if (p->p_aioinfo == NULL) 1799 aio_init_aioinfo(p); 1800 1801 if ((nent + num_queue_count) > max_queue_count) 1802 return EAGAIN; 1803 1804 ki = p->p_aioinfo; 1805 if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count) 1806 return EAGAIN; 1807 1808 lj = zalloc(aiolio_zone); 1809 if (!lj) 1810 return EAGAIN; 1811 1812 lj->lioj_flags = 0; 1813 lj->lioj_buffer_count = 0; 1814 lj->lioj_buffer_finished_count = 0; 1815 lj->lioj_queue_count = 0; 1816 lj->lioj_queue_finished_count = 0; 1817 lj->lioj_ki = ki; 1818 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list); 1819 1820 /* 1821 * Setup signal. 
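 * For illustration only, a userland sketch of how an application asks for
 * this notification ("acb_list" and "nent" are the application's array and
 * count):
 *
 *	struct sigevent sev;
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGUSR1;
 *	if (lio_listio(LIO_NOWAIT, acb_list, nent, &sev) == -1)
 *		err(1, "lio_listio");
 *
 * SIGUSR1 is then delivered once every request in acb_list has completed.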
1822 */ 1823 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 1824 error = copyin(uap->sig, &lj->lioj_signal, 1825 sizeof(lj->lioj_signal)); 1826 if (error) 1827 return error; 1828 lj->lioj_flags |= LIOJ_SIGNAL; 1829 lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED; 1830 } else 1831 lj->lioj_flags &= ~LIOJ_SIGNAL; 1832 1833 /* 1834 * Get pointers to the list of I/O requests. 1835 */ 1836 nerror = 0; 1837 nentqueued = 0; 1838 cbptr = uap->acb_list; 1839 for (i = 0; i < uap->nent; i++) { 1840 iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 1841 if (((intptr_t)iocb != -1) && ((intptr_t)iocb != NULL)) { 1842 error = _aio_aqueue(p, iocb, lj, 0); 1843 if (error == 0) 1844 nentqueued++; 1845 else 1846 nerror++; 1847 } 1848 } 1849 1850 /* 1851 * If we haven't queued any, then just return error. 1852 */ 1853 if (nentqueued == 0) 1854 return 0; 1855 1856 /* 1857 * Calculate the appropriate error return. 1858 */ 1859 runningcode = 0; 1860 if (nerror) 1861 runningcode = EIO; 1862 1863 if (uap->mode == LIO_WAIT) { 1864 int command, found, jobref; 1865 1866 for (;;) { 1867 found = 0; 1868 for (i = 0; i < uap->nent; i++) { 1869 /* 1870 * Fetch address of the control buf pointer in 1871 * user space. 1872 */ 1873 iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]); 1874 if (((intptr_t)iocb == -1) || ((intptr_t)iocb 1875 == 0)) 1876 continue; 1877 1878 /* 1879 * Fetch the associated command from user space. 1880 */ 1881 command = fuword(&iocb->aio_lio_opcode); 1882 if (command == LIO_NOP) { 1883 found++; 1884 continue; 1885 } 1886 1887 jobref = fuword(&iocb->_aiocb_private.kernelinfo); 1888 1889 for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; 1890 cb = TAILQ_NEXT(cb, plist)) { 1891 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) 1892 == jobref) { 1893 if (cb->uaiocb.aio_lio_opcode 1894 == LIO_WRITE) { 1895 curproc->p_stats->p_ru.ru_oublock 1896 += 1897 cb->outputcharge; 1898 cb->outputcharge = 0; 1899 } else if (cb->uaiocb.aio_lio_opcode 1900 == LIO_READ) { 1901 curproc->p_stats->p_ru.ru_inblock 1902 += cb->inputcharge; 1903 cb->inputcharge = 0; 1904 } 1905 found++; 1906 break; 1907 } 1908 } 1909 1910 s = splbio(); 1911 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; 1912 cb = TAILQ_NEXT(cb, plist)) { 1913 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) 1914 == jobref) { 1915 found++; 1916 break; 1917 } 1918 } 1919 splx(s); 1920 } 1921 1922 /* 1923 * If all I/Os have been disposed of, then we can 1924 * return. 1925 */ 1926 if (found == nentqueued) 1927 return runningcode; 1928 1929 ki->kaio_flags |= KAIO_WAKEUP; 1930 error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0); 1931 1932 if (error == EINTR) 1933 return EINTR; 1934 else if (error == EWOULDBLOCK) 1935 return EAGAIN; 1936 } 1937 } 1938 1939 return runningcode; 1940 } 1941 1942 /* 1943 * This is a wierd hack so that we can post a signal. It is safe to do so from 1944 * a timeout routine, but *not* from an interrupt routine. 
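 *
 * Concretely, aio_physwakeup() below runs from biodone() at interrupt time,
 * so it defers the psignal() to softclock context with:
 *
 *	timeout(process_signal, aiocbe, 0);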
1945 */ 1946 static void 1947 process_signal(void *aioj) 1948 { 1949 struct aiocblist *aiocbe = aioj; 1950 struct aio_liojob *lj = aiocbe->lio; 1951 struct aiocb *cb = &aiocbe->uaiocb; 1952 1953 if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) && 1954 (lj->lioj_queue_count == lj->lioj_queue_finished_count)) { 1955 psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo); 1956 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 1957 } 1958 1959 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) 1960 psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo); 1961 } 1962 1963 /* 1964 * Interrupt handler for physio, performs the necessary process wakeups, and 1965 * signals. 1966 */ 1967 static void 1968 aio_physwakeup(struct buf *bp) 1969 { 1970 struct aiocblist *aiocbe; 1971 struct proc *p; 1972 struct kaioinfo *ki; 1973 struct aio_liojob *lj; 1974 int s; 1975 s = splbio(); 1976 1977 wakeup((caddr_t)bp); 1978 bp->b_flags &= ~B_CALL; 1979 bp->b_flags |= B_DONE; 1980 1981 aiocbe = (struct aiocblist *)bp->b_spc; 1982 if (aiocbe) { 1983 p = bp->b_caller1; 1984 1985 aiocbe->jobstate = JOBST_JOBBFINISHED; 1986 aiocbe->uaiocb._aiocb_private.status -= bp->b_resid; 1987 aiocbe->uaiocb._aiocb_private.error = 0; 1988 aiocbe->jobflags |= AIOCBLIST_DONE; 1989 1990 if (bp->b_flags & B_ERROR) 1991 aiocbe->uaiocb._aiocb_private.error = bp->b_error; 1992 1993 lj = aiocbe->lio; 1994 if (lj) { 1995 lj->lioj_buffer_finished_count++; 1996 1997 /* 1998 * wakeup/signal if all of the interrupt jobs are done. 1999 */ 2000 if (lj->lioj_buffer_finished_count == 2001 lj->lioj_buffer_count) { 2002 /* 2003 * Post a signal if it is called for. 2004 */ 2005 if ((lj->lioj_flags & 2006 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == 2007 LIOJ_SIGNAL) { 2008 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 2009 timeout(process_signal, aiocbe, 0); 2010 } 2011 } 2012 } 2013 2014 ki = p->p_aioinfo; 2015 if (ki) { 2016 ki->kaio_buffer_finished_count++; 2017 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list); 2018 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist); 2019 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist); 2020 2021 /* Do the wakeup. */ 2022 if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) { 2023 ki->kaio_flags &= ~KAIO_WAKEUP; 2024 wakeup(p); 2025 } 2026 } 2027 2028 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) 2029 timeout(process_signal, aiocbe, 0); 2030 } 2031 splx(s); 2032 } 2033 2034 int 2035 aio_waitcomplete(struct proc *p, struct aio_waitcomplete_args *uap) 2036 { 2037 struct timeval atv; 2038 struct timespec ts; 2039 struct aiocb **cbptr; 2040 struct kaioinfo *ki; 2041 struct aiocblist *cb = NULL; 2042 int error, s, timo; 2043 2044 timo = 0; 2045 if (uap->timeout) { 2046 /* Get timespec struct. 
*/ 2047 error = copyin((caddr_t)uap->timeout, (caddr_t)&ts, 2048 sizeof(ts)); 2049 if (error) 2050 return error; 2051 2052 if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000)) 2053 return (EINVAL); 2054 2055 TIMESPEC_TO_TIMEVAL(&atv, &ts); 2056 if (itimerfix(&atv)) 2057 return (EINVAL); 2058 timo = tvtohz(&atv); 2059 } 2060 2061 ki = p->p_aioinfo; 2062 if (ki == NULL) 2063 return EAGAIN; 2064 2065 cbptr = uap->aiocbp; 2066 2067 for (;;) { 2068 if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) { 2069 suword(uap->aiocbp, (int)cb->uuaiocb); 2070 p->p_retval[0] = cb->uaiocb._aiocb_private.status; 2071 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 2072 curproc->p_stats->p_ru.ru_oublock += 2073 cb->outputcharge; 2074 cb->outputcharge = 0; 2075 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 2076 curproc->p_stats->p_ru.ru_inblock += 2077 cb->inputcharge; 2078 cb->inputcharge = 0; 2079 } 2080 aio_free_entry(cb); 2081 return 0; 2082 } 2083 2084 s = splbio(); 2085 if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) { 2086 splx(s); 2087 suword(uap->aiocbp, (int)cb->uuaiocb); 2088 p->p_retval[0] = cb->uaiocb._aiocb_private.status; 2089 aio_free_entry(cb); 2090 return 0; 2091 } 2092 splx(s); 2093 2094 ki->kaio_flags |= KAIO_WAKEUP; 2095 error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo); 2096 2097 if (error < 0) 2098 return error; 2099 else if (error == EINTR) 2100 return EINTR; 2101 else if (error == EWOULDBLOCK) 2102 return EAGAIN; 2103 } 2104 } 2105