/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD$
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/aio.h>

#include <machine/limits.h>
#include "opt_vfs_aio.h"

static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQPROC		0x1
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

static int max_aio_procs = MAX_AIO_PROCS;
static int num_aio_procs = 0;
static int target_aio_procs = TARGET_AIO_PROCS;
static int max_queue_count = MAX_AIO_QUEUE;
static int num_queue_count = 0;
static int num_buf_aio = 0;
static int num_aio_resv_start = 0;
static int aiod_timeout;
static int aiod_lifetime;

static int max_aio_per_proc = MAX_AIO_PER_PROC;
static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
static int max_buf_aio = MAX_BUF_AIO;

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc,
	CTLFLAG_RW, &max_aio_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc,
	CTLFLAG_RW, &max_aio_queue_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
	CTLFLAG_RD, &num_queue_count, 0, "");
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
	CTLFLAG_RW, &max_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
	CTLFLAG_RW, &target_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
	CTLFLAG_RW, &max_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
	CTLFLAG_RD, &num_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
	CTLFLAG_RW, &aiod_lifetime, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
	CTLFLAG_RW, &aiod_timeout, 0, "");

/*
 * AIO process info
 */
#define AIOP_FREE	0x1			/* proc on free queue */
#define AIOP_SCHED	0x2			/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
	TAILQ_HEAD (,aiocblist) jobtorun;	/* suggested job to run */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;		/* signal on all I/O done */
	TAILQ_ENTRY (aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD (,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD (,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;		/* Pool of free jobs */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct proc *p, struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static int	aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

static vm_zone_t kaio_zone = 0, aiop_zone = 0, aiocb_zone = 0, aiol_zone = 0;
static vm_zone_t aiolio_zone = 0;

/*
 * Startup initialization
 */
void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", AIO_LISTIO_MAX * sizeof (struct
	    aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}

/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aioproclist *aiop;
	struct aio_liojob *lj;
	struct proc *p;
	int error;
	int s;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	if (aiocbe->jobstate == JOBST_JOBRUNNING) {
		if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
			return 0;
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, PRIBIO|PCATCH, "jobwai", 0);
	}
	aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;

	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(p, aiocbe, 1)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
	} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
		aiop = aiocbe->jobaioproc;
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL)
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
	else if (aiocbe->jobstate == JOBST_JOBFINISHED)
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	aiocbe->jobstate = JOBST_NULL;
	return 0;
}

/*
 * Run down the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
	int s;
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	fdp = p->p_fd;

	s = splnet();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = fdp->fd_ofiles[aiocbe->uaiocb.aio_fildes];

		/*
		 * Under some circumstances, the aio_fildes and the file
		 * structure don't match.  This would leave aiocbe's in the
		 * TAILQ associated with the socket and cause a panic later.
		 *
		 * Detect and fix.
		 */
		if ((fp == NULL) || (fp != aiocbe->fd_file))
			fp = aiocbe->fd_file;
		if (fp) {
			so = (struct socket *)fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
			if (TAILQ_EMPTY(&so->so_aiojobq)) {
				so->so_snd.sb_flags &= ~SB_AIO;
				so->so_rcv.sb_flags &= ~SB_AIO;
			}
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}
	splx(s);

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

/*
 * Note the use of lots of splbio here, trying to avoid splbio for long chains
 * of I/O.  Probably unnecessary.
 */
restart3:
	s = splbio();
	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, PRIBIO, "aioprn", 0);
		splx(s);
		goto restart3;
	}
	splx(s);

restart4:
	s = splbio();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			splx(s);
			goto restart4;
		}
	}
	splx(s);

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
		    0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
#ifdef DIAGNOSTIC
			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
#endif
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	int s;
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	aiocbe = TAILQ_FIRST(&aiop->jobtorun);
	if (aiocbe) {
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
		return aiocbe;
	}

	s = splnet();
	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
	    TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			splx(s);
			return aiocbe;
		}
	}
	splx(s);

	return NULL;
}

/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 */
void
aio_process(struct aiocblist *aiocbe)
{
	struct filedesc *fdp;
	struct proc *userp, *mycp;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	int error;
	off_t offset;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	userp = aiocbe->userproc;
	cb = &aiocbe->uaiocb;

	mycp = curproc;

	fdp = mycp->p_fd;
	fd = cb->aio_fildes;
	fp = fdp->fd_ofiles[fd];

	if ((fp == NULL) || (fp != aiocbe->fd_file)) {
		cb->_aiocb_private.error = EBADF;
		cb->_aiocb_private.status = -1;
		return;
	}

	aiov.iov_base = (void *)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = mycp;

	inblock_st = mycp->p_stats->p_ru.ru_inblock;
	oublock_st = mycp->p_stats->p_ru.ru_oublock;
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, mycp);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, mycp);
	}
	inblock_end = mycp->p_stats->p_ru.ru_inblock;
	oublock_end = mycp->p_stats->p_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
			psignal(userp, SIGPIPE);
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;

	return;
}

/*
 * The AIO daemon; most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 */
static void
aio_daemon(void *uproc)
{
	int s;
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct proc *curcp, *mycp, *userp;
	struct vmspace *myvm, *tmpvm;

	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = curproc;
	myvm = mycp->p_vmspace;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;
	TAILQ_INIT(&aiop->jobtorun);

	s = splnet();

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	splx(s);

	/* Make up a name for the daemon. */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root".
	 */
	fdfree(mycp);
	mycp->p_fd = NULL;
	mycp->p_ucred = crcopy(mycp->p_ucred);
	mycp->p_ucred->cr_uid = 0;
	mycp->p_ucred->cr_ngroups = 1;
	mycp->p_ucred->cr_groups[0] = 1;

	/* The daemon resides in its own pgrp. */
	enterpgrp(mycp, mycp->p_pid, 1);

	/* Mark special process type. */
	mycp->p_flag |= P_SYSTEM | P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);

	for (;;) {
		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			s = splnet();
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			splx(s);
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (userp != curcp) {
				/*
				 * Save the current address space that we are
				 * connected to.
				 */
				tmpvm = mycp->p_vmspace;

				/*
				 * Point to the new user address space, and
				 * refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;

				/* Activate the new mapping. */
				pmap_activate(mycp);

				/*
				 * If the old address space wasn't the daemon's
				 * own address space, then we need to remove the
				 * daemon's reference from the other process
				 * that it was acting on behalf of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}

				/*
				 * Disassociate from the previous client's file
				 * descriptors, and associate with the new
				 * client's descriptors.  Note that the daemon
				 * doesn't need to worry about its original
				 * descriptors, because they were originally
				 * freed.
				 */
				if (mycp->p_fd)
					fdfree(mycp);
				mycp->p_fd = fdshare(userp);
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aiocbe->jobaioproc = aiop;
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			s = splbio();
			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					psignal(userp,
					    lj->lioj_signal.sigev_signo);
					lj->lioj_flags |=
					    LIOJ_SIGNAL_POSTED;
				}
			}
			splx(s);

			aiocbe->jobstate = JOBST_JOBFINISHED;

			/*
			 * If the I/O request should be automatically run down,
			 * do the needed cleanup.  Otherwise, place the queue
			 * entry for the just finished I/O request into the done
			 * queue for the associated client.
			 */
			s = splnet();
			if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
				aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
				TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
			} else {
				TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe,
				    plist);
			}
			splx(s);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				psignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curcp != mycp) {
			/* Get the user address space to disconnect from. */
			tmpvm = mycp->p_vmspace;

			/* Get original address space for daemon. */
			mycp->p_vmspace = myvm;

			/* Activate the daemon's address space. */
			pmap_activate(mycp);
#ifdef DIAGNOSTIC
			if (tmpvm == myvm) {
				printf("AIOD: vmspace problem -- %d\n",
				    mycp->p_pid);
			}
#endif
			/* Remove our vmspace reference. */
			vmspace_free(tmpvm);

			/*
			 * Disassociate from the user process's file
			 * descriptors.
			 */
			if (mycp->p_fd)
				fdfree(mycp);
			mycp->p_fd = NULL;
			curcp = mycp;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		s = splnet();
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;
		splx(s);

		/*
		 * If the daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
		    PRIBIO, "aiordy", aiod_lifetime)) {
			s = splnet();
			if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
			    (TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					splx(s);
					zfree(aiop_zone, aiop);
					num_aio_procs--;
#ifdef DIAGNOSTIC
					if (mycp->p_vmspace->vm_refcnt <= 1) {
						printf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
					}
#endif
					exit1(mycp, 0);
				}
			}
			splx(s);
		}
	}
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc()
{
	int error;
	struct proc *p, *np;

	p = &proc0;
	error = fork1(p, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	cpu_set_fork_handler(np, aio_daemon, curproc);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(np, PZERO, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}

/*
 * Try the high-performance physio method for eligible VCHR devices.  This
 * routine doesn't require the use of any additional threads, and has minimal
 * overhead.
 */
int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	int error;
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct filedesc *fdp;
	struct aio_liojob *lj;
	int fd;
	int s;
	int cnt;

	cb = &aiocbe->uaiocb;
	fdp = p->p_fd;
	fd = cb->aio_fildes;
	fp = fdp->fd_ofiles[fd];

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = (struct vnode *)fp->f_data;

	/*
	 * If it's not a disk, we don't want to return a positive error.
	 * That would keep the aio code from falling through to try the
	 * thread-based method when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if ((cb->aio_nbytes > MAXPHYS) && (num_buf_aio >= max_buf_aio))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	cnt = cb->aio_nbytes;
	if (cnt > MAXPHYS)
		return (-1);

	/*
	 * Physical I/O is charged directly to the process, so we don't have to
	 * fake it.
	 */
	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = (struct buf *)getpbuf(NULL);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_caller1 = p;
	bp->b_dev = vp->v_rdev;
	error = bp->b_error = 0;

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_flags = B_PHYS | B_CALL;
	bp->b_iodone = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)cb->aio_buf;
	bp->b_blkno = btodb(cb->aio_offset);

	if (cb->aio_lio_opcode == LIO_WRITE) {
		bp->b_flags |= B_WRITE;
		if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) {
			error = EFAULT;
			goto doerror;
		}
	} else {
		bp->b_flags |= B_READ;
		if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) {
			error = EFAULT;
			goto doerror;
		}
	}

	/* Bring buffer into kernel space. */
	vmapbuf(bp);

	s = splbio();
	aiocbe->bp = bp;
	bp->b_spc = (void *)aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;
	bp->b_error = 0;

	splx(s);

	/* Perform transfer. */
	BUF_STRATEGY(bp, 0);

	s = splbio();

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
		}
	}
	splx(s);
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}

/*
 * This waits/tests physio completion.
 */
int
aio_fphysio(struct proc *p, struct aiocblist *iocb, int flgwait)
{
	int s;
	struct buf *bp;
	int error;

	bp = iocb->bp;

	s = splbio();
	if (flgwait == 0) {
		if ((bp->b_flags & B_DONE) == 0) {
			splx(s);
			return EINPROGRESS;
		}
	}

	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep((caddr_t)bp, PRIBIO, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				splx(s);
				return EINPROGRESS;
			} else
				break;
		}
	}

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = 0;

	error = 0;

	/* Check for an error. */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}

/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct sockbuf *sb)
{
	struct aiocblist *cb, *cbn;
	struct proc *p;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (sb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.sb_flags &= ~SB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.sb_flags &= ~SB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			p = cb->userproc;
			ki = p->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			wakecount++;
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
}

/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
 */
static int
_aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct filedesc *fdp;
	struct file *fp;
	unsigned int fd;
	struct socket *so;
	int s;
	int error;
	int opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc(aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin((caddr_t)job, (caddr_t)&aiocbe->uaiocb, sizeof
	    aiocbe->uaiocb);
	if (error) {
		suword(&job->_aiocb_private.error, error);

		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
	    0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EINVAL);
		return EINVAL;
	}

	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EINVAL);
		return error;
	}

	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}

	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return EINVAL;
	}

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when sbnotify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		s = splnet();
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.sb_flags |= SB_AIO;
			else
				so->so_snd.sb_flags |= SB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			splx(s);
			return 0;
		}
		splx(s);
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		return 0;
	else if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		return error;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	s = splnet();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	splx(s);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota, then
	 * start one.  Otherwise, depend on the subsequent I/O completions to
	 * pick up this job.  If we don't successfully create the new process
	 * (thread) due to resource issues, we return an error for now (EAGAIN),
	 * which is likely not the correct thing to do.
	 */
retryproc:
	s = splnet();
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			p->p_retval[0] = 0;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	splx(s);
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct proc *p, struct aiocb *job, int type)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(p, job, NULL, type);
}

/*
 * Support the aio_return system call; as a side effect, kernel resources are
 * released.
 */
int
aio_return(struct proc *p, struct aio_return_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	int s;
	int jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	s = splnet();
	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			splx(s);
			if (ujob == cb->uuaiocb) {
				p->p_retval[0] =
				    cb->uaiocb._aiocb_private.status;
			} else
				p->p_retval[0] = EFAULT;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				curproc->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				curproc->p_stats->p_ru.ru_inblock +=
				    cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}
	splx(s);

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
		    == jobref) {
			splx(s);
			if (ujob == cb->uuaiocb) {
				p->p_retval[0] =
				    cb->uaiocb._aiocb_private.status;
			} else
				p->p_retval[0] = EFAULT;
			aio_free_entry(cb);
			return 0;
		}
	}
	splx(s);

	return (EINVAL);
#endif /* VFS_AIO */
}

/*
 * Allow a process to wake up when any of the I/O requests are completed.
 */
int
aio_suspend(struct proc *p, struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, s, timo;
	int *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent >= AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return error;

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	for (;;) {
		for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		s = splbio();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					splx(s);
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
		splx(s);

		if (error == ERESTART || error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}

	/* NOTREACHED */
	return EINVAL;
#endif /* VFS_AIO */
}

/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
aio_cancel(struct proc *p, struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct proc *po;
	int s, error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	fdp = p->p_fd;

	fp = fdp->fd_ofiles[uap->fd];

	if (fp == NULL) {
		return EBADF;
	}

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if (vn_isdisk(vp, &error)) {
			p->p_retval[0] = AIO_NOTCANCELED;
			return 0;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		s = splnet();

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb)) {
				po = cbe->userproc;
				ki = po->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(po);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc,
					    cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}

		splx(s);

		if ((cancelled) && (uap->aiocbp)) {
			p->p_retval[0] = AIO_CANCELED;
			return 0;
		}

	}

	ki = p->p_aioinfo;

	s = splnet();

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {

			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc,
					    cbe->uaiocb.aio_sigevent.sigev_signo);
			} else {
				notcancelled++;
			}
		}
	}

	splx(s);

	if (notcancelled) {
		p->p_retval[0] = AIO_NOTCANCELED;
		return 0;
	}

	if (cancelled) {
		p->p_retval[0] = AIO_CANCELED;
		return 0;
	}

	p->p_retval[0] = AIO_ALLDONE;

	return 0;
#endif /* VFS_AIO */
}

/*
 * aio_error is implemented at the kernel level for compatibility purposes
 * only.  For a user mode async implementation, it would be best to do it in
 * a userland subroutine.
 */
int
aio_error(struct proc *p, struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	int s;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	int jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	s = splnet();

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}
	splx(s);

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			splx(s);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}
	splx(s);

#if (0)
	/*
	 * Hack for lio.
	 */
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
#endif
	return EINVAL;
#endif /* VFS_AIO */
}

int
aio_read(struct proc *p, struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error, pmodes;

	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0)
		return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ);

	/* Get control block. */
	if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb))
	    != 0)
		return error;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FREAD) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	/*
	 * Process sync simply -- queue async request.
	 */
	if ((iocb._aiocb_private.privatemodes & AIO_PMODE_SYNC) == 0)
		return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ);

	aiov.iov_base = (void *)iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, p);
	if (error && (auio.uio_resid != cnt) && (error == ERESTART || error ==
	    EINTR || error == EWOULDBLOCK))
		error = 0;
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
#endif /* VFS_AIO */
}

int
aio_write(struct proc *p, struct aio_write_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error;
	int pmodes;

	/*
	 * Process sync simply -- queue async request.
	 */
	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0)
		return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_WRITE);

	if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb))
	    != 0)
		return error;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FWRITE) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	aiov.iov_base = (void *)iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, p);
	if (error) {
		if (auio.uio_resid != cnt) {
			if (error == ERESTART || error == EINTR || error ==
			    EWOULDBLOCK)
				error = 0;
			if (error == EPIPE)
				psignal(p, SIGPIPE);
		}
	}
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
#endif /* VFS_AIO */
}

int
lio_listio(struct proc *p, struct lio_listio_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;
	int s;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = zalloc(aiolio_zone);
	if (!lj)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;
	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error)
			return error;
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else
		lj->lioj_flags &= ~LIOJ_SIGNAL;

	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != NULL)) {
			error = _aio_aqueue(p, iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return error.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer in
				 * user space.
				 */
				iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
				if (((intptr_t)iocb == -1) || ((intptr_t)iocb
				    == 0))
					continue;

				/*
				 * Fetch the associated command from user space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref = fuword(&iocb->_aiocb_private.kernelinfo);

				for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb;
				    cb = TAILQ_NEXT(cb, plist)) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							curproc->p_stats->p_ru.ru_oublock
							    += cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							curproc->p_stats->p_ru.ru_inblock
							    += cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				s = splbio();
				for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
				    cb = TAILQ_NEXT(cb, plist)) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
				splx(s);
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued)
				return runningcode;

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);

			if (error == EINTR)
				return EINTR;
			else if (error == EWOULDBLOCK)
				return EAGAIN;
		}
	}

	return runningcode;
#endif /* VFS_AIO */
}

/*
 * This is a weird hack so that we can post a signal.  It is safe to do so from
 * a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
		psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct buf *bp)
{
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int s;

	s = splbio();

	wakeup((caddr_t)bp);
	bp->b_flags &= ~B_CALL;
	bp->b_flags |= B_DONE;

	aiocbe = (struct aiocblist *)bp->b_spc;
	if (aiocbe) {
		p = bp->b_caller1;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					timeout(process_signal, aiocbe, 0);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
			timeout(process_signal, aiocbe, 0);
	}
	splx(s);
}

int
aio_waitcomplete(struct proc *p, struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct timeval atv;
	struct timespec ts;
	struct aiocb **cbptr;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, s, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin((caddr_t)uap->timeout, (caddr_t)&ts,
		    sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	cbptr = uap->aiocbp;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (int)cb->uuaiocb);
			p->p_retval[0] = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				curproc->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				curproc->p_stats->p_ru.ru_inblock +=
				    cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return cb->uaiocb._aiocb_private.error;
		}

		s = splbio();
		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0) {
			splx(s);
			suword(uap->aiocbp, (int)cb->uuaiocb);
			p->p_retval[0] = cb->uaiocb._aiocb_private.status;
			aio_free_entry(cb);
			return cb->uaiocb._aiocb_private.error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
		splx(s);

		if (error == ERESTART)
			return EINTR;
		else if (error < 0)
			return error;
		else if (error == EINTR)
			return EINTR;
		else if (error == EWOULDBLOCK)
			return EAGAIN;
	}
#endif /* VFS_AIO */
}
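
/*
 * Illustrative userland usage (kept inside a comment, not part of the kernel
 * build): a minimal sketch of how the facility implemented above is typically
 * driven from a user program through the aio_read(), aio_error() and
 * aio_return() system calls.  The file path, buffer size, and polling delay
 * are arbitrary assumptions chosen only for the example; a real application
 * might instead block in aio_suspend() or aio_waitcomplete().
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static char buf[4096];
 *
 *	ssize_t
 *	example(void)
 *	{
 *		struct aiocb cb;
 *		int fd, error;
 *
 *		fd = open("/tmp/data", O_RDONLY);	// assumed path
 *		if (fd == -1)
 *			return (-1);
 *
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = fd;			// descriptor to read
 *		cb.aio_buf = buf;			// destination buffer
 *		cb.aio_nbytes = sizeof(buf);		// request size
 *		cb.aio_offset = 0;			// file offset
 *
 *		// Queue the request; in-kernel this lands in aio_aqueue().
 *		if (aio_read(&cb) == -1)
 *			return (-1);
 *
 *		// Poll for completion; aio_error() returns EINPROGRESS
 *		// while the request is still queued or running.
 *		while ((error = aio_error(&cb)) == EINPROGRESS)
 *			usleep(1000);
 *
 *		// Collect the result and release the kernel job entry.
 *		return (error == 0 ? aio_return(&cb) : -1);
 *	}
 */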