/*
 * Copyright (c) 1997 John S. Dyson. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER: This code isn't warranted to do anything useful. Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author. This software is distributed AS-IS.
 *
 * $Id: vfs_aio.c,v 1.53 1999/06/30 15:33:36 peter Exp $
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/aio.h>
#include <sys/shm.h>

#include <machine/cpu.h>
#include <machine/limits.h>

static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQPROC		0x1
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	0
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

static int max_aio_procs = MAX_AIO_PROCS;
static int num_aio_procs = 0;
static int target_aio_procs = TARGET_AIO_PROCS;
static int max_queue_count = MAX_AIO_QUEUE;
static int num_queue_count = 0;
static int num_buf_aio = 0;
static int num_aio_resv_start = 0;
static int aiod_timeout;
static int aiod_lifetime;

static int max_aio_per_proc = MAX_AIO_PER_PROC,
	max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;

static int max_buf_aio = MAX_BUF_AIO;

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc,
	CTLFLAG_RW, &max_aio_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc,
	CTLFLAG_RW, &max_aio_queue_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
	CTLFLAG_RD, &num_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
	CTLFLAG_RW, &max_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
	CTLFLAG_RW, &target_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
	CTLFLAG_RW, &max_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
	CTLFLAG_RD, &num_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
	CTLFLAG_RW, &aiod_lifetime, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
	CTLFLAG_RW, &aiod_timeout, 0, "");
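
/*
 * Illustrative userland sketch (not kernel code): a minimal use of the
 * POSIX 1003.1B interface that this file implements. The file name and
 * buffer size below are made-up example values; only standard <aio.h>
 * calls are shown.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *	const struct aiocb *list[1];
 *	static char buf[512];
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = open("/tmp/example", O_RDONLY);
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *
 *	aio_read(&cb);			queue the request
 *	list[0] = &cb;
 *	aio_suspend(list, 1, NULL);	wait for it to complete
 *	aio_return(&cb);		collect the count, release kernel state
 */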

/*
 * Job queue item
 */

#define AIOCBLIST_CANCELLED	0x1
#define AIOCBLIST_RUNDOWN	0x4
#define AIOCBLIST_ASYNCFREE	0x8
#define AIOCBLIST_DONE		0x10

struct aiocblist {
	TAILQ_ENTRY (aiocblist) list;		/* List of jobs */
	TAILQ_ENTRY (aiocblist) plist;		/* List of jobs for proc */
	int jobflags;
	int jobstate;
	int inputcharge, outputcharge;
	struct buf *bp;				/* buffer pointer */
	struct proc *userproc;			/* User process */
	struct aioproclist *jobaioproc;		/* AIO process descriptor */
	struct aio_liojob *lio;			/* optional lio job */
	struct aiocb *uuaiocb;			/* pointer in userspace of aiocb */
	struct aiocb uaiocb;			/* Kernel I/O control block */
};


/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
	TAILQ_HEAD (,aiocblist) jobtorun;	/* suggested job to run */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int lioj_flags;
	int lioj_buffer_count;
	int lioj_buffer_finished_count;
	int lioj_queue_count;
	int lioj_queue_finished_count;
	struct sigevent lioj_signal;		/* signal on all I/O done */
	TAILQ_ENTRY (aio_liojob) lioj_list;
	struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int kaio_flags;				/* per process kaio flags */
	int kaio_maxactive_count;		/* maximum number of AIOs */
	int kaio_active_count;			/* number of currently used AIOs */
	int kaio_qallowed_count;		/* maximum size of AIO queue */
	int kaio_queue_count;			/* size of AIO queue */
	int kaio_ballowed_count;		/* maximum number of buffers */
	int kaio_queue_finished_count;		/* number of daemon jobs finished */
	int kaio_buffer_count;			/* number of physio buffers */
	int kaio_buffer_finished_count;		/* count of I/O done */
	struct proc *kaio_p;			/* process that uses this kaio block */
	TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD (,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufdone;	/* buffer done queue for process */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */


static TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;	/* Pool of free jobs */

static void aio_init_aioinfo(struct proc *p);
static void aio_onceonly(void *);
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void);
static int aio_aqueue(struct proc *p, struct aiocb *job, int type);
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

static vm_zone_t kaio_zone = 0, aiop_zone = 0,
	aiocb_zone = 0, aiol_zone = 0, aiolio_zone = 0;

/*
 * Startup initialization
 */
void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO",
	    AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.
 * The aioinfo limits are set per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;
	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
	}
}

/*
 * Free a job entry. Wait for completion if it is currently
 * active, but don't delay forever. If we delay, we return
 * a flag that says that we have to restart the queue scan.
 */
int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aioproclist *aiop;
	struct aio_liojob *lj;
	struct proc *p;
	int error;
	int s;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	if (aiocbe->jobstate == JOBST_JOBRUNNING) {
		if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
			return 0;
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, PRIBIO|PCATCH, "jobwai", 0);
	}
	aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;

	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;

	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;

	}

	if ((ki->kaio_flags & KAIO_WAKEUP) ||
	    ((ki->kaio_flags & KAIO_RUNDOWN) &&
	    ((ki->kaio_buffer_count == 0) &&
	    (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(p, aiocbe, 1)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
	} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
		aiop = aiocbe->jobaioproc;
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	aiocbe->jobstate = JOBST_NULL;
	return 0;
}

/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
	int s;
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) ||
	    (ki->kaio_buffer_count > ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
			break;
	}

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone);
	    aiocbe;
	    aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue);
	    aiocbe;
	    aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	/*
	 * Note the use of lots of splbio here, trying to avoid
	 * splbio for long chains of I/O. Probably unnecessary.
	 */

restart3:
	s = splbio();
	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, PRIBIO, "aioprn", 0);
		splx(s);
		goto restart3;
	}
	splx(s);

restart4:
	s = splbio();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone);
	    aiocbe;
	    aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			splx(s);
			goto restart4;
		}
	}
	splx(s);

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist);
	    lj;
	    lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
#if defined(DIAGNOSTIC)
			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, QF:%d\n",
			    lj->lioj_buffer_count, lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count, lj->lioj_queue_finished_count);
#endif
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon)
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{

	struct aiocblist *aiocbe;

	aiocbe = TAILQ_FIRST(&aiop->jobtorun);
	if (aiocbe) {
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
		return aiocbe;
	}

	for (aiocbe = TAILQ_FIRST(&aio_jobs);
	    aiocbe;
	    aiocbe = TAILQ_NEXT(aiocbe, list)) {
		struct kaioinfo *ki;
		struct proc *userp;

		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			return aiocbe;
		}
	}

	return NULL;
}

/*
 * The AIO processing activity. This is the code that does the
 * I/O request for the non-physio version of the operations. The
 * normal vn operations are used, and this code should work in
 * all instances for every type of file, including pipes, sockets,
 * fifos, and regular files.
 */
void
aio_process(struct aiocblist *aiocbe)
{
	struct filedesc *fdp;
	struct proc *userp, *mycp;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	int error;
	off_t offset;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	userp = aiocbe->userproc;
	cb = &aiocbe->uaiocb;

	mycp = curproc;

	fdp = mycp->p_fd;
	fd = cb->aio_fildes;
	fp = fdp->fd_ofiles[fd];

	aiov.iov_base = (void *) cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = mycp;

	inblock_st = mycp->p_stats->p_ru.ru_inblock;
	oublock_st = mycp->p_stats->p_ru.ru_oublock;
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred, FOF_OFFSET);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = (*fp->f_ops->fo_write)(fp, &auio, fp->f_cred, FOF_OFFSET);
	}
	inblock_end = mycp->p_stats->p_ru.ru_inblock;
	oublock_end = mycp->p_stats->p_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if (error) {
		if (auio.uio_resid != cnt) {
			if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
				error = 0;
			if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
				psignal(userp, SIGPIPE);
		}
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;

	return;

}

/*
 * The AIO daemon; most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 */
static void
aio_daemon(void *uproc)
{
	int s;
	struct aioproclist *aiop;
	struct vmspace *myvm;
	struct proc *mycp;

	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = curproc;
	myvm = mycp->p_vmspace;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info. There is one
	 * aiop structure per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;
	TAILQ_INIT(&aiop->jobtorun);

	/*
	 * Place thread (lightweight process) onto the AIO free thread list
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	/*
	 * Make up a name for the daemon
	 */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors. AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root."
	 */
	fdfree(mycp);
	mycp->p_fd = NULL;
	mycp->p_ucred = crcopy(mycp->p_ucred);
	mycp->p_ucred->cr_uid = 0;
	mycp->p_ucred->cr_ngroups = 1;
	mycp->p_ucred->cr_groups[0] = 1;

	/*
	 * The daemon resides in its own pgrp.
	 */
	enterpgrp(mycp, mycp->p_pid, 1);

	/*
	 * Mark special process type
	 */
	mycp->p_flag |= P_SYSTEM|P_KTHREADP;

	/*
	 * Wakeup parent process. (Parent sleeps to keep from blasting away
	 * creating too many daemons.)
	 */
	wakeup(mycp);

	while (1) {
		struct proc *curcp;
		struct aiocblist *aiocbe;

		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			struct proc *userp;
			struct aiocb *cb;
			struct kaioinfo *ki;
			struct aio_liojob *lj;

			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program
			 */
			if (userp != curcp) {
				struct vmspace *tmpvm;
				/*
				 * Save the current address space that we are connected to.
				 */
				tmpvm = mycp->p_vmspace;
				/*
				 * Point to the new user address space, and refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;
				/*
				 * Activate the new mapping.
				 */
				pmap_activate(mycp);
				/*
				 * If the old address space wasn't the daemon's own address
				 * space, then we need to remove the daemon's reference from
				 * the other process that it was acting on behalf of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}
				/*
				 * Disassociate from the previous client's file descriptors,
				 * and associate to the new client's descriptors. Note that
				 * the daemon doesn't need to worry about its original
				 * descriptors, because they were originally freed.
				 */
				if (mycp->p_fd)
					fdfree(mycp);
				mycp->p_fd = fdshare(userp);
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/*
			 * Account for currently active jobs
			 */
			ki->kaio_active_count++;

			/*
			 * Do the I/O function
			 */
			aiocbe->jobaioproc = aiop;
			aio_process(aiocbe);

			/*
			 * Decrement the active job count
			 */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal comparisons
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj) {
				lj->lioj_queue_finished_count++;
			}
			if ((ki->kaio_flags & KAIO_WAKEUP) ||
			    ((ki->kaio_flags & KAIO_RUNDOWN) &&
			    (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			s = splbio();
			if (lj && (lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
			    LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count == lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count == lj->lioj_buffer_count)) {
					psignal(userp, lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			splx(s);

			aiocbe->jobstate = JOBST_JOBFINISHED;

			/*
			 * If the I/O request should be automatically rundown, do the
			 * needed cleanup. Otherwise, place the queue entry for
			 * the just finished I/O request into the done queue for the
			 * associated client.
			 */
			if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
				aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
				TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
			} else {
				TAILQ_REMOVE(&ki->kaio_jobqueue,
				    aiocbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone,
				    aiocbe, plist);
			}

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				psignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space
		 */
		if (curcp != mycp) {
			struct vmspace *tmpvm;
			/*
			 * Get the user address space to disconnect from.
			 */
			tmpvm = mycp->p_vmspace;
			/*
			 * Get original address space for daemon.
			 */
			mycp->p_vmspace = myvm;
			/*
			 * Activate the daemon's address space.
			 */
			pmap_activate(mycp);
#if defined(DIAGNOSTIC)
			if (tmpvm == myvm)
				printf("AIOD: vmspace problem -- %d\n", mycp->p_pid);
#endif
			/*
			 * Remove our vmspace reference.
			 */
			vmspace_free(tmpvm);
			/*
			 * Disassociate from the user process's file descriptors.
			 */
			if (mycp->p_fd)
				fdfree(mycp);
			mycp->p_fd = NULL;
			curcp = mycp;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;

		/*
		 * If daemon is inactive for a long time, allow it to exit, thereby
		 * freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) &&
		    tsleep(mycp, PRIBIO, "aiordy", aiod_lifetime)) {
			if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
			    (TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					zfree(aiop_zone, aiop);
					num_aio_procs--;
#if defined(DIAGNOSTIC)
					if (mycp->p_vmspace->vm_refcnt <= 1)
						printf("AIOD: bad vm refcnt for exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
#endif
					exit1(mycp, 0);
				}
			}
		}
	}
}

/*
 * Create a new AIO daemon. This is mostly a kernel-thread fork routine.
 * The AIO daemon modifies its environment itself.
 */
static int
aio_newproc()
{
	int error;
	struct proc *p, *np;

	p = &proc0;
	error = fork1(p, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	cpu_set_fork_handler(np, aio_daemon, curproc);

	/*
	 * Wait until the daemon is started, but continue on just in case (to
	 * handle error conditions).
	 */
	error = tsleep(np, PZERO, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;

}

/*
 * Try the high-performance physio method for eligible VCHR devices. This
 * routine doesn't require the use of any additional threads, and has low
 * overhead.
 */
int
aio_qphysio(p, aiocbe)
	struct proc *p;
	struct aiocblist *aiocbe;
{
	int error;
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	int bflags;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct filedesc *fdp;
	struct aio_liojob *lj;
	int fd;
	int s;
	int cnt;
	dev_t dev;
	int rw;
	d_strategy_t *fstrategy;
	struct cdevsw *cdev;
	struct cdevsw *bdev;

	cb = &aiocbe->uaiocb;
	fdp = p->p_fd;
	fd = cb->aio_fildes;
	fp = fdp->fd_ofiles[fd];

	if (fp->f_type != DTYPE_VNODE) {
		return -1;
	}

	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VCHR || ((cb->aio_nbytes & (DEV_BSIZE - 1)) != 0)) {
		return -1;
	}

	if ((cb->aio_nbytes > MAXPHYS) && (num_buf_aio >= max_buf_aio)) {
		return -1;
	}

	if ((vp->v_specinfo == NULL) || (vp->v_flag & VISTTY)) {
		return -1;
	}

	if (vp->v_rdev == NODEV) {
		return -1;
	}

	cdev = devsw(vp->v_rdev);
	if (cdev == NULL) {
		return -1;
	}

	if (cdev->d_bmaj == -1) {
		return -1;
	}
	bdev = cdev;

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
		return -1;
	}

	cnt = cb->aio_nbytes;
	if (cnt > MAXPHYS) {
		return -1;
	}

	dev = makebdev(bdev->d_bmaj, minor(vp->v_rdev));

	/*
	 * Physical I/O is charged directly to the process, so we don't have
	 * to fake it.
	 */
	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj) {
		lj->lioj_buffer_count++;
	}

	/* create and build a buffer header for a transfer */
	bp = (struct buf *)getpbuf(NULL);

	/*
	 * get a copy of the kva from the physical buffer
	 */
	bp->b_caller1 = p;
	bp->b_dev = dev;
	error = bp->b_error = 0;

	if (cb->aio_lio_opcode == LIO_WRITE) {
		rw = 0;
		bflags = B_WRITE;
	} else {
		rw = 1;
		bflags = B_READ;
	}

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_flags = B_PHYS | B_CALL | bflags;
	bp->b_iodone = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *) cb->aio_buf;
	bp->b_blkno = btodb(cb->aio_offset);

	if (rw && !useracc(bp->b_data, bp->b_bufsize, B_WRITE)) {
		error = EFAULT;
		goto doerror;
	}
	if (!rw && !useracc(bp->b_data, bp->b_bufsize, B_READ)) {
		error = EFAULT;
		goto doerror;
	}

	/* bring buffer into kernel space */
	vmapbuf(bp);

	s = splbio();
	aiocbe->bp = bp;
	bp->b_spc = (void *)aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;
	fstrategy = bdev->d_strategy;
	bp->b_error = 0;

	splx(s);
	/* perform transfer */
	(*fstrategy)(bp);

	s = splbio();
	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error
	 * in transfer. Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism. In this case, aio_suspend
	 * will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
		}
	}
	splx(s);
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj) {
		lj->lioj_buffer_count--;
	}
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}

/*
 * This waits/tests physio completion.
 */
int
aio_fphysio(p, iocb, flgwait)
	struct proc *p;
	struct aiocblist *iocb;
	int flgwait;
{
	int s;
	struct buf *bp;
	int error;

	bp = iocb->bp;

	s = splbio();
	if (flgwait == 0) {
		if ((bp->b_flags & B_DONE) == 0) {
			splx(s);
			return EINPROGRESS;
		}
	}

	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep((caddr_t)bp, PRIBIO, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				splx(s);
				return EINPROGRESS;
			} else {
				break;
			}
		}
	}
	/* the buffer is done; drop splbio before tearing it down */
	splx(s);

	/* release mapping into kernel space */
	vunmapbuf(bp);
	iocb->bp = 0;

	error = 0;
	/*
	 * check for an error
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	}

	relpbuf(bp, NULL);
	return (error);
}

/*
 * Queue a new AIO request. Choosing either the threaded or direct physio
 * VCHR technique is done in this code.
 */
static int
_aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct filedesc *fdp;
	struct file *fp;
	unsigned int fd;

	int error;
	int opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL) {
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	} else {
		aiocbe = zalloc(aiocb_zone);
	}

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin((caddr_t)job,
	    (caddr_t) &aiocbe->uaiocb, sizeof aiocbe->uaiocb);
	if (error) {
		suword(&job->_aiocb_private.error, error);

		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}

	/*
	 * Save userspace address of the job info
	 */
	aiocbe->uuaiocb = job;

	/*
	 * Get the opcode
	 */
	if (type != LIO_NOP) {
		aiocbe->uaiocb.aio_lio_opcode = type;
	}
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/*
	 * Get the fd info for process
	 */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EBADF);
		}
		return EBADF;
	}

	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) ||
	    ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EBADF);
		}
		return EBADF;
	}

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return EINVAL;
	}

	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return error;
	}

	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}

	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return EINVAL;
	}

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if ((error = aio_qphysio(p, aiocbe)) == 0) {
		return 0;
	} else if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		return error;
	}

	/*
	 * No buffer for daemon I/O
	 */
	aiocbe->bp = NULL;
	ki->kaio_queue_count++;
	if (lj) {
		lj->lioj_queue_count++;
	}
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our
	 * quota, then start one. Otherwise, depend on the subsequent
	 * I/O completions to pick up this job. If we don't successfully
	 * create the new process (thread) due to resource issues, we
	 * return an error for now (EAGAIN), which is likely not the
	 * correct thing to do.
	 */
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			p->p_retval[0] = 0;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct proc *p, struct aiocb *job, int type)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		aio_init_aioinfo(p);
	}

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(p, job, NULL, type);
}

/*
 * Support the aio_return system call; as a side effect, kernel
 * resources are released.
 */
int
aio_return(struct proc *p, struct aio_return_args *uap)
{
	int s;
	int jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL) {
		return EINVAL;
	}

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
	    cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			if (ujob == cb->uuaiocb) {
				p->p_retval[0] = cb->uaiocb._aiocb_private.status;
			} else {
				p->p_retval[0] = EFAULT;
			}
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				curproc->p_stats->p_ru.ru_oublock += cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				curproc->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
	    cb;
	    cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			splx(s);
			if (ujob == cb->uuaiocb) {
				p->p_retval[0] = cb->uaiocb._aiocb_private.status;
			} else {
				p->p_retval[0] = EFAULT;
			}
			aio_free_entry(cb);
			return 0;
		}
	}
	splx(s);

	return (EINVAL);
}
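
/*
 * Illustrative userland sketch (not kernel code): the completion-polling
 * pattern that aio_error() (below) and aio_return() (above) support.
 * "cb" is assumed to be a struct aiocb that was previously queued with
 * aio_read() or aio_write(); the sched_yield() call is just a placeholder
 * for doing other work (or calling aio_suspend()) while the request runs.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <sched.h>
 *
 *	int err;
 *	ssize_t done = -1;
 *
 *	while ((err = aio_error(&cb)) == EINPROGRESS)
 *		sched_yield();
 *	if (err == 0)
 *		done = aio_return(&cb);
 */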

/*
 * Allow a process to wakeup when any of the I/O requests are
 * completed.
 */
int
aio_suspend(struct proc *p, struct aio_suspend_args *uap)
{
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, s, timo;
	int *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent >= AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/*
		 * Get timespec struct
		 */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) {
			return error;
		}

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *) (intptr_t) fuword((caddr_t) &cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}
	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	while (1) {
		for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
		    cb; cb = TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		s = splbio();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
		    cb; cb = TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					splx(s);
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO|PCATCH, "aiospn", timo);
		splx(s);

		if (error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}

	/* NOTREACHED */
	return EINVAL;
}

/*
 * aio_cancel at the kernel level is a NOOP right now. It
 * might be possible to support it partially in user mode, or
 * in kernel mode later on.
 */
int
aio_cancel(struct proc *p, struct aio_cancel_args *uap)
{
	return ENOSYS;
}

/*
 * aio_error is implemented in the kernel level for compatibility
 * purposes only. For a user mode async implementation, it would be
 * best to do it in a userland subroutine.
 */
int
aio_error(struct proc *p, struct aio_error_args *uap)
{
	int s;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	int jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
	    cb;
	    cb = TAILQ_NEXT(cb, plist)) {

		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue);
	    cb;
	    cb = TAILQ_NEXT(cb, plist)) {

		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = EINPROGRESS;
			return 0;
		}
	}

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
	    cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			splx(s);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue);
	    cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}
	splx(s);


	/*
	 * Hack for lio
	 */
	/*
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1) {
		return fuword(&uap->aiocbp->_aiocb_private.error);
	}
	*/
	return EINVAL;
}

int
aio_read(struct proc *p, struct aio_read_args *uap)
{
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error, pmodes;

	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0) {
		return aio_aqueue(p, (struct aiocb *) uap->aiocbp, LIO_READ);
	}

	/*
	 * Get control block
	 */
	if ((error = copyin((caddr_t) uap->aiocbp, (caddr_t) &iocb, sizeof iocb)) != 0)
		return error;

	/*
	 * Get the fd info for process
	 */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FREAD) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	/*
	 * Process sync simply -- queue async request.
	 */
	if ((iocb._aiocb_private.privatemodes & AIO_PMODE_SYNC) == 0) {
		return aio_aqueue(p, (struct aiocb *) uap->aiocbp, LIO_READ);
	}

	aiov.iov_base = (void *) iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred, FOF_OFFSET);
	if (error &&
	    (auio.uio_resid != cnt) &&
	    (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
		error = 0;
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
}

int
aio_write(struct proc *p, struct aio_write_args *uap)
{
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error;
	int pmodes;

	/*
	 * Process sync simply -- queue async request.
	 */
	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0) {
		return aio_aqueue(p, (struct aiocb *) uap->aiocbp, LIO_WRITE);
	}

	if ((error = copyin((caddr_t) uap->aiocbp, (caddr_t) &iocb, sizeof iocb)) != 0)
		return error;

	/*
	 * Get the fd info for process
	 */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FWRITE) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	aiov.iov_base = (void *) iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = (*fp->f_ops->fo_write)(fp, &auio, fp->f_cred, FOF_OFFSET);
	if (error) {
		if (auio.uio_resid != cnt) {
			if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
				error = 0;
			if (error == EPIPE)
				psignal(p, SIGPIPE);
		}
	}
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
}

int
lio_listio(struct proc *p, struct lio_listio_args *uap)
{
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;
	int s;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) {
		return EINVAL;
	}

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX) {
		return EINVAL;
	}

	if (p->p_aioinfo == NULL) {
		aio_init_aioinfo(p);
	}

	if ((nent + num_queue_count) > max_queue_count) {
		return EAGAIN;
	}

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count) {
		return EAGAIN;
	}

	lj = zalloc(aiolio_zone);
	if (!lj) {
		return EAGAIN;
	}

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;
	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

	/*
	 * Setup signal
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal, sizeof lj->lioj_signal);
		if (error)
			return error;
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else {
		lj->lioj_flags &= ~LIOJ_SIGNAL;
	}

	/*
	 * get pointers to the list of I/O requests
	 */

	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *) (intptr_t) fuword((caddr_t) &cbptr[i]);
		if (((intptr_t) iocb != -1) && ((intptr_t) iocb != NULL)) {
			error = _aio_aqueue(p, iocb, lj, 0);
			if (error == 0) {
				nentqueued++;
			} else {
				nerror++;
			}
		}
	}

	/*
	 * If we haven't queued any, then just return error
	 */
	if (nentqueued == 0) {
		return 0;
	}

	/*
	 * Calculate the appropriate error return
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		while (1) {
			int found;
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				int jobref, command;

				/*
				 * Fetch address of the control buf pointer in user space
				 */
				iocb = (struct aiocb *) (intptr_t) fuword((caddr_t) &cbptr[i]);
				if (((intptr_t) iocb == -1) || ((intptr_t) iocb == 0))
					continue;

				/*
				 * Fetch the associated command from user space
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref = fuword(&iocb->_aiocb_private.kernelinfo);

				for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
				    cb;
				    cb = TAILQ_NEXT(cb, plist)) {
					if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
					    jobref) {
						if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
							curproc->p_stats->p_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
							curproc->p_stats->p_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				s = splbio();
				for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
				    cb;
				    cb = TAILQ_NEXT(cb, plist)) {
					if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
					    jobref) {
						found++;
						break;
					}
				}
				splx(s);

			}

			/*
			 * If all I/Os have been disposed of, then we can return
			 */
			if (found == nentqueued) {
				return runningcode;
			}

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PRIBIO|PCATCH, "aiospn", 0);

			if (error == EINTR) {
				return EINTR;
			} else if (error == EWOULDBLOCK) {
				return EAGAIN;
			}

		}
	}

	return runningcode;
}
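
/*
 * Illustrative userland sketch (not kernel code): submitting a batch of
 * requests through lio_listio() in blocking (LIO_WAIT) mode, which the
 * routine above services. "fd" is assumed to be a descriptor opened
 * elsewhere, and the buffer sizes and offsets are made-up example values.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	struct aiocb rd, wr;
 *	struct aiocb *batch[2];
 *	static char inbuf[512], outbuf[512];
 *
 *	memset(&rd, 0, sizeof(rd));
 *	rd.aio_fildes = fd;
 *	rd.aio_buf = inbuf;
 *	rd.aio_nbytes = sizeof(inbuf);
 *	rd.aio_offset = 0;
 *	rd.aio_lio_opcode = LIO_READ;
 *
 *	memset(&wr, 0, sizeof(wr));
 *	wr.aio_fildes = fd;
 *	wr.aio_buf = outbuf;
 *	wr.aio_nbytes = sizeof(outbuf);
 *	wr.aio_offset = 4096;
 *	wr.aio_lio_opcode = LIO_WRITE;
 *
 *	batch[0] = &rd;
 *	batch[1] = &wr;
 *	lio_listio(LIO_WAIT, batch, 2, NULL);
 */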

/*
 * This is a weird hack so that we can post a signal. It is safe
 * to do so from a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *ljarg)
{
	struct aio_liojob *lj = ljarg;
	if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) {
		if (lj->lioj_queue_count == lj->lioj_queue_finished_count) {
			psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
		}
	}
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups,
 * and signals.
 */
static void
aio_physwakeup(bp)
	struct buf *bp;
{
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int s;
	s = splbio();

	wakeup((caddr_t) bp);
	bp->b_flags &= ~B_CALL;
	bp->b_flags |= B_DONE;

	aiocbe = (struct aiocblist *)bp->b_spc;
	if (aiocbe) {
		p = bp->b_caller1;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR) {
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		}

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;
			/*
			 * wakeup/signal if all of the interrupt jobs are done
			 */
			if (lj->lioj_buffer_finished_count == lj->lioj_buffer_count) {
				/*
				 * post a signal if it is called for
				 */
				if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					timeout(process_signal, lj, 0);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
			/*
			 * and do the wakeup
			 */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}
	}
	splx(s);
}