/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD$
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/aio.h>
#include <sys/shm.h>

#include <machine/cpu.h>
#include <machine/limits.h>

static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQPROC		0x1
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	0
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

static int max_aio_procs = MAX_AIO_PROCS;
static int num_aio_procs = 0;
static int target_aio_procs = TARGET_AIO_PROCS;
static int max_queue_count = MAX_AIO_QUEUE;
static int num_queue_count = 0;
static int num_buf_aio = 0;
static int num_aio_resv_start = 0;
static int aiod_timeout;
static int aiod_lifetime;

static int max_aio_per_proc = MAX_AIO_PER_PROC,
	max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;

static int max_buf_aio = MAX_BUF_AIO;

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc,
	CTLFLAG_RW, &max_aio_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc,
	CTLFLAG_RW, &max_aio_queue_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
	CTLFLAG_RD, &num_queue_count, 0, "");
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
	CTLFLAG_RW, &max_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
	CTLFLAG_RW, &target_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
	CTLFLAG_RW, &max_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
	CTLFLAG_RD, &num_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
	CTLFLAG_RW, &aiod_lifetime, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
	CTLFLAG_RW, &aiod_timeout, 0, "");

/*
 * Job queue item
 */
#define AIOCBLIST_CANCELLED	0x1
#define AIOCBLIST_RUNDOWN	0x4
#define AIOCBLIST_ASYNCFREE	0x8
#define AIOCBLIST_DONE		0x10

struct aiocblist {
	TAILQ_ENTRY (aiocblist) list;		/* List of jobs */
	TAILQ_ENTRY (aiocblist) plist;		/* List of jobs for proc */
	int	jobflags;
	int	jobstate;
	int	inputcharge, outputcharge;
	struct	buf *bp;			/* buffer pointer */
	struct	proc *userproc;			/* User process */
	struct	aioproclist *jobaioproc;	/* AIO process descriptor */
	struct	aio_liojob *lio;		/* optional lio job */
	struct	aiocb *uuaiocb;			/* pointer in userspace of aiocb */
	struct	aiocb uaiocb;			/* Kernel I/O control block */
};

/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int	aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct	proc *aioproc;			/* The AIO thread */
	TAILQ_HEAD (,aiocblist) jobtorun;	/* suggested job to run */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;		/* signal on all I/O done */
	TAILQ_ENTRY (aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;			/* per process kaio flags */
	int	kaio_maxactive_count;		/* maximum number of AIOs */
	int	kaio_active_count;		/* number of currently used AIOs */
	int	kaio_qallowed_count;		/* maximum size of AIO queue */
	int	kaio_queue_count;		/* size of AIO queue */
	int	kaio_ballowed_count;		/* maximum number of buffers */
	int	kaio_queue_finished_count;	/* number of daemon jobs finished */
	int	kaio_buffer_count;		/* number of physio buffers */
	int	kaio_buffer_finished_count;	/* count of I/O done */
	struct	proc *kaio_p;			/* process that uses this kaio block */
	TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD (,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufdone;	/* buffer done queue for process */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;	/* Pool of free jobs */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct proc *p, struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static int	aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

static vm_zone_t kaio_zone = 0, aiop_zone = 0,
	aiocb_zone = 0, aiol_zone = 0, aiolio_zone = 0;

/*
 * Startup initialization
 */
void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO",
	    AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.
 * The aioinfo limits are set per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
	}
}
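/*
 * Rough summary of the aiocblist jobstate transitions used below (derived
 * from a reading of this file, not a formal interface description):
 *
 *	JOBST_NULL -> JOBST_JOBQGLOBAL (or JOBST_JOBQPROC when queued to a
 *	    particular daemon) -> JOBST_JOBRUNNING -> JOBST_JOBFINISHED
 *		for jobs serviced by an aiod via aio_process(), and
 *	JOBST_NULL -> JOBST_JOBQBUF -> JOBST_JOBBFINISHED
 *		for direct physio jobs issued by aio_qphysio().
 *
 * aio_free_entry() puts a finished job back on aio_freejobs and returns it
 * to JOBST_NULL.
 */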
/*
 * Free a job entry.  Wait for completion if it is currently
 * active, but don't delay forever.  If we delay, we return
 * a flag that says that we have to restart the queue scan.
 */
int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aioproclist *aiop;
	struct aio_liojob *lj;
	struct proc *p;
	int error;
	int s;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	if (aiocbe->jobstate == JOBST_JOBRUNNING) {
		if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
			return 0;
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, PRIBIO|PCATCH, "jobwai", 0);
	}
	aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;

	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	if ((ki->kaio_flags & KAIO_WAKEUP) ||
	    ((ki->kaio_flags & KAIO_RUNDOWN) &&
	    ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(p, aiocbe, 1)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
	} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
		aiop = aiocbe->jobaioproc;
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		s = splbio();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		splx(s);
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	aiocbe->jobstate = JOBST_NULL;
	return 0;
}
/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
	int s;
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) ||
	    (ki->kaio_buffer_count > ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
			break;
	}

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	/*
	 * Note the use of lots of splbio here, trying to avoid
	 * splbio for long chains of I/O.  Probably unnecessary.
	 */
restart3:
	s = splbio();
	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, PRIBIO, "aioprn", 0);
		splx(s);
		goto restart3;
	}
	splx(s);

restart4:
	s = splbio();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			splx(s);
			goto restart4;
		}
	}
	splx(s);

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
#if defined(DIAGNOSTIC)
			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, QF:%d\n",
			    lj->lioj_buffer_count, lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count, lj->lioj_queue_finished_count);
#endif
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon)
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	struct aiocblist *aiocbe;

	aiocbe = TAILQ_FIRST(&aiop->jobtorun);
	if (aiocbe) {
		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
		return aiocbe;
	}

	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe;
	    aiocbe = TAILQ_NEXT(aiocbe, list)) {
		struct kaioinfo *ki;
		struct proc *userp;

		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			return aiocbe;
		}
	}

	return NULL;
}
/*
 * The AIO processing activity.  This is the code that does the
 * I/O request for the non-physio version of the operations.  The
 * normal vn operations are used, and this code should work in
 * all instances for every type of file, including pipes, sockets,
 * fifos, and regular files.
 */
void
aio_process(struct aiocblist *aiocbe)
{
	struct filedesc *fdp;
	struct proc *userp, *mycp;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	int error;
	off_t offset;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	userp = aiocbe->userproc;
	cb = &aiocbe->uaiocb;

	mycp = curproc;

	fdp = mycp->p_fd;
	fd = cb->aio_fildes;
	fp = fdp->fd_ofiles[fd];

	aiov.iov_base = (void *) cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = mycp;

	inblock_st = mycp->p_stats->p_ru.ru_inblock;
	oublock_st = mycp->p_stats->p_ru.ru_oublock;
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred, FOF_OFFSET);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = (*fp->f_ops->fo_write)(fp, &auio, fp->f_cred, FOF_OFFSET);
	}
	inblock_end = mycp->p_stats->p_ru.ru_inblock;
	oublock_end = mycp->p_stats->p_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if (error) {
		if (auio.uio_resid != cnt) {
			if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
				error = 0;
			if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
				psignal(userp, SIGPIPE);
		}
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;

	return;
}

/*
 * The AIO daemon.  Most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 */
static void
aio_daemon(void *uproc)
{
	int s;
	struct aioproclist *aiop;
	struct vmspace *myvm;
	struct proc *mycp;

	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = curproc;
	myvm = mycp->p_vmspace;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one
	 * aiop structure per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;
	TAILQ_INIT(&aiop->jobtorun);

	/*
	 * Place thread (lightweight process) onto the AIO free thread list
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	/*
	 * Make up a name for the daemon
	 */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root."
	 */
	fdfree(mycp);
	mycp->p_fd = NULL;
	mycp->p_ucred = crcopy(mycp->p_ucred);
	mycp->p_ucred->cr_uid = 0;
	mycp->p_ucred->cr_ngroups = 1;
	mycp->p_ucred->cr_groups[0] = 1;

	/*
	 * The daemon resides in its own pgrp.
	 */
	enterpgrp(mycp, mycp->p_pid, 1);

	/*
	 * Mark special process type
	 */
	mycp->p_flag |= P_SYSTEM|P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * creating too many daemons.)
	 */
	wakeup(mycp);

	while (1) {
		struct proc *curcp;
		struct aiocblist *aiocbe;

		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			struct proc *userp;
			struct aiocb *cb;
			struct kaioinfo *ki;
			struct aio_liojob *lj;

			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program
			 */
			if (userp != curcp) {
				struct vmspace *tmpvm;
				/*
				 * Save the current address space that we are connected to.
				 */
				tmpvm = mycp->p_vmspace;
				/*
				 * Point to the new user address space, and refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;
				/*
				 * Activate the new mapping.
				 */
				pmap_activate(mycp);
				/*
				 * If the old address space wasn't the daemon's own address
				 * space, then we need to remove the daemon's reference from
				 * the other process that it was acting on behalf of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}
				/*
				 * Disassociate from the previous client's file descriptors,
				 * and associate to the new client's descriptors.  Note that
				 * the daemon doesn't need to worry about its original
				 * descriptors, because they were originally freed.
				 */
				if (mycp->p_fd)
					fdfree(mycp);
				mycp->p_fd = fdshare(userp);
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/*
			 * Account for currently active jobs
			 */
			ki->kaio_active_count++;

			/*
			 * Do the I/O function
			 */
			aiocbe->jobaioproc = aiop;
			aio_process(aiocbe);

			/*
			 * Decrement the active job count
			 */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal comparisons
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj) {
				lj->lioj_queue_finished_count++;
			}
			if ((ki->kaio_flags & KAIO_WAKEUP) ||
			    ((ki->kaio_flags & KAIO_RUNDOWN) &&
			    (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			s = splbio();
			if (lj && (lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
			    LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count == lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count == lj->lioj_buffer_count)) {
					psignal(userp, lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			splx(s);

			aiocbe->jobstate = JOBST_JOBFINISHED;

			/*
			 * If the I/O request should be automatically rundown, do the
			 * needed cleanup.  Otherwise, place the queue entry for
			 * the just finished I/O request into the done queue for the
			 * associated client.
			 */
			if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
				aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
				TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
			} else {
				TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
			}

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				psignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space
		 */
		if (curcp != mycp) {
			struct vmspace *tmpvm;
			/*
			 * Get the user address space to disconnect from.
			 */
			tmpvm = mycp->p_vmspace;
			/*
			 * Get original address space for daemon.
			 */
			mycp->p_vmspace = myvm;
			/*
			 * Activate the daemon's address space.
			 */
			pmap_activate(mycp);
#if defined(DIAGNOSTIC)
			if (tmpvm == myvm)
				printf("AIOD: vmspace problem -- %d\n", mycp->p_pid);
#endif
			/*
			 * Remove our vmspace reference.
			 */
			vmspace_free(tmpvm);
			/*
			 * Disassociate from the user process's file descriptors.
			 */
			if (mycp->p_fd)
				fdfree(mycp);
			mycp->p_fd = NULL;
			curcp = mycp;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;

		/*
		 * If daemon is inactive for a long time, allow it to exit, thereby
		 * freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) &&
		    tsleep(mycp, PRIBIO, "aiordy", aiod_lifetime)) {
			if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
			    (TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					zfree(aiop_zone, aiop);
					num_aio_procs--;
#if defined(DIAGNOSTIC)
					if (mycp->p_vmspace->vm_refcnt <= 1)
						printf("AIOD: bad vm refcnt for exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
#endif
					exit1(mycp, 0);
				}
			}
		}
	}
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.
 * The AIO daemon modifies its environment itself.
 */
static int
aio_newproc()
{
	int error;
	struct proc *p, *np;

	p = &proc0;
	error = fork1(p, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	cpu_set_fork_handler(np, aio_daemon, curproc);

	/*
	 * Wait until daemon is started, but continue on just in case (to
	 * handle error conditions).
	 */
	error = tsleep(np, PZERO, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}
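/*
 * Note on the fast path below: _aio_aqueue() tries aio_qphysio() first and
 * falls back to the daemon path whenever it returns -1.  As written, the
 * direct route is used only for VCHR vnodes backed by a block-capable device
 * (d_bmaj != -1), for transfers that are a multiple of DEV_BSIZE and no
 * larger than MAXPHYS, while the process is below its physio buffer quota
 * (kaio_ballowed_count); ttys and devices without a devsw are excluded.
 */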
/*
 * Try the high-performance physio method for eligible VCHR devices.  This
 * routine doesn't require the use of any additional threads, and has very
 * little overhead.
 */
int
aio_qphysio(p, aiocbe)
	struct proc *p;
	struct aiocblist *aiocbe;
{
	int error;
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	int bflags;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct filedesc *fdp;
	struct aio_liojob *lj;
	int fd;
	int s;
	int cnt;
	int rw;
	struct cdevsw *cdev;

	cb = &aiocbe->uaiocb;
	fdp = p->p_fd;
	fd = cb->aio_fildes;
	fp = fdp->fd_ofiles[fd];

	if (fp->f_type != DTYPE_VNODE) {
		return -1;
	}

	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VCHR || ((cb->aio_nbytes & (DEV_BSIZE - 1)) != 0)) {
		return -1;
	}

	if ((cb->aio_nbytes > MAXPHYS) && (num_buf_aio >= max_buf_aio)) {
		return -1;
	}

	if ((vp->v_rdev == NULL) || (vp->v_flag & VISTTY)) {
		return -1;
	}

	if (vp->v_rdev == NODEV) {
		return -1;
	}

	cdev = devsw(vp->v_rdev);
	if (cdev == NULL) {
		return -1;
	}

	if (cdev->d_bmaj == -1) {
		return -1;
	}

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
		return -1;
	}

	cnt = cb->aio_nbytes;
	if (cnt > MAXPHYS) {
		return -1;
	}

	/*
	 * Physical I/O is charged directly to the process, so we don't have
	 * to fake it.
	 */
	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj) {
		lj->lioj_buffer_count++;
	}

	/* create and build a buffer header for a transfer */
	bp = (struct buf *)getpbuf(NULL);

	/*
	 * get a copy of the kva from the physical buffer
	 */
	bp->b_caller1 = p;
	bp->b_dev = vp->v_rdev;
	error = bp->b_error = 0;

	if (cb->aio_lio_opcode == LIO_WRITE) {
		rw = 0;
		bflags = B_WRITE;
	} else {
		rw = 1;
		bflags = B_READ;
	}

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_flags = B_PHYS | B_CALL | bflags;
	bp->b_iodone = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *) cb->aio_buf;
	bp->b_blkno = btodb(cb->aio_offset);

	if (rw && !useracc(bp->b_data, bp->b_bufsize, B_WRITE)) {
		error = EFAULT;
		goto doerror;
	}
	if (!rw && !useracc(bp->b_data, bp->b_bufsize, B_READ)) {
		error = EFAULT;
		goto doerror;
	}

	/* bring buffer into kernel space */
	vmapbuf(bp);

	s = splbio();
	aiocbe->bp = bp;
	bp->b_spc = (void *)aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;
	bp->b_error = 0;

	splx(s);
	/* perform transfer */
	BUF_STRATEGY(bp, 0);

	s = splbio();
	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error
	 * in transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case, aio_suspend
	 * will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
		}
	}
	splx(s);
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj) {
		lj->lioj_buffer_count--;
	}
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}

/*
 * This waits/tests physio completion.
 */
int
aio_fphysio(p, iocb, flgwait)
	struct proc *p;
	struct aiocblist *iocb;
	int flgwait;
{
	int s;
	struct buf *bp;
	int error;

	bp = iocb->bp;

	s = splbio();
	if (flgwait == 0) {
		if ((bp->b_flags & B_DONE) == 0) {
			splx(s);
			return EINPROGRESS;
		}
	}

	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep((caddr_t)bp, PRIBIO, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				splx(s);
				return EINPROGRESS;
			} else {
				break;
			}
		}
	}
	/* drop the bio spl taken above before tearing the buffer down */
	splx(s);

	/* release mapping into kernel space */
	vunmapbuf(bp);
	iocb->bp = 0;

	error = 0;
	/*
	 * check for an error
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	}

	relpbuf(bp, NULL);
	return (error);
}
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio
 * VCHR technique is done in this code.
 */
static int
_aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct filedesc *fdp;
	struct file *fp;
	unsigned int fd;
	int error;
	int opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL) {
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	} else {
		aiocbe = zalloc(aiocb_zone);
	}

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin((caddr_t)job, (caddr_t) &aiocbe->uaiocb,
	    sizeof aiocbe->uaiocb);
	if (error) {
		suword(&job->_aiocb_private.error, error);

		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}

	/*
	 * Save userspace address of the job info
	 */
	aiocbe->uuaiocb = job;

	/*
	 * Get the opcode
	 */
	if (type != LIO_NOP) {
		aiocbe->uaiocb.aio_lio_opcode = type;
	}
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/*
	 * Get the fd info for process
	 */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EBADF);
		}
		return EBADF;
	}

	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) ||
	    ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EBADF);
		}
		return EBADF;
	}

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return EINVAL;
	}

	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return error;
	}

	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}

	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return EINVAL;
	}

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if ((error = aio_qphysio(p, aiocbe)) == 0) {
		return 0;
	} else if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		return error;
	}

	/*
	 * No buffer for daemon I/O
	 */
	aiocbe->bp = NULL;
	ki->kaio_queue_count++;
	if (lj) {
		lj->lioj_queue_count++;
	}
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our
	 * quota, then start one.  Otherwise, depend on the subsequent
	 * I/O completions to pick up this job.  If we don't successfully
	 * create the new process (thread) due to resource issues, we
	 * return an error for now (EAGAIN), which is likely not the
	 * correct thing to do.
	 */
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			p->p_retval[0] = 0;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct proc *p, struct aiocb *job, int type)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		aio_init_aioinfo(p);
	}

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(p, job, NULL, type);
}

/*
 * Support the aio_return system call.  As a side effect, kernel
 * resources are released.
 */
int
aio_return(struct proc *p, struct aio_return_args *uap)
{
	int s;
	int jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL) {
		return EINVAL;
	}

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			if (ujob == cb->uuaiocb) {
				p->p_retval[0] = cb->uaiocb._aiocb_private.status;
			} else {
				p->p_retval[0] = EFAULT;
			}
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				curproc->p_stats->p_ru.ru_oublock += cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				curproc->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			splx(s);
			if (ujob == cb->uuaiocb) {
				p->p_retval[0] = cb->uaiocb._aiocb_private.status;
			} else {
				p->p_retval[0] = EFAULT;
			}
			aio_free_entry(cb);
			return 0;
		}
	}
	splx(s);

	return (EINVAL);
}
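/*
 * For reference, a minimal userland sequence that exercises the queueing,
 * polling, and reaping paths implemented here (illustrative sketch only;
 * error handling is omitted and "fd"/"buffer" are placeholders):
 *
 *	struct aiocb acb;
 *	const struct aiocb *list[1];
 *
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buffer;
 *	acb.aio_nbytes = sizeof(buffer);
 *	acb.aio_offset = 0;
 *	list[0] = &acb;
 *
 *	aio_read(&acb);				queued via aio_aqueue()
 *	while (aio_error(&acb) == EINPROGRESS)
 *		aio_suspend(list, 1, NULL);	sleeps until a job completes
 *	cnt = aio_return(&acb);			frees the kernel job entry
 */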
/*
 * Allow a process to wake up when any of the I/O requests are
 * completed.
 */
int
aio_suspend(struct proc *p, struct aio_suspend_args *uap)
{
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, s, timo;
	int *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent >= AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/*
		 * Get timespec struct
		 */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) {
			return error;
		}

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *) (intptr_t) fuword((caddr_t) &cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}
	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	while (1) {
		for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb;
		    cb = TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		s = splbio();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
		    cb = TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					splx(s);
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PRIBIO|PCATCH, "aiospn", timo);
		splx(s);

		if (error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}

	/* NOTREACHED */
	return EINVAL;
}

/*
 * aio_cancel at the kernel level is a NOOP right now.  It
 * might be possible to support it partially in user mode, or
 * in kernel mode later on.
 */
int
aio_cancel(struct proc *p, struct aio_cancel_args *uap)
{
	return ENOSYS;
}
/*
 * aio_error is implemented at the kernel level for compatibility
 * purposes only.  For a user mode async implementation, it would be
 * best to do it in a userland subroutine.
 */
int
aio_error(struct proc *p, struct aio_error_args *uap)
{
	int s;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	int jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = EINPROGRESS;
			return 0;
		}
	}

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			splx(s);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}
	splx(s);

	/*
	 * Hack for lio
	 */
	/*
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1) {
		return fuword(&uap->aiocbp->_aiocb_private.error);
	}
	*/
	return EINVAL;
}

int
aio_read(struct proc *p, struct aio_read_args *uap)
{
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error, pmodes;

	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0) {
		return aio_aqueue(p, (struct aiocb *) uap->aiocbp, LIO_READ);
	}

	/*
	 * Get control block
	 */
	if ((error = copyin((caddr_t) uap->aiocbp, (caddr_t) &iocb,
	    sizeof iocb)) != 0)
		return error;

	/*
	 * Get the fd info for process
	 */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FREAD) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	/*
	 * Process sync simply -- queue async request.
	 */
	if ((iocb._aiocb_private.privatemodes & AIO_PMODE_SYNC) == 0) {
		return aio_aqueue(p, (struct aiocb *) uap->aiocbp, LIO_READ);
	}

	aiov.iov_base = (void *) iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred, FOF_OFFSET);
	if (error &&
	    (auio.uio_resid != cnt) &&
	    (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
		error = 0;
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
}

int
aio_write(struct proc *p, struct aio_write_args *uap)
{
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error;
	int pmodes;

	/*
	 * Process sync simply -- queue async request.
	 */
	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0) {
		return aio_aqueue(p, (struct aiocb *) uap->aiocbp, LIO_WRITE);
	}

	if ((error = copyin((caddr_t) uap->aiocbp, (caddr_t) &iocb,
	    sizeof iocb)) != 0)
		return error;

	/*
	 * Get the fd info for process
	 */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FWRITE) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	aiov.iov_base = (void *) iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = (*fp->f_ops->fo_write)(fp, &auio, fp->f_cred, FOF_OFFSET);
	if (error) {
		if (auio.uio_resid != cnt) {
			if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
				error = 0;
			if (error == EPIPE)
				psignal(p, SIGPIPE);
		}
	}
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
}

int
lio_listio(struct proc *p, struct lio_listio_args *uap)
{
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;
	int s;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) {
		return EINVAL;
	}

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX) {
		return EINVAL;
	}

	if (p->p_aioinfo == NULL) {
		aio_init_aioinfo(p);
	}

	if ((nent + num_queue_count) > max_queue_count) {
		return EAGAIN;
	}

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count) {
		return EAGAIN;
	}

	lj = zalloc(aiolio_zone);
	if (!lj) {
		return EAGAIN;
	}

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;
	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

	/*
	 * Setup signal
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal, sizeof lj->lioj_signal);
		if (error)
			return error;
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else {
		lj->lioj_flags &= ~LIOJ_SIGNAL;
	}

	/*
	 * Get pointers to the list of I/O requests
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *) (intptr_t) fuword((caddr_t) &cbptr[i]);
		if (((intptr_t) iocb != -1) && ((intptr_t) iocb != NULL)) {
			error = _aio_aqueue(p, iocb, lj, 0);
			if (error == 0) {
				nentqueued++;
			} else {
				nerror++;
			}
		}
	}

	/*
	 * If we haven't queued any, then just return error
	 */
	if (nentqueued == 0) {
		return 0;
	}

	/*
	 * Calculate the appropriate error return
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		while (1) {
			int found;

			found = 0;
			for (i = 0; i < uap->nent; i++) {
				int jobref, command;

				/*
				 * Fetch address of the control buf pointer in user space
				 */
				iocb = (struct aiocb *) (intptr_t) fuword((caddr_t) &cbptr[i]);
				if (((intptr_t) iocb == -1) || ((intptr_t) iocb == 0))
					continue;

				/*
				 * Fetch the associated command from user space
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref = fuword(&iocb->_aiocb_private.kernelinfo);

				for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb;
				    cb = TAILQ_NEXT(cb, plist)) {
					if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
					    jobref) {
						if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
							curproc->p_stats->p_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
							curproc->p_stats->p_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				s = splbio();
				for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
				    cb = TAILQ_NEXT(cb, plist)) {
					if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
					    jobref) {
						found++;
						break;
					}
				}
				splx(s);
			}

			/*
			 * If all I/Os have been disposed of, then we can return
			 */
			if (found == nentqueued) {
				return runningcode;
			}

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PRIBIO|PCATCH, "aiospn", 0);

			if (error == EINTR) {
				return EINTR;
			} else if (error == EWOULDBLOCK) {
				return EAGAIN;
			}
		}
	}

	return runningcode;
}

/*
 * This is a weird hack so that we can post a signal.  It is safe
 * to do so from a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *ljarg)
{
	struct aio_liojob *lj = ljarg;

	if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) {
		if (lj->lioj_queue_count == lj->lioj_queue_finished_count) {
			psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
		}
	}
}
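/*
 * For reference, an illustrative userland use of the lio_listio() and signal
 * paths above (error handling omitted; the fds, buffers, and the SIGUSR1
 * choice are placeholders, not part of this interface):
 *
 *	struct aiocb a0, a1;
 *	struct aiocb *list[2] = { &a0, &a1 };
 *	struct sigevent sev;
 *
 *	(fill in a0/a1 with aio_fildes, aio_buf, aio_nbytes, aio_offset and
 *	 aio_lio_opcode = LIO_READ or LIO_WRITE)
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGUSR1;
 *	lio_listio(LIO_NOWAIT, list, 2, &sev);
 *
 * With LIO_NOWAIT the completion signal is posted by aio_daemon() or, for
 * buffer jobs, via process_signal() once every job in the lio has finished;
 * with LIO_WAIT the call sleeps in the loop above until all jobs are
 * disposed of.
 */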
/*
 * Interrupt handler for physio, performs the necessary process wakeups,
 * and signals.
 */
static void
aio_physwakeup(bp)
	struct buf *bp;
{
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int s;

	s = splbio();

	wakeup((caddr_t) bp);
	bp->b_flags &= ~B_CALL;
	bp->b_flags |= B_DONE;

	aiocbe = (struct aiocblist *)bp->b_spc;
	if (aiocbe) {
		p = bp->b_caller1;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR) {
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		}

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;
			/*
			 * Wakeup/signal if all of the interrupt jobs are done
			 */
			if (lj->lioj_buffer_finished_count == lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for
				 */
				if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					timeout(process_signal, lj, 0);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
			/*
			 * And do the wakeup
			 */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}
	}
	splx(s);
}