/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER: This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $Id: vfs_aio.c,v 1.50 1999/05/09 13:13:52 phk Exp $
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/aio.h>
#include <sys/shm.h>

#include <machine/cpu.h>
#include <machine/limits.h>

static long jobrefid;

#define JOBST_NULL          0x0
#define JOBST_JOBQPROC      0x1
#define JOBST_JOBQGLOBAL    0x2
#define JOBST_JOBRUNNING    0x3
#define JOBST_JOBFINISHED   0x4
#define JOBST_JOBQBUF       0x5
#define JOBST_JOBBFINISHED  0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC        32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC  256     /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS           32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE           1024    /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS        0
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO             16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT    (10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT   (30 * hz)
#endif

static int max_aio_procs = MAX_AIO_PROCS;
static int num_aio_procs = 0;
static int target_aio_procs = TARGET_AIO_PROCS;
static int max_queue_count = MAX_AIO_QUEUE;
static int num_queue_count = 0;
static int num_buf_aio = 0;
static int num_aio_resv_start = 0;
static int aiod_timeout;
static int aiod_lifetime;

static int max_aio_per_proc = MAX_AIO_PER_PROC,
    max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;

static int max_buf_aio = MAX_BUF_AIO;

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc,
    CTLFLAG_RW, &max_aio_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc,
    CTLFLAG_RW, &max_aio_queue_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
    CTLFLAG_RW, &max_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
    CTLFLAG_RD, &num_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
    CTLFLAG_RD, &num_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
    CTLFLAG_RW, &max_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
    CTLFLAG_RW, &target_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
    CTLFLAG_RW, &max_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
    CTLFLAG_RD, &num_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
    CTLFLAG_RW, &aiod_lifetime, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
    CTLFLAG_RW, &aiod_timeout, 0, "");


/*
 * Job queue item
 */

#define AIOCBLIST_CANCELLED     0x1
#define AIOCBLIST_RUNDOWN       0x4
#define AIOCBLIST_ASYNCFREE     0x8
#define AIOCBLIST_DONE          0x10

struct aiocblist {
    TAILQ_ENTRY(aiocblist) list;        /* List of jobs */
    TAILQ_ENTRY(aiocblist) plist;       /* List of jobs for proc */
    int jobflags;
    int jobstate;
    int inputcharge, outputcharge;
    struct buf *bp;                     /* buffer pointer */
    struct proc *userproc;              /* User process */
    struct aioproclist *jobaioproc;     /* AIO process descriptor */
    struct aio_liojob *lio;             /* optional lio job */
    struct aiocb *uuaiocb;              /* pointer in userspace of aiocb */
    struct aiocb uaiocb;                /* Kernel I/O control block */
};


/*
 * AIO process info
 */
#define AIOP_FREE       0x1     /* proc on free queue */
#define AIOP_SCHED      0x2     /* proc explicitly scheduled */

struct aioproclist {
    int aioprocflags;                   /* AIO proc flags */
    TAILQ_ENTRY(aioproclist) list;      /* List of processes */
    struct proc *aioproc;               /* The AIO thread */
    TAILQ_HEAD(,aiocblist) jobtorun;    /* suggested job to run */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
    int lioj_flags;
    int lioj_buffer_count;
    int lioj_buffer_finished_count;
    int lioj_queue_count;
    int lioj_queue_finished_count;
    struct sigevent lioj_signal;        /* signal on all I/O done */
    TAILQ_ENTRY(aio_liojob) lioj_list;
    struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL         0x1     /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED  0x2     /* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
    int kaio_flags;                     /* per process kaio flags */
    int kaio_maxactive_count;           /* maximum number of AIOs */
    int kaio_active_count;              /* number of currently used AIOs */
    int kaio_qallowed_count;            /* maximum size of AIO queue */
    int kaio_queue_count;               /* size of AIO queue */
    int kaio_ballowed_count;            /* maximum number of buffers */
    int kaio_queue_finished_count;      /* number of daemon jobs finished */
    int kaio_buffer_count;              /* number of physio buffers */
    int kaio_buffer_finished_count;     /* count of I/O done */
    struct proc *kaio_p;                /* process that uses this kaio block */
    TAILQ_HEAD(,aio_liojob) kaio_liojoblist;    /* list of lio jobs */
    TAILQ_HEAD(,aiocblist) kaio_jobqueue;       /* job queue for process */
    TAILQ_HEAD(,aiocblist) kaio_jobdone;        /* done queue for process */
    TAILQ_HEAD(,aiocblist) kaio_bufqueue;       /* buffer job queue for process */
    TAILQ_HEAD(,aiocblist) kaio_bufdone;        /* buffer done queue for process */
};

#define KAIO_RUNDOWN    0x1     /* process is being run down */
#define KAIO_WAKEUP     0x2     /* wakeup process when there is a significant event */


static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;         /* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;      /* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;     /* Pool of free jobs */

static void aio_init_aioinfo(struct proc *p);
static void aio_onceonly(void *);
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void);
static int aio_aqueue(struct proc *p, struct aiocb *job, int type);
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(const void *uproc);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

static vm_zone_t kaio_zone = 0, aiop_zone = 0,
    aiocb_zone = 0, aiol_zone = 0, aiolio_zone = 0;

/*
 * Startup initialization
 */
void
aio_onceonly(void *na)
{
    TAILQ_INIT(&aio_freeproc);
    TAILQ_INIT(&aio_activeproc);
    TAILQ_INIT(&aio_jobs);
    TAILQ_INIT(&aio_bufjobs);
    TAILQ_INIT(&aio_freejobs);
    kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
    aiop_zone = zinit("AIOP", sizeof(struct aioproclist), 0, 0, 1);
    aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
    aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof(int), 0, 0, 1);
    aiolio_zone = zinit("AIOLIO",
        AIO_LISTIO_MAX * sizeof(struct aio_liojob), 0, 0, 1);
    aiod_timeout = AIOD_TIMEOUT_DEFAULT;
    aiod_lifetime = AIOD_LIFETIME_DEFAULT;
    jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.
 * The aioinfo limits are set per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
    struct kaioinfo *ki;

    if (p->p_aioinfo == NULL) {
        ki = zalloc(kaio_zone);
        p->p_aioinfo = ki;
        ki->kaio_flags = 0;
        ki->kaio_maxactive_count = max_aio_per_proc;
        ki->kaio_active_count = 0;
        ki->kaio_qallowed_count = max_aio_queue_per_proc;
        ki->kaio_queue_count = 0;
        ki->kaio_ballowed_count = max_buf_aio;
        ki->kaio_buffer_count = 0;
        ki->kaio_buffer_finished_count = 0;
        ki->kaio_p = p;
        TAILQ_INIT(&ki->kaio_jobdone);
        TAILQ_INIT(&ki->kaio_jobqueue);
        TAILQ_INIT(&ki->kaio_bufdone);
        TAILQ_INIT(&ki->kaio_bufqueue);
        TAILQ_INIT(&ki->kaio_liojoblist);
    }
}

/*
 * Free a job entry.  Wait for completion if it is currently
 * active, but don't delay forever.  If we delay, we return
 * a flag that says that we have to restart the queue scan.
 */
int
aio_free_entry(struct aiocblist *aiocbe)
{
    struct kaioinfo *ki;
    struct aioproclist *aiop;
    struct aio_liojob *lj;
    struct proc *p;
    int error;
    int s;

    if (aiocbe->jobstate == JOBST_NULL)
        panic("aio_free_entry: freeing already free job");

    p = aiocbe->userproc;
    ki = p->p_aioinfo;
    lj = aiocbe->lio;
    if (ki == NULL)
        panic("aio_free_entry: missing p->p_aioinfo");

    if (aiocbe->jobstate == JOBST_JOBRUNNING) {
        if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
            return 0;
        aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
        tsleep(aiocbe, PRIBIO|PCATCH, "jobwai", 0);
    }
    aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;

    if (aiocbe->bp == NULL) {
        if (ki->kaio_queue_count <= 0)
            panic("aio_free_entry: process queue size <= 0");
        if (num_queue_count <= 0)
            panic("aio_free_entry: system wide queue size <= 0");

        if (lj) {
            lj->lioj_queue_count--;
            if (aiocbe->jobflags & AIOCBLIST_DONE)
                lj->lioj_queue_finished_count--;
        }
        ki->kaio_queue_count--;
        if (aiocbe->jobflags & AIOCBLIST_DONE)
            ki->kaio_queue_finished_count--;
        num_queue_count--;

    } else {
        if (lj) {
            lj->lioj_buffer_count--;
            if (aiocbe->jobflags & AIOCBLIST_DONE)
                lj->lioj_buffer_finished_count--;
        }
        if (aiocbe->jobflags & AIOCBLIST_DONE)
            ki->kaio_buffer_finished_count--;
        ki->kaio_buffer_count--;
        num_buf_aio--;

    }

    if ((ki->kaio_flags & KAIO_WAKEUP) ||
        ((ki->kaio_flags & KAIO_RUNDOWN) &&
        ((ki->kaio_buffer_count == 0) &&
        (ki->kaio_queue_count == 0)))) {
        ki->kaio_flags &= ~KAIO_WAKEUP;
        wakeup(p);
    }

    if (aiocbe->jobstate == JOBST_JOBQBUF) {
        if ((error = aio_fphysio(p, aiocbe, 1)) != 0)
            return error;
        if (aiocbe->jobstate != JOBST_JOBBFINISHED)
            panic("aio_free_entry: invalid physio finish-up state");
        s = splbio();
        TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
        splx(s);
    } else if (aiocbe->jobstate == JOBST_JOBQPROC) {
        aiop = aiocbe->jobaioproc;
        TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
    } else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
        TAILQ_REMOVE(&aio_jobs, aiocbe, list);
    } else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
        TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
    } else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
        s = splbio();
        TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
        splx(s);
        if (aiocbe->bp) {
            vunmapbuf(aiocbe->bp);
            relpbuf(aiocbe->bp, NULL);
            aiocbe->bp = NULL;
        }
    }
    if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
        TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
        zfree(aiolio_zone, lj);
    }
    TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
    aiocbe->jobstate = JOBST_NULL;
    return 0;
}

/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
    int s;
    struct kaioinfo *ki;
    struct aio_liojob *lj, *ljn;
    struct aiocblist *aiocbe, *aiocbn;

    ki = p->p_aioinfo;
    if (ki == NULL)
        return;

    ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
    while ((ki->kaio_active_count > 0) ||
        (ki->kaio_buffer_count > ki->kaio_buffer_finished_count)) {
        ki->kaio_flags |= KAIO_RUNDOWN;
        if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
            break;
    }

restart1:
    for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone);
        aiocbe;
        aiocbe = aiocbn) {
        aiocbn = TAILQ_NEXT(aiocbe, plist);
        if (aio_free_entry(aiocbe))
            goto restart1;
    }

restart2:
    for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue);
        aiocbe;
        aiocbe = aiocbn) {
        aiocbn = TAILQ_NEXT(aiocbe, plist);
        if (aio_free_entry(aiocbe))
            goto restart2;
    }

    /*
     * Note the use of lots of splbio here, trying to avoid
     * splbio for long chains of I/O.  Probably unnecessary.
     */

restart3:
    s = splbio();
    while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
        ki->kaio_flags |= KAIO_WAKEUP;
        tsleep(p, PRIBIO, "aioprn", 0);
        splx(s);
        goto restart3;
    }
    splx(s);

restart4:
    s = splbio();
    for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone);
        aiocbe;
        aiocbe = aiocbn) {
        aiocbn = TAILQ_NEXT(aiocbe, plist);
        if (aio_free_entry(aiocbe)) {
            splx(s);
            goto restart4;
        }
    }
    splx(s);

    for (lj = TAILQ_FIRST(&ki->kaio_liojoblist);
        lj;
        lj = ljn) {
        ljn = TAILQ_NEXT(lj, lioj_list);
        if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
            TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
            zfree(aiolio_zone, lj);
        } else {
#if defined(DIAGNOSTIC)
            printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, QF:%d\n",
                lj->lioj_buffer_count, lj->lioj_buffer_finished_count,
                lj->lioj_queue_count, lj->lioj_queue_finished_count);
#endif
        }
    }

    zfree(kaio_zone, ki);
    p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon)
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
    struct aiocblist *aiocbe;

    aiocbe = TAILQ_FIRST(&aiop->jobtorun);
    if (aiocbe) {
        TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
        return aiocbe;
    }

    for (aiocbe = TAILQ_FIRST(&aio_jobs);
        aiocbe;
        aiocbe = TAILQ_NEXT(aiocbe, list)) {
        struct kaioinfo *ki;
        struct proc *userp;

        userp = aiocbe->userproc;
        ki = userp->p_aioinfo;

        if (ki->kaio_active_count < ki->kaio_maxactive_count) {
            TAILQ_REMOVE(&aio_jobs, aiocbe, list);
            return aiocbe;
        }
    }

    return NULL;
}

/*
 * The AIO processing activity.  This is the code that does the
 * I/O request for the non-physio version of the operations.  The
 * normal vn operations are used, and this code should work in
 * all instances for every type of file, including pipes, sockets,
 * fifos, and regular files.
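 *
 * The request runs in the context of an AIO daemon that has temporarily
 * attached the client's address space and file descriptors (see
 * aio_daemon() below), so the user's buffer can be addressed directly.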
 */
void
aio_process(struct aiocblist *aiocbe)
{
    struct filedesc *fdp;
    struct proc *userp, *mycp;
    struct aiocb *cb;
    struct file *fp;
    struct uio auio;
    struct iovec aiov;
    unsigned int fd;
    int cnt;
    int error;
    off_t offset;
    int oublock_st, oublock_end;
    int inblock_st, inblock_end;

    userp = aiocbe->userproc;
    cb = &aiocbe->uaiocb;

    mycp = curproc;

    fdp = mycp->p_fd;
    fd = cb->aio_fildes;
    fp = fdp->fd_ofiles[fd];

    aiov.iov_base = (void *)cb->aio_buf;
    aiov.iov_len = cb->aio_nbytes;

    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = offset = cb->aio_offset;
    auio.uio_resid = cb->aio_nbytes;
    cnt = cb->aio_nbytes;
    auio.uio_segflg = UIO_USERSPACE;
    auio.uio_procp = mycp;

    inblock_st = mycp->p_stats->p_ru.ru_inblock;
    oublock_st = mycp->p_stats->p_ru.ru_oublock;
    if (cb->aio_lio_opcode == LIO_READ) {
        auio.uio_rw = UIO_READ;
        error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred, FOF_OFFSET);
    } else {
        auio.uio_rw = UIO_WRITE;
        error = (*fp->f_ops->fo_write)(fp, &auio, fp->f_cred, FOF_OFFSET);
    }
    inblock_end = mycp->p_stats->p_ru.ru_inblock;
    oublock_end = mycp->p_stats->p_ru.ru_oublock;

    aiocbe->inputcharge = inblock_end - inblock_st;
    aiocbe->outputcharge = oublock_end - oublock_st;

    if (error) {
        if (auio.uio_resid != cnt) {
            if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
                error = 0;
            if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
                psignal(userp, SIGPIPE);
        }
    }

    cnt -= auio.uio_resid;
    cb->_aiocb_private.error = error;
    cb->_aiocb_private.status = cnt;

    return;
}

/*
 * The AIO daemon: most of the actual work is done in aio_process(),
 * but the setup (and address space management) is done in this routine.
 */
static void
aio_daemon(const void *uproc)
{
    int s;
    struct aioproclist *aiop;
    struct vmspace *myvm;
    struct proc *mycp;

    /*
     * Local copies of curproc (cp) and vmspace (myvm)
     */
    mycp = curproc;
    myvm = mycp->p_vmspace;

    if (mycp->p_textvp) {
        vrele(mycp->p_textvp);
        mycp->p_textvp = NULL;
    }

    /*
     * Allocate and ready the aio control info.  There is one
     * aiop structure per daemon.
     */
    aiop = zalloc(aiop_zone);
    aiop->aioproc = mycp;
    aiop->aioprocflags |= AIOP_FREE;
    TAILQ_INIT(&aiop->jobtorun);

    /*
     * Place thread (lightweight process) onto the AIO free thread list
     */
    if (TAILQ_EMPTY(&aio_freeproc))
        wakeup(&aio_freeproc);
    TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

    /*
     * Make up a name for the daemon
     */
    strcpy(mycp->p_comm, "aiod");

    /*
     * Get rid of our current file descriptors.  AIODs don't need any
     * file descriptors, except as temporarily inherited from the client.
     * Credentials are also cloned, and made equivalent to "root."
     */
    fdfree(mycp);
    mycp->p_fd = NULL;
    mycp->p_ucred = crcopy(mycp->p_ucred);
    mycp->p_ucred->cr_uid = 0;
    mycp->p_ucred->cr_ngroups = 1;
    mycp->p_ucred->cr_groups[0] = 1;

    /*
     * The daemon resides in its own pgrp.
     */
    enterpgrp(mycp, mycp->p_pid, 1);

    /*
     * Mark special process type
     */
    mycp->p_flag |= P_SYSTEM|P_KTHREADP;

    /*
     * Wake up the parent process.  (The parent sleeps to keep from
     * blasting away and creating too many daemons.)
     */
    wakeup(mycp);

    while (1) {
        struct proc *curcp;
        struct aiocblist *aiocbe;

        /*
         * curcp is the current daemon process context.
         * userp is the current user process context.
         */
        curcp = mycp;

        /*
         * Take daemon off of free queue
         */
        if (aiop->aioprocflags & AIOP_FREE) {
            TAILQ_REMOVE(&aio_freeproc, aiop, list);
            TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
            aiop->aioprocflags &= ~AIOP_FREE;
        }
        aiop->aioprocflags &= ~AIOP_SCHED;

        /*
         * Check for jobs
         */
        while ((aiocbe = aio_selectjob(aiop)) != NULL) {
            struct proc *userp;
            struct aiocb *cb;
            struct kaioinfo *ki;
            struct aio_liojob *lj;

            cb = &aiocbe->uaiocb;
            userp = aiocbe->userproc;

            aiocbe->jobstate = JOBST_JOBRUNNING;

            /*
             * Connect to process address space for user program
             */
            if (userp != curcp) {
                struct vmspace *tmpvm;
                /*
                 * Save the current address space that we are connected to.
                 */
                tmpvm = mycp->p_vmspace;
                /*
                 * Point to the new user address space, and refer to it.
                 */
                mycp->p_vmspace = userp->p_vmspace;
                mycp->p_vmspace->vm_refcnt++;
                /*
                 * Activate the new mapping.
                 */
                pmap_activate(mycp);
                /*
                 * If the old address space wasn't the daemon's own address
                 * space, then we need to remove the daemon's reference from
                 * the other process that it was acting on behalf of.
                 */
                if (tmpvm != myvm) {
                    vmspace_free(tmpvm);
                }
                /*
                 * Disassociate from the previous client's file descriptors,
                 * and associate to the new client's descriptors.  Note that
                 * the daemon doesn't need to worry about its original
                 * descriptors, because they were originally freed.
                 */
                if (mycp->p_fd)
                    fdfree(mycp);
                mycp->p_fd = fdshare(userp);
                curcp = userp;
            }

            ki = userp->p_aioinfo;
            lj = aiocbe->lio;

            /*
             * Account for currently active jobs
             */
            ki->kaio_active_count++;

            /*
             * Do the I/O function
             */
            aiocbe->jobaioproc = aiop;
            aio_process(aiocbe);

            /*
             * Decrement the active job count
             */
            ki->kaio_active_count--;

            /*
             * Increment the completion count for wakeup/signal comparisons
             */
            aiocbe->jobflags |= AIOCBLIST_DONE;
            ki->kaio_queue_finished_count++;
            if (lj) {
                lj->lioj_queue_finished_count++;
            }
            if ((ki->kaio_flags & KAIO_WAKEUP) ||
                ((ki->kaio_flags & KAIO_RUNDOWN) &&
                (ki->kaio_active_count == 0))) {
                ki->kaio_flags &= ~KAIO_WAKEUP;
                wakeup(userp);
            }

            s = splbio();
            if (lj && (lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
                LIOJ_SIGNAL) {
                if ((lj->lioj_queue_finished_count == lj->lioj_queue_count) &&
                    (lj->lioj_buffer_finished_count == lj->lioj_buffer_count)) {
                    psignal(userp, lj->lioj_signal.sigev_signo);
                    lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
                }
            }
            splx(s);

            aiocbe->jobstate = JOBST_JOBFINISHED;

            /*
             * If the I/O request should be automatically rundown, do the
             * needed cleanup.  Otherwise, place the queue entry for
             * the just finished I/O request into the done queue for the
             * associated client.
             */
            if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
                aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
                TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
            } else {
                TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
                TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
            }

            if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
                wakeup(aiocbe);
                aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
            }

            if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
                psignal(userp, cb->aio_sigevent.sigev_signo);
            }
        }

        /*
         * Disconnect from user address space
         */
        if (curcp != mycp) {
            struct vmspace *tmpvm;
            /*
             * Get the user address space to disconnect from.
             */
            tmpvm = mycp->p_vmspace;
            /*
             * Get original address space for daemon.
             */
            mycp->p_vmspace = myvm;
            /*
             * Activate the daemon's address space.
             */
            pmap_activate(mycp);
#if defined(DIAGNOSTIC)
            if (tmpvm == myvm)
                printf("AIOD: vmspace problem -- %d\n", mycp->p_pid);
#endif
            /*
             * Remove our vmspace reference.
             */
            vmspace_free(tmpvm);
            /*
             * Disassociate from the user process's file descriptors.
             */
            if (mycp->p_fd)
                fdfree(mycp);
            mycp->p_fd = NULL;
            curcp = mycp;
        }

        /*
         * If we are the first to be put onto the free queue, wakeup
         * anyone waiting for a daemon.
         */
        TAILQ_REMOVE(&aio_activeproc, aiop, list);
        if (TAILQ_EMPTY(&aio_freeproc))
            wakeup(&aio_freeproc);
        TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
        aiop->aioprocflags |= AIOP_FREE;

        /*
         * If the daemon is inactive for a long time, allow it to exit,
         * thereby freeing resources.
         */
        if (((aiop->aioprocflags & AIOP_SCHED) == 0) &&
            tsleep(mycp, PRIBIO, "aiordy", aiod_lifetime)) {
            if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
                (TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
                if ((aiop->aioprocflags & AIOP_FREE) &&
                    (num_aio_procs > target_aio_procs)) {
                    TAILQ_REMOVE(&aio_freeproc, aiop, list);
                    zfree(aiop_zone, aiop);
                    num_aio_procs--;
#if defined(DIAGNOSTIC)
                    if (mycp->p_vmspace->vm_refcnt <= 1)
                        printf("AIOD: bad vm refcnt for exiting daemon: %d\n",
                            mycp->p_vmspace->vm_refcnt);
#endif
                    exit1(mycp, 0);
                }
            }
        }
    }
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.
 * The AIO daemon modifies its environment itself.
 */
static int
aio_newproc()
{
    int error;
    struct proc *p, *np;

    p = &proc0;
    error = fork1(p, RFPROC|RFMEM|RFNOWAIT);
    if (error)
        return error;
    np = pfind(p->p_retval[0]);
    cpu_set_fork_handler(np, aio_daemon, curproc);

    /*
     * Wait until the daemon is started, but continue on just in case
     * (to handle error conditions).
     */
    error = tsleep(np, PZERO, "aiosta", aiod_timeout);
    num_aio_procs++;

    return error;
}

/*
 * Try the high-performance physio method for eligible VCHR devices.  This
 * routine doesn't require the use of any additional threads, and has very
 * low overhead.
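 *
 * It only applies when the target is a raw (VCHR) device, the transfer is
 * a multiple of DEV_BSIZE, no larger than MAXPHYS, and the process is
 * within its physio buffer quota; otherwise the caller falls back to the
 * daemon-based path.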
 */
int
aio_qphysio(p, aiocbe)
    struct proc *p;
    struct aiocblist *aiocbe;
{
    int error;
    struct aiocb *cb;
    struct file *fp;
    struct buf *bp;
    int bflags;
    struct vnode *vp;
    struct kaioinfo *ki;
    struct filedesc *fdp;
    struct aio_liojob *lj;
    int fd;
    int s;
    int cnt;
    dev_t dev;
    int rw;
    d_strategy_t *fstrategy;
    struct cdevsw *cdev;
    struct cdevsw *bdev;

    cb = &aiocbe->uaiocb;
    fdp = p->p_fd;
    fd = cb->aio_fildes;
    fp = fdp->fd_ofiles[fd];

    if (fp->f_type != DTYPE_VNODE) {
        return -1;
    }

    vp = (struct vnode *)fp->f_data;
    if (vp->v_type != VCHR || ((cb->aio_nbytes & (DEV_BSIZE - 1)) != 0)) {
        return -1;
    }

    if ((cb->aio_nbytes > MAXPHYS) && (num_buf_aio >= max_buf_aio)) {
        return -1;
    }

    if ((vp->v_specinfo == NULL) || (vp->v_flag & VISTTY)) {
        return -1;
    }

    if (vp->v_rdev == NODEV) {
        return -1;
    }

    cdev = devsw(vp->v_rdev);
    if (cdev == NULL) {
        return -1;
    }

    if (cdev->d_bmaj == -1) {
        return -1;
    }
    bdev = cdev;

    ki = p->p_aioinfo;
    if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) {
        return -1;
    }

    cnt = cb->aio_nbytes;
    if (cnt > MAXPHYS) {
        return -1;
    }

    dev = makebdev(bdev->d_bmaj, minor(vp->v_rdev));

    /*
     * Physical I/O is charged directly to the process, so we don't have
     * to fake it.
     */
    aiocbe->inputcharge = 0;
    aiocbe->outputcharge = 0;

    ki->kaio_buffer_count++;

    lj = aiocbe->lio;
    if (lj) {
        lj->lioj_buffer_count++;
    }

    /* create and build a buffer header for a transfer */
    bp = (struct buf *)getpbuf(NULL);

    /*
     * get a copy of the kva from the physical buffer
     */
    bp->b_caller1 = p;
    bp->b_dev = dev;
    error = bp->b_error = 0;

    if (cb->aio_lio_opcode == LIO_WRITE) {
        rw = 0;
        bflags = B_WRITE;
    } else {
        rw = 1;
        bflags = B_READ;
    }

    bp->b_bcount = cb->aio_nbytes;
    bp->b_bufsize = cb->aio_nbytes;
    bp->b_flags = B_BUSY | B_PHYS | B_CALL | bflags;
    bp->b_iodone = aio_physwakeup;
    bp->b_saveaddr = bp->b_data;
    bp->b_data = (void *)cb->aio_buf;
    bp->b_blkno = btodb(cb->aio_offset);

    if (rw && !useracc(bp->b_data, bp->b_bufsize, B_WRITE)) {
        error = EFAULT;
        goto doerror;
    }
    if (!rw && !useracc(bp->b_data, bp->b_bufsize, B_READ)) {
        error = EFAULT;
        goto doerror;
    }

    /* bring buffer into kernel space */
    vmapbuf(bp);

    s = splbio();
    aiocbe->bp = bp;
    bp->b_spc = (void *)aiocbe;
    TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
    TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
    aiocbe->jobstate = JOBST_JOBQBUF;
    cb->_aiocb_private.status = cb->aio_nbytes;
    num_buf_aio++;
    fstrategy = bdev->d_strategy;
    bp->b_error = 0;

    splx(s);
    /* perform transfer */
    (*fstrategy)(bp);

    s = splbio();
    /*
     * If we had an error invoking the request, or an error in processing
     * the request before we have returned, we process it as an error in
     * transfer.  Note that such an I/O error is not indicated immediately,
     * but is returned using the aio_error mechanism.  In this case,
     * aio_suspend will return immediately.
     */
    if (bp->b_error || (bp->b_flags & B_ERROR)) {
        struct aiocb *job = aiocbe->uuaiocb;

        aiocbe->uaiocb._aiocb_private.status = 0;
        suword(&job->_aiocb_private.status, 0);
        aiocbe->uaiocb._aiocb_private.error = bp->b_error;
        suword(&job->_aiocb_private.error, bp->b_error);

        ki->kaio_buffer_finished_count++;

        if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
            aiocbe->jobstate = JOBST_JOBBFINISHED;
            aiocbe->jobflags |= AIOCBLIST_DONE;
            TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
            TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
            TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
        }
    }
    splx(s);
    return 0;

doerror:
    ki->kaio_buffer_count--;
    if (lj) {
        lj->lioj_buffer_count--;
    }
    aiocbe->bp = NULL;
    relpbuf(bp, NULL);
    return error;
}

/*
 * This waits/tests physio completion.
 */
int
aio_fphysio(p, iocb, flgwait)
    struct proc *p;
    struct aiocblist *iocb;
    int flgwait;
{
    int s;
    struct buf *bp;
    int error;

    bp = iocb->bp;

    s = splbio();
    if (flgwait == 0) {
        if ((bp->b_flags & B_DONE) == 0) {
            splx(s);
            return EINPROGRESS;
        }
    }

    while ((bp->b_flags & B_DONE) == 0) {
        if (tsleep((caddr_t)bp, PRIBIO, "physstr", aiod_timeout)) {
            if ((bp->b_flags & B_DONE) == 0) {
                splx(s);
                return EINPROGRESS;
            } else {
                break;
            }
        }
    }

    /* release mapping into kernel space */
    vunmapbuf(bp);
    iocb->bp = 0;

    error = 0;
    /*
     * check for an error
     */
    if (bp->b_flags & B_ERROR) {
        error = bp->b_error;
    }

    relpbuf(bp, NULL);
    return (error);
}

/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio
 * VCHR technique is done in this code.
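 *
 * The physio path is attempted first via aio_qphysio(); if that declines
 * the request (returns -1), the job is placed on the per-process and
 * global queues to be picked up by an AIO daemon.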
 */
static int
_aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
{
    struct filedesc *fdp;
    struct file *fp;
    unsigned int fd;

    int error;
    int opcode;
    struct aiocblist *aiocbe;
    struct aioproclist *aiop;
    struct kaioinfo *ki;

    if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL) {
        TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
    } else {
        aiocbe = zalloc(aiocb_zone);
    }

    aiocbe->inputcharge = 0;
    aiocbe->outputcharge = 0;

    suword(&job->_aiocb_private.status, -1);
    suword(&job->_aiocb_private.error, 0);
    suword(&job->_aiocb_private.kernelinfo, -1);

    error = copyin((caddr_t)job,
        (caddr_t)&aiocbe->uaiocb, sizeof aiocbe->uaiocb);
    if (error) {
        suword(&job->_aiocb_private.error, error);

        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        return error;
    }

    /*
     * Save userspace address of the job info
     */
    aiocbe->uuaiocb = job;

    /*
     * Get the opcode
     */
    if (type != LIO_NOP) {
        aiocbe->uaiocb.aio_lio_opcode = type;
    }
    opcode = aiocbe->uaiocb.aio_lio_opcode;

    /*
     * Get the fd info for process
     */
    fdp = p->p_fd;

    /*
     * Range check file descriptor
     */
    fd = aiocbe->uaiocb.aio_fildes;
    if (fd >= fdp->fd_nfiles) {
        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        if (type == 0) {
            suword(&job->_aiocb_private.error, EBADF);
        }
        return EBADF;
    }

    fp = fdp->fd_ofiles[fd];
    if ((fp == NULL) ||
        ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0))) {
        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        if (type == 0) {
            suword(&job->_aiocb_private.error, EBADF);
        }
        return EBADF;
    }

    if (aiocbe->uaiocb.aio_offset == -1LL) {
        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        if (type == 0) {
            suword(&job->_aiocb_private.error, EINVAL);
        }
        return EINVAL;
    }

    error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
    if (error) {
        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        if (type == 0) {
            suword(&job->_aiocb_private.error, EINVAL);
        }
        return error;
    }

    aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
    if (jobrefid == LONG_MAX)
        jobrefid = 1;
    else
        jobrefid++;

    if (opcode == LIO_NOP) {
        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        if (type == 0) {
            suword(&job->_aiocb_private.error, 0);
            suword(&job->_aiocb_private.status, 0);
            suword(&job->_aiocb_private.kernelinfo, 0);
        }
        return 0;
    }

    if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
        TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
        if (type == 0) {
            suword(&job->_aiocb_private.status, 0);
            suword(&job->_aiocb_private.error, EINVAL);
        }
        return EINVAL;
    }

    suword(&job->_aiocb_private.error, EINPROGRESS);
    aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
    aiocbe->userproc = p;
    aiocbe->jobflags = 0;
    aiocbe->lio = lj;
    ki = p->p_aioinfo;

    if ((error = aio_qphysio(p, aiocbe)) == 0) {
        return 0;
    } else if (error > 0) {
        suword(&job->_aiocb_private.status, 0);
        aiocbe->uaiocb._aiocb_private.error = error;
        suword(&job->_aiocb_private.error, error);
        return error;
    }

    /*
     * No buffer for daemon I/O
     */
    aiocbe->bp = NULL;

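    /*
     * Charge the job against the per-process and global queue counts,
     * then make it visible both on the owning process's queue and on
     * the global list serviced by the AIO daemons.
     */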
    ki->kaio_queue_count++;
    if (lj) {
        lj->lioj_queue_count++;
    }
    TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
    TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
    aiocbe->jobstate = JOBST_JOBQGLOBAL;

    num_queue_count++;
    error = 0;

    /*
     * If we don't have a free AIO process, and we are below our
     * quota, then start one.  Otherwise, depend on the subsequent
     * I/O completions to pick up this job.  If we don't successfully
     * create the new process (thread) due to resource issues, we
     * return an error for now (EAGAIN), which is likely not the
     * correct thing to do.
     */
retryproc:
    if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
        TAILQ_REMOVE(&aio_freeproc, aiop, list);
        TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
        aiop->aioprocflags &= ~AIOP_FREE;
        wakeup(aiop->aioproc);
    } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
        ((ki->kaio_active_count + num_aio_resv_start) <
        ki->kaio_maxactive_count)) {
        num_aio_resv_start++;
        if ((error = aio_newproc()) == 0) {
            num_aio_resv_start--;
            p->p_retval[0] = 0;
            goto retryproc;
        }
        num_aio_resv_start--;
    }
    return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct proc *p, struct aiocb *job, int type)
{
    struct kaioinfo *ki;

    if (p->p_aioinfo == NULL) {
        aio_init_aioinfo(p);
    }

    if (num_queue_count >= max_queue_count)
        return EAGAIN;

    ki = p->p_aioinfo;
    if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
        return EAGAIN;

    return _aio_aqueue(p, job, NULL, type);
}

/*
 * Support the aio_return system call; as a side effect, kernel
 * resources are released.
 */
int
aio_return(struct proc *p, struct aio_return_args *uap)
{
    int s;
    int jobref;
    struct aiocblist *cb, *ncb;
    struct aiocb *ujob;
    struct kaioinfo *ki;

    ki = p->p_aioinfo;
    if (ki == NULL) {
        return EINVAL;
    }

    ujob = uap->aiocbp;

    jobref = fuword(&ujob->_aiocb_private.kernelinfo);
    if (jobref == -1 || jobref == 0)
        return EINVAL;

    for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
        cb;
        cb = TAILQ_NEXT(cb, plist)) {
        if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
            if (ujob == cb->uuaiocb) {
                p->p_retval[0] = cb->uaiocb._aiocb_private.status;
            } else {
                p->p_retval[0] = EFAULT;
            }
            if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
                curproc->p_stats->p_ru.ru_oublock += cb->outputcharge;
                cb->outputcharge = 0;
            } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
                curproc->p_stats->p_ru.ru_inblock += cb->inputcharge;
                cb->inputcharge = 0;
            }
            aio_free_entry(cb);
            return 0;
        }
    }

    s = splbio();
    for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
        cb;
        cb = ncb) {
        ncb = TAILQ_NEXT(cb, plist);
        if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
            splx(s);
            if (ujob == cb->uuaiocb) {
                p->p_retval[0] = cb->uaiocb._aiocb_private.status;
            } else {
                p->p_retval[0] = EFAULT;
            }
            aio_free_entry(cb);
            return 0;
        }
    }
    splx(s);

    return (EINVAL);
}

/*
 * Allow a process to wake up when any of the I/O requests are
 * completed.
 */
int
aio_suspend(struct proc *p, struct aio_suspend_args *uap)
{
    struct timeval atv;
    struct timespec ts;
    struct aiocb *const *cbptr, *cbp;
    struct kaioinfo *ki;
    struct aiocblist *cb;
    int i;
    int njoblist;
    int error, s, timo;
    int *ijoblist;
    struct aiocb **ujoblist;

    if (uap->nent >= AIO_LISTIO_MAX)
        return EINVAL;

    timo = 0;
    if (uap->timeout) {
        /*
         * Get timespec struct
         */
        if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) {
            return error;
        }

        if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
            return (EINVAL);

        TIMESPEC_TO_TIMEVAL(&atv, &ts);
        if (itimerfix(&atv))
            return (EINVAL);
        timo = tvtohz(&atv);
    }

    ki = p->p_aioinfo;
    if (ki == NULL)
        return EAGAIN;

    njoblist = 0;
    ijoblist = zalloc(aiol_zone);
    ujoblist = zalloc(aiol_zone);
    cbptr = uap->aiocbp;

    for (i = 0; i < uap->nent; i++) {
        cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
        if (cbp == 0)
            continue;
        ujoblist[njoblist] = cbp;
        ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
        njoblist++;
    }
    if (njoblist == 0) {
        zfree(aiol_zone, ijoblist);
        zfree(aiol_zone, ujoblist);
        return 0;
    }

    error = 0;
    while (1) {
        for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
            cb; cb = TAILQ_NEXT(cb, plist)) {
            for (i = 0; i < njoblist; i++) {
                if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
                    ijoblist[i]) {
                    if (ujoblist[i] != cb->uuaiocb)
                        error = EINVAL;
                    zfree(aiol_zone, ijoblist);
                    zfree(aiol_zone, ujoblist);
                    return error;
                }
            }
        }

        s = splbio();
        for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
            cb; cb = TAILQ_NEXT(cb, plist)) {
            for (i = 0; i < njoblist; i++) {
                if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
                    ijoblist[i]) {
                    splx(s);
                    if (ujoblist[i] != cb->uuaiocb)
                        error = EINVAL;
                    zfree(aiol_zone, ijoblist);
                    zfree(aiol_zone, ujoblist);
                    return error;
                }
            }
        }

        ki->kaio_flags |= KAIO_WAKEUP;
        error = tsleep(p, PRIBIO|PCATCH, "aiospn", timo);
        splx(s);

        if (error == EINTR) {
            zfree(aiol_zone, ijoblist);
            zfree(aiol_zone, ujoblist);
            return EINTR;
        } else if (error == EWOULDBLOCK) {
            zfree(aiol_zone, ijoblist);
            zfree(aiol_zone, ujoblist);
            return EAGAIN;
        }
    }

    /* NOTREACHED */
    return EINVAL;
}

/*
 * aio_cancel at the kernel level is a NOOP right now.  It
 * might be possible to support it partially in user mode, or
 * in kernel mode later on.
 */
int
aio_cancel(struct proc *p, struct aio_cancel_args *uap)
{
    return ENOSYS;
}

/*
 * aio_error is implemented at the kernel level for compatibility
 * purposes only.  For a user mode async implementation, it would be
 * best to do it in a userland subroutine.
 */
int
aio_error(struct proc *p, struct aio_error_args *uap)
{
    int s;
    struct aiocblist *cb;
    struct kaioinfo *ki;
    int jobref;

    ki = p->p_aioinfo;
    if (ki == NULL)
        return EINVAL;

    jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
    if ((jobref == -1) || (jobref == 0))
        return EINVAL;

    for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
        cb;
        cb = TAILQ_NEXT(cb, plist)) {
        if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
            p->p_retval[0] = cb->uaiocb._aiocb_private.error;
            return 0;
        }
    }

    for (cb = TAILQ_FIRST(&ki->kaio_jobqueue);
        cb;
        cb = TAILQ_NEXT(cb, plist)) {
        if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
            p->p_retval[0] = EINPROGRESS;
            return 0;
        }
    }

    s = splbio();
    for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
        cb;
        cb = TAILQ_NEXT(cb, plist)) {
        if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
            p->p_retval[0] = cb->uaiocb._aiocb_private.error;
            splx(s);
            return 0;
        }
    }

    for (cb = TAILQ_FIRST(&ki->kaio_bufqueue);
        cb;
        cb = TAILQ_NEXT(cb, plist)) {
        if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) == jobref) {
            p->p_retval[0] = EINPROGRESS;
            splx(s);
            return 0;
        }
    }
    splx(s);

    /*
     * Hack for lio
     */
    /*
     * status = fuword(&uap->aiocbp->_aiocb_private.status);
     * if (status == -1) {
     *     return fuword(&uap->aiocbp->_aiocb_private.error);
     * }
     */
    return EINVAL;
}

int
aio_read(struct proc *p, struct aio_read_args *uap)
{
    struct filedesc *fdp;
    struct file *fp;
    struct uio auio;
    struct iovec aiov;
    unsigned int fd;
    int cnt;
    struct aiocb iocb;
    int error, pmodes;

    pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
    if ((pmodes & AIO_PMODE_SYNC) == 0) {
        return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ);
    }

    /*
     * Get control block
     */
    if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb)) != 0)
        return error;

    /*
     * Get the fd info for process
     */
    fdp = p->p_fd;

    /*
     * Range check file descriptor
     */
    fd = iocb.aio_fildes;
    if (fd >= fdp->fd_nfiles)
        return EBADF;
    fp = fdp->fd_ofiles[fd];
    if ((fp == NULL) || ((fp->f_flag & FREAD) == 0))
        return EBADF;
    if (iocb.aio_offset == -1LL)
        return EINVAL;

    auio.uio_resid = iocb.aio_nbytes;
    if (auio.uio_resid < 0)
        return (EINVAL);

    /*
     * Process sync simply -- queue async request.
     */
    if ((iocb._aiocb_private.privatemodes & AIO_PMODE_SYNC) == 0) {
        return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ);
    }

    aiov.iov_base = (void *)iocb.aio_buf;
    aiov.iov_len = iocb.aio_nbytes;

    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = iocb.aio_offset;
    auio.uio_rw = UIO_READ;
    auio.uio_segflg = UIO_USERSPACE;
    auio.uio_procp = p;

    cnt = iocb.aio_nbytes;
    error = (*fp->f_ops->fo_read)(fp, &auio, fp->f_cred, FOF_OFFSET);
    if (error &&
        (auio.uio_resid != cnt) &&
        (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
        error = 0;
    cnt -= auio.uio_resid;
    p->p_retval[0] = cnt;
    return error;
}

int
aio_write(struct proc *p, struct aio_write_args *uap)
{
    struct filedesc *fdp;
    struct file *fp;
    struct uio auio;
    struct iovec aiov;
    unsigned int fd;
    int cnt;
    struct aiocb iocb;
    int error;
    int pmodes;

    /*
     * Process sync simply -- queue async request.
     */
    pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
    if ((pmodes & AIO_PMODE_SYNC) == 0) {
        return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_WRITE);
    }

    if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb)) != 0)
        return error;

    /*
     * Get the fd info for process
     */
    fdp = p->p_fd;

    /*
     * Range check file descriptor
     */
    fd = iocb.aio_fildes;
    if (fd >= fdp->fd_nfiles)
        return EBADF;
    fp = fdp->fd_ofiles[fd];
    if ((fp == NULL) || ((fp->f_flag & FWRITE) == 0))
        return EBADF;
    if (iocb.aio_offset == -1LL)
        return EINVAL;

    aiov.iov_base = (void *)iocb.aio_buf;
    aiov.iov_len = iocb.aio_nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = iocb.aio_offset;

    auio.uio_resid = iocb.aio_nbytes;
    if (auio.uio_resid < 0)
        return (EINVAL);

    auio.uio_rw = UIO_WRITE;
    auio.uio_segflg = UIO_USERSPACE;
    auio.uio_procp = p;

    cnt = iocb.aio_nbytes;
    error = (*fp->f_ops->fo_write)(fp, &auio, fp->f_cred, FOF_OFFSET);
    if (error) {
        if (auio.uio_resid != cnt) {
            if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
                error = 0;
            if (error == EPIPE)
                psignal(p, SIGPIPE);
        }
    }
    cnt -= auio.uio_resid;
    p->p_retval[0] = cnt;
    return error;
}

int
lio_listio(struct proc *p, struct lio_listio_args *uap)
{
    int nent, nentqueued;
    struct aiocb *iocb, *const *cbptr;
    struct aiocblist *cb;
    struct kaioinfo *ki;
    struct aio_liojob *lj;
    int error, runningcode;
    int nerror;
    int i;
    int s;

    if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) {
        return EINVAL;
    }

    nent = uap->nent;
    if (nent > AIO_LISTIO_MAX) {
        return EINVAL;
    }

    if (p->p_aioinfo == NULL) {
        aio_init_aioinfo(p);
    }

    if ((nent + num_queue_count) > max_queue_count) {
        return EAGAIN;
    }

    ki = p->p_aioinfo;
    if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count) {
        return EAGAIN;
    }

    lj = zalloc(aiolio_zone);
    if (!lj) {
        return EAGAIN;
    }

    lj->lioj_flags = 0;
    lj->lioj_buffer_count = 0;
    lj->lioj_buffer_finished_count = 0;
    lj->lioj_queue_count = 0;
    lj->lioj_queue_finished_count = 0;
    lj->lioj_ki = ki;
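    /*
     * Link the new lio job onto the per-process list; the signal setup
     * and the queueing of its component requests follow.
     */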
    TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

    /*
     * Setup signal
     */
    if (uap->sig && (uap->mode == LIO_NOWAIT)) {
        error = copyin(uap->sig, &lj->lioj_signal, sizeof lj->lioj_signal);
        if (error)
            return error;
        lj->lioj_flags |= LIOJ_SIGNAL;
        lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
    } else {
        lj->lioj_flags &= ~LIOJ_SIGNAL;
    }

    /*
     * get pointers to the list of I/O requests
     */

    nerror = 0;
    nentqueued = 0;
    cbptr = uap->acb_list;
    for (i = 0; i < uap->nent; i++) {
        iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
        if (((intptr_t)iocb != -1) && ((intptr_t)iocb != NULL)) {
            error = _aio_aqueue(p, iocb, lj, 0);
            if (error == 0) {
                nentqueued++;
            } else {
                nerror++;
            }
        }
    }

    /*
     * If we haven't queued any, then just return error
     */
    if (nentqueued == 0) {
        return 0;
    }

    /*
     * Calculate the appropriate error return
     */
    runningcode = 0;
    if (nerror)
        runningcode = EIO;

    if (uap->mode == LIO_WAIT) {
        while (1) {
            int found;

            found = 0;
            for (i = 0; i < uap->nent; i++) {
                int jobref, command;

                /*
                 * Fetch address of the control buf pointer in user space
                 */
                iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
                if (((intptr_t)iocb == -1) || ((intptr_t)iocb == 0))
                    continue;

                /*
                 * Fetch the associated command from user space
                 */
                command = fuword(&iocb->aio_lio_opcode);
                if (command == LIO_NOP) {
                    found++;
                    continue;
                }

                jobref = fuword(&iocb->_aiocb_private.kernelinfo);

                for (cb = TAILQ_FIRST(&ki->kaio_jobdone);
                    cb;
                    cb = TAILQ_NEXT(cb, plist)) {
                    if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
                        jobref) {
                        if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
                            curproc->p_stats->p_ru.ru_oublock +=
                                cb->outputcharge;
                            cb->outputcharge = 0;
                        } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
                            curproc->p_stats->p_ru.ru_inblock +=
                                cb->inputcharge;
                            cb->inputcharge = 0;
                        }
                        found++;
                        break;
                    }
                }

                s = splbio();
                for (cb = TAILQ_FIRST(&ki->kaio_bufdone);
                    cb;
                    cb = TAILQ_NEXT(cb, plist)) {
                    if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
                        jobref) {
                        found++;
                        break;
                    }
                }
                splx(s);

            }

            /*
             * If all I/Os have been disposed of, then we can return
             */
            if (found == nentqueued) {
                return runningcode;
            }

            ki->kaio_flags |= KAIO_WAKEUP;
            error = tsleep(p, PRIBIO|PCATCH, "aiospn", 0);

            if (error == EINTR) {
                return EINTR;
            } else if (error == EWOULDBLOCK) {
                return EAGAIN;
            }

        }
    }

    return runningcode;
}

/*
 * This is a weird hack so that we can post a signal.  It is safe
 * to do so from a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *ljarg)
{
    struct aio_liojob *lj = ljarg;

    if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) {
        if (lj->lioj_queue_count == lj->lioj_queue_finished_count) {
            psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
            lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
        }
    }
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups,
 * and signals.
 */
static void
aio_physwakeup(bp)
    struct buf *bp;
{
    struct aiocblist *aiocbe;
    struct proc *p;
    struct kaioinfo *ki;
    struct aio_liojob *lj;
    int s;

    s = splbio();

    wakeup((caddr_t)bp);
    bp->b_flags &= ~B_CALL;
    bp->b_flags |= B_DONE;

    aiocbe = (struct aiocblist *)bp->b_spc;
    if (aiocbe) {
        p = bp->b_caller1;

        aiocbe->jobstate = JOBST_JOBBFINISHED;
        aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
        aiocbe->uaiocb._aiocb_private.error = 0;
        aiocbe->jobflags |= AIOCBLIST_DONE;

        if (bp->b_flags & B_ERROR) {
            aiocbe->uaiocb._aiocb_private.error = bp->b_error;
        }

        lj = aiocbe->lio;
        if (lj) {
            lj->lioj_buffer_finished_count++;
            /*
             * wakeup/signal if all of the interrupt jobs are done
             */
            if (lj->lioj_buffer_finished_count == lj->lioj_buffer_count) {
                /*
                 * post a signal if it is called for
                 */
                if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
                    LIOJ_SIGNAL) {
                    lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
                    timeout(process_signal, lj, 0);
                }
            }
        }

        ki = p->p_aioinfo;
        if (ki) {
            ki->kaio_buffer_finished_count++;
            TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
            TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
            TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
            /*
             * and do the wakeup
             */
            if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
                ki->kaio_flags &= ~KAIO_WAKEUP;
                wakeup(p);
            }
        }
    }
    splx(s);
}
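
/*
 * Illustrative userland sketch (not part of this file): one way a client
 * might drive the facility implemented above through the POSIX
 * aio_read()/aio_error()/aio_return() interface.  The file name, buffer
 * size, and polling interval below are arbitrary choices for the example;
 * a real program could block in aio_suspend() instead of polling.
 *
 *     #include <aio.h>
 *     #include <errno.h>
 *     #include <fcntl.h>
 *     #include <stdio.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *
 *     int
 *     main(void)
 *     {
 *         static char buf[512];
 *         struct aiocb cb;
 *         int fd, error;
 *
 *         fd = open("/etc/motd", O_RDONLY);
 *         if (fd == -1)
 *             return 1;
 *
 *         memset(&cb, 0, sizeof(cb));
 *         cb.aio_fildes = fd;
 *         cb.aio_buf = buf;
 *         cb.aio_nbytes = sizeof(buf);
 *         cb.aio_offset = 0;
 *
 *         if (aio_read(&cb) == -1)
 *             return 1;
 *
 *         while ((error = aio_error(&cb)) == EINPROGRESS)
 *             usleep(1000);
 *
 *         if (error == 0)
 *             printf("read %ld bytes\n", (long)aio_return(&cb));
 *
 *         close(fd);
 *         return 0;
 *     }
 */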