Lines matching full:job (FreeBSD kernel AIO, sys/kern/vfs_aio.c; each hit is prefixed with its source line number and annotated at right with its containing function)

269 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
300 static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */
306 static int aio_free_entry(struct kaiocb *job);
307 static void aio_process_rw(struct kaiocb *job);
308 static void aio_process_sync(struct kaiocb *job);
309 static void aio_process_mlock(struct kaiocb *job);
314 static int aio_queue_file(struct file *fp, struct kaiocb *job);
319 static int aio_qbio(struct proc *p, struct kaiocb *job);
321 static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
322 static bool aio_clear_cancel_function_locked(struct kaiocb *job);
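
The declarations above show the two queues a job can sit on at once: the per-process kaio_jobqueue and the global aio_jobs run list that the aio daemons service (the parenthesized letters name the lock protecting each field). A minimal userland sketch of that double-linking pattern, with hypothetical names and no locking, using only <sys/queue.h>:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct job {
        int id;
        TAILQ_ENTRY(job) plist;    /* links on the per-process queue */
        TAILQ_ENTRY(job) list;     /* links on the global run queue */
    };

    static TAILQ_HEAD(, job) proc_jobs = TAILQ_HEAD_INITIALIZER(proc_jobs);
    static TAILQ_HEAD(, job) run_queue = TAILQ_HEAD_INITIALIZER(run_queue);

    int
    main(void)
    {
        struct job *j = calloc(1, sizeof(*j));

        j->id = 1;
        /* A queued job is visible both to its owner and to the daemons. */
        TAILQ_INSERT_TAIL(&proc_jobs, j, plist);
        TAILQ_INSERT_TAIL(&run_queue, j, list);

        /* A daemon claims the job from the run queue only... */
        TAILQ_REMOVE(&run_queue, j, list);
        printf("running job %d\n", j->id);

        /* ...while completion also unlinks it from the owner's queue. */
        TAILQ_REMOVE(&proc_jobs, j, plist);
        free(j);
        return (0);
    }

Because a job stays on its owner's queue while a daemon runs it, the owner can still find it for cancellation, which is the situation aio_cancel_job() below has to handle.
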
478 * Free a job entry. Wait for completion if it is currently active, but don't
483 aio_free_entry(struct kaiocb *job) in aio_free_entry() argument
489 p = job->userproc; in aio_free_entry()
495 MPASS(job->jobflags & KAIOCB_FINISHED); in aio_free_entry()
502 TAILQ_REMOVE(&ki->kaio_done, job, plist); in aio_free_entry()
503 TAILQ_REMOVE(&ki->kaio_all, job, allist); in aio_free_entry()
505 lj = job->lio; in aio_free_entry()
521 /* job is going away, we need to destroy any knotes */ in aio_free_entry()
522 knlist_delete(&job->klist, curthread, 1); in aio_free_entry()
524 sigqueue_take(&job->ksi); in aio_free_entry()
533 * need a thread pointer from the process owning the job that is in aio_free_entry()
538 * a kaiocb from the current process' job list either via a in aio_free_entry()
549 if (job->fd_file) in aio_free_entry()
550 fdrop(job->fd_file, curthread); in aio_free_entry()
551 crfree(job->cred); in aio_free_entry()
552 if (job->uiop != &job->uio) in aio_free_entry()
553 freeuio(job->uiop); in aio_free_entry()
554 uma_zfree(aiocb_zone, job); in aio_free_entry()
568 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job) in aio_cancel_job() argument
574 if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED)) in aio_cancel_job()
576 MPASS((job->jobflags & KAIOCB_CANCELLING) == 0); in aio_cancel_job()
577 job->jobflags |= KAIOCB_CANCELLED; in aio_cancel_job()
579 func = job->cancel_fn; in aio_cancel_job()
582 * If there is no cancel routine, just leave the job marked as in aio_cancel_job()
583 * cancelled. The job should be in active use by a caller who in aio_cancel_job()
592 * completions of this job. This prevents the job from being in aio_cancel_job()
597 job->jobflags |= KAIOCB_CANCELLING; in aio_cancel_job()
599 func(job); in aio_cancel_job()
601 job->jobflags &= ~KAIOCB_CANCELLING; in aio_cancel_job()
602 if (job->jobflags & KAIOCB_FINISHED) { in aio_cancel_job()
603 cancelled = job->uaiocb._aiocb_private.error == ECANCELED; in aio_cancel_job()
604 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); in aio_cancel_job()
605 aio_bio_done_notify(p, job); in aio_cancel_job()
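
Taken together, the aio_cancel_job() hits describe a small state machine over the job flags: a job already KAIOCB_CANCELLED or KAIOCB_FINISHED is left alone; otherwise KAIOCB_CANCELLED is latched and, if a cancel routine is installed, KAIOCB_CANCELLING is held across the (unlocked) call so completion delivery is deferred until the canceller is done. A single-threaded model of those transitions, with invented flag values and the locking omitted:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flag values mirroring the KAIOCB_* bits. */
    #define JOB_CANCELLED   0x01
    #define JOB_CANCELLING  0x02
    #define JOB_FINISHED    0x04

    struct job {
        int flags;
        void (*cancel_fn)(struct job *);
    };

    static void
    cancel_now(struct job *j)
    {
        /* A cancel routine marks the job finished (with ECANCELED). */
        j->flags |= JOB_FINISHED;
    }

    static bool
    cancel_job(struct job *j)
    {
        if (j->flags & (JOB_CANCELLED | JOB_FINISHED))
            return (false);        /* nothing to do, or too late */
        j->flags |= JOB_CANCELLED;
        if (j->cancel_fn == NULL)
            return (false);        /* owner will notice the flag later */
        j->flags |= JOB_CANCELLING;    /* hold off completion delivery */
        j->cancel_fn(j);
        j->flags &= ~JOB_CANCELLING;
        return ((j->flags & JOB_FINISHED) != 0);
    }

    int
    main(void)
    {
        struct job j = { 0, cancel_now };

        printf("cancelled: %d\n", cancel_job(&j));
        return (0);
    }
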
626 struct kaiocb *job, *jobn; in aio_proc_rundown() local
643 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { in aio_proc_rundown()
644 aio_cancel_job(p, ki, job); in aio_proc_rundown()
655 while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL) in aio_proc_rundown()
656 aio_free_entry(job); in aio_proc_rundown()
667 panic("LIO job not cleaned up: C:%d, FC:%d\n", in aio_proc_rundown()
680 * Select a job to run (called by an AIO daemon).
685 struct kaiocb *job; in aio_selectjob() local
691 TAILQ_FOREACH(job, &aio_jobs, list) { in aio_selectjob()
692 userp = job->userproc; in aio_selectjob()
696 TAILQ_REMOVE(&aio_jobs, job, list); in aio_selectjob()
697 if (!aio_clear_cancel_function(job)) in aio_selectjob()
705 return (job); in aio_selectjob()
746 aio_process_rw(struct kaiocb *job) in aio_process_rw() argument
758 KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ || in aio_process_rw()
759 job->uaiocb.aio_lio_opcode == LIO_READV || in aio_process_rw()
760 job->uaiocb.aio_lio_opcode == LIO_WRITE || in aio_process_rw()
761 job->uaiocb.aio_lio_opcode == LIO_WRITEV, in aio_process_rw()
762 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); in aio_process_rw()
764 aio_switch_vmspace(job); in aio_process_rw()
767 td->td_ucred = job->cred; in aio_process_rw()
768 job->uiop->uio_td = td; in aio_process_rw()
769 fp = job->fd_file; in aio_process_rw()
771 opcode = job->uaiocb.aio_lio_opcode; in aio_process_rw()
772 cnt = job->uiop->uio_resid; in aio_process_rw()
784 if (job->uiop->uio_resid == 0) in aio_process_rw()
787 error = fo_read(fp, job->uiop, fp->f_cred, in aio_process_rw()
788 (job->ioflags & KAIOCB_IO_FOFFSET) != 0 ? 0 : in aio_process_rw()
793 error = fo_write(fp, job->uiop, fp->f_cred, (job->ioflags & in aio_process_rw()
801 job->msgrcv = msgrcv_end - msgrcv_st; in aio_process_rw()
802 job->msgsnd = msgsnd_end - msgsnd_st; in aio_process_rw()
803 job->inblock = inblock_end - inblock_st; in aio_process_rw()
804 job->outblock = oublock_end - oublock_st; in aio_process_rw()
806 if (error != 0 && job->uiop->uio_resid != cnt) { in aio_process_rw()
810 PROC_LOCK(job->userproc); in aio_process_rw()
811 kern_psignal(job->userproc, SIGPIPE); in aio_process_rw()
812 PROC_UNLOCK(job->userproc); in aio_process_rw()
816 cnt -= job->uiop->uio_resid; in aio_process_rw()
819 aio_complete(job, -1, error); in aio_process_rw()
821 aio_complete(job, cnt, 0); in aio_process_rw()
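
aio_process_rw() is what an aio daemon runs for a queued read or write that did not take the fast bio path: it switches into the requester's vmspace, borrows its credentials, performs fo_read()/fo_write(), accounts resource usage back to the job, and reports through aio_complete() (with the SIGPIPE special case for broken-pipe writes). For context, a minimal userland request that ends up on this path, with error handling trimmed:

    #include <aio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        static char buf[4096];
        struct aiocb cb;
        int fd = open("/etc/passwd", O_RDONLY);

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_offset = 0;

        if (aio_read(&cb) != 0)
            return (1);
        while (aio_error(&cb) == EINPROGRESS)    /* busy-poll for brevity */
            usleep(1000);
        printf("read %zd bytes\n", aio_return(&cb));
        close(fd);
        return (0);
    }

Real code would block in aio_suspend(2) or use a sigevent rather than polling; both appear in later examples.
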
825 aio_process_sync(struct kaiocb *job) in aio_process_sync() argument
829 struct file *fp = job->fd_file; in aio_process_sync()
832 KASSERT(job->uaiocb.aio_lio_opcode & LIO_SYNC, in aio_process_sync()
833 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); in aio_process_sync()
835 td->td_ucred = job->cred; in aio_process_sync()
838 job->uaiocb.aio_lio_opcode); in aio_process_sync()
842 aio_complete(job, -1, error); in aio_process_sync()
844 aio_complete(job, 0, 0); in aio_process_sync()
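
aio_process_sync() services aio_fsync(2), and its success case completes with a count of 0 rather than a byte count. The userland side, as a sketch:

    #include <aio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct aiocb cb;
        int fd = open("/tmp/aio_sync_demo", O_CREAT | O_WRONLY, 0600);

        (void)write(fd, "data", 4);
        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;
        /* O_SYNC requests fsync(2) semantics; O_DSYNC, fdatasync(2). */
        if (aio_fsync(O_SYNC, &cb) != 0)
            return (1);
        while (aio_error(&cb) == EINPROGRESS)
            usleep(1000);
        close(fd);
        return (aio_return(&cb) == 0 ? 0 : 1);
    }
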
848 aio_process_mlock(struct kaiocb *job) in aio_process_mlock() argument
850 struct aiocb *cb = &job->uaiocb; in aio_process_mlock()
853 KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK, in aio_process_mlock()
854 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); in aio_process_mlock()
856 aio_switch_vmspace(job); in aio_process_mlock()
857 error = kern_mlock(job->userproc, job->cred, in aio_process_mlock()
859 aio_complete(job, error != 0 ? -1 : 0, error); in aio_process_mlock()
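
LIO_MLOCK backs the FreeBSD-specific aio_mlock(2), which wires a region of the caller's address space asynchronously; note the daemon must call aio_switch_vmspace() first because kern_mlock() acts on the owning process's map. A sketch, assuming FreeBSD:

    #include <aio.h>
    #include <errno.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t len = 1 << 20;
        struct aiocb cb;
        void *p = malloc(len);

        memset(&cb, 0, sizeof(cb));
        cb.aio_buf = p;            /* region to wire */
        cb.aio_nbytes = len;
        if (aio_mlock(&cb) != 0)   /* FreeBSD-specific call */
            return (1);
        while (aio_error(&cb) == EINPROGRESS)
            usleep(1000);
        return (aio_return(&cb) == 0 ? 0 : 1);
    }
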
863 aio_bio_done_notify(struct proc *userp, struct kaiocb *job) in aio_bio_done_notify() argument
873 lj = job->lio; in aio_bio_done_notify()
880 TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist); in aio_bio_done_notify()
881 MPASS(job->jobflags & KAIOCB_FINISHED); in aio_bio_done_notify()
886 if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || in aio_bio_done_notify()
887 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) in aio_bio_done_notify()
888 aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi, true); in aio_bio_done_notify()
890 KNOTE_LOCKED(&job->klist, 1); in aio_bio_done_notify()
908 if (job->jobflags & KAIOCB_CHECKSYNC) { in aio_bio_done_notify()
911 if (job->fd_file != sjob->fd_file || in aio_bio_done_notify()
912 job->seqno >= sjob->seqno) in aio_bio_done_notify()
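
aio_bio_done_notify() is the single completion funnel: it moves the job to kaio_done, fires the requested sigevent (SIGEV_SIGNAL or SIGEV_THREAD_ID), wakes any EVFILT_AIO knotes, and releases LIO_SYNC jobs that were waiting on this job's seqno. A signal-driven completion request from userland, written race-free with sigsuspend(2):

    #include <aio.h>
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static volatile sig_atomic_t done;

    static void
    on_aio(int sig)
    {
        (void)sig;
        done = 1;
    }

    int
    main(void)
    {
        static char buf[512];
        struct aiocb cb;
        struct sigaction sa;
        sigset_t mask, empty;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_aio;
        sigaction(SIGUSR1, &sa, NULL);
        /* Block SIGUSR1 so sigsuspend() can wait for it race-free. */
        sigemptyset(&mask);
        sigaddset(&mask, SIGUSR1);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = open("/etc/passwd", O_RDONLY);
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
        cb.aio_sigevent.sigev_signo = SIGUSR1;
        if (aio_read(&cb) != 0)
            return (1);
        sigemptyset(&empty);
        while (!done)
            sigsuspend(&empty);
        printf("read %zd bytes\n", aio_return(&cb));
        return (0);
    }
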
936 struct kaiocb *job; in aio_schedule_fsync() local
941 job = TAILQ_FIRST(&ki->kaio_syncready); in aio_schedule_fsync()
942 TAILQ_REMOVE(&ki->kaio_syncready, job, list); in aio_schedule_fsync()
944 aio_schedule(job, aio_process_sync); in aio_schedule_fsync()
951 aio_cancel_cleared(struct kaiocb *job) in aio_cancel_cleared() argument
960 return ((job->jobflags & KAIOCB_CLEARED) != 0); in aio_cancel_cleared()
964 aio_clear_cancel_function_locked(struct kaiocb *job) in aio_clear_cancel_function_locked() argument
967 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); in aio_clear_cancel_function_locked()
968 MPASS(job->cancel_fn != NULL); in aio_clear_cancel_function_locked()
969 if (job->jobflags & KAIOCB_CANCELLING) { in aio_clear_cancel_function_locked()
970 job->jobflags |= KAIOCB_CLEARED; in aio_clear_cancel_function_locked()
973 job->cancel_fn = NULL; in aio_clear_cancel_function_locked()
978 aio_clear_cancel_function(struct kaiocb *job) in aio_clear_cancel_function() argument
983 ki = job->userproc->p_aioinfo; in aio_clear_cancel_function()
985 ret = aio_clear_cancel_function_locked(job); in aio_clear_cancel_function()
991 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func) in aio_set_cancel_function_locked() argument
994 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); in aio_set_cancel_function_locked()
995 if (job->jobflags & KAIOCB_CANCELLED) in aio_set_cancel_function_locked()
997 job->cancel_fn = func; in aio_set_cancel_function_locked()
1002 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func) in aio_set_cancel_function() argument
1007 ki = job->userproc->p_aioinfo; in aio_set_cancel_function()
1009 ret = aio_set_cancel_function_locked(job, func); in aio_set_cancel_function()
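
The four functions above form the handshake that lets a job park safely while staying cancellable: a subsystem installs a cancel routine with aio_set_cancel_function() before sleeping (which fails if the job was already cancelled) and removes it with aio_clear_cancel_function() before completing; if a canceller is mid-call (KAIOCB_CANCELLING), the clear only records KAIOCB_CLEARED and reports failure, so the cancel routine learns via aio_cancel_cleared() that the job escaped. A hedged single-threaded model of that pairing, reusing the invented flag values from the earlier sketch:

    #include <stdbool.h>
    #include <stddef.h>

    #define JOB_CANCELLED   0x01
    #define JOB_CANCELLING  0x02
    #define JOB_CLEARED     0x08

    struct job {
        int flags;
        void (*cancel_fn)(struct job *);
    };

    static void
    noop_cancel(struct job *j)
    {
        (void)j;
    }

    /* Returns false if the job was cancelled before it could park. */
    static bool
    set_cancel_function(struct job *j, void (*fn)(struct job *))
    {
        if (j->flags & JOB_CANCELLED)
            return (false);
        j->cancel_fn = fn;
        return (true);
    }

    /* Returns false if a canceller is running; it now owns completion. */
    static bool
    clear_cancel_function(struct job *j)
    {
        if (j->flags & JOB_CANCELLING) {
            j->flags |= JOB_CLEARED;
            return (false);
        }
        j->cancel_fn = NULL;
        return (true);
    }

    int
    main(void)
    {
        struct job j = { 0, NULL };

        if (set_cancel_function(&j, noop_cancel) &&
            clear_cancel_function(&j))
            return (0);    /* normal completion path */
        return (1);
    }
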
1015 aio_complete(struct kaiocb *job, long status, int error) in aio_complete() argument
1020 job->uaiocb._aiocb_private.error = error; in aio_complete()
1021 job->uaiocb._aiocb_private.status = status; in aio_complete()
1023 userp = job->userproc; in aio_complete()
1027 KASSERT(!(job->jobflags & KAIOCB_FINISHED), in aio_complete()
1029 job->jobflags |= KAIOCB_FINISHED; in aio_complete()
1030 if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) { in aio_complete()
1031 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); in aio_complete()
1032 aio_bio_done_notify(userp, job); in aio_complete()
1038 aio_cancel(struct kaiocb *job) in aio_cancel() argument
1041 aio_complete(job, -1, ECANCELED); in aio_cancel()
1045 aio_switch_vmspace(struct kaiocb *job) in aio_switch_vmspace() argument
1048 vmspace_switch_aio(job->userproc->p_vmspace); in aio_switch_vmspace()
1058 struct kaiocb *job; in aio_daemon() local
1103 while ((job = aio_selectjob(aiop)) != NULL) { in aio_daemon()
1106 ki = job->userproc->p_aioinfo; in aio_daemon()
1107 job->handle_fn(job); in aio_daemon()
1110 /* Decrement the active job count. */ in aio_daemon()
1123 * no job can be selected. in aio_daemon()
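
aio_daemon() is the body of each aiod worker: it pulls runnable jobs with aio_selectjob() (which also clears the job's cancel function before handing it over), switches to the owner's vmspace, and invokes the job's handle_fn, idling when nothing is selectable. A stripped-down pthread model of that consumer loop, with invented names:

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct job {
        void (*handle_fn)(struct job *);
        TAILQ_ENTRY(job) list;
    };

    static TAILQ_HEAD(, job) run_queue = TAILQ_HEAD_INITIALIZER(run_queue);
    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t qcv = PTHREAD_COND_INITIALIZER;

    static void *
    daemon_loop(void *arg)
    {
        struct job *j;

        (void)arg;
        for (;;) {
            pthread_mutex_lock(&qlock);
            while ((j = TAILQ_FIRST(&run_queue)) == NULL)
                pthread_cond_wait(&qcv, &qlock);    /* idle */
            TAILQ_REMOVE(&run_queue, j, list);
            pthread_mutex_unlock(&qlock);
            j->handle_fn(j);    /* e.g. the rw/sync/mlock handlers */
            free(j);
        }
        return (NULL);
    }

    static void
    print_job(struct job *j)
    {
        (void)j;
        printf("job executed\n");
    }

    int
    main(void)
    {
        pthread_t td;
        struct job *j = calloc(1, sizeof(*j));

        j->handle_fn = print_job;
        pthread_create(&td, NULL, daemon_loop, NULL);
        pthread_mutex_lock(&qlock);
        TAILQ_INSERT_TAIL(&run_queue, j, list);
        pthread_cond_signal(&qcv);
        pthread_mutex_unlock(&qlock);
        sleep(1);    /* let the worker drain the queue, then exit */
        return (0);
    }
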
1198 aio_qbio(struct proc *p, struct kaiocb *job) in aio_qbio() argument
1213 cb = &job->uaiocb; in aio_qbio()
1214 fp = job->fd_file; in aio_qbio()
1230 iovcnt = job->uiop->uio_iovcnt; in aio_qbio()
1234 if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0) in aio_qbio()
1236 if (job->uiop->uio_iov[i].iov_len > maxphys) { in aio_qbio()
1252 if (job->uiop->uio_resid > dev->si_iosize_max) { in aio_qbio()
1258 job->error = 0; in aio_qbio()
1273 refcount_init(&job->nbio, iovcnt); in aio_qbio()
1281 buf = job->uiop->uio_iov[i].iov_base; in aio_qbio()
1282 nbytes = job->uiop->uio_iov[i].iov_len; in aio_qbio()
1304 bp->bio_caller1 = job; in aio_qbio()
1487 struct kaiocb *job; in aio_aqueue() local
1509 job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO); in aio_aqueue()
1510 knlist_init_mtx(&job->klist, AIO_MTX(ki)); in aio_aqueue()
1512 error = ops->aio_copyin(ujob, job, type); in aio_aqueue()
1516 if (job->uaiocb.aio_nbytes > IOSIZE_MAX) { in aio_aqueue()
1521 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT && in aio_aqueue()
1522 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL && in aio_aqueue()
1523 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID && in aio_aqueue()
1524 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) { in aio_aqueue()
1529 if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || in aio_aqueue()
1530 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) && in aio_aqueue()
1531 !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) { in aio_aqueue()
1538 switch (job->uaiocb.aio_lio_opcode & ~LIO_FOFFSET) { in aio_aqueue()
1544 opcode = job->uaiocb.aio_lio_opcode & ~LIO_FOFFSET; in aio_aqueue()
1545 if ((job->uaiocb.aio_lio_opcode & LIO_FOFFSET) != 0) in aio_aqueue()
1546 job->ioflags |= KAIOCB_IO_FOFFSET; in aio_aqueue()
1553 opcode = job->uaiocb.aio_lio_opcode = type; in aio_aqueue()
1555 ksiginfo_init(&job->ksi); in aio_aqueue()
1557 /* Save userspace address of the job info. */ in aio_aqueue()
1558 job->ujob = ujob; in aio_aqueue()
1568 fd = job->uaiocb.aio_fildes; in aio_aqueue()
1600 job->uaiocb.aio_offset < 0 && in aio_aqueue()
1611 job->fd_file = fp; in aio_aqueue()
1614 job->seqno = jobseqno++; in aio_aqueue()
1618 MPASS(job->uiop == &job->uio || job->uiop == NULL); in aio_aqueue()
1619 uma_zfree(aiocb_zone, job); in aio_aqueue()
1623 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT) in aio_aqueue()
1625 evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags; in aio_aqueue()
1630 kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue; in aio_aqueue()
1632 kev.ident = (uintptr_t)job->ujob; in aio_aqueue()
1635 kev.data = (intptr_t)job; in aio_aqueue()
1636 kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr; in aio_aqueue()
1644 job->uaiocb._aiocb_private.error = EINPROGRESS; in aio_aqueue()
1645 job->userproc = p; in aio_aqueue()
1646 job->cred = crhold(td->td_ucred); in aio_aqueue()
1647 job->jobflags = KAIOCB_QUEUEING; in aio_aqueue()
1648 job->lio = lj; in aio_aqueue()
1652 MPASS(job->uiop != &job->uio && job->uiop != NULL); in aio_aqueue()
1655 job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf; in aio_aqueue()
1656 job->iov[0].iov_len = job->uaiocb.aio_nbytes; in aio_aqueue()
1657 job->uio.uio_iov = job->iov; in aio_aqueue()
1658 job->uio.uio_iovcnt = 1; in aio_aqueue()
1659 job->uio.uio_resid = job->uaiocb.aio_nbytes; in aio_aqueue()
1660 job->uio.uio_segflg = UIO_USERSPACE; in aio_aqueue()
1661 job->uiop = &job->uio; in aio_aqueue()
1665 job->uiop->uio_rw = UIO_READ; in aio_aqueue()
1668 job->uiop->uio_rw = UIO_WRITE; in aio_aqueue()
1671 job->uiop->uio_offset = job->uaiocb.aio_offset; in aio_aqueue()
1672 job->uiop->uio_td = td; in aio_aqueue()
1675 aio_schedule(job, aio_process_mlock); in aio_aqueue()
1678 error = aio_queue_file(fp, job); in aio_aqueue()
1680 error = fo_aio_queue(fp, job); in aio_aqueue()
1685 job->jobflags &= ~KAIOCB_QUEUEING; in aio_aqueue()
1686 TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist); in aio_aqueue()
1691 if (job->jobflags & KAIOCB_FINISHED) { in aio_aqueue()
1697 aio_bio_done_notify(p, job); in aio_aqueue()
1699 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist); in aio_aqueue()
1704 crfree(job->cred); in aio_aqueue()
1708 knlist_delete(&job->klist, curthread, 0); in aio_aqueue()
1710 if (job->uiop != &job->uio) in aio_aqueue()
1711 freeuio(job->uiop); in aio_aqueue()
1712 uma_zfree(aiocb_zone, job); in aio_aqueue()
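
aio_aqueue() is the common enqueue path for every AIO system call: it copies in the user's aiocb, validates the sigevent and opcode, pins the file, registers a kevent when SIGEV_KEVENT was requested, and hands the job to fo_aio_queue()/aio_queue_file(), unwinding everything on failure. The kqueue notification wired up at source lines 1623-1636 is requested from userland like this (FreeBSD, sketch):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <aio.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        static char buf[512];
        struct aiocb cb;
        struct kevent ev;
        int kq = kqueue();

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = open("/etc/passwd", O_RDONLY);
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
        cb.aio_sigevent.sigev_notify_kqueue = kq;
        cb.aio_sigevent.sigev_value.sival_ptr = &cb;

        if (aio_read(&cb) != 0)
            return (1);
        /* The completion event's ident is the userspace aiocb pointer. */
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
            printf("done: %zd bytes\n",
                aio_return((struct aiocb *)ev.ident));
        return (0);
    }
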
1719 aio_cancel_daemon_job(struct kaiocb *job) in aio_cancel_daemon_job() argument
1723 if (!aio_cancel_cleared(job)) in aio_cancel_daemon_job()
1724 TAILQ_REMOVE(&aio_jobs, job, list); in aio_cancel_daemon_job()
1726 aio_cancel(job); in aio_cancel_daemon_job()
1730 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func) in aio_schedule() argument
1734 if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) { in aio_schedule()
1736 aio_cancel(job); in aio_schedule()
1739 job->handle_fn = func; in aio_schedule()
1740 TAILQ_INSERT_TAIL(&aio_jobs, job, list); in aio_schedule()
1741 aio_kick_nowait(job->userproc); in aio_schedule()
1746 aio_cancel_sync(struct kaiocb *job) in aio_cancel_sync() argument
1750 ki = job->userproc->p_aioinfo; in aio_cancel_sync()
1752 if (!aio_cancel_cleared(job)) in aio_cancel_sync()
1753 TAILQ_REMOVE(&ki->kaio_syncqueue, job, list); in aio_cancel_sync()
1755 aio_cancel(job); in aio_cancel_sync()
1759 aio_queue_file(struct file *fp, struct kaiocb *job) in aio_queue_file() argument
1768 ki = job->userproc->p_aioinfo; in aio_queue_file()
1769 error = aio_qbio(job->userproc, job); in aio_queue_file()
1787 if (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) { in aio_queue_file()
1788 aio_schedule(job, aio_process_rw); in aio_queue_file()
1790 } else if (job->uaiocb.aio_lio_opcode & LIO_SYNC) { in aio_queue_file()
1793 if (job2->fd_file == job->fd_file && in aio_queue_file()
1795 job2->seqno < job->seqno) { in aio_queue_file()
1797 job->pending++; in aio_queue_file()
1800 if (job->pending != 0) { in aio_queue_file()
1801 if (!aio_set_cancel_function_locked(job, in aio_queue_file()
1804 aio_cancel(job); in aio_queue_file()
1807 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list); in aio_queue_file()
1812 aio_schedule(job, aio_process_sync); in aio_queue_file()
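
The LIO_SYNC branch above is where aio_fsync(2) gets its ordering guarantee: the sync job counts the still-queued jobs on the same file with a smaller seqno into job->pending and parks on kaio_syncqueue until aio_bio_done_notify() drains it, so the sync runs only after earlier I/O on that descriptor completes. From userland, that makes the following pattern safe (sketch, error handling trimmed):

    #include <aio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        static char payload[] = "payload";
        struct aiocb w, s;
        int fd = open("/tmp/aio_order_demo", O_CREAT | O_WRONLY, 0600);

        memset(&w, 0, sizeof(w));
        w.aio_fildes = fd;
        w.aio_buf = payload;
        w.aio_nbytes = sizeof(payload) - 1;
        if (aio_write(&w) != 0)          /* queued first: smaller seqno */
            return (1);

        memset(&s, 0, sizeof(s));
        s.aio_fildes = fd;
        if (aio_fsync(O_SYNC, &s) != 0)  /* runs only after w completes */
            return (1);
        while (aio_error(&s) == EINPROGRESS)
            usleep(1000);
        return (aio_return(&s) == 0 && aio_return(&w) >= 0 ? 0 : 1);
    }
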
1887 struct kaiocb *job; in kern_aio_return() local
1895 TAILQ_FOREACH(job, &ki->kaio_done, plist) { in kern_aio_return()
1896 if (job->ujob == ujob) in kern_aio_return()
1899 if (job != NULL) { in kern_aio_return()
1900 MPASS(job->jobflags & KAIOCB_FINISHED); in kern_aio_return()
1901 status = job->uaiocb._aiocb_private.status; in kern_aio_return()
1902 error = job->uaiocb._aiocb_private.error; in kern_aio_return()
1904 td->td_ru.ru_oublock += job->outblock; in kern_aio_return()
1905 td->td_ru.ru_inblock += job->inblock; in kern_aio_return()
1906 td->td_ru.ru_msgsnd += job->msgsnd; in kern_aio_return()
1907 td->td_ru.ru_msgrcv += job->msgrcv; in kern_aio_return()
1908 aio_free_entry(job); in kern_aio_return()
1936 struct kaiocb *firstjob, *job; in kern_aio_suspend() local
1961 TAILQ_FOREACH(job, &ki->kaio_all, allist) { in kern_aio_suspend()
1963 if (job->ujob == ujoblist[i]) { in kern_aio_suspend()
1965 firstjob = job; in kern_aio_suspend()
1966 if (job->jobflags & KAIOCB_FINISHED) in kern_aio_suspend()
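
kern_aio_suspend() backs aio_suspend(2): it scans kaio_all for the listed control blocks and sleeps until the first of them reaches KAIOCB_FINISHED. The userland side, as a sketch:

    #include <aio.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        static char buf[256];
        struct aiocb cb;
        const struct aiocb *list[1] = { &cb };

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = open("/etc/passwd", O_RDONLY);
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf);
        if (aio_read(&cb) != 0)
            return (1);
        /* Block (no timeout) until a listed job completes. */
        if (aio_suspend(list, 1, NULL) != 0)
            return (1);
        printf("read %zd bytes\n", aio_return(&cb));
        return (0);
    }
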
2022 struct kaiocb *job, *jobn; in sys_aio_cancel() local
2048 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { in sys_aio_cancel()
2049 if ((uap->fd == job->uaiocb.aio_fildes) && in sys_aio_cancel()
2051 (uap->aiocbp == job->ujob))) { in sys_aio_cancel()
2052 if (aio_cancel_job(p, ki, job)) { in sys_aio_cancel()
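
sys_aio_cancel() walks the process's kaio_jobqueue and attempts aio_cancel_job() on every entry matching the descriptor (and, optionally, a specific aiocb), then reports AIO_CANCELED, AIO_NOTCANCELED, or AIO_ALLDONE. Usage sketch:

    #include <aio.h>
    #include <stdio.h>

    /* A NULL aiocb pointer matches every outstanding job on fd. */
    static void
    cancel_all(int fd)
    {
        switch (aio_cancel(fd, NULL)) {
        case AIO_CANCELED:
            printf("all outstanding requests cancelled\n");
            break;
        case AIO_NOTCANCELED:
            printf("some requests are already in progress\n");
            break;
        case AIO_ALLDONE:
            printf("nothing left to cancel\n");
            break;
        }
    }

    int
    main(void)
    {
        cancel_all(0);
        return (0);
    }
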
2097 struct kaiocb *job; in kern_aio_error() local
2108 TAILQ_FOREACH(job, &ki->kaio_all, allist) { in kern_aio_error()
2109 if (job->ujob == ujob) { in kern_aio_error()
2110 if (job->jobflags & KAIOCB_FINISHED) in kern_aio_error()
2112 job->uaiocb._aiocb_private.error; in kern_aio_error()
2204 struct aiocb *job; in kern_lio_listio() local
2284 job = acb_list[i]; in kern_lio_listio()
2285 if (job != NULL) { in kern_lio_listio()
2286 error = aio_aqueue(td, job, lj, LIO_NOP, ops); in kern_lio_listio()
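
kern_lio_listio() feeds each non-NULL entry of the user's list back through aio_aqueue() (LIO_NOP is the fallback opcode for untyped entries) and ties them to one liojob so a single notification can cover the whole batch. Sketch:

    #include <aio.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        static char a[256], b[256];
        struct aiocb cb1, cb2;
        struct aiocb *list[2] = { &cb1, &cb2 };
        int fd = open("/etc/passwd", O_RDONLY);

        memset(&cb1, 0, sizeof(cb1));
        cb1.aio_fildes = fd;
        cb1.aio_buf = a;
        cb1.aio_nbytes = sizeof(a);
        cb1.aio_lio_opcode = LIO_READ;

        memset(&cb2, 0, sizeof(cb2));
        cb2.aio_fildes = fd;
        cb2.aio_buf = b;
        cb2.aio_nbytes = sizeof(b);
        cb2.aio_offset = sizeof(a);
        cb2.aio_lio_opcode = LIO_READ;

        /* LIO_WAIT: return only after both requests complete. */
        if (lio_listio(LIO_WAIT, list, 2, NULL) != 0)
            return (1);
        printf("%zd and %zd bytes\n", aio_return(&cb1), aio_return(&cb2));
        return (0);
    }
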
2416 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; in aio_biocleanup() local
2427 ki = job->userproc->p_aioinfo; in aio_biocleanup()
2443 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; in aio_biowakeup() local
2451 opcode = job->uaiocb.aio_lio_opcode; in aio_biowakeup()
2456 atomic_add_acq_long(&job->nbytes, nbytes); in aio_biowakeup()
2460 * If multiple bios experienced an error, the job will reflect the in aio_biowakeup()
2464 atomic_store_int(&job->error, bio_error); in aio_biowakeup()
2466 atomic_add_int(&job->outblock, nblks); in aio_biowakeup()
2468 atomic_add_int(&job->inblock, nblks); in aio_biowakeup()
2470 if (refcount_release(&job->nbio)) { in aio_biowakeup()
2471 bio_error = atomic_load_int(&job->error); in aio_biowakeup()
2473 aio_complete(job, -1, bio_error); in aio_biowakeup()
2475 aio_complete(job, atomic_load_long(&job->nbytes), 0); in aio_biowakeup()
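
aio_biowakeup() runs once per completed bio; since aio_qbio() splits a vectored request into one bio per iovec (refcount_init(&job->nbio, iovcnt)), byte counts and errors are accumulated atomically and only the releaser of the final reference calls aio_complete(). The same last-one-out pattern expressed with C11 atomics, as a sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    struct job {
        atomic_int nbio;      /* one reference per outstanding bio */
        atomic_long nbytes;
        atomic_int error;
    };

    /* Called from each bio's completion context. */
    static void
    bio_done(struct job *j, long nbytes, int error)
    {
        atomic_fetch_add(&j->nbytes, nbytes);
        if (error != 0)
            atomic_store(&j->error, error);    /* keep the error seen last */
        if (atomic_fetch_sub(&j->nbio, 1) == 1) {
            /* Final reference: deliver the job's overall result. */
            int e = atomic_load(&j->error);

            if (e != 0)
                printf("complete: error %d\n", e);
            else
                printf("complete: %ld bytes\n",
                    atomic_load(&j->nbytes));
        }
    }

    int
    main(void)
    {
        struct job j = { 2, 0, 0 };    /* request split into two bios */

        bio_done(&j, 4096, 0);
        bio_done(&j, 4096, 0);
        return (0);
    }
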
2487 struct kaiocb *job; in kern_aio_waitcomplete() local
2513 job = NULL; in kern_aio_waitcomplete()
2515 while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) { in kern_aio_waitcomplete()
2529 if (job != NULL) { in kern_aio_waitcomplete()
2530 MPASS(job->jobflags & KAIOCB_FINISHED); in kern_aio_waitcomplete()
2531 ujob = job->ujob; in kern_aio_waitcomplete()
2532 status = job->uaiocb._aiocb_private.status; in kern_aio_waitcomplete()
2533 error = job->uaiocb._aiocb_private.error; in kern_aio_waitcomplete()
2535 td->td_ru.ru_oublock += job->outblock; in kern_aio_waitcomplete()
2536 td->td_ru.ru_inblock += job->inblock; in kern_aio_waitcomplete()
2537 td->td_ru.ru_msgsnd += job->msgsnd; in kern_aio_waitcomplete()
2538 td->td_ru.ru_msgrcv += job->msgrcv; in kern_aio_waitcomplete()
2539 aio_free_entry(job); in kern_aio_waitcomplete()
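
kern_aio_waitcomplete() implements the FreeBSD-specific aio_waitcomplete(2): it sleeps until any job reaches kaio_done, reaps it (charging the I/O accounting back to the caller, as in the hits above), and hands back the userspace aiocb pointer along with the job's return value. A sketch, assuming FreeBSD:

    #include <aio.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        static char buf[256];
        struct aiocb cb, *done;
        ssize_t n;

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = open("/etc/passwd", O_RDONLY);
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf);
        if (aio_read(&cb) != 0)
            return (1);
        /* Block until any outstanding job finishes; NULL = no timeout. */
        n = aio_waitcomplete(&done, NULL);
        printf("job %p returned %zd\n", (void *)done, n);
        return (0);
    }
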
2599 struct kaiocb *job; in filt_aioattach() local
2601 job = (struct kaiocb *)(uintptr_t)kn->kn_sdata; in filt_aioattach()
2604 * The job pointer must be validated before using it, so in filt_aioattach()
2610 kn->kn_ptr.p_aio = job; in filt_aioattach()
2613 knlist_add(&job->klist, kn, 0); in filt_aioattach()
2636 struct kaiocb *job = kn->kn_ptr.p_aio; in filt_aio() local
2638 kn->kn_data = job->uaiocb._aiocb_private.error; in filt_aio()
2639 if (!(job->jobflags & KAIOCB_FINISHED)) in filt_aio()