19454b2d8SWarner Losh /*- 2ee877a35SJohn Dyson * Copyright (c) 1997 John S. Dyson. All rights reserved. 3ee877a35SJohn Dyson * 4ee877a35SJohn Dyson * Redistribution and use in source and binary forms, with or without 5ee877a35SJohn Dyson * modification, are permitted provided that the following conditions 6ee877a35SJohn Dyson * are met: 7ee877a35SJohn Dyson * 1. Redistributions of source code must retain the above copyright 8ee877a35SJohn Dyson * notice, this list of conditions and the following disclaimer. 9ee877a35SJohn Dyson * 2. John S. Dyson's name may not be used to endorse or promote products 10ee877a35SJohn Dyson * derived from this software without specific prior written permission. 11ee877a35SJohn Dyson * 12ee877a35SJohn Dyson * DISCLAIMER: This code isn't warranted to do anything useful. Anything 13ee877a35SJohn Dyson * bad that happens because of using this software isn't the responsibility 14ee877a35SJohn Dyson * of the author. This software is distributed AS-IS. 15ee877a35SJohn Dyson */ 16ee877a35SJohn Dyson 17ee877a35SJohn Dyson /* 188a6472b7SPeter Dufault * This file contains support for the POSIX 1003.1B AIO/LIO facility. 19ee877a35SJohn Dyson */ 20ee877a35SJohn Dyson 21677b542eSDavid E. O'Brien #include <sys/cdefs.h> 22677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 23677b542eSDavid E. O'Brien 24ee877a35SJohn Dyson #include <sys/param.h> 25ee877a35SJohn Dyson #include <sys/systm.h> 26f591779bSSeigo Tanimura #include <sys/malloc.h> 279626b608SPoul-Henning Kamp #include <sys/bio.h> 28a5c9bce7SBruce Evans #include <sys/buf.h> 2975b8b3b2SJohn Baldwin #include <sys/eventhandler.h> 30ee877a35SJohn Dyson #include <sys/sysproto.h> 31ee877a35SJohn Dyson #include <sys/filedesc.h> 32ee877a35SJohn Dyson #include <sys/kernel.h> 3377409fe1SPoul-Henning Kamp #include <sys/module.h> 34c9a970a7SAlan Cox #include <sys/kthread.h> 35ee877a35SJohn Dyson #include <sys/fcntl.h> 36ee877a35SJohn Dyson #include <sys/file.h> 37104a9b7eSAlexander Kabaev #include <sys/limits.h> 38fdebd4f0SBruce Evans #include <sys/lock.h> 3935e0e5b3SJohn Baldwin #include <sys/mutex.h> 40ee877a35SJohn Dyson #include <sys/unistd.h> 41ee877a35SJohn Dyson #include <sys/proc.h> 422d2f8ae7SBruce Evans #include <sys/resourcevar.h> 43ee877a35SJohn Dyson #include <sys/signalvar.h> 44bfbbc4aaSJason Evans #include <sys/protosw.h> 451ce91824SDavid Xu #include <sys/sema.h> 461ce91824SDavid Xu #include <sys/socket.h> 47bfbbc4aaSJason Evans #include <sys/socketvar.h> 4821d56e9cSAlfred Perlstein #include <sys/syscall.h> 4921d56e9cSAlfred Perlstein #include <sys/sysent.h> 50a624e84fSJohn Dyson #include <sys/sysctl.h> 51ee99e978SBruce Evans #include <sys/sx.h> 521ce91824SDavid Xu #include <sys/taskqueue.h> 53fd3bf775SJohn Dyson #include <sys/vnode.h> 54fd3bf775SJohn Dyson #include <sys/conf.h> 55cb679c38SJonathan Lemon #include <sys/event.h> 56ee877a35SJohn Dyson 571ce91824SDavid Xu #include <machine/atomic.h> 581ce91824SDavid Xu 59c844abc9SAlfred Perlstein #include <posix4/posix4.h> 60ee877a35SJohn Dyson #include <vm/vm.h> 61ee877a35SJohn Dyson #include <vm/vm_extern.h> 622244ea07SJohn Dyson #include <vm/pmap.h> 632244ea07SJohn Dyson #include <vm/vm_map.h> 64c897b813SJeff Roberson #include <vm/uma.h> 65ee877a35SJohn Dyson #include <sys/aio.h> 665aaef07cSJohn Dyson 67dd85920aSJason Evans #include "opt_vfs_aio.h" 68ee877a35SJohn Dyson 69eb8e6d52SEivind Eklund /* 70eb8e6d52SEivind Eklund * Counter for allocating reference ids to new jobs. Wrapped to 1 on 71eb8e6d52SEivind Eklund * overflow. 
72eb8e6d52SEivind Eklund */ 738c12612cSDoug Rabson static long jobrefid; 742244ea07SJohn Dyson 752244ea07SJohn Dyson #define JOBST_NULL 0x0 761ce91824SDavid Xu #define JOBST_JOBQSOCK 0x1 772244ea07SJohn Dyson #define JOBST_JOBQGLOBAL 0x2 782244ea07SJohn Dyson #define JOBST_JOBRUNNING 0x3 792244ea07SJohn Dyson #define JOBST_JOBFINISHED 0x4 80fd3bf775SJohn Dyson #define JOBST_JOBQBUF 0x5 812244ea07SJohn Dyson 8284af4da6SJohn Dyson #ifndef MAX_AIO_PER_PROC 832244ea07SJohn Dyson #define MAX_AIO_PER_PROC 32 8484af4da6SJohn Dyson #endif 8584af4da6SJohn Dyson 8684af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE_PER_PROC 872244ea07SJohn Dyson #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */ 8884af4da6SJohn Dyson #endif 8984af4da6SJohn Dyson 9084af4da6SJohn Dyson #ifndef MAX_AIO_PROCS 91fd3bf775SJohn Dyson #define MAX_AIO_PROCS 32 9284af4da6SJohn Dyson #endif 9384af4da6SJohn Dyson 9484af4da6SJohn Dyson #ifndef MAX_AIO_QUEUE 952244ea07SJohn Dyson #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */ 9684af4da6SJohn Dyson #endif 9784af4da6SJohn Dyson 9884af4da6SJohn Dyson #ifndef TARGET_AIO_PROCS 99bfbbc4aaSJason Evans #define TARGET_AIO_PROCS 4 10084af4da6SJohn Dyson #endif 10184af4da6SJohn Dyson 10284af4da6SJohn Dyson #ifndef MAX_BUF_AIO 10384af4da6SJohn Dyson #define MAX_BUF_AIO 16 10484af4da6SJohn Dyson #endif 10584af4da6SJohn Dyson 10684af4da6SJohn Dyson #ifndef AIOD_TIMEOUT_DEFAULT 10784af4da6SJohn Dyson #define AIOD_TIMEOUT_DEFAULT (10 * hz) 10884af4da6SJohn Dyson #endif 10984af4da6SJohn Dyson 11084af4da6SJohn Dyson #ifndef AIOD_LIFETIME_DEFAULT 11184af4da6SJohn Dyson #define AIOD_LIFETIME_DEFAULT (30 * hz) 11284af4da6SJohn Dyson #endif 1132244ea07SJohn Dyson 1145ece08f5SPoul-Henning Kamp static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management"); 115eb8e6d52SEivind Eklund 116303b270bSEivind Eklund static int max_aio_procs = MAX_AIO_PROCS; 117a624e84fSJohn Dyson SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, 118eb8e6d52SEivind Eklund CTLFLAG_RW, &max_aio_procs, 0, 119eb8e6d52SEivind Eklund "Maximum number of kernel threads to use for handling async IO "); 120a624e84fSJohn Dyson 121eb8e6d52SEivind Eklund static int num_aio_procs = 0; 122a624e84fSJohn Dyson SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, 123eb8e6d52SEivind Eklund CTLFLAG_RD, &num_aio_procs, 0, 124eb8e6d52SEivind Eklund "Number of presently active kernel threads for async IO"); 125a624e84fSJohn Dyson 126eb8e6d52SEivind Eklund /* 127eb8e6d52SEivind Eklund * The code will adjust the actual number of AIO processes towards this 128eb8e6d52SEivind Eklund * number when it gets a chance. 
129eb8e6d52SEivind Eklund  */
130eb8e6d52SEivind Eklund static int target_aio_procs = TARGET_AIO_PROCS;
131eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
132eb8e6d52SEivind Eklund     0, "Preferred number of ready kernel threads for async IO");
133a624e84fSJohn Dyson 
134eb8e6d52SEivind Eklund static int max_queue_count = MAX_AIO_QUEUE;
135eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
136eb8e6d52SEivind Eklund     "Maximum number of aio requests to queue, globally");
137a624e84fSJohn Dyson 
138eb8e6d52SEivind Eklund static int num_queue_count = 0;
139eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
140eb8e6d52SEivind Eklund     "Number of queued aio requests");
141a624e84fSJohn Dyson 
142eb8e6d52SEivind Eklund static int num_buf_aio = 0;
143eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
144eb8e6d52SEivind Eklund     "Number of aio requests presently handled by the buf subsystem");
145fd3bf775SJohn Dyson 
146eb8e6d52SEivind Eklund /* Number of async I/O threads in the process of being started */
147a9bf5e37SDavid Xu /* XXX This should be local to aio_aqueue() */
148eb8e6d52SEivind Eklund static int num_aio_resv_start = 0;
149fd3bf775SJohn Dyson 
150eb8e6d52SEivind Eklund static int aiod_timeout;
151eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
152eb8e6d52SEivind Eklund     "Timeout value for synchronous aio operations");
15384af4da6SJohn Dyson 
154eb8e6d52SEivind Eklund static int aiod_lifetime;
155eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
156eb8e6d52SEivind Eklund     "Maximum lifetime for idle aiod");
15784af4da6SJohn Dyson 
158eb8e6d52SEivind Eklund static int unloadable = 0;
15921d56e9cSAlfred Perlstein SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
16021d56e9cSAlfred Perlstein     "Allow unload of aio (not recommended)");
16121d56e9cSAlfred Perlstein 
162eb8e6d52SEivind Eklund 
163eb8e6d52SEivind Eklund static int max_aio_per_proc = MAX_AIO_PER_PROC;
164eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
165eb8e6d52SEivind Eklund     0, "Maximum active aio requests per process (stored in the process)");
166eb8e6d52SEivind Eklund 
167eb8e6d52SEivind Eklund static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
168eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
169eb8e6d52SEivind Eklund     &max_aio_queue_per_proc, 0,
170eb8e6d52SEivind Eklund     "Maximum queued aio requests per process (stored in the process)");
171eb8e6d52SEivind Eklund 
172eb8e6d52SEivind Eklund static int max_buf_aio = MAX_BUF_AIO;
173eb8e6d52SEivind Eklund SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
174eb8e6d52SEivind Eklund     "Maximum buf aio requests per process (stored in the process)");
175eb8e6d52SEivind Eklund 
1760972628aSDavid Xu typedef struct oaiocb {
1770972628aSDavid Xu         int aio_fildes;                 /* File descriptor */
1780972628aSDavid Xu         off_t aio_offset;               /* File offset for I/O */
1790972628aSDavid Xu         volatile void *aio_buf;         /* I/O buffer in process space */
1800972628aSDavid Xu         size_t aio_nbytes;              /* Number of bytes for I/O */
1810972628aSDavid Xu         struct osigevent aio_sigevent;  /* Signal to deliver */
1820972628aSDavid Xu         int aio_lio_opcode;             /* LIO opcode */
1830972628aSDavid Xu         int aio_reqprio;                /* Request priority -- ignored */
1840972628aSDavid Xu         struct __aiocb_private _aiocb_private;
1850972628aSDavid Xu } oaiocb_t;
1860972628aSDavid Xu 
18748dac059SAlan Cox struct aiocblist {
18848dac059SAlan Cox         TAILQ_ENTRY(aiocblist) list;    /* List of jobs */
18948dac059SAlan Cox         TAILQ_ENTRY(aiocblist) plist;   /* List of jobs for proc */
1901ce91824SDavid Xu         TAILQ_ENTRY(aiocblist) allist;
19148dac059SAlan Cox         int jobflags;
19248dac059SAlan Cox         int jobstate;
19348dac059SAlan Cox         int inputcharge;
19448dac059SAlan Cox         int outputcharge;
19548dac059SAlan Cox         struct buf *bp;                 /* Buffer pointer */
1962a522eb9SJohn Baldwin         struct proc *userproc;          /* User process */
197f8f750c5SRobert Watson         struct ucred *cred;             /* Active credential when created */
19848dac059SAlan Cox         struct file *fd_file;           /* Pointer to file structure */
1991ce91824SDavid Xu         struct aioliojob *lio;          /* Optional lio job */
20048dac059SAlan Cox         struct aiocb *uuaiocb;          /* Pointer in userspace of aiocb */
201ad3b9257SJohn-Mark Gurney         struct knlist klist;            /* list of knotes */
20248dac059SAlan Cox         struct aiocb uaiocb;            /* Kernel I/O control block */
2034c0fb2cfSDavid Xu         ksiginfo_t ksi;                 /* Realtime signal info */
2041ce91824SDavid Xu         struct task biotask;
20548dac059SAlan Cox };
20648dac059SAlan Cox 
20748dac059SAlan Cox /* jobflags */
2081ce91824SDavid Xu #define AIOCBLIST_RUNDOWN       0x04
20948dac059SAlan Cox #define AIOCBLIST_DONE          0x10
2101ce91824SDavid Xu #define AIOCBLIST_BUFDONE       0x20
21148dac059SAlan Cox 
2122244ea07SJohn Dyson /*
2132244ea07SJohn Dyson  * AIO process info
2142244ea07SJohn Dyson  */
21584af4da6SJohn Dyson #define AIOP_FREE       0x1             /* proc on free queue */
21684af4da6SJohn Dyson 
217b40ce416SJulian Elischer struct aiothreadlist {
218b40ce416SJulian Elischer         int aiothreadflags;             /* AIO proc flags */
219b40ce416SJulian Elischer         TAILQ_ENTRY(aiothreadlist) list; /* List of processes */
220b40ce416SJulian Elischer         struct thread *aiothread;       /* The AIO thread */
2212244ea07SJohn Dyson };
2222244ea07SJohn Dyson 
22384af4da6SJohn Dyson /*
22484af4da6SJohn Dyson  * Data structure for lio signal management
22584af4da6SJohn Dyson  */
2261ce91824SDavid Xu struct aioliojob {
22784af4da6SJohn Dyson         int lioj_flags;
2281ce91824SDavid Xu         int lioj_count;
2291ce91824SDavid Xu         int lioj_finished_count;
23084af4da6SJohn Dyson         struct sigevent lioj_signal;    /* signal on all I/O done */
2311ce91824SDavid Xu         TAILQ_ENTRY(aioliojob) lioj_list;
23269cd28daSDoug Ambrisko         struct knlist klist;            /* list of knotes */
2334c0fb2cfSDavid Xu         ksiginfo_t lioj_ksi;            /* Realtime signal info */
23484af4da6SJohn Dyson };
2351ce91824SDavid Xu 
23684af4da6SJohn Dyson #define LIOJ_SIGNAL             0x1     /* signal on all done (lio) */
23784af4da6SJohn Dyson #define LIOJ_SIGNAL_POSTED      0x2     /* signal has been posted */
23869cd28daSDoug Ambrisko #define LIOJ_KEVENT_POSTED      0x4     /* kevent triggered */
23984af4da6SJohn Dyson 
24084af4da6SJohn Dyson /*
24184af4da6SJohn Dyson  * per process aio data structure
24284af4da6SJohn Dyson  */
2432244ea07SJohn Dyson struct kaioinfo {
244fd3bf775SJohn Dyson         int kaio_flags;                 /* per process kaio flags */
2452244ea07SJohn Dyson         int kaio_maxactive_count;       /* maximum number of AIOs */
2462244ea07SJohn Dyson         int kaio_active_count;          /* number of currently used AIOs */
2472244ea07SJohn Dyson         int kaio_qallowed_count;        /* maximum size of AIO queue */
2481ce91824SDavid Xu         int kaio_count;                 /* size of AIO queue */
249fd3bf775SJohn Dyson         int kaio_ballowed_count;        /* maximum number of buffers */
250fd3bf775SJohn Dyson         int kaio_buffer_count;          /* number of physio buffers */
2511ce91824SDavid Xu 
TAILQ_HEAD(,aiocblist) kaio_all; /* all AIOs in the process */ 2521ce91824SDavid Xu TAILQ_HEAD(,aiocblist) kaio_done; /* done queue for process */ 2531ce91824SDavid Xu TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* list of lio jobs */ 254e3975643SJake Burkholder TAILQ_HEAD(,aiocblist) kaio_jobqueue; /* job queue for process */ 255e3975643SJake Burkholder TAILQ_HEAD(,aiocblist) kaio_bufqueue; /* buffer job queue for process */ 256e3975643SJake Burkholder TAILQ_HEAD(,aiocblist) kaio_sockqueue; /* queue for aios waiting on sockets */ 2572244ea07SJohn Dyson }; 2582244ea07SJohn Dyson 25984af4da6SJohn Dyson #define KAIO_RUNDOWN 0x1 /* process is being run down */ 260bfbbc4aaSJason Evans #define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */ 261fd3bf775SJohn Dyson 262eb8e6d52SEivind Eklund static TAILQ_HEAD(,aiothreadlist) aio_freeproc; /* Idle daemons */ 2631ce91824SDavid Xu static struct sema aio_newproc_sem; 2641ce91824SDavid Xu static struct mtx aio_job_mtx; 2651ce91824SDavid Xu static struct mtx aio_sock_mtx; 266e3975643SJake Burkholder static TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */ 2671ce91824SDavid Xu static struct unrhdr *aiod_unr; 2682244ea07SJohn Dyson 269fd3bf775SJohn Dyson static void aio_init_aioinfo(struct proc *p); 27021d56e9cSAlfred Perlstein static void aio_onceonly(void); 271fd3bf775SJohn Dyson static int aio_free_entry(struct aiocblist *aiocbe); 272fd3bf775SJohn Dyson static void aio_process(struct aiocblist *aiocbe); 2731ce91824SDavid Xu static int aio_newproc(int *); 274a9bf5e37SDavid Xu static int aio_aqueue(struct thread *td, struct aiocb *job, 275a9bf5e37SDavid Xu struct aioliojob *lio, int type, int osigev); 276fd3bf775SJohn Dyson static void aio_physwakeup(struct buf *bp); 27775b8b3b2SJohn Baldwin static void aio_proc_rundown(void *arg, struct proc *p); 278fd3bf775SJohn Dyson static int aio_qphysio(struct proc *p, struct aiocblist *iocb); 2791ce91824SDavid Xu static void biohelper(void *, int); 2801ce91824SDavid Xu static void aio_daemon(void *param); 28148dac059SAlan Cox static void aio_swake_cb(struct socket *, struct sockbuf *); 28221d56e9cSAlfred Perlstein static int aio_unload(void); 28321d56e9cSAlfred Perlstein static int filt_aioattach(struct knote *kn); 28421d56e9cSAlfred Perlstein static void filt_aiodetach(struct knote *kn); 28521d56e9cSAlfred Perlstein static int filt_aio(struct knote *kn, long hint); 28669cd28daSDoug Ambrisko static int filt_lioattach(struct knote *kn); 28769cd28daSDoug Ambrisko static void filt_liodetach(struct knote *kn); 28869cd28daSDoug Ambrisko static int filt_lio(struct knote *kn, long hint); 28969cd28daSDoug Ambrisko #define DONE_BUF 1 29069cd28daSDoug Ambrisko #define DONE_QUEUE 2 29169cd28daSDoug Ambrisko static void aio_bio_done_notify( struct proc *userp, struct aiocblist *aiocbe, int type); 2920972628aSDavid Xu static int do_lio_listio(struct thread *td, struct lio_listio_args *uap, 2930972628aSDavid Xu int oldsigev); 2942244ea07SJohn Dyson 295eb8e6d52SEivind Eklund /* 296eb8e6d52SEivind Eklund * Zones for: 297eb8e6d52SEivind Eklund * kaio Per process async io info 298eb8e6d52SEivind Eklund * aiop async io thread data 299eb8e6d52SEivind Eklund * aiocb async io jobs 300eb8e6d52SEivind Eklund * aiol list io job pointer - internal to aio_suspend XXX 301eb8e6d52SEivind Eklund * aiolio list io jobs 302eb8e6d52SEivind Eklund */ 303c897b813SJeff Roberson static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone; 304fd3bf775SJohn Dyson 305eb8e6d52SEivind Eklund /* kqueue filters for aio 
*/ 30621d56e9cSAlfred Perlstein static struct filterops aio_filtops = 30721d56e9cSAlfred Perlstein { 0, filt_aioattach, filt_aiodetach, filt_aio }; 30869cd28daSDoug Ambrisko static struct filterops lio_filtops = 30969cd28daSDoug Ambrisko { 0, filt_lioattach, filt_liodetach, filt_lio }; 31021d56e9cSAlfred Perlstein 31175b8b3b2SJohn Baldwin static eventhandler_tag exit_tag, exec_tag; 31275b8b3b2SJohn Baldwin 3131ce91824SDavid Xu TASKQUEUE_DEFINE_THREAD(aiod_bio); 3141ce91824SDavid Xu 315eb8e6d52SEivind Eklund /* 316eb8e6d52SEivind Eklund * Main operations function for use as a kernel module. 317eb8e6d52SEivind Eklund */ 31821d56e9cSAlfred Perlstein static int 31921d56e9cSAlfred Perlstein aio_modload(struct module *module, int cmd, void *arg) 32021d56e9cSAlfred Perlstein { 32121d56e9cSAlfred Perlstein int error = 0; 32221d56e9cSAlfred Perlstein 32321d56e9cSAlfred Perlstein switch (cmd) { 32421d56e9cSAlfred Perlstein case MOD_LOAD: 32521d56e9cSAlfred Perlstein aio_onceonly(); 32621d56e9cSAlfred Perlstein break; 32721d56e9cSAlfred Perlstein case MOD_UNLOAD: 32821d56e9cSAlfred Perlstein error = aio_unload(); 32921d56e9cSAlfred Perlstein break; 33021d56e9cSAlfred Perlstein case MOD_SHUTDOWN: 33121d56e9cSAlfred Perlstein break; 33221d56e9cSAlfred Perlstein default: 33321d56e9cSAlfred Perlstein error = EINVAL; 33421d56e9cSAlfred Perlstein break; 33521d56e9cSAlfred Perlstein } 33621d56e9cSAlfred Perlstein return (error); 33721d56e9cSAlfred Perlstein } 33821d56e9cSAlfred Perlstein 33921d56e9cSAlfred Perlstein static moduledata_t aio_mod = { 34021d56e9cSAlfred Perlstein "aio", 34121d56e9cSAlfred Perlstein &aio_modload, 34221d56e9cSAlfred Perlstein NULL 34321d56e9cSAlfred Perlstein }; 34421d56e9cSAlfred Perlstein 34521d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_return); 34621d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_suspend); 34721d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_cancel); 34821d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_error); 34921d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_read); 35021d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_write); 35121d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(aio_waitcomplete); 35221d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(lio_listio); 3530972628aSDavid Xu SYSCALL_MODULE_HELPER(oaio_read); 3540972628aSDavid Xu SYSCALL_MODULE_HELPER(oaio_write); 3550972628aSDavid Xu SYSCALL_MODULE_HELPER(olio_listio); 35621d56e9cSAlfred Perlstein 35721d56e9cSAlfred Perlstein DECLARE_MODULE(aio, aio_mod, 35821d56e9cSAlfred Perlstein SI_SUB_VFS, SI_ORDER_ANY); 35921d56e9cSAlfred Perlstein MODULE_VERSION(aio, 1); 36021d56e9cSAlfred Perlstein 361fd3bf775SJohn Dyson /* 3622244ea07SJohn Dyson * Startup initialization 3632244ea07SJohn Dyson */ 36488ed460eSAlan Cox static void 36521d56e9cSAlfred Perlstein aio_onceonly(void) 366fd3bf775SJohn Dyson { 36721d56e9cSAlfred Perlstein 36821d56e9cSAlfred Perlstein /* XXX: should probably just use so->callback */ 36921d56e9cSAlfred Perlstein aio_swake = &aio_swake_cb; 37075b8b3b2SJohn Baldwin exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL, 37175b8b3b2SJohn Baldwin EVENTHANDLER_PRI_ANY); 37275b8b3b2SJohn Baldwin exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown, NULL, 37375b8b3b2SJohn Baldwin EVENTHANDLER_PRI_ANY); 37421d56e9cSAlfred Perlstein kqueue_add_filteropts(EVFILT_AIO, &aio_filtops); 37569cd28daSDoug Ambrisko kqueue_add_filteropts(EVFILT_LIO, &lio_filtops); 3762244ea07SJohn Dyson TAILQ_INIT(&aio_freeproc); 3771ce91824SDavid Xu sema_init(&aio_newproc_sem, 0, 
"aio_new_proc"); 3781ce91824SDavid Xu mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF); 3791ce91824SDavid Xu mtx_init(&aio_sock_mtx, "aio_sock", NULL, MTX_DEF); 3802244ea07SJohn Dyson TAILQ_INIT(&aio_jobs); 3811ce91824SDavid Xu aiod_unr = new_unrhdr(1, INT_MAX, NULL); 382c897b813SJeff Roberson kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL, 383c897b813SJeff Roberson NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 384c897b813SJeff Roberson aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL, 385c897b813SJeff Roberson NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 386c897b813SJeff Roberson aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL, 387c897b813SJeff Roberson NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 388c897b813SJeff Roberson aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL, 389c897b813SJeff Roberson NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 3901ce91824SDavid Xu aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL, 391c897b813SJeff Roberson NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 39284af4da6SJohn Dyson aiod_timeout = AIOD_TIMEOUT_DEFAULT; 39384af4da6SJohn Dyson aiod_lifetime = AIOD_LIFETIME_DEFAULT; 394fd3bf775SJohn Dyson jobrefid = 1; 395c7047e52SGarrett Wollman async_io_version = _POSIX_VERSION; 396c844abc9SAlfred Perlstein p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX); 39786d52125SAlfred Perlstein p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE); 39886d52125SAlfred Perlstein p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0); 3992244ea07SJohn Dyson } 4002244ea07SJohn Dyson 401eb8e6d52SEivind Eklund /* 402eb8e6d52SEivind Eklund * Callback for unload of AIO when used as a module. 403eb8e6d52SEivind Eklund */ 40421d56e9cSAlfred Perlstein static int 40521d56e9cSAlfred Perlstein aio_unload(void) 40621d56e9cSAlfred Perlstein { 407ad3b9257SJohn-Mark Gurney int error; 40821d56e9cSAlfred Perlstein 40921d56e9cSAlfred Perlstein /* 41021d56e9cSAlfred Perlstein * XXX: no unloads by default, it's too dangerous. 41121d56e9cSAlfred Perlstein * perhaps we could do it if locked out callers and then 41221d56e9cSAlfred Perlstein * did an aio_proc_rundown() on each process. 4132a522eb9SJohn Baldwin * 4142a522eb9SJohn Baldwin * jhb: aio_proc_rundown() needs to run on curproc though, 4152a522eb9SJohn Baldwin * so I don't think that would fly. 
41621d56e9cSAlfred Perlstein */ 41721d56e9cSAlfred Perlstein if (!unloadable) 41821d56e9cSAlfred Perlstein return (EOPNOTSUPP); 41921d56e9cSAlfred Perlstein 420ad3b9257SJohn-Mark Gurney error = kqueue_del_filteropts(EVFILT_AIO); 421ad3b9257SJohn-Mark Gurney if (error) 422ad3b9257SJohn-Mark Gurney return error; 423c7047e52SGarrett Wollman async_io_version = 0; 42421d56e9cSAlfred Perlstein aio_swake = NULL; 4251ce91824SDavid Xu taskqueue_free(taskqueue_aiod_bio); 4261ce91824SDavid Xu delete_unrhdr(aiod_unr); 42775b8b3b2SJohn Baldwin EVENTHANDLER_DEREGISTER(process_exit, exit_tag); 42875b8b3b2SJohn Baldwin EVENTHANDLER_DEREGISTER(process_exec, exec_tag); 4291ce91824SDavid Xu mtx_destroy(&aio_job_mtx); 4301ce91824SDavid Xu mtx_destroy(&aio_sock_mtx); 4311ce91824SDavid Xu sema_destroy(&aio_newproc_sem); 432f51c1e89SAlfred Perlstein p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1); 433f51c1e89SAlfred Perlstein p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1); 434f51c1e89SAlfred Perlstein p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1); 43521d56e9cSAlfred Perlstein return (0); 43621d56e9cSAlfred Perlstein } 43721d56e9cSAlfred Perlstein 4382244ea07SJohn Dyson /* 439bfbbc4aaSJason Evans * Init the per-process aioinfo structure. The aioinfo limits are set 440bfbbc4aaSJason Evans * per-process for user limit (resource) management. 4412244ea07SJohn Dyson */ 44288ed460eSAlan Cox static void 443fd3bf775SJohn Dyson aio_init_aioinfo(struct proc *p) 444fd3bf775SJohn Dyson { 4452244ea07SJohn Dyson struct kaioinfo *ki; 446ac41f2efSAlfred Perlstein 447a163d034SWarner Losh ki = uma_zalloc(kaio_zone, M_WAITOK); 44884af4da6SJohn Dyson ki->kaio_flags = 0; 449a624e84fSJohn Dyson ki->kaio_maxactive_count = max_aio_per_proc; 4502244ea07SJohn Dyson ki->kaio_active_count = 0; 451a624e84fSJohn Dyson ki->kaio_qallowed_count = max_aio_queue_per_proc; 4521ce91824SDavid Xu ki->kaio_count = 0; 45384af4da6SJohn Dyson ki->kaio_ballowed_count = max_buf_aio; 454fd3bf775SJohn Dyson ki->kaio_buffer_count = 0; 4551ce91824SDavid Xu TAILQ_INIT(&ki->kaio_all); 4561ce91824SDavid Xu TAILQ_INIT(&ki->kaio_done); 4572244ea07SJohn Dyson TAILQ_INIT(&ki->kaio_jobqueue); 458fd3bf775SJohn Dyson TAILQ_INIT(&ki->kaio_bufqueue); 45984af4da6SJohn Dyson TAILQ_INIT(&ki->kaio_liojoblist); 460bfbbc4aaSJason Evans TAILQ_INIT(&ki->kaio_sockqueue); 4613999ebe3SAlan Cox PROC_LOCK(p); 4623999ebe3SAlan Cox if (p->p_aioinfo == NULL) { 4633999ebe3SAlan Cox p->p_aioinfo = ki; 4643999ebe3SAlan Cox PROC_UNLOCK(p); 4653999ebe3SAlan Cox } else { 4663999ebe3SAlan Cox PROC_UNLOCK(p); 4673999ebe3SAlan Cox uma_zfree(kaio_zone, ki); 4682244ea07SJohn Dyson } 469bfbbc4aaSJason Evans 470bfbbc4aaSJason Evans while (num_aio_procs < target_aio_procs) 4711ce91824SDavid Xu aio_newproc(NULL); 4722244ea07SJohn Dyson } 4732244ea07SJohn Dyson 4744c0fb2cfSDavid Xu static int 4754c0fb2cfSDavid Xu aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi) 4764c0fb2cfSDavid Xu { 4774c0fb2cfSDavid Xu PROC_LOCK_ASSERT(p, MA_OWNED); 4784c0fb2cfSDavid Xu if (!KSI_ONQ(ksi)) { 4794c0fb2cfSDavid Xu ksi->ksi_code = SI_ASYNCIO; 4804c0fb2cfSDavid Xu ksi->ksi_flags |= KSI_EXT | KSI_INS; 4814c0fb2cfSDavid Xu return (psignal_event(p, sigev, ksi)); 4824c0fb2cfSDavid Xu } 4834c0fb2cfSDavid Xu return (0); 4844c0fb2cfSDavid Xu } 4854c0fb2cfSDavid Xu 4862244ea07SJohn Dyson /* 487bfbbc4aaSJason Evans * Free a job entry. Wait for completion if it is currently active, but don't 488bfbbc4aaSJason Evans * delay forever. 
If we delay, we return a flag that says that we have to 489bfbbc4aaSJason Evans * restart the queue scan. 4902244ea07SJohn Dyson */ 49188ed460eSAlan Cox static int 492fd3bf775SJohn Dyson aio_free_entry(struct aiocblist *aiocbe) 493fd3bf775SJohn Dyson { 4942244ea07SJohn Dyson struct kaioinfo *ki; 4951ce91824SDavid Xu struct aioliojob *lj; 4962244ea07SJohn Dyson struct proc *p; 4972244ea07SJohn Dyson 4982244ea07SJohn Dyson p = aiocbe->userproc; 4991ce91824SDavid Xu 5001ce91824SDavid Xu PROC_LOCK_ASSERT(p, MA_OWNED); 5011ce91824SDavid Xu MPASS(curproc == p); 5021ce91824SDavid Xu MPASS(aiocbe->jobstate == JOBST_JOBFINISHED); 5031ce91824SDavid Xu 5042244ea07SJohn Dyson ki = p->p_aioinfo; 5051ce91824SDavid Xu MPASS(ki != NULL); 5061ce91824SDavid Xu 5071ce91824SDavid Xu atomic_subtract_int(&num_queue_count, 1); 5081ce91824SDavid Xu 5091ce91824SDavid Xu ki->kaio_count--; 5101ce91824SDavid Xu MPASS(ki->kaio_count >= 0); 5111ce91824SDavid Xu 51284af4da6SJohn Dyson lj = aiocbe->lio; 51384af4da6SJohn Dyson if (lj) { 5141ce91824SDavid Xu lj->lioj_count--; 5151ce91824SDavid Xu lj->lioj_finished_count--; 5161ce91824SDavid Xu 517a9bf5e37SDavid Xu if (lj->lioj_count == 0) { 5181ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 5191ce91824SDavid Xu /* lio is going away, we need to destroy any knotes */ 5201ce91824SDavid Xu knlist_delete(&lj->klist, curthread, 1); 5211ce91824SDavid Xu sigqueue_take(&lj->lioj_ksi); 5221ce91824SDavid Xu uma_zfree(aiolio_zone, lj); 52384af4da6SJohn Dyson } 52484af4da6SJohn Dyson } 5251ce91824SDavid Xu 5261ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist); 5271ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist); 528fd3bf775SJohn Dyson 529cb679c38SJonathan Lemon /* aiocbe is going away, we need to destroy any knotes */ 5301ce91824SDavid Xu knlist_delete(&aiocbe->klist, curthread, 1); 5311ce91824SDavid Xu sigqueue_take(&aiocbe->ksi); 5321ce91824SDavid Xu 5331ce91824SDavid Xu MPASS(aiocbe->bp == NULL); 5341ce91824SDavid Xu aiocbe->jobstate = JOBST_NULL; 5351ce91824SDavid Xu 5361ce91824SDavid Xu /* Wake up anyone who has interest to do cleanup work. */ 5371ce91824SDavid Xu if (ki->kaio_flags & (KAIO_WAKEUP | KAIO_RUNDOWN)) { 5381ce91824SDavid Xu ki->kaio_flags &= ~KAIO_WAKEUP; 5391ce91824SDavid Xu wakeup(&p->p_aioinfo); 5401ce91824SDavid Xu } 5411ce91824SDavid Xu PROC_UNLOCK(p); 5422a522eb9SJohn Baldwin 5432a522eb9SJohn Baldwin /* 5442a522eb9SJohn Baldwin * The thread argument here is used to find the owning process 5452a522eb9SJohn Baldwin * and is also passed to fo_close() which may pass it to various 5462a522eb9SJohn Baldwin * places such as devsw close() routines. Because of that, we 5472a522eb9SJohn Baldwin * need a thread pointer from the process owning the job that is 5482a522eb9SJohn Baldwin * persistent and won't disappear out from under us or move to 5492a522eb9SJohn Baldwin * another process. 5502a522eb9SJohn Baldwin * 5512a522eb9SJohn Baldwin * Currently, all the callers of this function call it to remove 5522a522eb9SJohn Baldwin * an aiocblist from the current process' job list either via a 5532a522eb9SJohn Baldwin * syscall or due to the current process calling exit() or 5542a522eb9SJohn Baldwin * execve(). Thus, we know that p == curproc. We also know that 5552a522eb9SJohn Baldwin * curthread can't exit since we are curthread. 5562a522eb9SJohn Baldwin * 5572a522eb9SJohn Baldwin * Therefore, we use curthread as the thread to pass to 5582a522eb9SJohn Baldwin * knlist_delete(). 
This does mean that it is possible for the 5592a522eb9SJohn Baldwin * thread pointer at close time to differ from the thread pointer 5602a522eb9SJohn Baldwin * at open time, but this is already true of file descriptors in 5612a522eb9SJohn Baldwin * a multithreaded process. 562b40ce416SJulian Elischer */ 563a5c0b1c0SAlan Cox fdrop(aiocbe->fd_file, curthread); 564f8f750c5SRobert Watson crfree(aiocbe->cred); 565c897b813SJeff Roberson uma_zfree(aiocb_zone, aiocbe); 5661ce91824SDavid Xu PROC_LOCK(p); 5671ce91824SDavid Xu 568ac41f2efSAlfred Perlstein return (0); 5692244ea07SJohn Dyson } 5702244ea07SJohn Dyson 5712244ea07SJohn Dyson /* 5722244ea07SJohn Dyson * Rundown the jobs for a given process. 5732244ea07SJohn Dyson */ 57421d56e9cSAlfred Perlstein static void 57575b8b3b2SJohn Baldwin aio_proc_rundown(void *arg, struct proc *p) 576fd3bf775SJohn Dyson { 5772244ea07SJohn Dyson struct kaioinfo *ki; 5781ce91824SDavid Xu struct aioliojob *lj; 5791ce91824SDavid Xu struct aiocblist *cbe, *cbn; 580bfbbc4aaSJason Evans struct file *fp; 581bfbbc4aaSJason Evans struct socket *so; 5822244ea07SJohn Dyson 5832a522eb9SJohn Baldwin KASSERT(curthread->td_proc == p, 5842a522eb9SJohn Baldwin ("%s: called on non-curproc", __func__)); 5852244ea07SJohn Dyson ki = p->p_aioinfo; 5862244ea07SJohn Dyson if (ki == NULL) 5872244ea07SJohn Dyson return; 5882244ea07SJohn Dyson 5891ce91824SDavid Xu PROC_LOCK(p); 5901ce91824SDavid Xu 5911ce91824SDavid Xu restart: 592fd3bf775SJohn Dyson ki->kaio_flags |= KAIO_RUNDOWN; 593a624e84fSJohn Dyson 594bfbbc4aaSJason Evans /* 5951ce91824SDavid Xu * Try to cancel all pending requests. This code simulates 5961ce91824SDavid Xu * aio_cancel on all pending I/O requests. 597bfbbc4aaSJason Evans */ 5981ce91824SDavid Xu while ((cbe = TAILQ_FIRST(&ki->kaio_sockqueue))) { 5991ce91824SDavid Xu fp = cbe->fd_file; 60048e3128bSMatthew Dillon so = fp->f_data; 6011ce91824SDavid Xu mtx_lock(&aio_sock_mtx); 6021ce91824SDavid Xu TAILQ_REMOVE(&so->so_aiojobq, cbe, list); 6031ce91824SDavid Xu mtx_unlock(&aio_sock_mtx); 6041ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist); 6051ce91824SDavid Xu TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, cbe, plist); 6061ce91824SDavid Xu cbe->jobstate = JOBST_JOBQGLOBAL; 6072244ea07SJohn Dyson } 6082244ea07SJohn Dyson 6091ce91824SDavid Xu TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) { 6101ce91824SDavid Xu mtx_lock(&aio_job_mtx); 6111ce91824SDavid Xu if (cbe->jobstate == JOBST_JOBQGLOBAL) { 6121ce91824SDavid Xu TAILQ_REMOVE(&aio_jobs, cbe, list); 6131ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 6141ce91824SDavid Xu cbe->jobstate = JOBST_JOBFINISHED; 6151ce91824SDavid Xu cbe->uaiocb._aiocb_private.status = -1; 6161ce91824SDavid Xu cbe->uaiocb._aiocb_private.error = ECANCELED; 6171ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist); 6181ce91824SDavid Xu aio_bio_done_notify(p, cbe, DONE_QUEUE); 6191ce91824SDavid Xu } else { 6201ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 6211ce91824SDavid Xu } 6222244ea07SJohn Dyson } 62384af4da6SJohn Dyson 6241ce91824SDavid Xu if (TAILQ_FIRST(&ki->kaio_sockqueue)) 6251ce91824SDavid Xu goto restart; 6261ce91824SDavid Xu 6271ce91824SDavid Xu /* Wait for all running I/O to be finished */ 6281ce91824SDavid Xu if (TAILQ_FIRST(&ki->kaio_bufqueue) || 6291ce91824SDavid Xu TAILQ_FIRST(&ki->kaio_jobqueue)) { 63084af4da6SJohn Dyson ki->kaio_flags |= KAIO_WAKEUP; 6311ce91824SDavid Xu msleep(&p->p_aioinfo, &p->p_mtx, PRIBIO, "aioprn", hz); 6321ce91824SDavid Xu goto restart; 63384af4da6SJohn Dyson } 63484af4da6SJohn Dyson 
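        /*
         * For context: the cancel-and-drain pass above is roughly the
         * kernel-side equivalent of what a process could do for a single
         * outstanding request before exiting.  Illustrative sketch only,
         * not part of this file; "iocb" is a hypothetical aiocb that was
         * queued earlier with aio_read() or aio_write():
         *
         *      const struct aiocb *const list[] = { &iocb };
         *
         *      if (aio_cancel(iocb.aio_fildes, &iocb) == AIO_NOTCANCELED)
         *              (void)aio_suspend(list, 1, NULL);  // still running, wait
         *      if (aio_error(&iocb) != EINPROGRESS)
         *              (void)aio_return(&iocb);           // reclaim the request
         */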
6351ce91824SDavid Xu         /* Free all completed I/O requests. */
6361ce91824SDavid Xu         while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
6371ce91824SDavid Xu                 aio_free_entry(cbe);
63884af4da6SJohn Dyson 
6391ce91824SDavid Xu         while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
640a9bf5e37SDavid Xu                 if (lj->lioj_count == 0) {
64184af4da6SJohn Dyson                         TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
6421ce91824SDavid Xu                         knlist_delete(&lj->klist, curthread, 1);
6431ce91824SDavid Xu                         sigqueue_take(&lj->lioj_ksi);
644c897b813SJeff Roberson                         uma_zfree(aiolio_zone, lj);
645f4f0ecefSJohn Dyson                 } else {
646a9bf5e37SDavid Xu                         panic("LIO job not cleaned up: C:%d, FC:%d\n",
647a9bf5e37SDavid Xu                             lj->lioj_count, lj->lioj_finished_count);
64884af4da6SJohn Dyson                 }
649f4f0ecefSJohn Dyson         }
65084af4da6SJohn Dyson 
651c897b813SJeff Roberson         uma_zfree(kaio_zone, ki);
652a624e84fSJohn Dyson         p->p_aioinfo = NULL;
6531ce91824SDavid Xu         PROC_UNLOCK(p);
6542244ea07SJohn Dyson }
6552244ea07SJohn Dyson 
6562244ea07SJohn Dyson /*
657bfbbc4aaSJason Evans  * Select a job to run (called by an AIO daemon).
6582244ea07SJohn Dyson  */
6592244ea07SJohn Dyson static struct aiocblist *
660b40ce416SJulian Elischer aio_selectjob(struct aiothreadlist *aiop)
661fd3bf775SJohn Dyson {
6622244ea07SJohn Dyson         struct aiocblist *aiocbe;
663bfbbc4aaSJason Evans         struct kaioinfo *ki;
664bfbbc4aaSJason Evans         struct proc *userp;
6652244ea07SJohn Dyson 
6661ce91824SDavid Xu         mtx_assert(&aio_job_mtx, MA_OWNED);
6672a522eb9SJohn Baldwin         TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
6682244ea07SJohn Dyson                 userp = aiocbe->userproc;
6692244ea07SJohn Dyson                 ki = userp->p_aioinfo;
6702244ea07SJohn Dyson 
6712244ea07SJohn Dyson                 if (ki->kaio_active_count < ki->kaio_maxactive_count) {
6722244ea07SJohn Dyson                         TAILQ_REMOVE(&aio_jobs, aiocbe, list);
6731ce91824SDavid Xu                         /* Account for currently active jobs. */
6741ce91824SDavid Xu                         ki->kaio_active_count++;
6751ce91824SDavid Xu                         aiocbe->jobstate = JOBST_JOBRUNNING;
6761ce91824SDavid Xu                         break;
6771ce91824SDavid Xu                 }
6781ce91824SDavid Xu         }
679ac41f2efSAlfred Perlstein         return (aiocbe);
6802244ea07SJohn Dyson }
6812244ea07SJohn Dyson 
6822244ea07SJohn Dyson /*
683bfbbc4aaSJason Evans  * The AIO processing activity. This is the code that does the I/O request for
684bfbbc4aaSJason Evans  * the non-physio version of the operations. The normal vn operations are used,
685bfbbc4aaSJason Evans  * and this code should work in all instances for every type of file, including
686bfbbc4aaSJason Evans  * pipes, sockets, fifos, and regular files.
6871ce91824SDavid Xu  *
6881ce91824SDavid Xu  * XXX This code does not work well with pipes, sockets and fifos: the
6891ce91824SDavid Xu  * aiod threads can block when there is no data or no buffer space and
6901ce91824SDavid Xu  * the file was not opened with O_NONBLOCK, and a handful of such
6911ce91824SDavid Xu  * processes can leave every aiod thread blocked.  We need a
6921ce91824SDavid Xu  * FOF_OFFSET-like flag that overrides f_flag and tells the lower layers
6931ce91824SDavid Xu  * to do non-blocking I/O; we cannot simply set O_NONBLOCK because that
6941ce91824SDavid Xu  * races with userland.  Sockets have a trigger mechanism, but even that
6951ce91824SDavid Xu  * does not work well when userland misbehaves.
6962244ea07SJohn Dyson */ 69788ed460eSAlan Cox static void 698fd3bf775SJohn Dyson aio_process(struct aiocblist *aiocbe) 699fd3bf775SJohn Dyson { 700f8f750c5SRobert Watson struct ucred *td_savedcred; 701b40ce416SJulian Elischer struct thread *td; 702b40ce416SJulian Elischer struct proc *mycp; 7032244ea07SJohn Dyson struct aiocb *cb; 7042244ea07SJohn Dyson struct file *fp; 7051ce91824SDavid Xu struct socket *so; 7062244ea07SJohn Dyson struct uio auio; 7072244ea07SJohn Dyson struct iovec aiov; 7082244ea07SJohn Dyson int cnt; 7092244ea07SJohn Dyson int error; 710fd3bf775SJohn Dyson int oublock_st, oublock_end; 711fd3bf775SJohn Dyson int inblock_st, inblock_end; 7122244ea07SJohn Dyson 713b40ce416SJulian Elischer td = curthread; 714f8f750c5SRobert Watson td_savedcred = td->td_ucred; 715f8f750c5SRobert Watson td->td_ucred = aiocbe->cred; 716b40ce416SJulian Elischer mycp = td->td_proc; 7172244ea07SJohn Dyson cb = &aiocbe->uaiocb; 71800e73160SAlan Cox fp = aiocbe->fd_file; 719bfbbc4aaSJason Evans 72091369fc7SAlan Cox aiov.iov_base = (void *)(uintptr_t)cb->aio_buf; 7212244ea07SJohn Dyson aiov.iov_len = cb->aio_nbytes; 7222244ea07SJohn Dyson 7232244ea07SJohn Dyson auio.uio_iov = &aiov; 7242244ea07SJohn Dyson auio.uio_iovcnt = 1; 7259b16adc1SAlan Cox auio.uio_offset = cb->aio_offset; 7262244ea07SJohn Dyson auio.uio_resid = cb->aio_nbytes; 7272244ea07SJohn Dyson cnt = cb->aio_nbytes; 7282244ea07SJohn Dyson auio.uio_segflg = UIO_USERSPACE; 729b40ce416SJulian Elischer auio.uio_td = td; 7302244ea07SJohn Dyson 731fd3bf775SJohn Dyson inblock_st = mycp->p_stats->p_ru.ru_inblock; 732fd3bf775SJohn Dyson oublock_st = mycp->p_stats->p_ru.ru_oublock; 733279d7226SMatthew Dillon /* 734a9bf5e37SDavid Xu * aio_aqueue() acquires a reference to the file that is 7359b16adc1SAlan Cox * released in aio_free_entry(). 
736279d7226SMatthew Dillon */ 7372244ea07SJohn Dyson if (cb->aio_lio_opcode == LIO_READ) { 7382244ea07SJohn Dyson auio.uio_rw = UIO_READ; 739b40ce416SJulian Elischer error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td); 7402244ea07SJohn Dyson } else { 7412244ea07SJohn Dyson auio.uio_rw = UIO_WRITE; 742b40ce416SJulian Elischer error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td); 7432244ea07SJohn Dyson } 744fd3bf775SJohn Dyson inblock_end = mycp->p_stats->p_ru.ru_inblock; 745fd3bf775SJohn Dyson oublock_end = mycp->p_stats->p_ru.ru_oublock; 746fd3bf775SJohn Dyson 747fd3bf775SJohn Dyson aiocbe->inputcharge = inblock_end - inblock_st; 748fd3bf775SJohn Dyson aiocbe->outputcharge = oublock_end - oublock_st; 7492244ea07SJohn Dyson 750bfbbc4aaSJason Evans if ((error) && (auio.uio_resid != cnt)) { 7512244ea07SJohn Dyson if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) 7522244ea07SJohn Dyson error = 0; 75319eb87d2SJohn Baldwin if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) { 7541ce91824SDavid Xu int sigpipe = 1; 7551ce91824SDavid Xu if (fp->f_type == DTYPE_SOCKET) { 7561ce91824SDavid Xu so = fp->f_data; 7571ce91824SDavid Xu if (so->so_options & SO_NOSIGPIPE) 7581ce91824SDavid Xu sigpipe = 0; 7591ce91824SDavid Xu } 7601ce91824SDavid Xu if (sigpipe) { 7619b16adc1SAlan Cox PROC_LOCK(aiocbe->userproc); 7629b16adc1SAlan Cox psignal(aiocbe->userproc, SIGPIPE); 7639b16adc1SAlan Cox PROC_UNLOCK(aiocbe->userproc); 76419eb87d2SJohn Baldwin } 7652244ea07SJohn Dyson } 7661ce91824SDavid Xu } 7672244ea07SJohn Dyson 7682244ea07SJohn Dyson cnt -= auio.uio_resid; 7692244ea07SJohn Dyson cb->_aiocb_private.error = error; 7702244ea07SJohn Dyson cb->_aiocb_private.status = cnt; 771f8f750c5SRobert Watson td->td_ucred = td_savedcred; 7722244ea07SJohn Dyson } 7732244ea07SJohn Dyson 77469cd28daSDoug Ambrisko static void 7751ce91824SDavid Xu aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type) 7761ce91824SDavid Xu { 7771ce91824SDavid Xu struct aioliojob *lj; 77869cd28daSDoug Ambrisko struct kaioinfo *ki; 7791ce91824SDavid Xu int lj_done; 78069cd28daSDoug Ambrisko 7811ce91824SDavid Xu PROC_LOCK_ASSERT(userp, MA_OWNED); 78269cd28daSDoug Ambrisko ki = userp->p_aioinfo; 78369cd28daSDoug Ambrisko lj = aiocbe->lio; 78469cd28daSDoug Ambrisko lj_done = 0; 78569cd28daSDoug Ambrisko if (lj) { 7861ce91824SDavid Xu lj->lioj_finished_count++; 7871ce91824SDavid Xu if (lj->lioj_count == lj->lioj_finished_count) 78869cd28daSDoug Ambrisko lj_done = 1; 78969cd28daSDoug Ambrisko } 79069cd28daSDoug Ambrisko if (type == DONE_QUEUE) { 7911ce91824SDavid Xu aiocbe->jobflags |= AIOCBLIST_DONE; 79269cd28daSDoug Ambrisko } else { 7931ce91824SDavid Xu aiocbe->jobflags |= AIOCBLIST_BUFDONE; 7941ce91824SDavid Xu ki->kaio_buffer_count--; 79569cd28daSDoug Ambrisko } 7961ce91824SDavid Xu TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist); 7971ce91824SDavid Xu aiocbe->jobstate = JOBST_JOBFINISHED; 7981ce91824SDavid Xu if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || 7991ce91824SDavid Xu aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) 8001ce91824SDavid Xu aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi); 8011ce91824SDavid Xu 8021ce91824SDavid Xu KNOTE_LOCKED(&aiocbe->klist, 1); 8031ce91824SDavid Xu 80469cd28daSDoug Ambrisko if (lj_done) { 8051ce91824SDavid Xu if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 80669cd28daSDoug Ambrisko lj->lioj_flags |= LIOJ_KEVENT_POSTED; 8071ce91824SDavid Xu KNOTE_LOCKED(&lj->klist, 1); 80869cd28daSDoug Ambrisko } 
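                /*
                 * The two notification paths handled here (the EVFILT_LIO
                 * kevent above, signal delivery below) are selected by the
                 * sigevent that the caller passes to lio_listio().
                 * Illustrative sketch only, not part of this file; "fd" and
                 * "buf" are hypothetical:
                 *
                 *      struct aiocb cb = { 0 }, *iolist[1] = { &cb };
                 *      struct sigevent sev = { 0 };
                 *
                 *      cb.aio_fildes = fd;
                 *      cb.aio_buf = buf;
                 *      cb.aio_nbytes = sizeof(buf);
                 *      cb.aio_lio_opcode = LIO_READ;
                 *
                 *      sev.sigev_notify = SIGEV_SIGNAL;  // aio_sendsig() path below
                 *      sev.sigev_signo = SIGUSR1;
                 *
                 *      if (lio_listio(LIO_NOWAIT, iolist, 1, &sev) == -1)
                 *              err(1, "lio_listio");
                 */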
8091ce91824SDavid Xu if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) 81069cd28daSDoug Ambrisko == LIOJ_SIGNAL 8114c0fb2cfSDavid Xu && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 8124c0fb2cfSDavid Xu lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { 8134c0fb2cfSDavid Xu aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi); 81469cd28daSDoug Ambrisko lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 81569cd28daSDoug Ambrisko } 81669cd28daSDoug Ambrisko } 81769cd28daSDoug Ambrisko if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) { 81869cd28daSDoug Ambrisko ki->kaio_flags &= ~KAIO_WAKEUP; 8191ce91824SDavid Xu wakeup(&userp->p_aioinfo); 82069cd28daSDoug Ambrisko } 82169cd28daSDoug Ambrisko } 82269cd28daSDoug Ambrisko 8232244ea07SJohn Dyson /* 82484af4da6SJohn Dyson * The AIO daemon, most of the actual work is done in aio_process, 82584af4da6SJohn Dyson * but the setup (and address space mgmt) is done in this routine. 8262244ea07SJohn Dyson */ 8272244ea07SJohn Dyson static void 8281ce91824SDavid Xu aio_daemon(void *_id) 8292244ea07SJohn Dyson { 830bfbbc4aaSJason Evans struct aiocblist *aiocbe; 831b40ce416SJulian Elischer struct aiothreadlist *aiop; 832bfbbc4aaSJason Evans struct kaioinfo *ki; 833bfbbc4aaSJason Evans struct proc *curcp, *mycp, *userp; 834bfbbc4aaSJason Evans struct vmspace *myvm, *tmpvm; 835b40ce416SJulian Elischer struct thread *td = curthread; 836f591779bSSeigo Tanimura struct pgrp *newpgrp; 837f591779bSSeigo Tanimura struct session *newsess; 8381ce91824SDavid Xu int id = (intptr_t)_id; 8392244ea07SJohn Dyson 8402244ea07SJohn Dyson /* 841fd3bf775SJohn Dyson * Local copies of curproc (cp) and vmspace (myvm) 8422244ea07SJohn Dyson */ 843b40ce416SJulian Elischer mycp = td->td_proc; 844fd3bf775SJohn Dyson myvm = mycp->p_vmspace; 845fd3bf775SJohn Dyson 846cd4ed3b5SJohn Baldwin KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp")); 847fd3bf775SJohn Dyson 848fd3bf775SJohn Dyson /* 849bfbbc4aaSJason Evans * Allocate and ready the aio control info. There is one aiop structure 850bfbbc4aaSJason Evans * per daemon. 851fd3bf775SJohn Dyson */ 852a163d034SWarner Losh aiop = uma_zalloc(aiop_zone, M_WAITOK); 853b40ce416SJulian Elischer aiop->aiothread = td; 854b40ce416SJulian Elischer aiop->aiothreadflags |= AIOP_FREE; 8552244ea07SJohn Dyson 8562244ea07SJohn Dyson /* 857bfbbc4aaSJason Evans * Place thread (lightweight process) onto the AIO free thread list. 8582244ea07SJohn Dyson */ 8591ce91824SDavid Xu mtx_lock(&aio_job_mtx); 860fd3bf775SJohn Dyson TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 8611ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 862bfbbc4aaSJason Evans 8632244ea07SJohn Dyson /* 864fd3bf775SJohn Dyson * Get rid of our current filedescriptors. AIOD's don't need any 865fd3bf775SJohn Dyson * filedescriptors, except as temporarily inherited from the client. 8662244ea07SJohn Dyson */ 867b40ce416SJulian Elischer fdfree(td); 868fd3bf775SJohn Dyson 869bfbbc4aaSJason Evans /* The daemon resides in its own pgrp. */ 870eb8e6d52SEivind Eklund MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP, 871a163d034SWarner Losh M_WAITOK | M_ZERO); 872eb8e6d52SEivind Eklund MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION, 873a163d034SWarner Losh M_WAITOK | M_ZERO); 874f591779bSSeigo Tanimura 875ba626c1dSJohn Baldwin sx_xlock(&proctree_lock); 876f591779bSSeigo Tanimura enterpgrp(mycp, mycp->p_pid, newpgrp, newsess); 877ba626c1dSJohn Baldwin sx_xunlock(&proctree_lock); 878fd3bf775SJohn Dyson 879fd3bf775SJohn Dyson /* 880fd3bf775SJohn Dyson * Wakeup parent process. 
(Parent sleeps to keep from blasting away 881b40ce416SJulian Elischer * and creating too many daemons.) 882fd3bf775SJohn Dyson */ 8831ce91824SDavid Xu sema_post(&aio_newproc_sem); 8842244ea07SJohn Dyson 8851ce91824SDavid Xu mtx_lock(&aio_job_mtx); 886bfbbc4aaSJason Evans for (;;) { 887fd3bf775SJohn Dyson /* 888fd3bf775SJohn Dyson * curcp is the current daemon process context. 889fd3bf775SJohn Dyson * userp is the current user process context. 890fd3bf775SJohn Dyson */ 891fd3bf775SJohn Dyson curcp = mycp; 892c4860686SJohn Dyson 893fd3bf775SJohn Dyson /* 894fd3bf775SJohn Dyson * Take daemon off of free queue 895fd3bf775SJohn Dyson */ 896b40ce416SJulian Elischer if (aiop->aiothreadflags & AIOP_FREE) { 8972244ea07SJohn Dyson TAILQ_REMOVE(&aio_freeproc, aiop, list); 898b40ce416SJulian Elischer aiop->aiothreadflags &= ~AIOP_FREE; 8992244ea07SJohn Dyson } 9002244ea07SJohn Dyson 901fd3bf775SJohn Dyson /* 902bfbbc4aaSJason Evans * Check for jobs. 903fd3bf775SJohn Dyson */ 904d254af07SMatthew Dillon while ((aiocbe = aio_selectjob(aiop)) != NULL) { 9051ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 9062244ea07SJohn Dyson userp = aiocbe->userproc; 9072244ea07SJohn Dyson 908fd3bf775SJohn Dyson /* 909bfbbc4aaSJason Evans * Connect to process address space for user program. 910fd3bf775SJohn Dyson */ 911fd3bf775SJohn Dyson if (userp != curcp) { 912fd3bf775SJohn Dyson /* 913bfbbc4aaSJason Evans * Save the current address space that we are 914bfbbc4aaSJason Evans * connected to. 915fd3bf775SJohn Dyson */ 916fd3bf775SJohn Dyson tmpvm = mycp->p_vmspace; 917bfbbc4aaSJason Evans 918fd3bf775SJohn Dyson /* 919bfbbc4aaSJason Evans * Point to the new user address space, and 920bfbbc4aaSJason Evans * refer to it. 921fd3bf775SJohn Dyson */ 922fd3bf775SJohn Dyson mycp->p_vmspace = userp->p_vmspace; 9231a276a3fSAlan Cox atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1); 924bfbbc4aaSJason Evans 925bfbbc4aaSJason Evans /* Activate the new mapping. */ 926079b7badSJulian Elischer pmap_activate(FIRST_THREAD_IN_PROC(mycp)); 927bfbbc4aaSJason Evans 928fd3bf775SJohn Dyson /* 929bfbbc4aaSJason Evans * If the old address space wasn't the daemons 930bfbbc4aaSJason Evans * own address space, then we need to remove the 931bfbbc4aaSJason Evans * daemon's reference from the other process 932bfbbc4aaSJason Evans * that it was acting on behalf of. 933fd3bf775SJohn Dyson */ 9342244ea07SJohn Dyson if (tmpvm != myvm) { 9352244ea07SJohn Dyson vmspace_free(tmpvm); 9362244ea07SJohn Dyson } 937fd3bf775SJohn Dyson curcp = userp; 9382244ea07SJohn Dyson } 9392244ea07SJohn Dyson 940fd3bf775SJohn Dyson ki = userp->p_aioinfo; 94184af4da6SJohn Dyson 942bfbbc4aaSJason Evans /* Do the I/O function. */ 9432244ea07SJohn Dyson aio_process(aiocbe); 94484af4da6SJohn Dyson 9459b84335cSDavid Xu mtx_lock(&aio_job_mtx); 9469b84335cSDavid Xu /* Decrement the active job count. 
*/ 9479b84335cSDavid Xu ki->kaio_active_count--; 9489b84335cSDavid Xu mtx_unlock(&aio_job_mtx); 9499b84335cSDavid Xu 9501ce91824SDavid Xu PROC_LOCK(userp); 9511ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist); 95269cd28daSDoug Ambrisko aio_bio_done_notify(userp, aiocbe, DONE_QUEUE); 9532244ea07SJohn Dyson if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) { 9542244ea07SJohn Dyson wakeup(aiocbe); 9552244ea07SJohn Dyson aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN; 9562244ea07SJohn Dyson } 9571ce91824SDavid Xu PROC_UNLOCK(userp); 9581ce91824SDavid Xu 9591ce91824SDavid Xu mtx_lock(&aio_job_mtx); 9602244ea07SJohn Dyson } 9612244ea07SJohn Dyson 962fd3bf775SJohn Dyson /* 963bfbbc4aaSJason Evans * Disconnect from user address space. 964fd3bf775SJohn Dyson */ 965fd3bf775SJohn Dyson if (curcp != mycp) { 9661ce91824SDavid Xu 9671ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 9681ce91824SDavid Xu 969bfbbc4aaSJason Evans /* Get the user address space to disconnect from. */ 970fd3bf775SJohn Dyson tmpvm = mycp->p_vmspace; 971bfbbc4aaSJason Evans 972bfbbc4aaSJason Evans /* Get original address space for daemon. */ 973fd3bf775SJohn Dyson mycp->p_vmspace = myvm; 974bfbbc4aaSJason Evans 975bfbbc4aaSJason Evans /* Activate the daemon's address space. */ 976079b7badSJulian Elischer pmap_activate(FIRST_THREAD_IN_PROC(mycp)); 977bfbbc4aaSJason Evans #ifdef DIAGNOSTIC 978bfbbc4aaSJason Evans if (tmpvm == myvm) { 979bfbbc4aaSJason Evans printf("AIOD: vmspace problem -- %d\n", 980bfbbc4aaSJason Evans mycp->p_pid); 981bfbbc4aaSJason Evans } 98211783b14SJohn Dyson #endif 983bfbbc4aaSJason Evans /* Remove our vmspace reference. */ 9842244ea07SJohn Dyson vmspace_free(tmpvm); 985bfbbc4aaSJason Evans 986fd3bf775SJohn Dyson curcp = mycp; 9871ce91824SDavid Xu 9881ce91824SDavid Xu mtx_lock(&aio_job_mtx); 9891ce91824SDavid Xu /* 9901ce91824SDavid Xu * We have to restart to avoid race, we only sleep if 9911ce91824SDavid Xu * no job can be selected, that should be 9921ce91824SDavid Xu * curcp == mycp. 9931ce91824SDavid Xu */ 9941ce91824SDavid Xu continue; 995fd3bf775SJohn Dyson } 996fd3bf775SJohn Dyson 9971ce91824SDavid Xu mtx_assert(&aio_job_mtx, MA_OWNED); 9981ce91824SDavid Xu 999fd3bf775SJohn Dyson TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 1000b40ce416SJulian Elischer aiop->aiothreadflags |= AIOP_FREE; 1001fd3bf775SJohn Dyson 1002fd3bf775SJohn Dyson /* 1003bfbbc4aaSJason Evans * If daemon is inactive for a long time, allow it to exit, 1004bfbbc4aaSJason Evans * thereby freeing resources. 
1005fd3bf775SJohn Dyson */ 10061ce91824SDavid Xu if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy", 10071ce91824SDavid Xu aiod_lifetime)) { 1008c3869e4bSAlan Cox if (TAILQ_EMPTY(&aio_jobs)) { 1009b40ce416SJulian Elischer if ((aiop->aiothreadflags & AIOP_FREE) && 101084af4da6SJohn Dyson (num_aio_procs > target_aio_procs)) { 1011fd3bf775SJohn Dyson TAILQ_REMOVE(&aio_freeproc, aiop, list); 101284af4da6SJohn Dyson num_aio_procs--; 10131ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 10141ce91824SDavid Xu uma_zfree(aiop_zone, aiop); 10151ce91824SDavid Xu free_unr(aiod_unr, id); 1016bfbbc4aaSJason Evans #ifdef DIAGNOSTIC 1017bfbbc4aaSJason Evans if (mycp->p_vmspace->vm_refcnt <= 1) { 1018bfbbc4aaSJason Evans printf("AIOD: bad vm refcnt for" 1019bfbbc4aaSJason Evans " exiting daemon: %d\n", 1020fd3bf775SJohn Dyson mycp->p_vmspace->vm_refcnt); 1021bfbbc4aaSJason Evans } 102211783b14SJohn Dyson #endif 1023c9a970a7SAlan Cox kthread_exit(0); 1024fd3bf775SJohn Dyson } 10252244ea07SJohn Dyson } 10262244ea07SJohn Dyson } 10272244ea07SJohn Dyson } 10281ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 10291ce91824SDavid Xu panic("shouldn't be here\n"); 10301ce91824SDavid Xu } 10312244ea07SJohn Dyson 10322244ea07SJohn Dyson /* 1033bfbbc4aaSJason Evans * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The 1034bfbbc4aaSJason Evans * AIO daemon modifies its environment itself. 10352244ea07SJohn Dyson */ 10362244ea07SJohn Dyson static int 10371ce91824SDavid Xu aio_newproc(int *start) 1038fd3bf775SJohn Dyson { 10392244ea07SJohn Dyson int error; 1040c9a970a7SAlan Cox struct proc *p; 10411ce91824SDavid Xu int id; 10422244ea07SJohn Dyson 10431ce91824SDavid Xu id = alloc_unr(aiod_unr); 10441ce91824SDavid Xu error = kthread_create(aio_daemon, (void *)(intptr_t)id, &p, 10451ce91824SDavid Xu RFNOWAIT, 0, "aiod%d", id); 10461ce91824SDavid Xu if (error == 0) { 1047fd3bf775SJohn Dyson /* 10481ce91824SDavid Xu * Wait until daemon is started. 1049fd3bf775SJohn Dyson */ 10501ce91824SDavid Xu sema_wait(&aio_newproc_sem); 10511ce91824SDavid Xu mtx_lock(&aio_job_mtx); 105284af4da6SJohn Dyson num_aio_procs++; 10531ce91824SDavid Xu if (start != NULL) 10547f34b521SDavid Xu (*start)--; 10551ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 10561ce91824SDavid Xu } else { 10571ce91824SDavid Xu free_unr(aiod_unr, id); 10581ce91824SDavid Xu } 1059ac41f2efSAlfred Perlstein return (error); 10602244ea07SJohn Dyson } 10612244ea07SJohn Dyson 10622244ea07SJohn Dyson /* 106388ed460eSAlan Cox * Try the high-performance, low-overhead physio method for eligible 106488ed460eSAlan Cox * VCHR devices. This method doesn't use an aio helper thread, and 106588ed460eSAlan Cox * thus has very low overhead. 106688ed460eSAlan Cox * 1067a9bf5e37SDavid Xu * Assumes that the caller, aio_aqueue(), has incremented the file 106888ed460eSAlan Cox * structure's reference count, preventing its deallocation for the 106988ed460eSAlan Cox * duration of this call. 
1070fd3bf775SJohn Dyson */ 107188ed460eSAlan Cox static int 1072bfbbc4aaSJason Evans aio_qphysio(struct proc *p, struct aiocblist *aiocbe) 1073fd3bf775SJohn Dyson { 1074fd3bf775SJohn Dyson struct aiocb *cb; 1075fd3bf775SJohn Dyson struct file *fp; 1076fd3bf775SJohn Dyson struct buf *bp; 1077fd3bf775SJohn Dyson struct vnode *vp; 1078fd3bf775SJohn Dyson struct kaioinfo *ki; 10791ce91824SDavid Xu struct aioliojob *lj; 10801ce91824SDavid Xu int error; 1081fd3bf775SJohn Dyson 108284af4da6SJohn Dyson cb = &aiocbe->uaiocb; 10839fbd7ccfSAlan Cox fp = aiocbe->fd_file; 1084fd3bf775SJohn Dyson 1085008626c3SPoul-Henning Kamp if (fp->f_type != DTYPE_VNODE) 1086008626c3SPoul-Henning Kamp return (-1); 1087fd3bf775SJohn Dyson 10883b6d9652SPoul-Henning Kamp vp = fp->f_vnode; 108911783b14SJohn Dyson 1090f582ac06SBrian Feldman /* 1091f582ac06SBrian Feldman * If its not a disk, we don't want to return a positive error. 1092f582ac06SBrian Feldman * It causes the aio code to not fall through to try the thread 1093f582ac06SBrian Feldman * way when you're talking to a regular file. 1094f582ac06SBrian Feldman */ 1095f582ac06SBrian Feldman if (!vn_isdisk(vp, &error)) { 1096f582ac06SBrian Feldman if (error == ENOTBLK) 1097f582ac06SBrian Feldman return (-1); 1098f582ac06SBrian Feldman else 1099ba4ad1fcSPoul-Henning Kamp return (error); 1100f582ac06SBrian Feldman } 1101fd3bf775SJohn Dyson 11025d9d81e7SPoul-Henning Kamp if (cb->aio_nbytes % vp->v_bufobj.bo_bsize) 1103008626c3SPoul-Henning Kamp return (-1); 1104fd3bf775SJohn Dyson 110569cd28daSDoug Ambrisko if (cb->aio_nbytes > vp->v_rdev->si_iosize_max) 110669cd28daSDoug Ambrisko return (-1); 110769cd28daSDoug Ambrisko 110813644654SAlan Cox if (cb->aio_nbytes > 110913644654SAlan Cox MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK)) 1110008626c3SPoul-Henning Kamp return (-1); 1111fd3bf775SJohn Dyson 1112fd3bf775SJohn Dyson ki = p->p_aioinfo; 1113008626c3SPoul-Henning Kamp if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) 1114008626c3SPoul-Henning Kamp return (-1); 1115fd3bf775SJohn Dyson 1116bfbbc4aaSJason Evans /* Create and build a buffer header for a transfer. */ 11171c7c3c6aSMatthew Dillon bp = (struct buf *)getpbuf(NULL); 111813644654SAlan Cox BUF_KERNPROC(bp); 1119fd3bf775SJohn Dyson 11201ce91824SDavid Xu PROC_LOCK(p); 11211ce91824SDavid Xu ki->kaio_count++; 11221ce91824SDavid Xu ki->kaio_buffer_count++; 11231ce91824SDavid Xu lj = aiocbe->lio; 11241ce91824SDavid Xu if (lj) 11251ce91824SDavid Xu lj->lioj_count++; 11261ce91824SDavid Xu PROC_UNLOCK(p); 11271ce91824SDavid Xu 1128fd3bf775SJohn Dyson /* 1129bfbbc4aaSJason Evans * Get a copy of the kva from the physical buffer. 1130fd3bf775SJohn Dyson */ 1131ef38cda1SAlan Cox error = 0; 1132fd3bf775SJohn Dyson 1133fd3bf775SJohn Dyson bp->b_bcount = cb->aio_nbytes; 1134fd3bf775SJohn Dyson bp->b_bufsize = cb->aio_nbytes; 1135fd3bf775SJohn Dyson bp->b_iodone = aio_physwakeup; 1136fd3bf775SJohn Dyson bp->b_saveaddr = bp->b_data; 113791369fc7SAlan Cox bp->b_data = (void *)(uintptr_t)cb->aio_buf; 1138a44ca4f0SHidetoshi Shimokawa bp->b_offset = cb->aio_offset; 1139a44ca4f0SHidetoshi Shimokawa bp->b_iooffset = cb->aio_offset; 1140fd3bf775SJohn Dyson bp->b_blkno = btodb(cb->aio_offset); 114106363906SAlan Cox bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ; 1142fd3bf775SJohn Dyson 11432d5c7e45SMatthew Dillon /* 11442d5c7e45SMatthew Dillon * Bring buffer into kernel space. 
11452d5c7e45SMatthew Dillon */ 11462d5c7e45SMatthew Dillon if (vmapbuf(bp) < 0) { 11472d5c7e45SMatthew Dillon error = EFAULT; 11482d5c7e45SMatthew Dillon goto doerror; 11492d5c7e45SMatthew Dillon } 1150fd3bf775SJohn Dyson 11511ce91824SDavid Xu PROC_LOCK(p); 1152fd3bf775SJohn Dyson aiocbe->bp = bp; 11538edbaf85SHidetoshi Shimokawa bp->b_caller1 = (void *)aiocbe; 115484af4da6SJohn Dyson TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist); 11551ce91824SDavid Xu TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist); 1156fd3bf775SJohn Dyson aiocbe->jobstate = JOBST_JOBQBUF; 115784af4da6SJohn Dyson cb->_aiocb_private.status = cb->aio_nbytes; 11581ce91824SDavid Xu PROC_UNLOCK(p); 11591ce91824SDavid Xu 11601ce91824SDavid Xu atomic_add_int(&num_queue_count, 1); 11611ce91824SDavid Xu atomic_add_int(&num_buf_aio, 1); 11621ce91824SDavid Xu 1163fd3bf775SJohn Dyson bp->b_error = 0; 1164fd3bf775SJohn Dyson 11651ce91824SDavid Xu TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe); 1166bfbbc4aaSJason Evans 1167bfbbc4aaSJason Evans /* Perform transfer. */ 11686afb3b1cSPoul-Henning Kamp dev_strategy(vp->v_rdev, bp); 1169ac41f2efSAlfred Perlstein return (0); 1170fd3bf775SJohn Dyson 1171fd3bf775SJohn Dyson doerror: 11721ce91824SDavid Xu PROC_LOCK(p); 11731ce91824SDavid Xu ki->kaio_count--; 1174fd3bf775SJohn Dyson ki->kaio_buffer_count--; 1175bfbbc4aaSJason Evans if (lj) 11761ce91824SDavid Xu lj->lioj_count--; 117784af4da6SJohn Dyson aiocbe->bp = NULL; 11781ce91824SDavid Xu PROC_UNLOCK(p); 11791c7c3c6aSMatthew Dillon relpbuf(bp, NULL); 1180fd3bf775SJohn Dyson return (error); 1181fd3bf775SJohn Dyson } 1182fd3bf775SJohn Dyson 1183fd3bf775SJohn Dyson /* 1184bfbbc4aaSJason Evans * Wake up aio requests that may be serviceable now. 1185bfbbc4aaSJason Evans */ 118648dac059SAlan Cox static void 118721d56e9cSAlfred Perlstein aio_swake_cb(struct socket *so, struct sockbuf *sb) 1188bfbbc4aaSJason Evans { 1189bfbbc4aaSJason Evans struct aiocblist *cb, *cbn; 1190bfbbc4aaSJason Evans struct proc *p; 1191bfbbc4aaSJason Evans struct kaioinfo *ki = NULL; 1192bfbbc4aaSJason Evans int opcode, wakecount = 0; 1193b40ce416SJulian Elischer struct aiothreadlist *aiop; 1194bfbbc4aaSJason Evans 1195bfbbc4aaSJason Evans if (sb == &so->so_snd) { 1196bfbbc4aaSJason Evans opcode = LIO_WRITE; 11979535efc0SRobert Watson SOCKBUF_LOCK(&so->so_snd); 1198bfbbc4aaSJason Evans so->so_snd.sb_flags &= ~SB_AIO; 11999535efc0SRobert Watson SOCKBUF_UNLOCK(&so->so_snd); 1200bfbbc4aaSJason Evans } else { 1201bfbbc4aaSJason Evans opcode = LIO_READ; 12029535efc0SRobert Watson SOCKBUF_LOCK(&so->so_rcv); 1203bfbbc4aaSJason Evans so->so_rcv.sb_flags &= ~SB_AIO; 12049535efc0SRobert Watson SOCKBUF_UNLOCK(&so->so_rcv); 1205bfbbc4aaSJason Evans } 1206bfbbc4aaSJason Evans 12071ce91824SDavid Xu mtx_lock(&aio_sock_mtx); 12082a522eb9SJohn Baldwin TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) { 1209bfbbc4aaSJason Evans if (opcode == cb->uaiocb.aio_lio_opcode) { 12108c0d9af5SDavid Xu if (cb->jobstate != JOBST_JOBQSOCK) 12111ce91824SDavid Xu panic("invalid queue value"); 1212bfbbc4aaSJason Evans p = cb->userproc; 1213bfbbc4aaSJason Evans ki = p->p_aioinfo; 1214bfbbc4aaSJason Evans TAILQ_REMOVE(&so->so_aiojobq, cb, list); 12151ce91824SDavid Xu PROC_LOCK(p); 1216bfbbc4aaSJason Evans TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist); 12171ce91824SDavid Xu /* 12181ce91824SDavid Xu * XXX check AIO_RUNDOWN, and don't put on 12191ce91824SDavid Xu * jobqueue if it was set. 
12201ce91824SDavid Xu */ 1221bfbbc4aaSJason Evans TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist); 12221ce91824SDavid Xu cb->jobstate = JOBST_JOBQGLOBAL; 12231ce91824SDavid Xu mtx_lock(&aio_job_mtx); 12241ce91824SDavid Xu TAILQ_INSERT_TAIL(&aio_jobs, cb, list); 12251ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 12261ce91824SDavid Xu PROC_UNLOCK(p); 1227bfbbc4aaSJason Evans wakecount++; 1228bfbbc4aaSJason Evans } 1229bfbbc4aaSJason Evans } 12301ce91824SDavid Xu mtx_unlock(&aio_sock_mtx); 1231bfbbc4aaSJason Evans 1232bfbbc4aaSJason Evans while (wakecount--) { 12331ce91824SDavid Xu mtx_lock(&aio_job_mtx); 12342a522eb9SJohn Baldwin if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1235bfbbc4aaSJason Evans TAILQ_REMOVE(&aio_freeproc, aiop, list); 1236b40ce416SJulian Elischer aiop->aiothreadflags &= ~AIOP_FREE; 1237b40ce416SJulian Elischer wakeup(aiop->aiothread); 1238bfbbc4aaSJason Evans } 12391ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 1240bfbbc4aaSJason Evans } 1241bfbbc4aaSJason Evans } 1242bfbbc4aaSJason Evans 1243bfbbc4aaSJason Evans /* 1244bfbbc4aaSJason Evans * Queue a new AIO request. Choosing either the threaded or direct physio VCHR 1245bfbbc4aaSJason Evans * technique is done in this code. 12462244ea07SJohn Dyson */ 12472244ea07SJohn Dyson static int 1248a9bf5e37SDavid Xu aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj, 12490972628aSDavid Xu int type, int oldsigev) 1250fd3bf775SJohn Dyson { 1251b40ce416SJulian Elischer struct proc *p = td->td_proc; 12522244ea07SJohn Dyson struct file *fp; 1253bfbbc4aaSJason Evans struct socket *so; 12542244ea07SJohn Dyson struct aiocblist *aiocbe; 1255b40ce416SJulian Elischer struct aiothreadlist *aiop; 12562244ea07SJohn Dyson struct kaioinfo *ki; 1257c6fa9f78SAlan Cox struct kevent kev; 1258c6fa9f78SAlan Cox struct kqueue *kq; 1259c6fa9f78SAlan Cox struct file *kq_fp; 1260576c004fSAlfred Perlstein struct sockbuf *sb; 12611ce91824SDavid Xu int opcode; 12621ce91824SDavid Xu int error; 12631ce91824SDavid Xu int fd; 12641ce91824SDavid Xu int jid; 12652244ea07SJohn Dyson 1266a9bf5e37SDavid Xu if (p->p_aioinfo == NULL) 1267a9bf5e37SDavid Xu aio_init_aioinfo(p); 1268a9bf5e37SDavid Xu 12691ce91824SDavid Xu ki = p->p_aioinfo; 12701ce91824SDavid Xu 1271a9bf5e37SDavid Xu suword(&job->_aiocb_private.status, -1); 1272a9bf5e37SDavid Xu suword(&job->_aiocb_private.error, 0); 1273a9bf5e37SDavid Xu suword(&job->_aiocb_private.kernelinfo, -1); 1274a9bf5e37SDavid Xu 1275a9bf5e37SDavid Xu if (num_queue_count >= max_queue_count || 1276a9bf5e37SDavid Xu ki->kaio_count >= ki->kaio_qallowed_count) { 1277a9bf5e37SDavid Xu suword(&job->_aiocb_private.error, EAGAIN); 1278a9bf5e37SDavid Xu return (EAGAIN); 1279a9bf5e37SDavid Xu } 1280a9bf5e37SDavid Xu 12811ce91824SDavid Xu aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO); 1282fd3bf775SJohn Dyson aiocbe->inputcharge = 0; 1283fd3bf775SJohn Dyson aiocbe->outputcharge = 0; 12841ce91824SDavid Xu knlist_init(&aiocbe->klist, &p->p_mtx, NULL, NULL, NULL); 1285fd3bf775SJohn Dyson 12860972628aSDavid Xu if (oldsigev) { 12870972628aSDavid Xu bzero(&aiocbe->uaiocb, sizeof(struct aiocb)); 12880972628aSDavid Xu error = copyin(job, &aiocbe->uaiocb, sizeof(struct oaiocb)); 12890972628aSDavid Xu bcopy(&aiocbe->uaiocb.__spare__, &aiocbe->uaiocb.aio_sigevent, 12900972628aSDavid Xu sizeof(struct osigevent)); 12910972628aSDavid Xu } else { 12920972628aSDavid Xu error = copyin(job, &aiocbe->uaiocb, sizeof(struct aiocb)); 12930972628aSDavid Xu } 12942244ea07SJohn Dyson if (error) { 1295fd3bf775SJohn Dyson 
suword(&job->_aiocb_private.error, error); 1296c897b813SJeff Roberson uma_zfree(aiocb_zone, aiocbe); 1297ac41f2efSAlfred Perlstein return (error); 12982244ea07SJohn Dyson } 129968d71118SDavid Xu 130068d71118SDavid Xu if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT && 130168d71118SDavid Xu aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL && 130268d71118SDavid Xu aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID && 130368d71118SDavid Xu aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) { 130468d71118SDavid Xu suword(&job->_aiocb_private.error, EINVAL); 130568d71118SDavid Xu uma_zfree(aiocb_zone, aiocbe); 130668d71118SDavid Xu return (EINVAL); 130768d71118SDavid Xu } 130868d71118SDavid Xu 13094c0fb2cfSDavid Xu if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || 13104c0fb2cfSDavid Xu aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) && 13112f3cf918SAlfred Perlstein !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) { 1312c897b813SJeff Roberson uma_zfree(aiocb_zone, aiocbe); 1313ac41f2efSAlfred Perlstein return (EINVAL); 13142f3cf918SAlfred Perlstein } 13152244ea07SJohn Dyson 13164c0fb2cfSDavid Xu ksiginfo_init(&aiocbe->ksi); 13174c0fb2cfSDavid Xu 1318bfbbc4aaSJason Evans /* Save userspace address of the job info. */ 131911783b14SJohn Dyson aiocbe->uuaiocb = job; 132011783b14SJohn Dyson 1321bfbbc4aaSJason Evans /* Get the opcode. */ 1322bfbbc4aaSJason Evans if (type != LIO_NOP) 1323a624e84fSJohn Dyson aiocbe->uaiocb.aio_lio_opcode = type; 1324a624e84fSJohn Dyson opcode = aiocbe->uaiocb.aio_lio_opcode; 13252244ea07SJohn Dyson 13262a522eb9SJohn Baldwin /* Fetch the file object for the specified file descriptor. */ 13272244ea07SJohn Dyson fd = aiocbe->uaiocb.aio_fildes; 13282a522eb9SJohn Baldwin switch (opcode) { 13292a522eb9SJohn Baldwin case LIO_WRITE: 13302a522eb9SJohn Baldwin error = fget_write(td, fd, &fp); 13312a522eb9SJohn Baldwin break; 13322a522eb9SJohn Baldwin case LIO_READ: 13332a522eb9SJohn Baldwin error = fget_read(td, fd, &fp); 13342a522eb9SJohn Baldwin break; 13352a522eb9SJohn Baldwin default: 13362a522eb9SJohn Baldwin error = fget(td, fd, &fp); 13372a522eb9SJohn Baldwin } 13382a522eb9SJohn Baldwin if (error) { 1339c897b813SJeff Roberson uma_zfree(aiocb_zone, aiocbe); 13402244ea07SJohn Dyson suword(&job->_aiocb_private.error, EBADF); 1341af56abaaSJohn Baldwin return (error); 13422244ea07SJohn Dyson } 13432a522eb9SJohn Baldwin aiocbe->fd_file = fp; 13442244ea07SJohn Dyson 13452244ea07SJohn Dyson if (aiocbe->uaiocb.aio_offset == -1LL) { 1346ae124fc4SAlan Cox error = EINVAL; 1347ae124fc4SAlan Cox goto aqueue_fail; 13482244ea07SJohn Dyson } 13491ce91824SDavid Xu 13501ce91824SDavid Xu mtx_lock(&aio_job_mtx); 13511ce91824SDavid Xu jid = jobrefid; 13522d2f8ae7SBruce Evans if (jobrefid == LONG_MAX) 1353fd3bf775SJohn Dyson jobrefid = 1; 13542d2f8ae7SBruce Evans else 13552d2f8ae7SBruce Evans jobrefid++; 13561ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 13571ce91824SDavid Xu 13581ce91824SDavid Xu error = suword(&job->_aiocb_private.kernelinfo, jid); 13591ce91824SDavid Xu if (error) { 13601ce91824SDavid Xu error = EINVAL; 13611ce91824SDavid Xu goto aqueue_fail; 13621ce91824SDavid Xu } 13631ce91824SDavid Xu aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid; 13642244ea07SJohn Dyson 13652244ea07SJohn Dyson if (opcode == LIO_NOP) { 1366a5c0b1c0SAlan Cox fdrop(fp, td); 1367c897b813SJeff Roberson uma_zfree(aiocb_zone, aiocbe); 1368ac41f2efSAlfred Perlstein return (0); 13692244ea07SJohn Dyson } 1370fd3bf775SJohn Dyson if 
((opcode != LIO_READ) && (opcode != LIO_WRITE)) { 1371ae124fc4SAlan Cox error = EINVAL; 1372ae124fc4SAlan Cox goto aqueue_fail; 13732244ea07SJohn Dyson } 13742244ea07SJohn Dyson 1375c6fa9f78SAlan Cox if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) { 1376c6fa9f78SAlan Cox kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue; 1377bbe7bbdfSAlan Cox } else 1378cb679c38SJonathan Lemon goto no_kqueue; 13792a522eb9SJohn Baldwin error = fget(td, (u_int)kev.ident, &kq_fp); 13802a522eb9SJohn Baldwin if (error) 13812a522eb9SJohn Baldwin goto aqueue_fail; 13822a522eb9SJohn Baldwin if (kq_fp->f_type != DTYPE_KQUEUE) { 13832a522eb9SJohn Baldwin fdrop(kq_fp, td); 1384cb679c38SJonathan Lemon error = EBADF; 1385cb679c38SJonathan Lemon goto aqueue_fail; 1386cb679c38SJonathan Lemon } 138748e3128bSMatthew Dillon kq = kq_fp->f_data; 1388b46f1c55SAlan Cox kev.ident = (uintptr_t)aiocbe->uuaiocb; 1389cb679c38SJonathan Lemon kev.filter = EVFILT_AIO; 1390cb679c38SJonathan Lemon kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1; 1391b46f1c55SAlan Cox kev.data = (intptr_t)aiocbe; 13921ce91824SDavid Xu kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr; 1393ad3b9257SJohn-Mark Gurney error = kqueue_register(kq, &kev, td, 1); 13942a522eb9SJohn Baldwin fdrop(kq_fp, td); 1395cb679c38SJonathan Lemon aqueue_fail: 1396cb679c38SJonathan Lemon if (error) { 1397a5c0b1c0SAlan Cox fdrop(fp, td); 1398c897b813SJeff Roberson uma_zfree(aiocb_zone, aiocbe); 1399cb679c38SJonathan Lemon suword(&job->_aiocb_private.error, error); 1400279d7226SMatthew Dillon goto done; 1401cb679c38SJonathan Lemon } 1402cb679c38SJonathan Lemon no_kqueue: 1403cb679c38SJonathan Lemon 1404fd3bf775SJohn Dyson suword(&job->_aiocb_private.error, EINPROGRESS); 1405fd3bf775SJohn Dyson aiocbe->uaiocb._aiocb_private.error = EINPROGRESS; 14062244ea07SJohn Dyson aiocbe->userproc = p; 1407f8f750c5SRobert Watson aiocbe->cred = crhold(td->td_ucred); 14082244ea07SJohn Dyson aiocbe->jobflags = 0; 140984af4da6SJohn Dyson aiocbe->lio = lj; 14102244ea07SJohn Dyson 1411bfbbc4aaSJason Evans if (fp->f_type == DTYPE_SOCKET) { 1412bfbbc4aaSJason Evans /* 1413bfbbc4aaSJason Evans * Alternate queueing for socket ops: Reach down into the 1414bfbbc4aaSJason Evans * descriptor to get the socket data. Then check to see if the 1415bfbbc4aaSJason Evans * socket is ready to be read or written (based on the requested 1416bfbbc4aaSJason Evans * operation). 1417bfbbc4aaSJason Evans * 1418bfbbc4aaSJason Evans * If it is not ready for io, then queue the aiocbe on the 1419bfbbc4aaSJason Evans * socket, and set the flags so we get a call when sbnotify() 1420bfbbc4aaSJason Evans * happens. 1421576c004fSAlfred Perlstein * 1422576c004fSAlfred Perlstein * Note if opcode is neither LIO_WRITE nor LIO_READ we lock 1423576c004fSAlfred Perlstein * and unlock the snd sockbuf for no reason. 1424bfbbc4aaSJason Evans */ 142548e3128bSMatthew Dillon so = fp->f_data; 1426576c004fSAlfred Perlstein sb = (opcode == LIO_READ) ? 
&so->so_rcv : &so->so_snd;
1427576c004fSAlfred Perlstein SOCKBUF_LOCK(sb);
1428bfbbc4aaSJason Evans if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1429bfbbc4aaSJason Evans LIO_WRITE) && (!sowriteable(so)))) {
14301ce91824SDavid Xu mtx_lock(&aio_sock_mtx);
1431bfbbc4aaSJason Evans TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
14321ce91824SDavid Xu mtx_unlock(&aio_sock_mtx);
14331ce91824SDavid Xu
1434576c004fSAlfred Perlstein sb->sb_flags |= SB_AIO;
14351ce91824SDavid Xu PROC_LOCK(p);
14361ce91824SDavid Xu TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
14371ce91824SDavid Xu TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
14381ce91824SDavid Xu aiocbe->jobstate = JOBST_JOBQSOCK;
14391ce91824SDavid Xu ki->kaio_count++;
14401ce91824SDavid Xu if (lj)
14411ce91824SDavid Xu lj->lioj_count++;
14421ce91824SDavid Xu PROC_UNLOCK(p);
1443576c004fSAlfred Perlstein SOCKBUF_UNLOCK(sb);
14441ce91824SDavid Xu atomic_add_int(&num_queue_count, 1);
1445279d7226SMatthew Dillon error = 0;
1446279d7226SMatthew Dillon goto done;
1447bfbbc4aaSJason Evans }
1448576c004fSAlfred Perlstein SOCKBUF_UNLOCK(sb);
1449bfbbc4aaSJason Evans }
1450bfbbc4aaSJason Evans
1451bfbbc4aaSJason Evans if ((error = aio_qphysio(p, aiocbe)) == 0)
1452279d7226SMatthew Dillon goto done;
14531ce91824SDavid Xu #if 0
1454279d7226SMatthew Dillon if (error > 0) {
1455fd3bf775SJohn Dyson aiocbe->uaiocb._aiocb_private.error = error;
1456fd3bf775SJohn Dyson suword(&job->_aiocb_private.error, error);
1457279d7226SMatthew Dillon goto done;
1458fd3bf775SJohn Dyson }
14591ce91824SDavid Xu #endif
1460bfbbc4aaSJason Evans /* No buffer for daemon I/O. */
146184af4da6SJohn Dyson aiocbe->bp = NULL;
146284af4da6SJohn Dyson
14631ce91824SDavid Xu PROC_LOCK(p);
14641ce91824SDavid Xu ki->kaio_count++;
1465bfbbc4aaSJason Evans if (lj)
14661ce91824SDavid Xu lj->lioj_count++;
1467fd3bf775SJohn Dyson TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
14681ce91824SDavid Xu TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
1469fd3bf775SJohn Dyson
14701ce91824SDavid Xu mtx_lock(&aio_job_mtx);
14711ce91824SDavid Xu TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
14721ce91824SDavid Xu aiocbe->jobstate = JOBST_JOBQGLOBAL;
14731ce91824SDavid Xu PROC_UNLOCK(p);
14741ce91824SDavid Xu
14751ce91824SDavid Xu atomic_add_int(&num_queue_count, 1);
1476fd3bf775SJohn Dyson
1477fd3bf775SJohn Dyson /*
1478bfbbc4aaSJason Evans * If we don't have a free AIO process, and we are below our quota, then
1479bfbbc4aaSJason Evans * start one. Otherwise, depend on the subsequent I/O completions to
1480bfbbc4aaSJason Evans * pick up this job. If we don't successfully create the new process
1481bfbbc4aaSJason Evans * (thread) due to resource issues, we return an error for now (EAGAIN),
1482bfbbc4aaSJason Evans * which is likely not the correct thing to do.
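 *
 * For reference, resource exhaustion in this path shows up in userland
 * as aio_read()/aio_write() failing with errno set to EAGAIN, so a
 * caller can back off and retry.  A minimal sketch, assuming "cb" is an
 * already filled-in struct aiocb and <errno.h>, <unistd.h> and <err.h>
 * are included:
 *
 *	while (aio_read(&cb) == -1) {
 *		if (errno != EAGAIN)
 *			err(1, "aio_read");
 *		usleep(1000);
 *	}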
1483fd3bf775SJohn Dyson */ 1484c6c191b2SAlan Cox retryproc: 14851ce91824SDavid Xu error = 0; 1486d254af07SMatthew Dillon if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 14872244ea07SJohn Dyson TAILQ_REMOVE(&aio_freeproc, aiop, list); 1488b40ce416SJulian Elischer aiop->aiothreadflags &= ~AIOP_FREE; 1489b40ce416SJulian Elischer wakeup(aiop->aiothread); 1490fd3bf775SJohn Dyson } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) && 1491fd3bf775SJohn Dyson ((ki->kaio_active_count + num_aio_resv_start) < 1492fd3bf775SJohn Dyson ki->kaio_maxactive_count)) { 1493fd3bf775SJohn Dyson num_aio_resv_start++; 14941ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 14951ce91824SDavid Xu error = aio_newproc(&num_aio_resv_start); 14961ce91824SDavid Xu mtx_lock(&aio_job_mtx); 14971ce91824SDavid Xu if (error) { 149884af4da6SJohn Dyson num_aio_resv_start--; 14992244ea07SJohn Dyson goto retryproc; 1500fd3bf775SJohn Dyson } 15011ce91824SDavid Xu } 15021ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 15031ce91824SDavid Xu 1504279d7226SMatthew Dillon done: 1505ac41f2efSAlfred Perlstein return (error); 15062244ea07SJohn Dyson } 15072244ea07SJohn Dyson 1508fd3bf775SJohn Dyson /* 1509bfbbc4aaSJason Evans * Support the aio_return system call, as a side-effect, kernel resources are 1510bfbbc4aaSJason Evans * released. 15112244ea07SJohn Dyson */ 15122244ea07SJohn Dyson int 1513b40ce416SJulian Elischer aio_return(struct thread *td, struct aio_return_args *uap) 1514fd3bf775SJohn Dyson { 1515b40ce416SJulian Elischer struct proc *p = td->td_proc; 15161ce91824SDavid Xu struct aiocblist *cb; 15171ce91824SDavid Xu struct aiocb *uaiocb; 15182244ea07SJohn Dyson struct kaioinfo *ki; 15191ce91824SDavid Xu int status, error; 15202244ea07SJohn Dyson 1521c0bf5caaSAlan Cox ki = p->p_aioinfo; 1522c0bf5caaSAlan Cox if (ki == NULL) 1523ac41f2efSAlfred Perlstein return (EINVAL); 15241ce91824SDavid Xu uaiocb = uap->aiocbp; 15253769f562SAlan Cox PROC_LOCK(p); 15261ce91824SDavid Xu TAILQ_FOREACH(cb, &ki->kaio_done, plist) { 15271ce91824SDavid Xu if (cb->uuaiocb == uaiocb) 1528c0bf5caaSAlan Cox break; 1529c0bf5caaSAlan Cox } 1530c0bf5caaSAlan Cox if (cb != NULL) { 15311ce91824SDavid Xu MPASS(cb->jobstate == JOBST_JOBFINISHED); 15321ce91824SDavid Xu status = cb->uaiocb._aiocb_private.status; 15331ce91824SDavid Xu error = cb->uaiocb._aiocb_private.error; 15341ce91824SDavid Xu td->td_retval[0] = status; 153569cd28daSDoug Ambrisko if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 153669cd28daSDoug Ambrisko p->p_stats->p_ru.ru_oublock += 153769cd28daSDoug Ambrisko cb->outputcharge; 153869cd28daSDoug Ambrisko cb->outputcharge = 0; 153969cd28daSDoug Ambrisko } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 154069cd28daSDoug Ambrisko p->p_stats->p_ru.ru_inblock += cb->inputcharge; 154169cd28daSDoug Ambrisko cb->inputcharge = 0; 154269cd28daSDoug Ambrisko } 154384af4da6SJohn Dyson aio_free_entry(cb); 15441ce91824SDavid Xu suword(&uaiocb->_aiocb_private.error, error); 15451ce91824SDavid Xu suword(&uaiocb->_aiocb_private.status, status); 15461ce91824SDavid Xu error = 0; 15471ce91824SDavid Xu } else 15481ce91824SDavid Xu error = EINVAL; 15491ce91824SDavid Xu PROC_UNLOCK(p); 15501ce91824SDavid Xu return (error); 15512244ea07SJohn Dyson } 15522244ea07SJohn Dyson 15532244ea07SJohn Dyson /* 1554bfbbc4aaSJason Evans * Allow a process to wakeup when any of the I/O requests are completed. 
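 *
 * Illustrative userland usage only (not part of the kernel build); the
 * open descriptor "fd" is assumed and the five-second timeout is
 * arbitrary:
 *
 *	static char buf[4096];
 *	struct aiocb cb = { .aio_fildes = fd, .aio_buf = buf,
 *	    .aio_nbytes = sizeof(buf), .aio_offset = 0 };
 *	const struct aiocb * const list[] = { &cb };
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	if (aio_suspend(list, 1, &ts) == -1)
 *		err(1, "aio_suspend");	(EAGAIN here means the timeout expired)
 *	if (aio_return(&cb) == -1)
 *		err(1, "aio_return");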
15552244ea07SJohn Dyson */ 15562244ea07SJohn Dyson int 1557b40ce416SJulian Elischer aio_suspend(struct thread *td, struct aio_suspend_args *uap) 1558fd3bf775SJohn Dyson { 1559b40ce416SJulian Elischer struct proc *p = td->td_proc; 15604a11ca4eSPoul-Henning Kamp struct timeval atv; 15612244ea07SJohn Dyson struct timespec ts; 15622244ea07SJohn Dyson struct aiocb *const *cbptr, *cbp; 15632244ea07SJohn Dyson struct kaioinfo *ki; 15641ce91824SDavid Xu struct aiocblist *cb, *cbfirst; 156511783b14SJohn Dyson struct aiocb **ujoblist; 15661ce91824SDavid Xu int njoblist; 15671ce91824SDavid Xu int error; 15681ce91824SDavid Xu int timo; 15691ce91824SDavid Xu int i; 15702244ea07SJohn Dyson 1571ae3b195fSTim J. Robbins if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX) 1572ac41f2efSAlfred Perlstein return (EINVAL); 15732244ea07SJohn Dyson 15742244ea07SJohn Dyson timo = 0; 15752244ea07SJohn Dyson if (uap->timeout) { 1576bfbbc4aaSJason Evans /* Get timespec struct. */ 1577bfbbc4aaSJason Evans if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) 1578ac41f2efSAlfred Perlstein return (error); 15792244ea07SJohn Dyson 15802244ea07SJohn Dyson if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000) 15812244ea07SJohn Dyson return (EINVAL); 15822244ea07SJohn Dyson 1583e3b3ba2dSDag-Erling Smørgrav TIMESPEC_TO_TIMEVAL(&atv, &ts); 15842244ea07SJohn Dyson if (itimerfix(&atv)) 15852244ea07SJohn Dyson return (EINVAL); 1586227ee8a1SPoul-Henning Kamp timo = tvtohz(&atv); 15872244ea07SJohn Dyson } 15882244ea07SJohn Dyson 15892244ea07SJohn Dyson ki = p->p_aioinfo; 15902244ea07SJohn Dyson if (ki == NULL) 1591ac41f2efSAlfred Perlstein return (EAGAIN); 15922244ea07SJohn Dyson 159384af4da6SJohn Dyson njoblist = 0; 1594a163d034SWarner Losh ujoblist = uma_zalloc(aiol_zone, M_WAITOK); 15952244ea07SJohn Dyson cbptr = uap->aiocbp; 15962244ea07SJohn Dyson 15972244ea07SJohn Dyson for (i = 0; i < uap->nent; i++) { 1598a739e09cSAlan Cox cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]); 159984af4da6SJohn Dyson if (cbp == 0) 160084af4da6SJohn Dyson continue; 160111783b14SJohn Dyson ujoblist[njoblist] = cbp; 160284af4da6SJohn Dyson njoblist++; 16032244ea07SJohn Dyson } 1604bfbbc4aaSJason Evans 160511783b14SJohn Dyson if (njoblist == 0) { 1606c897b813SJeff Roberson uma_zfree(aiol_zone, ujoblist); 1607ac41f2efSAlfred Perlstein return (0); 160811783b14SJohn Dyson } 16092244ea07SJohn Dyson 16103769f562SAlan Cox PROC_LOCK(p); 16111ce91824SDavid Xu for (;;) { 16121ce91824SDavid Xu cbfirst = NULL; 16131ce91824SDavid Xu error = 0; 16141ce91824SDavid Xu TAILQ_FOREACH(cb, &ki->kaio_all, allist) { 161584af4da6SJohn Dyson for (i = 0; i < njoblist; i++) { 16161ce91824SDavid Xu if (cb->uuaiocb == ujoblist[i]) { 16171ce91824SDavid Xu if (cbfirst == NULL) 16181ce91824SDavid Xu cbfirst = cb; 16191ce91824SDavid Xu if (cb->jobstate == JOBST_JOBFINISHED) 16201ce91824SDavid Xu goto RETURN; 162184af4da6SJohn Dyson } 162284af4da6SJohn Dyson } 162384af4da6SJohn Dyson } 16241ce91824SDavid Xu /* All tasks were finished. 
*/ 16251ce91824SDavid Xu if (cbfirst == NULL) 16261ce91824SDavid Xu break; 16272244ea07SJohn Dyson 1628fd3bf775SJohn Dyson ki->kaio_flags |= KAIO_WAKEUP; 16291ce91824SDavid Xu error = msleep(&p->p_aioinfo, &p->p_mtx, PRIBIO | PCATCH, 16301ce91824SDavid Xu "aiospn", timo); 16311ce91824SDavid Xu if (error == ERESTART) 16321ce91824SDavid Xu error = EINTR; 16331ce91824SDavid Xu if (error) 16341ce91824SDavid Xu break; 16352244ea07SJohn Dyson } 16361ce91824SDavid Xu RETURN: 16371ce91824SDavid Xu PROC_UNLOCK(p); 16381ce91824SDavid Xu uma_zfree(aiol_zone, ujoblist); 16391ce91824SDavid Xu return (error); 16402244ea07SJohn Dyson } 1641ee877a35SJohn Dyson 1642ee877a35SJohn Dyson /* 1643dd85920aSJason Evans * aio_cancel cancels any non-physio aio operations not currently in 1644dd85920aSJason Evans * progress. 1645ee877a35SJohn Dyson */ 1646ee877a35SJohn Dyson int 1647b40ce416SJulian Elischer aio_cancel(struct thread *td, struct aio_cancel_args *uap) 1648fd3bf775SJohn Dyson { 1649b40ce416SJulian Elischer struct proc *p = td->td_proc; 1650dd85920aSJason Evans struct kaioinfo *ki; 1651dd85920aSJason Evans struct aiocblist *cbe, *cbn; 1652dd85920aSJason Evans struct file *fp; 1653dd85920aSJason Evans struct socket *so; 16541ce91824SDavid Xu int error; 1655dd85920aSJason Evans int cancelled = 0; 1656dd85920aSJason Evans int notcancelled = 0; 1657dd85920aSJason Evans struct vnode *vp; 1658dd85920aSJason Evans 16592a522eb9SJohn Baldwin /* Lookup file object. */ 16601ce91824SDavid Xu error = fget(td, uap->fd, &fp); 16612a522eb9SJohn Baldwin if (error) 16622a522eb9SJohn Baldwin return (error); 1663dd85920aSJason Evans 16641ce91824SDavid Xu ki = p->p_aioinfo; 16651ce91824SDavid Xu if (ki == NULL) 16661ce91824SDavid Xu goto done; 16671ce91824SDavid Xu 1668dd85920aSJason Evans if (fp->f_type == DTYPE_VNODE) { 16693b6d9652SPoul-Henning Kamp vp = fp->f_vnode; 1670dd85920aSJason Evans if (vn_isdisk(vp, &error)) { 16712a522eb9SJohn Baldwin fdrop(fp, td); 1672b40ce416SJulian Elischer td->td_retval[0] = AIO_NOTCANCELED; 1673ac41f2efSAlfred Perlstein return (0); 1674dd85920aSJason Evans } 1675dd85920aSJason Evans } else if (fp->f_type == DTYPE_SOCKET) { 167648e3128bSMatthew Dillon so = fp->f_data; 16771ce91824SDavid Xu mtx_lock(&aio_sock_mtx); 16782a522eb9SJohn Baldwin TAILQ_FOREACH_SAFE(cbe, &so->so_aiojobq, list, cbn) { 16791ce91824SDavid Xu if (cbe->userproc == p && 16801ce91824SDavid Xu (uap->aiocbp == NULL || 16811ce91824SDavid Xu uap->aiocbp == cbe->uuaiocb)) { 1682dd85920aSJason Evans TAILQ_REMOVE(&so->so_aiojobq, cbe, list); 16831ce91824SDavid Xu PROC_LOCK(p); 1684dd85920aSJason Evans TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist); 16851ce91824SDavid Xu cbe->jobstate = JOBST_JOBRUNNING; 1686dd85920aSJason Evans cbe->uaiocb._aiocb_private.status = -1; 1687dd85920aSJason Evans cbe->uaiocb._aiocb_private.error = ECANCELED; 16881ce91824SDavid Xu aio_bio_done_notify(p, cbe, DONE_QUEUE); 16891ce91824SDavid Xu PROC_UNLOCK(p); 1690dd85920aSJason Evans cancelled++; 16911ce91824SDavid Xu if (uap->aiocbp != NULL) 1692dd85920aSJason Evans break; 1693dd85920aSJason Evans } 1694dd85920aSJason Evans } 16951ce91824SDavid Xu mtx_unlock(&aio_sock_mtx); 16961ce91824SDavid Xu if (cancelled && uap->aiocbp != NULL) { 16972a522eb9SJohn Baldwin fdrop(fp, td); 1698b40ce416SJulian Elischer td->td_retval[0] = AIO_CANCELED; 1699ac41f2efSAlfred Perlstein return (0); 1700dd85920aSJason Evans } 1701dd85920aSJason Evans } 1702dd85920aSJason Evans 17031ce91824SDavid Xu PROC_LOCK(p); 17042a522eb9SJohn Baldwin TAILQ_FOREACH_SAFE(cbe, 
&ki->kaio_jobqueue, plist, cbn) { 1705dd85920aSJason Evans if ((uap->fd == cbe->uaiocb.aio_fildes) && 1706dd85920aSJason Evans ((uap->aiocbp == NULL) || 1707dd85920aSJason Evans (uap->aiocbp == cbe->uuaiocb))) { 17081ce91824SDavid Xu mtx_lock(&aio_job_mtx); 1709dd85920aSJason Evans if (cbe->jobstate == JOBST_JOBQGLOBAL) { 1710dd85920aSJason Evans TAILQ_REMOVE(&aio_jobs, cbe, list); 17111ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 17121ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist); 1713dd85920aSJason Evans cbe->uaiocb._aiocb_private.status = -1; 1714dd85920aSJason Evans cbe->uaiocb._aiocb_private.error = ECANCELED; 17151ce91824SDavid Xu aio_bio_done_notify(p, cbe, DONE_QUEUE); 17161ce91824SDavid Xu cancelled++; 1717dd85920aSJason Evans } else { 17181ce91824SDavid Xu mtx_unlock(&aio_job_mtx); 1719dd85920aSJason Evans notcancelled++; 1720dd85920aSJason Evans } 1721dd85920aSJason Evans } 1722dd85920aSJason Evans } 17231ce91824SDavid Xu PROC_UNLOCK(p); 17241ce91824SDavid Xu 1725ad49abc0SAlan Cox done: 17262a522eb9SJohn Baldwin fdrop(fp, td); 1727dd85920aSJason Evans if (notcancelled) { 1728b40ce416SJulian Elischer td->td_retval[0] = AIO_NOTCANCELED; 1729ac41f2efSAlfred Perlstein return (0); 1730dd85920aSJason Evans } 1731dd85920aSJason Evans if (cancelled) { 1732b40ce416SJulian Elischer td->td_retval[0] = AIO_CANCELED; 1733ac41f2efSAlfred Perlstein return (0); 1734dd85920aSJason Evans } 1735b40ce416SJulian Elischer td->td_retval[0] = AIO_ALLDONE; 1736dd85920aSJason Evans 1737ac41f2efSAlfred Perlstein return (0); 1738ee877a35SJohn Dyson } 1739ee877a35SJohn Dyson 1740ee877a35SJohn Dyson /* 1741bfbbc4aaSJason Evans * aio_error is implemented in the kernel level for compatibility purposes only. 1742bfbbc4aaSJason Evans * For a user mode async implementation, it would be best to do it in a userland 1743bfbbc4aaSJason Evans * subroutine. 1744ee877a35SJohn Dyson */ 1745ee877a35SJohn Dyson int 1746b40ce416SJulian Elischer aio_error(struct thread *td, struct aio_error_args *uap) 1747fd3bf775SJohn Dyson { 1748b40ce416SJulian Elischer struct proc *p = td->td_proc; 17492244ea07SJohn Dyson struct aiocblist *cb; 17502244ea07SJohn Dyson struct kaioinfo *ki; 17511ce91824SDavid Xu int status; 1752ee877a35SJohn Dyson 17532244ea07SJohn Dyson ki = p->p_aioinfo; 17541ce91824SDavid Xu if (ki == NULL) { 17551ce91824SDavid Xu td->td_retval[0] = EINVAL; 17561ce91824SDavid Xu return (0); 17571ce91824SDavid Xu } 1758ee877a35SJohn Dyson 17593769f562SAlan Cox PROC_LOCK(p); 17601ce91824SDavid Xu TAILQ_FOREACH(cb, &ki->kaio_all, allist) { 17611ce91824SDavid Xu if (cb->uuaiocb == uap->aiocbp) { 17621ce91824SDavid Xu if (cb->jobstate == JOBST_JOBFINISHED) 17631ce91824SDavid Xu td->td_retval[0] = 17641ce91824SDavid Xu cb->uaiocb._aiocb_private.error; 17651ce91824SDavid Xu else 1766b40ce416SJulian Elischer td->td_retval[0] = EINPROGRESS; 17671ce91824SDavid Xu PROC_UNLOCK(p); 1768ac41f2efSAlfred Perlstein return (0); 17692244ea07SJohn Dyson } 17702244ea07SJohn Dyson } 17713769f562SAlan Cox PROC_UNLOCK(p); 177284af4da6SJohn Dyson 17732244ea07SJohn Dyson /* 1774a9bf5e37SDavid Xu * Hack for failure of aio_aqueue. 
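 *
 * For context, the userland pattern this call serves is a simple
 * completion poll followed by aio_return(); a hedged sketch, assuming
 * "cb" is a submitted request and <aio.h>, <errno.h>, <err.h> and
 * <stdio.h> are included:
 *
 *	int e;
 *
 *	while ((e = aio_error(&cb)) == EINPROGRESS)
 *		;
 *	if (e != 0)
 *		errc(1, e, "request failed");
 *	printf("%zd bytes transferred\n", aio_return(&cb));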
17752244ea07SJohn Dyson */ 17762244ea07SJohn Dyson status = fuword(&uap->aiocbp->_aiocb_private.status); 17771ce91824SDavid Xu if (status == -1) { 17781ce91824SDavid Xu td->td_retval[0] = fuword(&uap->aiocbp->_aiocb_private.error); 17791ce91824SDavid Xu return (0); 17801ce91824SDavid Xu } 17811ce91824SDavid Xu 17821ce91824SDavid Xu td->td_retval[0] = EINVAL; 17831ce91824SDavid Xu return (0); 1784ee877a35SJohn Dyson } 1785ee877a35SJohn Dyson 1786eb8e6d52SEivind Eklund /* syscall - asynchronous read from a file (REALTIME) */ 1787ee877a35SJohn Dyson int 17880972628aSDavid Xu oaio_read(struct thread *td, struct oaio_read_args *uap) 17890972628aSDavid Xu { 17900972628aSDavid Xu 1791a9bf5e37SDavid Xu return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 1); 17920972628aSDavid Xu } 17930972628aSDavid Xu 17940972628aSDavid Xu int 1795b40ce416SJulian Elischer aio_read(struct thread *td, struct aio_read_args *uap) 1796fd3bf775SJohn Dyson { 179721d56e9cSAlfred Perlstein 1798a9bf5e37SDavid Xu return aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, 0); 1799ee877a35SJohn Dyson } 1800ee877a35SJohn Dyson 1801eb8e6d52SEivind Eklund /* syscall - asynchronous write to a file (REALTIME) */ 1802ee877a35SJohn Dyson int 18030972628aSDavid Xu oaio_write(struct thread *td, struct oaio_write_args *uap) 18040972628aSDavid Xu { 18050972628aSDavid Xu 1806a9bf5e37SDavid Xu return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 1); 18070972628aSDavid Xu } 18080972628aSDavid Xu 18090972628aSDavid Xu int 1810b40ce416SJulian Elischer aio_write(struct thread *td, struct aio_write_args *uap) 1811fd3bf775SJohn Dyson { 181221d56e9cSAlfred Perlstein 1813a9bf5e37SDavid Xu return aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, 0); 18140972628aSDavid Xu } 18150972628aSDavid Xu 18160972628aSDavid Xu /* syscall - list directed I/O (REALTIME) */ 18170972628aSDavid Xu int 18180972628aSDavid Xu olio_listio(struct thread *td, struct olio_listio_args *uap) 18190972628aSDavid Xu { 18200972628aSDavid Xu return do_lio_listio(td, (struct lio_listio_args *)uap, 1); 1821ee877a35SJohn Dyson } 1822ee877a35SJohn Dyson 182344a2c818STim J. Robbins /* syscall - list directed I/O (REALTIME) */ 1824ee877a35SJohn Dyson int 1825b40ce416SJulian Elischer lio_listio(struct thread *td, struct lio_listio_args *uap) 1826fd3bf775SJohn Dyson { 18270972628aSDavid Xu return do_lio_listio(td, uap, 0); 18280972628aSDavid Xu } 18290972628aSDavid Xu 18300972628aSDavid Xu static int 18310972628aSDavid Xu do_lio_listio(struct thread *td, struct lio_listio_args *uap, int oldsigev) 18320972628aSDavid Xu { 1833b40ce416SJulian Elischer struct proc *p = td->td_proc; 18342244ea07SJohn Dyson struct aiocb *iocb, * const *cbptr; 18352244ea07SJohn Dyson struct kaioinfo *ki; 18361ce91824SDavid Xu struct aioliojob *lj; 183769cd28daSDoug Ambrisko struct kevent kev; 183869cd28daSDoug Ambrisko struct kqueue * kq; 183969cd28daSDoug Ambrisko struct file *kq_fp; 18401ce91824SDavid Xu int nent; 18411ce91824SDavid Xu int error; 1842fd3bf775SJohn Dyson int nerror; 1843ee877a35SJohn Dyson int i; 1844ee877a35SJohn Dyson 1845bfbbc4aaSJason Evans if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 1846ac41f2efSAlfred Perlstein return (EINVAL); 18472244ea07SJohn Dyson 18482244ea07SJohn Dyson nent = uap->nent; 1849ae3b195fSTim J. 
Robbins if (nent < 0 || nent > AIO_LISTIO_MAX)
1850ac41f2efSAlfred Perlstein return (EINVAL);
18512244ea07SJohn Dyson
1852bfbbc4aaSJason Evans if (p->p_aioinfo == NULL)
18532244ea07SJohn Dyson aio_init_aioinfo(p);
18542244ea07SJohn Dyson
18552244ea07SJohn Dyson ki = p->p_aioinfo;
18562244ea07SJohn Dyson
1857a163d034SWarner Losh lj = uma_zalloc(aiolio_zone, M_WAITOK);
185884af4da6SJohn Dyson lj->lioj_flags = 0;
18591ce91824SDavid Xu lj->lioj_count = 0;
18601ce91824SDavid Xu lj->lioj_finished_count = 0;
18611ce91824SDavid Xu knlist_init(&lj->klist, &p->p_mtx, NULL, NULL, NULL);
18624c0fb2cfSDavid Xu ksiginfo_init(&lj->lioj_ksi);
186369cd28daSDoug Ambrisko
186484af4da6SJohn Dyson /*
1865bfbbc4aaSJason Evans * Setup signal.
186684af4da6SJohn Dyson */
186784af4da6SJohn Dyson if (uap->sig && (uap->mode == LIO_NOWAIT)) {
18680972628aSDavid Xu bzero(&lj->lioj_signal, sizeof(lj->lioj_signal));
1869bfbbc4aaSJason Evans error = copyin(uap->sig, &lj->lioj_signal,
18700972628aSDavid Xu oldsigev ? sizeof(struct osigevent) :
18710972628aSDavid Xu sizeof(struct sigevent));
18722f3cf918SAlfred Perlstein if (error) {
1873c897b813SJeff Roberson uma_zfree(aiolio_zone, lj);
1874ac41f2efSAlfred Perlstein return (error);
18752f3cf918SAlfred Perlstein }
187669cd28daSDoug Ambrisko
187769cd28daSDoug Ambrisko if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
187869cd28daSDoug Ambrisko /* Assume only new style KEVENT */
18791ce91824SDavid Xu error = fget(td, lj->lioj_signal.sigev_notify_kqueue,
18801ce91824SDavid Xu &kq_fp);
18812a522eb9SJohn Baldwin if (error) {
18822a522eb9SJohn Baldwin uma_zfree(aiolio_zone, lj);
18832a522eb9SJohn Baldwin return (error);
18842a522eb9SJohn Baldwin }
18852a522eb9SJohn Baldwin if (kq_fp->f_type != DTYPE_KQUEUE) {
18862a522eb9SJohn Baldwin fdrop(kq_fp, td);
1887c897b813SJeff Roberson uma_zfree(aiolio_zone, lj);
188869cd28daSDoug Ambrisko return (EBADF);
18892f3cf918SAlfred Perlstein }
189069cd28daSDoug Ambrisko kq = (struct kqueue *)kq_fp->f_data;
189169cd28daSDoug Ambrisko kev.filter = EVFILT_LIO;
189269cd28daSDoug Ambrisko kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
189369cd28daSDoug Ambrisko kev.ident = (uintptr_t)lj; /* something unique */
189469cd28daSDoug Ambrisko kev.data = (intptr_t)lj;
18951ce91824SDavid Xu /* pass user defined sigval data */
18961ce91824SDavid Xu kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
189769cd28daSDoug Ambrisko error = kqueue_register(kq, &kev, td, 1);
18982a522eb9SJohn Baldwin fdrop(kq_fp, td);
189969cd28daSDoug Ambrisko if (error) {
190069cd28daSDoug Ambrisko uma_zfree(aiolio_zone, lj);
190169cd28daSDoug Ambrisko return (error);
190269cd28daSDoug Ambrisko }
19031ce91824SDavid Xu } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
19041ce91824SDavid Xu ;
190568d71118SDavid Xu } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
190668d71118SDavid Xu lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
190768d71118SDavid Xu if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
190869cd28daSDoug Ambrisko uma_zfree(aiolio_zone, lj);
190969cd28daSDoug Ambrisko return (EINVAL);
191068d71118SDavid Xu }
191184af4da6SJohn Dyson lj->lioj_flags |= LIOJ_SIGNAL;
191268d71118SDavid Xu } else {
191368d71118SDavid Xu uma_zfree(aiolio_zone, lj);
191468d71118SDavid Xu return (EINVAL);
19154d752b01SAlan Cox }
19161ce91824SDavid Xu }
191769cd28daSDoug Ambrisko
19181ce91824SDavid Xu PROC_LOCK(p);
19192f3cf918SAlfred Perlstein TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
19202244ea07SJohn Dyson /*
19211ce91824SDavid Xu * Add extra aiocb count to avoid
the lio to be freed 19221ce91824SDavid Xu * by other threads doing aio_waitcomplete or aio_return, 19231ce91824SDavid Xu * and prevent event from being sent until we have queued 19241ce91824SDavid Xu * all tasks. 19251ce91824SDavid Xu */ 19261ce91824SDavid Xu lj->lioj_count = 1; 19271ce91824SDavid Xu PROC_UNLOCK(p); 19281ce91824SDavid Xu 19291ce91824SDavid Xu /* 1930bfbbc4aaSJason Evans * Get pointers to the list of I/O requests. 19312244ea07SJohn Dyson */ 1932fd3bf775SJohn Dyson nerror = 0; 19332244ea07SJohn Dyson cbptr = uap->acb_list; 19342244ea07SJohn Dyson for (i = 0; i < uap->nent; i++) { 1935a739e09cSAlan Cox iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]); 19364a6a94d8SArchie Cobbs if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) { 1937a9bf5e37SDavid Xu error = aio_aqueue(td, iocb, lj, 0, oldsigev); 19381ce91824SDavid Xu if (error != 0) 1939fd3bf775SJohn Dyson nerror++; 1940fd3bf775SJohn Dyson } 1941fd3bf775SJohn Dyson } 19422244ea07SJohn Dyson 19431ce91824SDavid Xu error = 0; 19441ce91824SDavid Xu PROC_LOCK(p); 19452244ea07SJohn Dyson if (uap->mode == LIO_WAIT) { 19461ce91824SDavid Xu while (lj->lioj_count - 1 != lj->lioj_finished_count) { 1947fd3bf775SJohn Dyson ki->kaio_flags |= KAIO_WAKEUP; 19481ce91824SDavid Xu error = msleep(&p->p_aioinfo, &p->p_mtx, 19491ce91824SDavid Xu PRIBIO | PCATCH, "aiospn", 0); 19501ce91824SDavid Xu if (error == ERESTART) 19511ce91824SDavid Xu error = EINTR; 19521ce91824SDavid Xu if (error) 19531ce91824SDavid Xu break; 19541ce91824SDavid Xu } 19551ce91824SDavid Xu } else { 19561ce91824SDavid Xu if (lj->lioj_count - 1 == lj->lioj_finished_count) { 19571ce91824SDavid Xu if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 19581ce91824SDavid Xu lj->lioj_flags |= LIOJ_KEVENT_POSTED; 19591ce91824SDavid Xu KNOTE_LOCKED(&lj->klist, 1); 19601ce91824SDavid Xu } 19611ce91824SDavid Xu if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) 19621ce91824SDavid Xu == LIOJ_SIGNAL 19631ce91824SDavid Xu && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 19641ce91824SDavid Xu lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { 19651ce91824SDavid Xu aio_sendsig(p, &lj->lioj_signal, 19661ce91824SDavid Xu &lj->lioj_ksi); 19671ce91824SDavid Xu lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 19682244ea07SJohn Dyson } 19692244ea07SJohn Dyson } 19701ce91824SDavid Xu } 19711ce91824SDavid Xu lj->lioj_count--; 19721ce91824SDavid Xu if (lj->lioj_count == 0) { 19731ce91824SDavid Xu TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 19741ce91824SDavid Xu knlist_delete(&lj->klist, curthread, 1); 19751ce91824SDavid Xu sigqueue_take(&lj->lioj_ksi); 19761ce91824SDavid Xu PROC_UNLOCK(p); 19771ce91824SDavid Xu uma_zfree(aiolio_zone, lj); 19781ce91824SDavid Xu } else 19791ce91824SDavid Xu PROC_UNLOCK(p); 19802244ea07SJohn Dyson 19811ce91824SDavid Xu if (nerror) 19821ce91824SDavid Xu return (EIO); 19831ce91824SDavid Xu return (error); 1984ee877a35SJohn Dyson } 1985fd3bf775SJohn Dyson 198684af4da6SJohn Dyson /* 19871ce91824SDavid Xu * Called from interrupt thread for physio, we should return as fast 19881ce91824SDavid Xu * as possible, so we schedule a biohelper task. 
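 *
 * This is the stock taskqueue(9) deferral pattern, i.e. roughly
 *
 *	TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe);
 *	...
 *	taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask);
 *
 * (the TASK_INIT() is done when the request is queued in aio_qphysio()),
 * so the heavy lifting runs later in biohelper() from the taskqueue's
 * thread rather than in the interrupt handler itself.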
198984af4da6SJohn Dyson */ 1990fd3bf775SJohn Dyson static void 1991bfbbc4aaSJason Evans aio_physwakeup(struct buf *bp) 1992fd3bf775SJohn Dyson { 199384af4da6SJohn Dyson struct aiocblist *aiocbe; 1994fd3bf775SJohn Dyson 19958edbaf85SHidetoshi Shimokawa aiocbe = (struct aiocblist *)bp->b_caller1; 19961ce91824SDavid Xu taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask); 19971ce91824SDavid Xu } 199884af4da6SJohn Dyson 19991ce91824SDavid Xu /* 20001ce91824SDavid Xu * Task routine to perform heavy tasks, process wakeup, and signals. 20011ce91824SDavid Xu */ 20021ce91824SDavid Xu static void 20031ce91824SDavid Xu biohelper(void *context, int pending) 20041ce91824SDavid Xu { 20051ce91824SDavid Xu struct aiocblist *aiocbe = context; 20061ce91824SDavid Xu struct buf *bp; 20071ce91824SDavid Xu struct proc *userp; 20081ce91824SDavid Xu int nblks; 20091ce91824SDavid Xu 20101ce91824SDavid Xu bp = aiocbe->bp; 20111ce91824SDavid Xu userp = aiocbe->userproc; 20121ce91824SDavid Xu PROC_LOCK(userp); 201384af4da6SJohn Dyson aiocbe->uaiocb._aiocb_private.status -= bp->b_resid; 201484af4da6SJohn Dyson aiocbe->uaiocb._aiocb_private.error = 0; 2015c244d2deSPoul-Henning Kamp if (bp->b_ioflags & BIO_ERROR) 201684af4da6SJohn Dyson aiocbe->uaiocb._aiocb_private.error = bp->b_error; 20171ce91824SDavid Xu nblks = btodb(aiocbe->uaiocb.aio_nbytes); 20181ce91824SDavid Xu if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE) 20191ce91824SDavid Xu aiocbe->outputcharge += nblks; 20201ce91824SDavid Xu else 20211ce91824SDavid Xu aiocbe->inputcharge += nblks; 20221ce91824SDavid Xu aiocbe->bp = NULL; 20231ce91824SDavid Xu TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist); 202469cd28daSDoug Ambrisko aio_bio_done_notify(userp, aiocbe, DONE_BUF); 20251ce91824SDavid Xu PROC_UNLOCK(userp); 20261ce91824SDavid Xu 20271ce91824SDavid Xu /* Release mapping into kernel space. */ 20281ce91824SDavid Xu vunmapbuf(bp); 20291ce91824SDavid Xu relpbuf(bp, NULL); 20301ce91824SDavid Xu atomic_subtract_int(&num_buf_aio, 1); 203184af4da6SJohn Dyson } 2032bfbbc4aaSJason Evans 2033eb8e6d52SEivind Eklund /* syscall - wait for the next completion of an aio request */ 2034bfbbc4aaSJason Evans int 2035b40ce416SJulian Elischer aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap) 2036bfbbc4aaSJason Evans { 2037b40ce416SJulian Elischer struct proc *p = td->td_proc; 2038bfbbc4aaSJason Evans struct timeval atv; 2039bfbbc4aaSJason Evans struct timespec ts; 2040bfbbc4aaSJason Evans struct kaioinfo *ki; 20411ce91824SDavid Xu struct aiocblist *cb; 20421ce91824SDavid Xu struct aiocb *uuaiocb; 20431ce91824SDavid Xu int error, status, timo; 2044bfbbc4aaSJason Evans 20451ce91824SDavid Xu suword(uap->aiocbp, (long)NULL); 2046dd85920aSJason Evans 2047bfbbc4aaSJason Evans timo = 0; 2048bfbbc4aaSJason Evans if (uap->timeout) { 2049bfbbc4aaSJason Evans /* Get timespec struct. */ 205088ed460eSAlan Cox error = copyin(uap->timeout, &ts, sizeof(ts)); 2051bfbbc4aaSJason Evans if (error) 2052ac41f2efSAlfred Perlstein return (error); 2053bfbbc4aaSJason Evans 2054bfbbc4aaSJason Evans if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000)) 2055bfbbc4aaSJason Evans return (EINVAL); 2056bfbbc4aaSJason Evans 2057bfbbc4aaSJason Evans TIMESPEC_TO_TIMEVAL(&atv, &ts); 2058bfbbc4aaSJason Evans if (itimerfix(&atv)) 2059bfbbc4aaSJason Evans return (EINVAL); 2060bfbbc4aaSJason Evans timo = tvtohz(&atv); 2061bfbbc4aaSJason Evans } 2062bfbbc4aaSJason Evans 20638213baf0SChristian S.J. 
Peron if (p->p_aioinfo == NULL) 2064323fe565SDavid Xu aio_init_aioinfo(p); 20658213baf0SChristian S.J. Peron ki = p->p_aioinfo; 2066bfbbc4aaSJason Evans 20671ce91824SDavid Xu error = 0; 20681ce91824SDavid Xu cb = NULL; 20693769f562SAlan Cox PROC_LOCK(p); 20701ce91824SDavid Xu while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) { 20711ce91824SDavid Xu ki->kaio_flags |= KAIO_WAKEUP; 20721ce91824SDavid Xu error = msleep(&p->p_aioinfo, &p->p_mtx, PRIBIO | PCATCH, 20731ce91824SDavid Xu "aiowc", timo); 20741ce91824SDavid Xu if (error == ERESTART) 20751ce91824SDavid Xu error = EINTR; 20761ce91824SDavid Xu if (error) 20771ce91824SDavid Xu break; 20781ce91824SDavid Xu } 20791ce91824SDavid Xu 20801ce91824SDavid Xu if (cb != NULL) { 20811ce91824SDavid Xu MPASS(cb->jobstate == JOBST_JOBFINISHED); 20821ce91824SDavid Xu uuaiocb = cb->uuaiocb; 20831ce91824SDavid Xu status = cb->uaiocb._aiocb_private.status; 20841ce91824SDavid Xu error = cb->uaiocb._aiocb_private.error; 20851ce91824SDavid Xu td->td_retval[0] = status; 2086bfbbc4aaSJason Evans if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) { 20871ce91824SDavid Xu p->p_stats->p_ru.ru_oublock += cb->outputcharge; 2088bfbbc4aaSJason Evans cb->outputcharge = 0; 2089bfbbc4aaSJason Evans } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) { 2090516d2564SAlan Cox p->p_stats->p_ru.ru_inblock += cb->inputcharge; 2091bfbbc4aaSJason Evans cb->inputcharge = 0; 2092bfbbc4aaSJason Evans } 2093bfbbc4aaSJason Evans aio_free_entry(cb); 20943769f562SAlan Cox PROC_UNLOCK(p); 20951ce91824SDavid Xu suword(uap->aiocbp, (long)uuaiocb); 20961ce91824SDavid Xu suword(&uuaiocb->_aiocb_private.error, error); 20971ce91824SDavid Xu suword(&uuaiocb->_aiocb_private.status, status); 20981ce91824SDavid Xu } else 20991ce91824SDavid Xu PROC_UNLOCK(p); 2100bfbbc4aaSJason Evans 2101ac41f2efSAlfred Perlstein return (error); 2102bfbbc4aaSJason Evans } 2103cb679c38SJonathan Lemon 2104eb8e6d52SEivind Eklund /* kqueue attach function */ 2105cb679c38SJonathan Lemon static int 2106cb679c38SJonathan Lemon filt_aioattach(struct knote *kn) 2107cb679c38SJonathan Lemon { 2108b46f1c55SAlan Cox struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata; 2109cb679c38SJonathan Lemon 2110cb679c38SJonathan Lemon /* 2111cb679c38SJonathan Lemon * The aiocbe pointer must be validated before using it, so 2112cb679c38SJonathan Lemon * registration is restricted to the kernel; the user cannot 2113cb679c38SJonathan Lemon * set EV_FLAG1. 
2114cb679c38SJonathan Lemon */ 2115cb679c38SJonathan Lemon if ((kn->kn_flags & EV_FLAG1) == 0) 2116cb679c38SJonathan Lemon return (EPERM); 2117cb679c38SJonathan Lemon kn->kn_flags &= ~EV_FLAG1; 2118cb679c38SJonathan Lemon 2119ad3b9257SJohn-Mark Gurney knlist_add(&aiocbe->klist, kn, 0); 2120cb679c38SJonathan Lemon 2121cb679c38SJonathan Lemon return (0); 2122cb679c38SJonathan Lemon } 2123cb679c38SJonathan Lemon 2124eb8e6d52SEivind Eklund /* kqueue detach function */ 2125cb679c38SJonathan Lemon static void 2126cb679c38SJonathan Lemon filt_aiodetach(struct knote *kn) 2127cb679c38SJonathan Lemon { 2128b46f1c55SAlan Cox struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata; 2129cb679c38SJonathan Lemon 213069cd28daSDoug Ambrisko if (!knlist_empty(&aiocbe->klist)) 2131ad3b9257SJohn-Mark Gurney knlist_remove(&aiocbe->klist, kn, 0); 2132cb679c38SJonathan Lemon } 2133cb679c38SJonathan Lemon 2134eb8e6d52SEivind Eklund /* kqueue filter function */ 2135cb679c38SJonathan Lemon /*ARGSUSED*/ 2136cb679c38SJonathan Lemon static int 2137cb679c38SJonathan Lemon filt_aio(struct knote *kn, long hint) 2138cb679c38SJonathan Lemon { 2139b46f1c55SAlan Cox struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata; 2140cb679c38SJonathan Lemon 214191369fc7SAlan Cox kn->kn_data = aiocbe->uaiocb._aiocb_private.error; 21421ce91824SDavid Xu if (aiocbe->jobstate != JOBST_JOBFINISHED) 2143cb679c38SJonathan Lemon return (0); 2144cb679c38SJonathan Lemon kn->kn_flags |= EV_EOF; 2145cb679c38SJonathan Lemon return (1); 2146cb679c38SJonathan Lemon } 214769cd28daSDoug Ambrisko 214869cd28daSDoug Ambrisko /* kqueue attach function */ 214969cd28daSDoug Ambrisko static int 215069cd28daSDoug Ambrisko filt_lioattach(struct knote *kn) 215169cd28daSDoug Ambrisko { 21521ce91824SDavid Xu struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata; 215369cd28daSDoug Ambrisko 215469cd28daSDoug Ambrisko /* 21551ce91824SDavid Xu * The aioliojob pointer must be validated before using it, so 215669cd28daSDoug Ambrisko * registration is restricted to the kernel; the user cannot 215769cd28daSDoug Ambrisko * set EV_FLAG1. 215869cd28daSDoug Ambrisko */ 215969cd28daSDoug Ambrisko if ((kn->kn_flags & EV_FLAG1) == 0) 216069cd28daSDoug Ambrisko return (EPERM); 216169cd28daSDoug Ambrisko kn->kn_flags &= ~EV_FLAG1; 216269cd28daSDoug Ambrisko 216369cd28daSDoug Ambrisko knlist_add(&lj->klist, kn, 0); 216469cd28daSDoug Ambrisko 216569cd28daSDoug Ambrisko return (0); 216669cd28daSDoug Ambrisko } 216769cd28daSDoug Ambrisko 216869cd28daSDoug Ambrisko /* kqueue detach function */ 216969cd28daSDoug Ambrisko static void 217069cd28daSDoug Ambrisko filt_liodetach(struct knote *kn) 217169cd28daSDoug Ambrisko { 21721ce91824SDavid Xu struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata; 217369cd28daSDoug Ambrisko 217469cd28daSDoug Ambrisko if (!knlist_empty(&lj->klist)) 217569cd28daSDoug Ambrisko knlist_remove(&lj->klist, kn, 0); 217669cd28daSDoug Ambrisko } 217769cd28daSDoug Ambrisko 217869cd28daSDoug Ambrisko /* kqueue filter function */ 217969cd28daSDoug Ambrisko /*ARGSUSED*/ 218069cd28daSDoug Ambrisko static int 218169cd28daSDoug Ambrisko filt_lio(struct knote *kn, long hint) 218269cd28daSDoug Ambrisko { 21831ce91824SDavid Xu struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata; 21841ce91824SDavid Xu 218569cd28daSDoug Ambrisko return (lj->lioj_flags & LIOJ_KEVENT_POSTED); 218669cd28daSDoug Ambrisko } 2187
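/*
 * Editorial example, userland side, illustrative only and not part of this
 * file: completion delivery through the EVFILT_AIO filter implemented
 * above.  The descriptor "fd", the buffer "buf" and its size "bufsize" are
 * assumed to exist, and error handling is abbreviated.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *	struct kevent ev;
 *	int kq;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = bufsize;
 *	cb.aio_offset = 0;
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &cb;
 *	if (aio_read(&cb) == -1)
 *		err(1, "aio_read");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
 *		err(1, "kevent");
 *
 * The event comes back with ev.filter == EVFILT_AIO, ev.ident holding the
 * aiocb pointer and ev.udata holding the sigev_value, after which the
 * result is collected with aio_return((struct aiocb *)ev.ident).
 */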