xref: /freebsd/sys/kern/vfs_aio.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
1 /*
2  * Copyright (c) 1997 John S. Dyson.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. John S. Dyson's name may not be used to endorse or promote products
10  *    derived from this software without specific prior written permission.
11  *
12  * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
13  * bad that happens because of using this software isn't the responsibility
14  * of the author.  This software is distributed AS-IS.
15  *
16  * $FreeBSD$
17  */
18 
19 /*
20  * This file contains support for the POSIX 1003.1B AIO/LIO facility.
21  */
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/bio.h>
26 #include <sys/buf.h>
27 #include <sys/sysproto.h>
28 #include <sys/filedesc.h>
29 #include <sys/kernel.h>
30 #include <sys/kthread.h>
31 #include <sys/fcntl.h>
32 #include <sys/file.h>
33 #include <sys/lock.h>
34 #include <sys/mutex.h>
35 #include <sys/unistd.h>
36 #include <sys/proc.h>
37 #include <sys/resourcevar.h>
38 #include <sys/signalvar.h>
39 #include <sys/protosw.h>
40 #include <sys/socketvar.h>
41 #include <sys/sysctl.h>
42 #include <sys/vnode.h>
43 #include <sys/conf.h>
44 #include <sys/event.h>
45 
46 #include <vm/vm.h>
47 #include <vm/vm_extern.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_zone.h>
51 #include <sys/aio.h>
52 
53 #include <machine/limits.h>
54 
55 #include "opt_vfs_aio.h"
56 
57 #ifdef VFS_AIO
58 
59 static	long jobrefid;
60 
61 #define JOBST_NULL		0x0
62 #define	JOBST_JOBQPROC		0x1
63 #define JOBST_JOBQGLOBAL	0x2
64 #define JOBST_JOBRUNNING	0x3
65 #define JOBST_JOBFINISHED	0x4
66 #define	JOBST_JOBQBUF		0x5
67 #define	JOBST_JOBBFINISHED	0x6
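
/*
 * Job lifecycle (a sketch inferred from aio_daemon() and aio_qphysio()
 * below, not a formal state machine): a daemon-serviced request moves
 * JOBST_JOBQGLOBAL (or JOBST_JOBQPROC) -> JOBST_JOBRUNNING ->
 * JOBST_JOBFINISHED, while a direct physio request moves JOBST_JOBQBUF ->
 * JOBST_JOBBFINISHED.  JOBST_NULL marks an entry that is free.
 */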
68 
69 #ifndef MAX_AIO_PER_PROC
70 #define MAX_AIO_PER_PROC	32
71 #endif
72 
73 #ifndef MAX_AIO_QUEUE_PER_PROC
74 #define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
75 #endif
76 
77 #ifndef MAX_AIO_PROCS
78 #define MAX_AIO_PROCS		32
79 #endif
80 
81 #ifndef MAX_AIO_QUEUE
82 #define	MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
83 #endif
84 
85 #ifndef TARGET_AIO_PROCS
86 #define TARGET_AIO_PROCS	4
87 #endif
88 
89 #ifndef MAX_BUF_AIO
90 #define MAX_BUF_AIO		16
91 #endif
92 
93 #ifndef AIOD_TIMEOUT_DEFAULT
94 #define	AIOD_TIMEOUT_DEFAULT	(10 * hz)
95 #endif
96 
97 #ifndef AIOD_LIFETIME_DEFAULT
98 #define AIOD_LIFETIME_DEFAULT	(30 * hz)
99 #endif
100 
101 static int max_aio_procs = MAX_AIO_PROCS;
102 static int num_aio_procs = 0;
103 static int target_aio_procs = TARGET_AIO_PROCS;
104 static int max_queue_count = MAX_AIO_QUEUE;
105 static int num_queue_count = 0;
106 static int num_buf_aio = 0;
107 static int num_aio_resv_start = 0;
108 static int aiod_timeout;
109 static int aiod_lifetime;
110 
111 static int max_aio_per_proc = MAX_AIO_PER_PROC;
112 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
113 static int max_buf_aio = MAX_BUF_AIO;
114 
115 SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt");
116 
117 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc,
118 	CTLFLAG_RW, &max_aio_per_proc, 0, "");
119 
120 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc,
121 	CTLFLAG_RW, &max_aio_queue_per_proc, 0, "");
122 
123 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
124 	CTLFLAG_RW, &max_aio_procs, 0, "");
125 
126 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
127 	CTLFLAG_RD, &num_aio_procs, 0, "");
128 
129 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
130 	CTLFLAG_RD, &num_queue_count, 0, "");
131 
132 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
133 	CTLFLAG_RW, &max_queue_count, 0, "");
134 
135 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
136 	CTLFLAG_RW, &target_aio_procs, 0, "");
137 
138 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
139 	CTLFLAG_RW, &max_buf_aio, 0, "");
140 
141 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
142 	CTLFLAG_RD, &num_buf_aio, 0, "");
143 
144 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
145 	CTLFLAG_RW, &aiod_lifetime, 0, "");
146 
147 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
148 	CTLFLAG_RW, &aiod_timeout, 0, "");
149 
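/*
 * The knobs above are runtime-tunable.  A minimal sketch of adjusting one
 * from userland with sysctlbyname(3) (illustrative value only):
 *
 *	int newmax = 64;
 *
 *	if (sysctlbyname("vfs.aio.max_aio_per_proc", NULL, NULL,
 *	    &newmax, sizeof(newmax)) == -1)
 *		err(1, "sysctlbyname");
 *
 * aiod_timeout and aiod_lifetime are expressed in scheduler ticks, so the
 * defaults (10 * hz and 30 * hz) correspond to roughly 10 and 30 seconds
 * of real time regardless of hz.
 */
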
150 /*
151  * AIO process info
152  */
153 #define AIOP_FREE	0x1			/* proc on free queue */
154 #define AIOP_SCHED	0x2			/* proc explicitly scheduled */
155 
156 struct aiothreadlist {
157 	int aiothreadflags;			/* AIO proc flags */
158 	TAILQ_ENTRY(aiothreadlist) list;	/* List of processes */
159 	struct thread *aiothread;		/* The AIO thread */
160 	TAILQ_HEAD (,aiocblist) jobtorun;	/* suggested jobs to run */
161 };
162 
163 /*
164  * data-structure for lio signal management
165  */
166 struct aio_liojob {
167 	int	lioj_flags;
168 	int	lioj_buffer_count;
169 	int	lioj_buffer_finished_count;
170 	int	lioj_queue_count;
171 	int	lioj_queue_finished_count;
172 	struct	sigevent lioj_signal;	/* signal on all I/O done */
173 	TAILQ_ENTRY	(aio_liojob) lioj_list;
174 	struct	kaioinfo *lioj_ki;
175 };
176 #define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
177 #define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
178 
179 /*
180  * per process aio data structure
181  */
182 struct kaioinfo {
183 	int	kaio_flags;		/* per process kaio flags */
184 	int	kaio_maxactive_count;	/* maximum number of AIOs */
185 	int	kaio_active_count;	/* number of currently used AIOs */
186 	int	kaio_qallowed_count;	/* maximum size of AIO queue */
187 	int	kaio_queue_count;	/* size of AIO queue */
188 	int	kaio_ballowed_count;	/* maximum number of buffers */
189 	int	kaio_queue_finished_count; /* number of daemon jobs finished */
190 	int	kaio_buffer_count;	/* number of physio buffers */
191 	int	kaio_buffer_finished_count; /* count of I/O done */
192 	struct 	proc *kaio_p;		/* process that uses this kaio block */
193 	TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
194 	TAILQ_HEAD (,aiocblist)	kaio_jobqueue;	/* job queue for process */
195 	TAILQ_HEAD (,aiocblist)	kaio_jobdone;	/* done queue for process */
196 	TAILQ_HEAD (,aiocblist)	kaio_bufqueue;	/* buffer job queue for process */
197 	TAILQ_HEAD (,aiocblist)	kaio_bufdone;	/* buffer done queue for process */
198 	TAILQ_HEAD (,aiocblist) kaio_sockqueue; /* queue for aios waiting on sockets */
199 };
200 
201 #define KAIO_RUNDOWN	0x1	/* process is being run down */
202 #define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
203 
204 static TAILQ_HEAD(,aiothreadlist) aio_freeproc, aio_activeproc;
205 static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
206 static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */
207 
208 static void	aio_init_aioinfo(struct proc *p);
209 static void	aio_onceonly(void *);
210 static int	aio_free_entry(struct aiocblist *aiocbe);
211 static void	aio_process(struct aiocblist *aiocbe);
212 static int	aio_newproc(void);
213 static int	aio_aqueue(struct thread *td, struct aiocb *job, int type);
214 static void	aio_physwakeup(struct buf *bp);
215 static int	aio_fphysio(struct proc *p, struct aiocblist *aiocbe);
216 static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
217 static void	aio_daemon(void *uproc);
218 static void	process_signal(void *aioj);
219 
220 SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
221 
222 static vm_zone_t kaio_zone = 0, aiop_zone = 0, aiocb_zone = 0, aiol_zone = 0;
223 static vm_zone_t aiolio_zone = 0;
224 
225 /*
226  * Startup initialization
227  */
228 static void
229 aio_onceonly(void *na)
230 {
231 	TAILQ_INIT(&aio_freeproc);
232 	TAILQ_INIT(&aio_activeproc);
233 	TAILQ_INIT(&aio_jobs);
234 	TAILQ_INIT(&aio_bufjobs);
235 	kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
236 	aiop_zone = zinit("AIOP", sizeof (struct aiothreadlist), 0, 0, 1);
237 	aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
238 	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
239 	aiolio_zone = zinit("AIOLIO", AIO_LISTIO_MAX * sizeof (struct
240 	    aio_liojob), 0, 0, 1);
241 	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
242 	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
243 	jobrefid = 1;
244 }
245 
246 /*
247  * Init the per-process aioinfo structure.  The aioinfo limits are set
248  * per-process for user limit (resource) management.
249  */
250 static void
251 aio_init_aioinfo(struct proc *p)
252 {
253 	struct kaioinfo *ki;
254 	if (p->p_aioinfo == NULL) {
255 		ki = zalloc(kaio_zone);
256 		p->p_aioinfo = ki;
257 		ki->kaio_flags = 0;
258 		ki->kaio_maxactive_count = max_aio_per_proc;
259 		ki->kaio_active_count = 0;
260 		ki->kaio_qallowed_count = max_aio_queue_per_proc;
261 		ki->kaio_queue_count = 0;
262 		ki->kaio_ballowed_count = max_buf_aio;
263 		ki->kaio_buffer_count = 0;
264 		ki->kaio_buffer_finished_count = 0;
265 		ki->kaio_p = p;
266 		TAILQ_INIT(&ki->kaio_jobdone);
267 		TAILQ_INIT(&ki->kaio_jobqueue);
268 		TAILQ_INIT(&ki->kaio_bufdone);
269 		TAILQ_INIT(&ki->kaio_bufqueue);
270 		TAILQ_INIT(&ki->kaio_liojoblist);
271 		TAILQ_INIT(&ki->kaio_sockqueue);
272 	}
273 
274 	while (num_aio_procs < target_aio_procs)
275 		aio_newproc();
276 }
277 
278 /*
279  * Free a job entry.  Wait for completion if it is currently active, but don't
280  * delay forever.  If we delay, we return a flag that says that we have to
281  * restart the queue scan.
282  */
283 static int
284 aio_free_entry(struct aiocblist *aiocbe)
285 {
286 	struct kaioinfo *ki;
287 	struct aiothreadlist *aiop;
288 	struct aio_liojob *lj;
289 	struct proc *p;
290 	int error;
291 	int s;
292 
293 	if (aiocbe->jobstate == JOBST_NULL)
294 		panic("aio_free_entry: freeing already free job");
295 
296 	p = aiocbe->userproc;
297 	ki = p->p_aioinfo;
298 	lj = aiocbe->lio;
299 	if (ki == NULL)
300 		panic("aio_free_entry: missing p->p_aioinfo");
301 
302 	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
303 		if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
304 			return 0;
305 		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
306 		tsleep(aiocbe, PRIBIO, "jobwai", 0);
307 	}
308 	aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
309 
310 	if (aiocbe->bp == NULL) {
311 		if (ki->kaio_queue_count <= 0)
312 			panic("aio_free_entry: process queue size <= 0");
313 		if (num_queue_count <= 0)
314 			panic("aio_free_entry: system wide queue size <= 0");
315 
316 		if (lj) {
317 			lj->lioj_queue_count--;
318 			if (aiocbe->jobflags & AIOCBLIST_DONE)
319 				lj->lioj_queue_finished_count--;
320 		}
321 		ki->kaio_queue_count--;
322 		if (aiocbe->jobflags & AIOCBLIST_DONE)
323 			ki->kaio_queue_finished_count--;
324 		num_queue_count--;
325 	} else {
326 		if (lj) {
327 			lj->lioj_buffer_count--;
328 			if (aiocbe->jobflags & AIOCBLIST_DONE)
329 				lj->lioj_buffer_finished_count--;
330 		}
331 		if (aiocbe->jobflags & AIOCBLIST_DONE)
332 			ki->kaio_buffer_finished_count--;
333 		ki->kaio_buffer_count--;
334 		num_buf_aio--;
335 	}
336 
337 	/* aiocbe is going away, we need to destroy any knotes */
338 	knote_remove(&p->p_thread, &aiocbe->klist); /* XXXKSE */
339 	/* XXXKSE Note: the thread here is used to eventually find the
340 	 * owning process again, but it is also used to do an fo_close,
341 	 * and that requires the thread.  (But does it require the
342 	 * OWNING thread, or maybe the running thread?)
343 	 * There is a semantic problem here...
344 	 */
345 
346 	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
347 	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
348 		ki->kaio_flags &= ~KAIO_WAKEUP;
349 		wakeup(p);
350 	}
351 
352 	if (aiocbe->jobstate == JOBST_JOBQBUF) {
353 		if ((error = aio_fphysio(p, aiocbe)) != 0)
354 			return error;
355 		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
356 			panic("aio_free_entry: invalid physio finish-up state");
357 		s = splbio();
358 		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
359 		splx(s);
360 	} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
361 		aiop = aiocbe->jobaiothread;
362 		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
363 	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
364 		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
365 		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
366 	} else if (aiocbe->jobstate == JOBST_JOBFINISHED)
367 		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
368 	else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
369 		s = splbio();
370 		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
371 		splx(s);
372 		if (aiocbe->bp) {
373 			vunmapbuf(aiocbe->bp);
374 			relpbuf(aiocbe->bp, NULL);
375 			aiocbe->bp = NULL;
376 		}
377 	}
378 	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
379 		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
380 		zfree(aiolio_zone, lj);
381 	}
382 	aiocbe->jobstate = JOBST_NULL;
383 	untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
384 	zfree(aiocb_zone, aiocbe);
385 	return 0;
386 }
387 #endif /* VFS_AIO */
388 
389 /*
390  * Rundown the jobs for a given process.
391  */
392 void
393 aio_proc_rundown(struct proc *p)
394 {
395 #ifndef VFS_AIO
396 	return;
397 #else
398 	int s;
399 	struct kaioinfo *ki;
400 	struct aio_liojob *lj, *ljn;
401 	struct aiocblist *aiocbe, *aiocbn;
402 	struct file *fp;
403 	struct filedesc *fdp;
404 	struct socket *so;
405 
406 	ki = p->p_aioinfo;
407 	if (ki == NULL)
408 		return;
409 
410 	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
411 	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
412 	    ki->kaio_buffer_finished_count)) {
413 		ki->kaio_flags |= KAIO_RUNDOWN;
414 		if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
415 			break;
416 	}
417 
418 	/*
419 	 * Move any aio ops that are waiting on socket I/O to the normal job
420 	 * queues so they are cleaned up with any others.
421 	 */
422 	fdp = p->p_fd;
423 
424 	s = splnet();
425 	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
426 	    aiocbn) {
427 		aiocbn = TAILQ_NEXT(aiocbe, plist);
428 		fp = fdp->fd_ofiles[aiocbe->uaiocb.aio_fildes];
429 
430 		/*
431 		 * Under some circumstances, the aio_fildes and the file
432 		 * structure don't match.  This would leave aiocbe's in the
433 		 * TAILQ associated with the socket and cause a panic later.
434 		 *
435 		 * Detect and fix.
436 		 */
437 		if ((fp == NULL) || (fp != aiocbe->fd_file))
438 			fp = aiocbe->fd_file;
439 		if (fp) {
440 			so = (struct socket *)fp->f_data;
441 			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
442 			if (TAILQ_EMPTY(&so->so_aiojobq)) {
443 				so->so_snd.sb_flags &= ~SB_AIO;
444 				so->so_rcv.sb_flags &= ~SB_AIO;
445 			}
446 		}
447 		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
448 		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
449 		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
450 	}
451 	splx(s);
452 
453 restart1:
454 	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
455 		aiocbn = TAILQ_NEXT(aiocbe, plist);
456 		if (aio_free_entry(aiocbe))
457 			goto restart1;
458 	}
459 
460 restart2:
461 	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
462 	    aiocbn) {
463 		aiocbn = TAILQ_NEXT(aiocbe, plist);
464 		if (aio_free_entry(aiocbe))
465 			goto restart2;
466 	}
467 
468 /*
469  * Note the use of lots of splbio here, trying to avoid splbio for long chains
470  * of I/O.  Probably unnecessary.
471  */
472 restart3:
473 	s = splbio();
474 	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
475 		ki->kaio_flags |= KAIO_WAKEUP;
476 		tsleep(p, PRIBIO, "aioprn", 0);
477 		splx(s);
478 		goto restart3;
479 	}
480 	splx(s);
481 
482 restart4:
483 	s = splbio();
484 	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
485 		aiocbn = TAILQ_NEXT(aiocbe, plist);
486 		if (aio_free_entry(aiocbe)) {
487 			splx(s);
488 			goto restart4;
489 		}
490 	}
491 	splx(s);
492 
493 	/*
494 	 * If we've slept, jobs might have moved from one queue to another.
495 	 * Retry rundown if we didn't manage to empty the queues.
496 	 */
497         if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
498 	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
499 	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
500 	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
501 		goto restart1;
502 
503 	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
504 		ljn = TAILQ_NEXT(lj, lioj_list);
505 		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
506 		    0)) {
507 			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
508 			zfree(aiolio_zone, lj);
509 		} else {
510 #ifdef DIAGNOSTIC
511 			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
512 			    "QF:%d\n", lj->lioj_buffer_count,
513 			    lj->lioj_buffer_finished_count,
514 			    lj->lioj_queue_count,
515 			    lj->lioj_queue_finished_count);
516 #endif
517 		}
518 	}
519 
520 	zfree(kaio_zone, ki);
521 	p->p_aioinfo = NULL;
522 #endif /* VFS_AIO */
523 }
524 
525 #ifdef VFS_AIO
526 /*
527  * Select a job to run (called by an AIO daemon).
528  */
529 static struct aiocblist *
530 aio_selectjob(struct aiothreadlist *aiop)
531 {
532 	int s;
533 	struct aiocblist *aiocbe;
534 	struct kaioinfo *ki;
535 	struct proc *userp;
536 
537 	aiocbe = TAILQ_FIRST(&aiop->jobtorun);
538 	if (aiocbe) {
539 		TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
540 		return aiocbe;
541 	}
542 
543 	s = splnet();
544 	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
545 	    TAILQ_NEXT(aiocbe, list)) {
546 		userp = aiocbe->userproc;
547 		ki = userp->p_aioinfo;
548 
549 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
550 			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
551 			splx(s);
552 			return aiocbe;
553 		}
554 	}
555 	splx(s);
556 
557 	return NULL;
558 }
559 
560 /*
561  * The AIO processing activity.  This is the code that does the I/O request for
562  * the non-physio version of the operations.  The normal vn operations are used,
563  * and this code should work in all instances for every type of file, including
564  * pipes, sockets, fifos, and regular files.
565  */
566 static void
567 aio_process(struct aiocblist *aiocbe)
568 {
569 	struct filedesc *fdp;
570 	struct thread *td;
571 	struct proc *userp;
572 	struct proc *mycp;
573 	struct aiocb *cb;
574 	struct file *fp;
575 	struct uio auio;
576 	struct iovec aiov;
577 	unsigned int fd;
578 	int cnt;
579 	int error;
580 	off_t offset;
581 	int oublock_st, oublock_end;
582 	int inblock_st, inblock_end;
583 
584 	userp = aiocbe->userproc;
585 	td = curthread;
586 	mycp = td->td_proc;
587 	cb = &aiocbe->uaiocb;
588 
589 	fdp = mycp->p_fd;
590 	fd = cb->aio_fildes;
591 	fp = fdp->fd_ofiles[fd];
592 
593 	if ((fp == NULL) || (fp != aiocbe->fd_file)) {
594 		cb->_aiocb_private.error = EBADF;
595 		cb->_aiocb_private.status = -1;
596 		return;
597 	}
598 
599 	aiov.iov_base = cb->aio_buf;
600 	aiov.iov_len = cb->aio_nbytes;
601 
602 	auio.uio_iov = &aiov;
603 	auio.uio_iovcnt = 1;
604 	auio.uio_offset = offset = cb->aio_offset;
605 	auio.uio_resid = cb->aio_nbytes;
606 	cnt = cb->aio_nbytes;
607 	auio.uio_segflg = UIO_USERSPACE;
608 	auio.uio_td = td;
609 
610 	inblock_st = mycp->p_stats->p_ru.ru_inblock;
611 	oublock_st = mycp->p_stats->p_ru.ru_oublock;
612 	/*
613 	 * Temporarily bump the ref count while reading to avoid the
614 	 * descriptor being ripped out from under us.
615 	 */
616 	fhold(fp);
617 	if (cb->aio_lio_opcode == LIO_READ) {
618 		auio.uio_rw = UIO_READ;
619 		error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
620 	} else {
621 		auio.uio_rw = UIO_WRITE;
622 		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
623 	}
624 	fdrop(fp, td);
625 	inblock_end = mycp->p_stats->p_ru.ru_inblock;
626 	oublock_end = mycp->p_stats->p_ru.ru_oublock;
627 
628 	aiocbe->inputcharge = inblock_end - inblock_st;
629 	aiocbe->outputcharge = oublock_end - oublock_st;
630 
631 	if ((error) && (auio.uio_resid != cnt)) {
632 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
633 			error = 0;
634 		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
635 			PROC_LOCK(userp);
636 			psignal(userp, SIGPIPE);
637 			PROC_UNLOCK(userp);
638 		}
639 	}
640 
641 	cnt -= auio.uio_resid;
642 	cb->_aiocb_private.error = error;
643 	cb->_aiocb_private.status = cnt;
644 }
645 
646 /*
647  * The AIO daemon.  Most of the actual work is done in aio_process(), but
648  * the setup (and address space management) is done in this routine.
649  */
650 static void
651 aio_daemon(void *uproc)
652 {
653 	int s;
654 	struct aio_liojob *lj;
655 	struct aiocb *cb;
656 	struct aiocblist *aiocbe;
657 	struct aiothreadlist *aiop;
658 	struct kaioinfo *ki;
659 	struct proc *curcp, *mycp, *userp;
660 	struct vmspace *myvm, *tmpvm;
661 	struct thread *td = curthread;
662 
663 	mtx_lock(&Giant);
664 	/*
665 	 * Local copies of curproc (cp) and vmspace (myvm)
666 	 */
667 	mycp = td->td_proc;
668 	myvm = mycp->p_vmspace;
669 
670 	if (mycp->p_textvp) {
671 		vrele(mycp->p_textvp);
672 		mycp->p_textvp = NULL;
673 	}
674 
675 	/*
676 	 * Allocate and ready the aio control info.  There is one aiop structure
677 	 * per daemon.
678 	 */
679 	aiop = zalloc(aiop_zone);
680 	aiop->aiothread = td;
681 	aiop->aiothreadflags |= AIOP_FREE;
682 	TAILQ_INIT(&aiop->jobtorun);
683 
684 	s = splnet();
685 
686 	/*
687 	 * Place thread (lightweight process) onto the AIO free thread list.
688 	 */
689 	if (TAILQ_EMPTY(&aio_freeproc))
690 		wakeup(&aio_freeproc);
691 	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
692 
693 	splx(s);
694 
695 	/*
696 	 * Get rid of our current file descriptors.  AIODs don't need any
697 	 * file descriptors, except as temporarily inherited from the client.
698 	 * Credentials are also cloned, and made equivalent to "root".
699 	 */
700 	fdfree(td);
701 	mycp->p_fd = NULL;
702 	mycp->p_ucred = crcopy(mycp->p_ucred);
703 	mycp->p_ucred->cr_uid = 0;
704 	uifree(mycp->p_ucred->cr_uidinfo);
705 	mycp->p_ucred->cr_uidinfo = uifind(0);
706 	mycp->p_ucred->cr_ngroups = 1;
707 	mycp->p_ucred->cr_groups[0] = 1;
708 
709 	/* The daemon resides in its own pgrp. */
710 	enterpgrp(mycp, mycp->p_pid, 1);
711 
712 	/* Mark special process type. */
713 	mycp->p_flag |= P_SYSTEM;
714 
715 	/*
716 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
717 	 * and creating too many daemons.)
718 	 */
719 	wakeup(mycp);
720 
721 	for (;;) {
722 		/*
723 		 * curcp is the current daemon process context.
724 		 * userp is the current user process context.
725 		 */
726 		curcp = mycp;
727 
728 		/*
729 		 * Take daemon off of free queue
730 		 */
731 		if (aiop->aiothreadflags & AIOP_FREE) {
732 			s = splnet();
733 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
734 			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
735 			aiop->aiothreadflags &= ~AIOP_FREE;
736 			splx(s);
737 		}
738 		aiop->aiothreadflags &= ~AIOP_SCHED;
739 
740 		/*
741 		 * Check for jobs.
742 		 */
743 		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
744 			cb = &aiocbe->uaiocb;
745 			userp = aiocbe->userproc;
746 
747 			aiocbe->jobstate = JOBST_JOBRUNNING;
748 
749 			/*
750 			 * Connect to process address space for user program.
751 			 */
752 			if (userp != curcp) {
753 				/*
754 				 * Save the current address space that we are
755 				 * connected to.
756 				 */
757 				tmpvm = mycp->p_vmspace;
758 
759 				/*
760 				 * Point to the new user address space, and
761 				 * refer to it.
762 				 */
763 				mycp->p_vmspace = userp->p_vmspace;
764 				mycp->p_vmspace->vm_refcnt++;
765 
766 				/* Activate the new mapping. */
767 				pmap_activate(&mycp->p_thread);
768 
769 				/*
770 				 * If the old address space wasn't the daemon's
771 				 * own address space, then we need to remove the
772 				 * daemon's reference from the other process
773 				 * that it was acting on behalf of.
774 				 */
775 				if (tmpvm != myvm) {
776 					vmspace_free(tmpvm);
777 				}
778 
779 				/*
780 				 * Disassociate from the previous client's file
781 				 * descriptors, and associate with the new client's
782 				 * descriptors.  Note that the daemon doesn't
783 				 * need to worry about its original descriptors,
784 				 * because they were originally freed.
785 				 */
786 				if (mycp->p_fd)
787 					fdfree(td);
788 				mycp->p_fd = fdshare(userp);
789 				curcp = userp;
790 			}
791 
792 			ki = userp->p_aioinfo;
793 			lj = aiocbe->lio;
794 
795 			/* Account for currently active jobs. */
796 			ki->kaio_active_count++;
797 
798 			/* Do the I/O function. */
799 			aiocbe->jobaiothread = aiop;
800 			aio_process(aiocbe);
801 
802 			/* Decrement the active job count. */
803 			ki->kaio_active_count--;
804 
805 			/*
806 			 * Increment the completion count for wakeup/signal
807 			 * comparisons.
808 			 */
809 			aiocbe->jobflags |= AIOCBLIST_DONE;
810 			ki->kaio_queue_finished_count++;
811 			if (lj)
812 				lj->lioj_queue_finished_count++;
813 			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
814 			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
815 				ki->kaio_flags &= ~KAIO_WAKEUP;
816 				wakeup(userp);
817 			}
818 
819 			s = splbio();
820 			if (lj && (lj->lioj_flags &
821 			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
822 				if ((lj->lioj_queue_finished_count ==
823 				    lj->lioj_queue_count) &&
824 				    (lj->lioj_buffer_finished_count ==
825 				    lj->lioj_buffer_count)) {
826 					PROC_LOCK(userp);
827 					psignal(userp,
828 					    lj->lioj_signal.sigev_signo);
829 					PROC_UNLOCK(userp);
830 					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
831 				}
832 			}
833 			splx(s);
834 
835 			aiocbe->jobstate = JOBST_JOBFINISHED;
836 
837 			/*
838 			 * If the I/O request should be automatically rundown,
839 			 * do the needed cleanup.  Otherwise, place the queue
840 			 * entry for the just finished I/O request into the done
841 			 * queue for the associated client.
842 			 */
843 			s = splnet();
844 			if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
845 				aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
846 				zfree(aiocb_zone, aiocbe);
847 			} else {
848 				TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
849 				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe,
850 				    plist);
851 			}
852 			splx(s);
853 			KNOTE(&aiocbe->klist, 0);
854 
855 			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
856 				wakeup(aiocbe);
857 				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
858 			}
859 
860 			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
861 				PROC_LOCK(userp);
862 				psignal(userp, cb->aio_sigevent.sigev_signo);
863 				PROC_UNLOCK(userp);
864 			}
865 		}
866 
867 		/*
868 		 * Disconnect from user address space.
869 		 */
870 		if (curcp != mycp) {
871 			/* Get the user address space to disconnect from. */
872 			tmpvm = mycp->p_vmspace;
873 
874 			/* Get original address space for daemon. */
875 			mycp->p_vmspace = myvm;
876 
877 			/* Activate the daemon's address space. */
878 			pmap_activate(&mycp->p_thread);
879 #ifdef DIAGNOSTIC
880 			if (tmpvm == myvm) {
881 				printf("AIOD: vmspace problem -- %d\n",
882 				    mycp->p_pid);
883 			}
884 #endif
885 			/* Remove our vmspace reference. */
886 			vmspace_free(tmpvm);
887 
888 			/*
889 			 * Disassociate from the user process's file
890 			 * descriptors.
891 			 */
892 			if (mycp->p_fd)
893 				fdfree(td);
894 			mycp->p_fd = NULL;
895 			curcp = mycp;
896 		}
897 
898 		/*
899 		 * If we are the first to be put onto the free queue, wake up
900 		 * anyone waiting for a daemon.
901 		 */
902 		s = splnet();
903 		TAILQ_REMOVE(&aio_activeproc, aiop, list);
904 		if (TAILQ_EMPTY(&aio_freeproc))
905 			wakeup(&aio_freeproc);
906 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
907 		aiop->aiothreadflags |= AIOP_FREE;
908 		splx(s);
909 
910 		/*
911 		 * If daemon is inactive for a long time, allow it to exit,
912 		 * thereby freeing resources.
913 		 */
914 		if (((aiop->aiothreadflags & AIOP_SCHED) == 0) && tsleep(mycp,
915 		    PRIBIO, "aiordy", aiod_lifetime)) {
916 			s = splnet();
917 			if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
918 			    (TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
919 				if ((aiop->aiothreadflags & AIOP_FREE) &&
920 				    (num_aio_procs > target_aio_procs)) {
921 					TAILQ_REMOVE(&aio_freeproc, aiop, list);
922 					splx(s);
923 					zfree(aiop_zone, aiop);
924 					num_aio_procs--;
925 #ifdef DIAGNOSTIC
926 					if (mycp->p_vmspace->vm_refcnt <= 1) {
927 						printf("AIOD: bad vm refcnt for"
928 						    " exiting daemon: %d\n",
929 						    mycp->p_vmspace->vm_refcnt);
930 					}
931 #endif
932 					kthread_exit(0);
933 				}
934 			}
935 			splx(s);
936 		}
937 	}
938 }
939 
940 /*
941  * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
942  * AIO daemon modifies its environment itself.
943  */
944 static int
945 aio_newproc()
946 {
947 	int error;
948 	struct proc *p;
949 
950 	error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, "aiod%d",
951 			       num_aio_procs);
952 	if (error)
953 		return error;
954 
955 	/*
956 	 * Wait until daemon is started, but continue on just in case to
957 	 * handle error conditions.
958 	 */
959 	error = tsleep(p, PZERO, "aiosta", aiod_timeout);
960 
961 	num_aio_procs++;
962 
963 	return error;
964 }
965 
966 /*
967  * Try the high-performance, low-overhead physio method for eligible
968  * VCHR devices.  This method doesn't use an aio helper thread, and
969  * thus has very low overhead.
970  *
971  * Assumes that the caller, _aio_aqueue(), has incremented the file
972  * structure's reference count, preventing its deallocation for the
973  * duration of this call.
974  */
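/*
 * A worked example of the eligibility tests below (illustrative numbers,
 * assuming a 512-byte physical block size and MAXPHYS of 128 KiB): a
 * 64 KiB transfer passes both the "aio_nbytes % si_bsize_phys" check and
 * the MAXPHYS bound, so it is issued directly; a 1000-byte transfer fails
 * the block-size modulus test, this function returns -1, and the caller
 * falls back to the daemon-based path.
 */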
975 static int
976 aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
977 {
978 	int error;
979 	struct aiocb *cb;
980 	struct file *fp;
981 	struct buf *bp;
982 	struct vnode *vp;
983 	struct kaioinfo *ki;
984 	struct filedesc *fdp;
985 	struct aio_liojob *lj;
986 	int fd;
987 	int s;
988 	int notify;
989 
990 	cb = &aiocbe->uaiocb;
991 	fdp = p->p_fd;
992 	fd = cb->aio_fildes;
993 	fp = fdp->fd_ofiles[fd];
994 
995 	if (fp->f_type != DTYPE_VNODE)
996 		return (-1);
997 
998 	vp = (struct vnode *)fp->f_data;
999 
1000 	/*
1001 	 * If it's not a disk, we don't want to return a positive error.
1002 	 * That would keep the aio code from falling through to try the
1003 	 * thread-based method when you're talking to a regular file.
1004 	 */
1005 	if (!vn_isdisk(vp, &error)) {
1006 		if (error == ENOTBLK)
1007 			return (-1);
1008 		else
1009 			return (error);
1010 	}
1011 
1012 	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
1013 		return (-1);
1014 
1015 	if (cb->aio_nbytes >
1016 	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
1017 		return (-1);
1018 
1019 	ki = p->p_aioinfo;
1020 	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
1021 		return (-1);
1022 
1023 	ki->kaio_buffer_count++;
1024 
1025 	lj = aiocbe->lio;
1026 	if (lj)
1027 		lj->lioj_buffer_count++;
1028 
1029 	/* Create and build a buffer header for a transfer. */
1030 	bp = (struct buf *)getpbuf(NULL);
1031 	BUF_KERNPROC(bp);
1032 
1033 	/*
1034 	 * Get a copy of the kva from the physical buffer.
1035 	 */
1036 	bp->b_caller1 = p;
1037 	bp->b_dev = vp->v_rdev;
1038 	error = bp->b_error = 0;
1039 
1040 	bp->b_bcount = cb->aio_nbytes;
1041 	bp->b_bufsize = cb->aio_nbytes;
1042 	bp->b_flags = B_PHYS;
1043 	bp->b_iodone = aio_physwakeup;
1044 	bp->b_saveaddr = bp->b_data;
1045 	bp->b_data = cb->aio_buf;
1046 	bp->b_blkno = btodb(cb->aio_offset);
1047 
1048 	if (cb->aio_lio_opcode == LIO_WRITE) {
1049 		bp->b_iocmd = BIO_WRITE;
1050 		if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) {
1051 			error = EFAULT;
1052 			goto doerror;
1053 		}
1054 	} else {
1055 		bp->b_iocmd = BIO_READ;
1056 		if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) {
1057 			error = EFAULT;
1058 			goto doerror;
1059 		}
1060 	}
1061 
1062 	/* Bring buffer into kernel space. */
1063 	vmapbuf(bp);
1064 
1065 	s = splbio();
1066 	aiocbe->bp = bp;
1067 	bp->b_spc = (void *)aiocbe;
1068 	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
1069 	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
1070 	aiocbe->jobstate = JOBST_JOBQBUF;
1071 	cb->_aiocb_private.status = cb->aio_nbytes;
1072 	num_buf_aio++;
1073 	bp->b_error = 0;
1074 
1075 	splx(s);
1076 
1077 	/* Perform transfer. */
1078 	DEV_STRATEGY(bp, 0);
1079 
1080 	notify = 0;
1081 	s = splbio();
1082 
1083 	/*
1084 	 * If we had an error invoking the request, or an error in processing
1085 	 * the request before we have returned, we process it as an error in
1086 	 * transfer.  Note that such an I/O error is not indicated immediately,
1087 	 * but is returned using the aio_error mechanism.  In this case,
1088 	 * aio_suspend will return immediately.
1089 	 */
1090 	if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
1091 		struct aiocb *job = aiocbe->uuaiocb;
1092 
1093 		aiocbe->uaiocb._aiocb_private.status = 0;
1094 		suword(&job->_aiocb_private.status, 0);
1095 		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
1096 		suword(&job->_aiocb_private.error, bp->b_error);
1097 
1098 		ki->kaio_buffer_finished_count++;
1099 
1100 		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
1101 			aiocbe->jobstate = JOBST_JOBBFINISHED;
1102 			aiocbe->jobflags |= AIOCBLIST_DONE;
1103 			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
1104 			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
1105 			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
1106 			notify = 1;
1107 		}
1108 	}
1109 	splx(s);
1110 	if (notify)
1111 		KNOTE(&aiocbe->klist, 0);
1112 	return 0;
1113 
1114 doerror:
1115 	ki->kaio_buffer_count--;
1116 	if (lj)
1117 		lj->lioj_buffer_count--;
1118 	aiocbe->bp = NULL;
1119 	relpbuf(bp, NULL);
1120 	return error;
1121 }
1122 
1123 /*
1124  * This waits/tests physio completion.
1125  */
1126 static int
1127 aio_fphysio(struct proc *p, struct aiocblist *iocb)
1128 {
1129 	int s;
1130 	struct buf *bp;
1131 	int error;
1132 
1133 	bp = iocb->bp;
1134 
1135 	s = splbio();
1136 	while ((bp->b_flags & B_DONE) == 0) {
1137 		if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) {
1138 			if ((bp->b_flags & B_DONE) == 0) {
1139 				splx(s);
1140 				return EINPROGRESS;
1141 			} else
1142 				break;
1143 		}
1144 	}
1145 	splx(s);
1146 
1147 	/* Release mapping into kernel space. */
1148 	vunmapbuf(bp);
1149 	iocb->bp = 0;
1150 
1151 	error = 0;
1152 
1153 	/* Check for an error. */
1154 	if (bp->b_ioflags & BIO_ERROR)
1155 		error = bp->b_error;
1156 
1157 	relpbuf(bp, NULL);
1158 	return (error);
1159 }
1160 #endif /* VFS_AIO */
1161 
1162 /*
1163  * Wake up aio requests that may be serviceable now.
1164  */
1165 void
1166 aio_swake(struct socket *so, struct sockbuf *sb)
1167 {
1168 #ifndef VFS_AIO
1169 	return;
1170 #else
1171 	struct aiocblist *cb,*cbn;
1172 	struct proc *p;
1173 	struct kaioinfo *ki = NULL;
1174 	int opcode, wakecount = 0;
1175 	struct aiothreadlist *aiop;
1176 
1177 	if (sb == &so->so_snd) {
1178 		opcode = LIO_WRITE;
1179 		so->so_snd.sb_flags &= ~SB_AIO;
1180 	} else {
1181 		opcode = LIO_READ;
1182 		so->so_rcv.sb_flags &= ~SB_AIO;
1183 	}
1184 
1185 	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
1186 		cbn = TAILQ_NEXT(cb, list);
1187 		if (opcode == cb->uaiocb.aio_lio_opcode) {
1188 			p = cb->userproc;
1189 			ki = p->p_aioinfo;
1190 			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
1191 			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
1192 			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
1193 			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
1194 			wakecount++;
1195 			if (cb->jobstate != JOBST_JOBQGLOBAL)
1196 				panic("invalid queue value");
1197 		}
1198 	}
1199 
1200 	while (wakecount--) {
1201 		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
1202 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
1203 			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
1204 			aiop->aiothreadflags &= ~AIOP_FREE;
1205 			wakeup(aiop->aiothread);
1206 		}
1207 	}
1208 #endif /* VFS_AIO */
1209 }
1210 
1211 #ifdef VFS_AIO
1212 /*
1213  * Queue a new AIO request.  The choice between the threaded method and the
1214  * direct physio (VCHR) technique is made in this code.
1215  */
1216 static int
1217 _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type)
1218 {
1219 	struct proc *p = td->td_proc;
1220 	struct filedesc *fdp;
1221 	struct file *fp;
1222 	unsigned int fd;
1223 	struct socket *so;
1224 	int s;
1225 	int error;
1226 	int opcode;
1227 	struct aiocblist *aiocbe;
1228 	struct aiothreadlist *aiop;
1229 	struct kaioinfo *ki;
1230 	struct kevent kev;
1231 	struct kqueue *kq;
1232 	struct file *kq_fp;
1233 
1234 	aiocbe = zalloc(aiocb_zone);
1235 	aiocbe->inputcharge = 0;
1236 	aiocbe->outputcharge = 0;
1237 	callout_handle_init(&aiocbe->timeouthandle);
1238 	SLIST_INIT(&aiocbe->klist);
1239 
1240 	suword(&job->_aiocb_private.status, -1);
1241 	suword(&job->_aiocb_private.error, 0);
1242 	suword(&job->_aiocb_private.kernelinfo, -1);
1243 
1244 	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
1245 	if (error) {
1246 		suword(&job->_aiocb_private.error, error);
1247 		zfree(aiocb_zone, aiocbe);
1248 		return error;
1249 	}
1250 	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
1251 		!_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
1252 		zfree(aiocb_zone, aiocbe);
1253 		return EINVAL;
1254 	}
1255 
1256 	/* Save userspace address of the job info. */
1257 	aiocbe->uuaiocb = job;
1258 
1259 	/* Get the opcode. */
1260 	if (type != LIO_NOP)
1261 		aiocbe->uaiocb.aio_lio_opcode = type;
1262 	opcode = aiocbe->uaiocb.aio_lio_opcode;
1263 
1264 	/* Get the fd info for process. */
1265 	fdp = p->p_fd;
1266 
1267 	/*
1268 	 * Range check file descriptor.
1269 	 */
1270 	fd = aiocbe->uaiocb.aio_fildes;
1271 	if (fd >= fdp->fd_nfiles) {
1272 		zfree(aiocb_zone, aiocbe);
1273 		if (type == 0)
1274 			suword(&job->_aiocb_private.error, EBADF);
1275 		return EBADF;
1276 	}
1277 
1278 	fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
1279 	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
1280 	    0))) {
1281 		zfree(aiocb_zone, aiocbe);
1282 		if (type == 0)
1283 			suword(&job->_aiocb_private.error, EBADF);
1284 		return EBADF;
1285 	}
1286 
1287 	if (aiocbe->uaiocb.aio_offset == -1LL) {
1288 		zfree(aiocb_zone, aiocbe);
1289 		if (type == 0)
1290 			suword(&job->_aiocb_private.error, EINVAL);
1291 		return EINVAL;
1292 	}
1293 
1294 	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
1295 	if (error) {
1296 		zfree(aiocb_zone, aiocbe);
1297 		if (type == 0)
1298 			suword(&job->_aiocb_private.error, EINVAL);
1299 		return error;
1300 	}
1301 
1302 	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
1303 	if (jobrefid == LONG_MAX)
1304 		jobrefid = 1;
1305 	else
1306 		jobrefid++;
1307 
1308 	if (opcode == LIO_NOP) {
1309 		zfree(aiocb_zone, aiocbe);
1310 		if (type == 0) {
1311 			suword(&job->_aiocb_private.error, 0);
1312 			suword(&job->_aiocb_private.status, 0);
1313 			suword(&job->_aiocb_private.kernelinfo, 0);
1314 		}
1315 		return 0;
1316 	}
1317 
1318 	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
1319 		zfree(aiocb_zone, aiocbe);
1320 		if (type == 0) {
1321 			suword(&job->_aiocb_private.status, 0);
1322 			suword(&job->_aiocb_private.error, EINVAL);
1323 		}
1324 		return EINVAL;
1325 	}
1326 
1327 	fhold(fp);
1328 
1329 	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
1330 		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
1331 		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
1332 	} else {
1334 		/*
1335 		 * This method for requesting kevent-based notification won't
1336 		 * work on the alpha, since we're passing in a pointer
1337 		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
1338 		 * based method instead.
1339 		 */
1340 		struct kevent *kevp;
1341 
1342 		kevp = (struct kevent *)job->aio_lio_opcode;
1343 		if (kevp == NULL)
1344 			goto no_kqueue;
1345 
1346 		error = copyin(kevp, &kev, sizeof(kev));
1347 		if (error)
1348 			goto aqueue_fail;
1349 	}
1350 	if ((u_int)kev.ident >= fdp->fd_nfiles ||
1351 	    (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
1352 	    (kq_fp->f_type != DTYPE_KQUEUE)) {
1353 		error = EBADF;
1354 		goto aqueue_fail;
1355 	}
1356 	kq = (struct kqueue *)kq_fp->f_data;
1357 	kev.ident = (uintptr_t)aiocbe;
1358 	kev.filter = EVFILT_AIO;
1359 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
1360 	error = kqueue_register(kq, &kev, td);
1361 aqueue_fail:
1362 	if (error) {
1363 		zfree(aiocb_zone, aiocbe);
1364 		if (type == 0)
1365 			suword(&job->_aiocb_private.error, error);
1366 		goto done;
1367 	}
1368 no_kqueue:
1369 
1370 	suword(&job->_aiocb_private.error, EINPROGRESS);
1371 	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
1372 	aiocbe->userproc = p;
1373 	aiocbe->jobflags = 0;
1374 	aiocbe->lio = lj;
1375 	ki = p->p_aioinfo;
1376 
1377 	if (fp->f_type == DTYPE_SOCKET) {
1378 		/*
1379 		 * Alternate queueing for socket ops: Reach down into the
1380 		 * descriptor to get the socket data.  Then check to see if the
1381 		 * socket is ready to be read or written (based on the requested
1382 		 * operation).
1383 		 *
1384 		 * If it is not ready for I/O, then queue the aiocbe on the
1385 		 * socket, and set the flags so we get a call when sbnotify()
1386 		 * happens.
1387 		 */
1388 		so = (struct socket *)fp->f_data;
1389 		s = splnet();
1390 		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1391 		    LIO_WRITE) && (!sowriteable(so)))) {
1392 			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
1393 			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
1394 			if (opcode == LIO_READ)
1395 				so->so_rcv.sb_flags |= SB_AIO;
1396 			else
1397 				so->so_snd.sb_flags |= SB_AIO;
1398 			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
1399 			ki->kaio_queue_count++;
1400 			num_queue_count++;
1401 			splx(s);
1402 			error = 0;
1403 			goto done;
1404 		}
1405 		splx(s);
1406 	}
1407 
1408 	if ((error = aio_qphysio(p, aiocbe)) == 0)
1409 		goto done;
1410 	if (error > 0) {
1411 		suword(&job->_aiocb_private.status, 0);
1412 		aiocbe->uaiocb._aiocb_private.error = error;
1413 		suword(&job->_aiocb_private.error, error);
1414 		goto done;
1415 	}
1416 
1417 	/* No buffer for daemon I/O. */
1418 	aiocbe->bp = NULL;
1419 
1420 	ki->kaio_queue_count++;
1421 	if (lj)
1422 		lj->lioj_queue_count++;
1423 	s = splnet();
1424 	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1425 	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
1426 	splx(s);
1427 	aiocbe->jobstate = JOBST_JOBQGLOBAL;
1428 
1429 	num_queue_count++;
1430 	error = 0;
1431 
1432 	/*
1433 	 * If we don't have a free AIO process, and we are below our quota, then
1434 	 * start one.  Otherwise, depend on the subsequent I/O completions to
1435 	 * pick up this job.  If we don't successfully create the new process
1436 	 * (thread) due to resource issues, we return an error for now (EAGAIN),
1437 	 * which is likely not the correct thing to do.
1438 	 */
1439 retryproc:
1440 	s = splnet();
1441 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1442 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
1443 		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
1444 		aiop->aiothreadflags &= ~AIOP_FREE;
1445 		wakeup(aiop->aiothread);
1446 	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1447 	    ((ki->kaio_active_count + num_aio_resv_start) <
1448 	    ki->kaio_maxactive_count)) {
1449 		num_aio_resv_start++;
1450 		if ((error = aio_newproc()) == 0) {
1451 			num_aio_resv_start--;
1452 			td->td_retval[0] = 0;
1453 			goto retryproc;
1454 		}
1455 		num_aio_resv_start--;
1456 	}
1457 	splx(s);
1458 done:
1459 	fdrop(fp, td);
1460 	return error;
1461 }
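
/*
 * A minimal userland sketch of the SIGEV_KEVENT completion path handled
 * above (kq is assumed to come from kqueue(2); error handling elided):
 *
 *	struct aiocb acb;
 *
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	acb.aio_sigevent.sigev_notify_kqueue = kq;
 *	acb.aio_sigevent.sigev_value.sigval_ptr = &acb;
 *	aio_read(&acb);
 *
 * The completion is delivered as an EVFILT_AIO kevent whose udata is the
 * sigev_value set above, which can be used to find the aiocb again.
 */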
1462 
1463 /*
1464  * This routine queues an AIO request, checking for quotas.
1465  */
1466 static int
1467 aio_aqueue(struct thread *td, struct aiocb *job, int type)
1468 {
1469 	struct proc *p = td->td_proc;
1470 	struct kaioinfo *ki;
1471 
1472 	if (p->p_aioinfo == NULL)
1473 		aio_init_aioinfo(p);
1474 
1475 	if (num_queue_count >= max_queue_count)
1476 		return EAGAIN;
1477 
1478 	ki = p->p_aioinfo;
1479 	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
1480 		return EAGAIN;
1481 
1482 	return _aio_aqueue(td, job, NULL, type);
1483 }
1484 #endif /* VFS_AIO */
1485 
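/*
 * A minimal userland sketch of the queue/poll/reap cycle implemented by
 * the syscalls below (illustrative only; error handling elided):
 *
 *	struct aiocb acb;
 *	ssize_t n;
 *
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_offset = 0;
 *	aio_read(&acb);				(queued via aio_aqueue() above)
 *	while (aio_error(&acb) == EINPROGRESS)
 *		;				(aio_suspend() avoids spinning)
 *	n = aio_return(&acb);			(reaps status, frees the entry)
 */
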
1486 /*
1487  * Support the aio_return system call; as a side effect, kernel resources
1488  * are released.
1489  */
1490 int
1491 aio_return(struct thread *td, struct aio_return_args *uap)
1492 {
1493 #ifndef VFS_AIO
1494 	return ENOSYS;
1495 #else
1496 	struct proc *p = td->td_proc;
1497 	int s;
1498 	int jobref;
1499 	struct aiocblist *cb, *ncb;
1500 	struct aiocb *ujob;
1501 	struct kaioinfo *ki;
1502 
1503 	ki = p->p_aioinfo;
1504 	if (ki == NULL)
1505 		return EINVAL;
1506 
1507 	ujob = uap->aiocbp;
1508 
1509 	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
1510 	if (jobref == -1 || jobref == 0)
1511 		return EINVAL;
1512 
1513 	s = splnet();
1514 	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb,
1515 	    plist)) {
1516 		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
1517 		    jobref) {
1518 			splx(s);
1519 			if (ujob == cb->uuaiocb) {
1520 				td->td_retval[0] =
1521 				    cb->uaiocb._aiocb_private.status;
1522 			} else
1523 				td->td_retval[0] = EFAULT;
1524 			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
1525 				curproc->p_stats->p_ru.ru_oublock +=
1526 				    cb->outputcharge;
1527 				cb->outputcharge = 0;
1528 			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
1529 				curproc->p_stats->p_ru.ru_inblock +=
1530 				    cb->inputcharge;
1531 				cb->inputcharge = 0;
1532 			}
1533 			aio_free_entry(cb);
1534 			return 0;
1535 		}
1536 	}
1537 	splx(s);
1538 
1539 	s = splbio();
1540 	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
1541 		ncb = TAILQ_NEXT(cb, plist);
1542 		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
1543 		    == jobref) {
1544 			splx(s);
1545 			if (ujob == cb->uuaiocb) {
1546 				td->td_retval[0] =
1547 				    cb->uaiocb._aiocb_private.status;
1548 			} else
1549 				td->td_retval[0] = EFAULT;
1550 			aio_free_entry(cb);
1551 			return 0;
1552 		}
1553 	}
1554 	splx(s);
1555 
1556 	return (EINVAL);
1557 #endif /* VFS_AIO */
1558 }
1559 
1560 /*
1561  * Allow a process to wake up when any of its I/O requests completes.
1562  */
1563 int
1564 aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1565 {
1566 #ifndef VFS_AIO
1567 	return ENOSYS;
1568 #else
1569 	struct proc *p = td->td_proc;
1570 	struct timeval atv;
1571 	struct timespec ts;
1572 	struct aiocb *const *cbptr, *cbp;
1573 	struct kaioinfo *ki;
1574 	struct aiocblist *cb;
1575 	int i;
1576 	int njoblist;
1577 	int error, s, timo;
1578 	int *ijoblist;
1579 	struct aiocb **ujoblist;
1580 
1581 	if (uap->nent >= AIO_LISTIO_MAX)
1582 		return EINVAL;
1583 
1584 	timo = 0;
1585 	if (uap->timeout) {
1586 		/* Get timespec struct. */
1587 		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1588 			return error;
1589 
1590 		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
1591 			return (EINVAL);
1592 
1593 		TIMESPEC_TO_TIMEVAL(&atv, &ts);
1594 		if (itimerfix(&atv))
1595 			return (EINVAL);
1596 		timo = tvtohz(&atv);
1597 	}
1598 
1599 	ki = p->p_aioinfo;
1600 	if (ki == NULL)
1601 		return EAGAIN;
1602 
1603 	njoblist = 0;
1604 	ijoblist = zalloc(aiol_zone);
1605 	ujoblist = zalloc(aiol_zone);
1606 	cbptr = uap->aiocbp;
1607 
1608 	for (i = 0; i < uap->nent; i++) {
1609 		cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
1610 		if (cbp == 0)
1611 			continue;
1612 		ujoblist[njoblist] = cbp;
1613 		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
1614 		njoblist++;
1615 	}
1616 
1617 	if (njoblist == 0) {
1618 		zfree(aiol_zone, ijoblist);
1619 		zfree(aiol_zone, ujoblist);
1620 		return 0;
1621 	}
1622 
1623 	error = 0;
1624 	for (;;) {
1625 		for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb =
1626 		    TAILQ_NEXT(cb, plist)) {
1627 			for (i = 0; i < njoblist; i++) {
1628 				if (((intptr_t)
1629 				    cb->uaiocb._aiocb_private.kernelinfo) ==
1630 				    ijoblist[i]) {
1631 					if (ujoblist[i] != cb->uuaiocb)
1632 						error = EINVAL;
1633 					zfree(aiol_zone, ijoblist);
1634 					zfree(aiol_zone, ujoblist);
1635 					return error;
1636 				}
1637 			}
1638 		}
1639 
1640 		s = splbio();
1641 		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
1642 		    TAILQ_NEXT(cb, plist)) {
1643 			for (i = 0; i < njoblist; i++) {
1644 				if (((intptr_t)
1645 				    cb->uaiocb._aiocb_private.kernelinfo) ==
1646 				    ijoblist[i]) {
1647 					splx(s);
1648 					if (ujoblist[i] != cb->uuaiocb)
1649 						error = EINVAL;
1650 					zfree(aiol_zone, ijoblist);
1651 					zfree(aiol_zone, ujoblist);
1652 					return error;
1653 				}
1654 			}
1655 		}
1656 
1657 		ki->kaio_flags |= KAIO_WAKEUP;
1658 		error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
1659 		splx(s);
1660 
1661 		if (error == ERESTART || error == EINTR) {
1662 			zfree(aiol_zone, ijoblist);
1663 			zfree(aiol_zone, ujoblist);
1664 			return EINTR;
1665 		} else if (error == EWOULDBLOCK) {
1666 			zfree(aiol_zone, ijoblist);
1667 			zfree(aiol_zone, ujoblist);
1668 			return EAGAIN;
1669 		}
1670 	}
1671 
1672 /* NOTREACHED */
1673 	return EINVAL;
1674 #endif /* VFS_AIO */
1675 }
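
/*
 * A minimal sketch of waiting on a set of requests with a timeout
 * (illustrative; acb is a previously queued aiocb):
 *
 *	const struct aiocb *list[1] = { &acb };
 *	struct timespec ts = { 5, 0 };		(five seconds)
 *
 *	if (aio_suspend(list, 1, &ts) == -1 && errno == EAGAIN)
 *		;				(timed out, requests pending)
 *
 * Note the bound above: this implementation rejects nent >= AIO_LISTIO_MAX
 * with EINVAL.
 */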
1676 
1677 /*
1678  * aio_cancel cancels any non-physio aio operations not currently in
1679  * progress.
1680  */
1681 int
1682 aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1683 {
1684 #ifndef VFS_AIO
1685 	return ENOSYS;
1686 #else
1687 	struct proc *p = td->td_proc;
1688 	struct kaioinfo *ki;
1689 	struct aiocblist *cbe, *cbn;
1690 	struct file *fp;
1691 	struct filedesc *fdp;
1692 	struct socket *so;
1693 	struct proc *po;
1694 	int s, error;
1695 	int cancelled = 0;
1696 	int notcancelled = 0;
1697 	struct vnode *vp;
1698 
1699 	fdp = p->p_fd;
1700 
1701 	fp = fdp->fd_ofiles[uap->fd];
1702 
1703 	if (fp == NULL) {
1704 		return EBADF;
1705 	}
1706 
1707 	if (fp->f_type == DTYPE_VNODE) {
1708 		vp = (struct vnode *)fp->f_data;
1709 
1710 		if (vn_isdisk(vp, &error)) {
1711 			td->td_retval[0] = AIO_NOTCANCELED;
1712 			return 0;
1713 		}
1714 	} else if (fp->f_type == DTYPE_SOCKET) {
1715 		so = (struct socket *)fp->f_data;
1716 
1717 		s = splnet();
1718 
1719 		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
1720 			cbn = TAILQ_NEXT(cbe, list);
1721 			if ((uap->aiocbp == NULL) ||
1722 			    (uap->aiocbp == cbe->uuaiocb)) {
1723 				po = cbe->userproc;
1724 				ki = po->p_aioinfo;
1725 				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
1726 				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
1727 				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
1728 				if (ki->kaio_flags & KAIO_WAKEUP) {
1729 					wakeup(po);
1730 				}
1731 				cbe->jobstate = JOBST_JOBFINISHED;
1732 				cbe->uaiocb._aiocb_private.status = -1;
1733 				cbe->uaiocb._aiocb_private.error = ECANCELED;
1734 				cancelled++;
1735 				/* XXX cancelled, knote? */
1736 				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
1737 				    SIGEV_SIGNAL) {
1738 					PROC_LOCK(cbe->userproc);
1739 					psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
1740 					PROC_UNLOCK(cbe->userproc);
1741 				}
1742 				if (uap->aiocbp)
1743 					break;
1744 			}
1745 		}
1746 
1747 		splx(s);
1748 
1749 		if ((cancelled) && (uap->aiocbp)) {
1750 			td->td_retval[0] = AIO_CANCELED;
1751 			return 0;
1752 		}
1753 
1754 	}
1755 
1756 	ki = p->p_aioinfo;
1757 
1758 	s = splnet();
1759 
1760 	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
1761 		cbn = TAILQ_NEXT(cbe, plist);
1762 
1763 		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
1764 		    ((uap->aiocbp == NULL ) ||
1765 		     (uap->aiocbp == cbe->uuaiocb))) {
1766 
1767 			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
1768 				TAILQ_REMOVE(&aio_jobs, cbe, list);
1769 				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
1770 				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
1771 				    plist);
1772 				cancelled++;
1773 				ki->kaio_queue_finished_count++;
1774 				cbe->jobstate = JOBST_JOBFINISHED;
1775 				cbe->uaiocb._aiocb_private.status = -1;
1776 				cbe->uaiocb._aiocb_private.error = ECANCELED;
1777 				/* XXX cancelled, knote? */
1778 				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
1779 				    SIGEV_SIGNAL) {
1780 					PROC_LOCK(cbe->userproc);
1781 					psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
1782 					PROC_UNLOCK(cbe->userproc);
1783 				}
1784 			} else {
1785 				notcancelled++;
1786 			}
1787 		}
1788 	}
1789 
1790 	splx(s);
1791 
1793 	if (notcancelled) {
1794 		td->td_retval[0] = AIO_NOTCANCELED;
1795 		return 0;
1796 	}
1797 
1798 	if (cancelled) {
1799 		td->td_retval[0] = AIO_CANCELED;
1800 		return 0;
1801 	}
1802 
1803 	td->td_retval[0] = AIO_ALLDONE;
1804 
1805 	return 0;
1806 #endif /* VFS_AIO */
1807 }
1808 
1809 /*
1810  * aio_error is implemented at the kernel level for compatibility purposes only.
1811  * For a user mode async implementation, it would be best to do it in a userland
1812  * subroutine.
1813  */
1814 int
1815 aio_error(struct thread *td, struct aio_error_args *uap)
1816 {
1817 #ifndef VFS_AIO
1818 	return ENOSYS;
1819 #else
1820 	struct proc *p = td->td_proc;
1821 	int s;
1822 	struct aiocblist *cb;
1823 	struct kaioinfo *ki;
1824 	int jobref;
1825 
1826 	ki = p->p_aioinfo;
1827 	if (ki == NULL)
1828 		return EINVAL;
1829 
1830 	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
1831 	if ((jobref == -1) || (jobref == 0))
1832 		return EINVAL;
1833 
1834 	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb,
1835 	    plist)) {
1836 		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1837 		    jobref) {
1838 			td->td_retval[0] = cb->uaiocb._aiocb_private.error;
1839 			return 0;
1840 		}
1841 	}
1842 
1843 	s = splnet();
1844 
1845 	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
1846 	    plist)) {
1847 		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1848 		    jobref) {
1849 			td->td_retval[0] = EINPROGRESS;
1850 			splx(s);
1851 			return 0;
1852 		}
1853 	}
1854 
1855 	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
1856 	    plist)) {
1857 		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1858 		    jobref) {
1859 			td->td_retval[0] = EINPROGRESS;
1860 			splx(s);
1861 			return 0;
1862 		}
1863 	}
1864 	splx(s);
1865 
1866 	s = splbio();
1867 	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
1868 	    plist)) {
1869 		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1870 		    jobref) {
1871 			td->td_retval[0] = cb->uaiocb._aiocb_private.error;
1872 			splx(s);
1873 			return 0;
1874 		}
1875 	}
1876 
1877 	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
1878 	    plist)) {
1879 		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1880 		    jobref) {
1881 			td->td_retval[0] = EINPROGRESS;
1882 			splx(s);
1883 			return 0;
1884 		}
1885 	}
1886 	splx(s);
1887 
1888 #if (0)
1889 	/*
1890 	 * Hack for lio.
1891 	 */
1892 	status = fuword(&uap->aiocbp->_aiocb_private.status);
1893 	if (status == -1)
1894 		return fuword(&uap->aiocbp->_aiocb_private.error);
1895 #endif
1896 	return EINVAL;
1897 #endif /* VFS_AIO */
1898 }
1899 
1900 int
1901 aio_read(struct thread *td, struct aio_read_args *uap)
1902 {
1903 #ifndef VFS_AIO
1904 	return ENOSYS;
1905 #else
1906 	return aio_aqueue(td, uap->aiocbp, LIO_READ);
1907 #endif /* VFS_AIO */
1908 }
1909 
1910 int
1911 aio_write(struct thread *td, struct aio_write_args *uap)
1912 {
1913 #ifndef VFS_AIO
1914 	return ENOSYS;
1915 #else
1916 	return aio_aqueue(td, uap->aiocbp, LIO_WRITE);
1917 #endif /* VFS_AIO */
1918 }
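
/*
 * A minimal sketch of batch submission through lio_listio() below
 * (illustrative; both aiocbs are assumed initialized as for plain
 * aio_read()/aio_write()):
 *
 *	struct aiocb *ops[2] = { &rd_acb, &wr_acb };
 *
 *	rd_acb.aio_lio_opcode = LIO_READ;
 *	wr_acb.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, ops, 2, NULL) == -1)
 *		err(1, "lio_listio");
 *
 * With LIO_NOWAIT and a non-NULL sig argument, the signal described by
 * the sigevent is posted once every request in the batch has completed
 * (see the LIOJ_SIGNAL handling in aio_daemon() above).
 */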
1919 
1920 int
1921 lio_listio(struct thread *td, struct lio_listio_args *uap)
1922 {
1923 #ifndef VFS_AIO
1924 	return ENOSYS;
1925 #else
1926 	struct proc *p = td->td_proc;
1927 	int nent, nentqueued;
1928 	struct aiocb *iocb, * const *cbptr;
1929 	struct aiocblist *cb;
1930 	struct kaioinfo *ki;
1931 	struct aio_liojob *lj;
1932 	int error, runningcode;
1933 	int nerror;
1934 	int i;
1935 	int s;
1936 
1937 	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
1938 		return EINVAL;
1939 
1940 	nent = uap->nent;
1941 	if (nent > AIO_LISTIO_MAX)
1942 		return EINVAL;
1943 
1944 	if (p->p_aioinfo == NULL)
1945 		aio_init_aioinfo(p);
1946 
1947 	if ((nent + num_queue_count) > max_queue_count)
1948 		return EAGAIN;
1949 
1950 	ki = p->p_aioinfo;
1951 	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
1952 		return EAGAIN;
1953 
1954 	lj = zalloc(aiolio_zone);
1955 	if (!lj)
1956 		return EAGAIN;
1957 
1958 	lj->lioj_flags = 0;
1959 	lj->lioj_buffer_count = 0;
1960 	lj->lioj_buffer_finished_count = 0;
1961 	lj->lioj_queue_count = 0;
1962 	lj->lioj_queue_finished_count = 0;
1963 	lj->lioj_ki = ki;
1964 
1965 	/*
1966 	 * Setup signal.
1967 	 */
1968 	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
1969 		error = copyin(uap->sig, &lj->lioj_signal,
1970 			       sizeof(lj->lioj_signal));
1971 		if (error) {
1972 			zfree(aiolio_zone, lj);
1973 			return error;
1974 		}
1975 		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
1976 			zfree(aiolio_zone, lj);
1977 			return EINVAL;
1978 		}
1979 		lj->lioj_flags |= LIOJ_SIGNAL;
1980 		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
1981 	} else
1982 		lj->lioj_flags &= ~LIOJ_SIGNAL;
1983 
1984 	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
1985 	/*
1986 	 * Get pointers to the list of I/O requests.
1987 	 */
1988 	nerror = 0;
1989 	nentqueued = 0;
1990 	cbptr = uap->acb_list;
1991 	for (i = 0; i < uap->nent; i++) {
1992 		iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
1993 		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
1994 			error = _aio_aqueue(td, iocb, lj, 0);
1995 			if (error == 0)
1996 				nentqueued++;
1997 			else
1998 				nerror++;
1999 		}
2000 	}
2001 
2002 	/*
2003 	 * If none were queued, there is nothing to wait for; report EIO
	 * if any of the requests failed to queue rather than claiming
	 * success.
2004 	 */
2005 	if (nentqueued == 0)
2006 		return nerror ? EIO : 0;
2007 
2008 	/*
2009 	 * Calculate the appropriate error return.
2010 	 */
2011 	runningcode = 0;
2012 	if (nerror)
2013 		runningcode = EIO;
2014 
2015 	if (uap->mode == LIO_WAIT) {
2016 		int command, found, jobref;
2017 
2018 		for (;;) {
2019 			found = 0;
2020 			for (i = 0; i < uap->nent; i++) {
2021 				/*
2022 				 * Fetch address of the control buf pointer in
2023 				 * user space.
2024 				 */
2025 				iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
2026 				if (((intptr_t)iocb == -1) || ((intptr_t)iocb
2027 				    == 0))
2028 					continue;
2029 
2030 				/*
2031 				 * Fetch the associated command from user space.
2032 				 */
2033 				command = fuword(&iocb->aio_lio_opcode);
2034 				if (command == LIO_NOP) {
2035 					found++;
2036 					continue;
2037 				}
2038 
2039 				jobref = fuword(&iocb->_aiocb_private.kernelinfo);
2040 
2041 				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
2042 					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
2043 					    == jobref) {
2044 						if (cb->uaiocb.aio_lio_opcode
2045 						    == LIO_WRITE) {
2046 							curproc->p_stats->p_ru.ru_oublock
2047 							    +=
2048 							    cb->outputcharge;
2049 							cb->outputcharge = 0;
2050 						} else if (cb->uaiocb.aio_lio_opcode
2051 						    == LIO_READ) {
2052 							curproc->p_stats->p_ru.ru_inblock
2053 							    += cb->inputcharge;
2054 							cb->inputcharge = 0;
2055 						}
2056 						found++;
2057 						break;
2058 					}
2059 				}
2060 
2061 				s = splbio();
2062 				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
2063 					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
2064 					    == jobref) {
2065 						found++;
2066 						break;
2067 					}
2068 				}
2069 				splx(s);
2070 			}
2071 
2072 			/*
2073 			 * If all I/Os have been disposed of, then we can
2074 			 * return.
2075 			 */
2076 			if (found == nentqueued)
2077 				return runningcode;
2078 
2079 			ki->kaio_flags |= KAIO_WAKEUP;
2080 			error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);
2081 
2082 			if (error == EINTR || error == ERESTART)
2083 				return EINTR;
2084 			else if (error == EWOULDBLOCK)
2085 				return EAGAIN;
2086 		}
2087 	}
2088 
2089 	return runningcode;
2090 #endif /* VFS_AIO */
2091 }
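/*
 * Illustrative userland call for lio_listio() above (a minimal sketch,
 * not part of the kernel source; "batch_rw" is a hypothetical helper).
 * LIO_WAIT exercises the tsleep() retry loop above; LIO_NOWAIT with a
 * struct sigevent would instead arm the completion signal posted by
 * process_signal() below:
 *
 *	#include <aio.h>
 *	#include <stddef.h>
 *
 *	int
 *	batch_rw(struct aiocb *list[], int n)
 *	{
 *		(each list[i] carries its own LIO_READ/LIO_WRITE opcode in
 *		 aio_lio_opcode; this blocks until every request finishes)
 *		return (lio_listio(LIO_WAIT, list, n, NULL));
 *	}
 */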
2092 
2093 #ifdef VFS_AIO
2094 /*
2095  * This is a weird hack so that we can post a signal.  It is safe to do so from
2096  * a timeout routine, but *not* from an interrupt routine.
2097  */
2098 static void
2099 process_signal(void *aioj)
2100 {
2101 	struct aiocblist *aiocbe = aioj;
2102 	struct aio_liojob *lj = aiocbe->lio;
2103 	struct aiocb *cb = &aiocbe->uaiocb;
2104 
2105 	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
2106 		(lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
2107 		PROC_LOCK(lj->lioj_ki->kaio_p);
2108 		psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
2109 		PROC_UNLOCK(lj->lioj_ki->kaio_p);
2110 		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2111 	}
2112 
2113 	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
2114 		PROC_LOCK(aiocbe->userproc);
2115 		psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
2116 		PROC_UNLOCK(aiocbe->userproc);
2117 	}
2118 }
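/*
 * For reference, the signal posted above is the one requested from
 * userland via the sigevent in each aiocb (or in lio_listio()'s sig
 * argument); a minimal sketch, with SIGUSR1 as a placeholder signal:
 *
 *	struct aiocb iocb;
 *
 *	memset(&iocb, 0, sizeof(iocb));
 *	iocb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *	iocb.aio_sigevent.sigev_signo = SIGUSR1;
 *	(fill in fd/buf/nbytes/offset, queue with aio_read(&iocb);
 *	 SIGUSR1 is delivered when the request completes)
 */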
2119 
2120 /*
2121  * Interrupt handler for physio, performs the necessary process wakeups, and
2122  * signals.
2123  */
2124 static void
2125 aio_physwakeup(struct buf *bp)
2126 {
2127 	struct aiocblist *aiocbe;
2128 	struct proc *p;
2129 	struct kaioinfo *ki;
2130 	struct aio_liojob *lj;
2131 
2132 	wakeup(bp);
2133 
2134 	aiocbe = (struct aiocblist *)bp->b_spc;
2135 	if (aiocbe) {
2136 		p = bp->b_caller1;
2137 
2138 		aiocbe->jobstate = JOBST_JOBBFINISHED;
2139 		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
2140 		aiocbe->uaiocb._aiocb_private.error = 0;
2141 		aiocbe->jobflags |= AIOCBLIST_DONE;
2142 
2143 		if (bp->b_ioflags & BIO_ERROR)
2144 			aiocbe->uaiocb._aiocb_private.error = bp->b_error;
2145 
2146 		lj = aiocbe->lio;
2147 		if (lj) {
2148 			lj->lioj_buffer_finished_count++;
2149 
2150 			/*
2151 			 * Wake up/signal if all of the interrupt jobs are done.
2152 			 */
2153 			if (lj->lioj_buffer_finished_count ==
2154 			    lj->lioj_buffer_count) {
2155 				/*
2156 				 * Post a signal if it is called for.
2157 				 */
2158 				if ((lj->lioj_flags &
2159 				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
2160 				    LIOJ_SIGNAL) {
2161 					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2162 					aiocbe->timeouthandle =
2163 						timeout(process_signal,
2164 							aiocbe, 0);
2165 				}
2166 			}
2167 		}
2168 
2169 		ki = p->p_aioinfo;
2170 		if (ki) {
2171 			ki->kaio_buffer_finished_count++;
2172 			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
2173 			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
2174 			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
2175 
2176 			KNOTE(&aiocbe->klist, 0);
2177 			/* Do the wakeup. */
2178 			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
2179 				ki->kaio_flags &= ~KAIO_WAKEUP;
2180 				wakeup(p);
2181 			}
2182 		}
2183 
2184 		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
2185 			aiocbe->timeouthandle =
2186 				timeout(process_signal, aiocbe, 0);
2187 	}
2188 }
2189 #endif /* VFS_AIO */
2190 
2191 int
2192 aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2193 {
2194 #ifndef VFS_AIO
2195 	return ENOSYS;
2196 #else
2197 	struct proc *p = td->td_proc;
2198 	struct timeval atv;
2199 	struct timespec ts;
2201 	struct kaioinfo *ki;
2202 	struct aiocblist *cb = NULL;
2203 	int error, s, timo;
2204 
2205 	suword(uap->aiocbp, 0);		/* no completed request yet */
2206 
2207 	timo = 0;
2208 	if (uap->timeout) {
2209 		/* Get timespec struct. */
2210 		error = copyin(uap->timeout, &ts, sizeof(ts));
2211 		if (error)
2212 			return error;
2213 
2214 		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
2215 			return (EINVAL);
2216 
2217 		TIMESPEC_TO_TIMEVAL(&atv, &ts);
2218 		if (itimerfix(&atv))
2219 			return (EINVAL);
2220 		timo = tvtohz(&atv);
2221 	}
2222 
2223 	ki = p->p_aioinfo;
2224 	if (ki == NULL)
2225 		return EAGAIN;
2226 
2229 	for (;;) {
2230 		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != NULL) {
2231 			suword(uap->aiocbp, (long)cb->uuaiocb);
2232 			td->td_retval[0] = cb->uaiocb._aiocb_private.status;
2233 			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
2234 				curproc->p_stats->p_ru.ru_oublock +=
2235 				    cb->outputcharge;
2236 				cb->outputcharge = 0;
2237 			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
2238 				curproc->p_stats->p_ru.ru_inblock +=
2239 				    cb->inputcharge;
2240 				cb->inputcharge = 0;
2241 			}
2242 			error = cb->uaiocb._aiocb_private.error;
2243 			aio_free_entry(cb);	/* invalidates cb */
			return error;
2244 		}
2245 
2246 		s = splbio();
2247 		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != NULL) {
2248 			splx(s);
2249 			suword(uap->aiocbp, (long)cb->uuaiocb);
2250 			td->td_retval[0] = cb->uaiocb._aiocb_private.status;
2251 			error = cb->uaiocb._aiocb_private.error;
2252 			aio_free_entry(cb);	/* invalidates cb */
			return error;
2253 		}
2254 
2255 		ki->kaio_flags |= KAIO_WAKEUP;
2256 		error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
2257 		splx(s);
2258 
2259 		if (error == ERESTART || error == EINTR)
2260 			return EINTR;
2261 		else if (error == EWOULDBLOCK)
2262 			return EAGAIN;
		/* A zero return was just a wakeup; recheck the queues. */
2267 	}
2268 #endif /* VFS_AIO */
2269 }
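/*
 * Illustrative userland use of aio_waitcomplete() above (a minimal
 * sketch, not part of the kernel source; "reap_next" is a hypothetical
 * helper).  This FreeBSD extension returns the next finished request's
 * status and stores its aiocb pointer through the first argument, so
 * completions can be reaped without polling individual control blocks:
 *
 *	#include <aio.h>
 *	#include <stddef.h>
 *
 *	int
 *	reap_next(struct aiocb **iocbp)
 *	{
 *		(NULL timeout: sleep in "aiowc" until a request is done)
 *		return (aio_waitcomplete(iocbp, NULL));
 *	}
 */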
2270 
2272 #ifndef VFS_AIO
2273 static int
2274 filt_aioattach(struct knote *kn)
2275 {
2276 
2277 	return (ENXIO);
2278 }
2279 
2280 struct filterops aio_filtops =
2281 	{ 0, filt_aioattach, NULL, NULL };
2282 
2283 #else
2284 static int
2285 filt_aioattach(struct knote *kn)
2286 {
2287 	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;
2288 
2289 	/*
2290 	 * The aiocbe pointer must be validated before using it, so
2291 	 * registration is restricted to the kernel; the user cannot
2292 	 * set EV_FLAG1.
2293 	 */
2294 	if ((kn->kn_flags & EV_FLAG1) == 0)
2295 		return (EPERM);
2296 	kn->kn_flags &= ~EV_FLAG1;
2297 
2298 	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);
2299 
2300 	return (0);
2301 }
2302 
2303 static void
2304 filt_aiodetach(struct knote *kn)
2305 {
2306 	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;
2307 	int s = splhigh();	/* XXX needed spl level unclear; splhigh() is safe overkill */
2308 
2309 	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
2310 	splx(s);
2311 }
2312 
2313 /*ARGSUSED*/
2314 static int
2315 filt_aio(struct knote *kn, long hint)
2316 {
2317 	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;
2318 
2319 	kn->kn_data = 0;		/* XXX data returned? */
2320 	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
2321 	    aiocbe->jobstate != JOBST_JOBBFINISHED)
2322 		return (0);
2323 	kn->kn_flags |= EV_EOF;
2324 	return (1);
2325 }
2326 
2327 struct filterops aio_filtops =
2328 	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
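/*
 * Illustrative userland side of the EVFILT_AIO filter above (a minimal
 * sketch, not part of the kernel source).  As filt_aioattach() notes,
 * the knote is registered by the kernel, not by a direct kevent(2) add;
 * the sigevent spelling below (SIGEV_KEVENT, sigev_notify_kqueue)
 * follows later FreeBSD aio(4) and is an assumption for this revision:
 *
 *	int kq = kqueue();
 *	struct aiocb iocb;
 *	struct kevent kev;
 *
 *	(fill in fd/buf/nbytes/offset as usual, then:)
 *	iocb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	iocb.aio_sigevent.sigev_notify_kqueue = kq;
 *	aio_read(&iocb);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *	(kev.ident/kev.udata identify the completed request; filt_aio()
 *	 above reports it with EV_EOF once the job reaches a FINISHED
 *	 state)
 */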
2329 #endif /* VFS_AIO */
2330