/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/uio.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/aio_impl.h>
#include <sys/epm.h>
#include <sys/fs/snode.h>
#include <sys/siginfo.h>
#include <sys/cpuvar.h>
#include <sys/tnf_probe.h>
#include <sys/conf.h>
#include <sys/sdt.h>

int aphysio(int (*)(), int (*)(), dev_t, int, void (*)(), struct aio_req *);
void aio_done(struct buf *);
void aphysio_unlock(aio_req_t *);
void aio_cleanup(int);
void aio_cleanup_exit(void);

/*
 * private functions
 */
static void aio_sigev_send(proc_t *, sigqueue_t *);
static void aio_hash_delete(aio_t *, aio_req_t *);
static void aio_lio_free(aio_t *, aio_lio_t *);
static int aio_cleanup_cleanupq(aio_t *, aio_req_t *, int);
static int aio_cleanup_notifyq(aio_t *, aio_req_t *, int);
static void aio_cleanup_pollq(aio_t *, aio_req_t *, int);
static void aio_cleanup_portq(aio_t *, aio_req_t *, int);

/*
 * async version of physio() that doesn't wait synchronously
 * for the driver's strategy routine to complete.
 */

int
aphysio(
	int (*strategy)(struct buf *),
	int (*cancel)(struct buf *),
	dev_t dev,
	int rw,
	void (*mincnt)(struct buf *),
	struct aio_req *aio)
{
	struct uio *uio = aio->aio_uio;
	aio_req_t *reqp = (aio_req_t *)aio->aio_private;
	struct buf *bp = &reqp->aio_req_buf;
	struct iovec *iov;
	struct as *as;
	char *a;
	int	error;
	size_t	c;
	struct page **pplist;
	struct dev_ops *ops = devopsp[getmajor(dev)];

	if (uio->uio_loffset < 0)
		return (EINVAL);
#ifdef	_ILP32
	/*
	 * For 32-bit kernels, check against SPEC_MAXOFFSET_T which represents
	 * the maximum size that can be supported by the IO subsystem.
	 * XXX this code assumes a D_64BIT driver.
	 */
	if (uio->uio_loffset > SPEC_MAXOFFSET_T)
		return (EINVAL);
#endif	/* _ILP32 */

	TNF_PROBE_5(aphysio_start, "kaio", /* CSTYLED */,
	    tnf_opaque, bp, bp,
	    tnf_device, device, dev,
	    tnf_offset, blkno, btodt(uio->uio_loffset),
	    tnf_size, size, uio->uio_iov->iov_len,
	    tnf_bioflags, rw, rw);

	if (rw == B_READ) {
		CPU_STATS_ADD_K(sys, phread, 1);
	} else {
		CPU_STATS_ADD_K(sys, phwrite, 1);
	}

	iov = uio->uio_iov;
	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

	bp->b_error = 0;
	bp->b_flags = B_BUSY | B_PHYS | B_ASYNC | rw;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_lblkno = btodt(uio->uio_loffset);
	bp->b_offset = uio->uio_loffset;
	(void) ops->devo_getinfo(NULL, DDI_INFO_DEVT2DEVINFO,
	    (void *)bp->b_edev, (void **)&bp->b_dip);

	/*
	 * Clustering: clustering software can set the b_iodone, b_forw
	 * and b_proc fields to cluster-specific values.
	 */
	if (bp->b_iodone == NULL) {
		bp->b_iodone = (int (*)()) aio_done;
		/* b_forw points at an aio_req_t structure */
		bp->b_forw = (struct buf *)reqp;
		bp->b_proc = curproc;
	}

	a = bp->b_un.b_addr = iov->iov_base;
	c = bp->b_bcount = iov->iov_len;

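	/*
	 * Let the driver's minphys routine clamp the transfer size.
	 * physio() would loop over a clamped transfer, but an async
	 * request cannot be split into multiple I/Os, so anything the
	 * driver won't do in a single pass is refused.
	 */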
	(*mincnt)(bp);
	if (bp->b_bcount != iov->iov_len)
		return (ENOTSUP);

	as = bp->b_proc->p_as;

	error = as_pagelock(as, &pplist, a,
	    c, rw == B_READ? S_WRITE : S_READ);
	if (error != 0) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
		bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_SHADOW);
		return (error);
	}
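	/*
	 * as_pagelock() may hand back a shadow list of page pointers;
	 * setting B_SHADOW tells bp_mapin() and the layers below that
	 * b_shadow holds that list.
	 */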
	reqp->aio_req_flags |= AIO_PAGELOCKDONE;
	bp->b_shadow = pplist;
	if (pplist != NULL) {
		bp->b_flags |= B_SHADOW;
	}

	if (cancel != anocancel)
		cmn_err(CE_PANIC,
		    "aphysio: cancellation not supported, use anocancel");

	reqp->aio_req_cancel = cancel;

	DTRACE_IO1(start, struct buf *, bp);

	return ((*strategy)(bp));
}

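/*
 * anocancel() is the only cancel routine aphysio() accepts; in-flight
 * kernel async I/O cannot be cancelled, so it simply fails with ENXIO.
 */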
/*ARGSUSED*/
int
anocancel(struct buf *bp)
{
	return (ENXIO);
}

/*
 * Called from biodone().
 * Notify process that a pending AIO has finished.
 */

/*
 * Clustering: this function is made non-static because it is used
 * by clustering software as a contract-private interface.
 */

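/*
 * Completion delivery takes one of several paths, checked in order:
 * an event-port notification when aio_req_portkev is set, deferred
 * handling via the cleanup/poll/notify queues when AIO_CLEANUP or
 * AIO_POLL is in effect, or the done queue followed by an optional
 * sigevent, list-I/O wakeup, or SIGIO for old-style Solaris requests.
 */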
void
aio_done(struct buf *bp)
{
	proc_t *p;
	struct as *as;
	aio_req_t *reqp;
	aio_lio_t *head = NULL;
	aio_t *aiop;
	sigqueue_t *sigev = NULL;
	sigqueue_t *lio_sigev = NULL;
	port_kevent_t *pkevp = NULL;
	port_kevent_t *lio_pkevp = NULL;
	int fd;
	int cleanupqflag;
	int pollqflag;
	int portevpend;
	void (*func)();
	int use_port = 0;
	int reqp_flags = 0;
	int send_signal = 0;

	p = bp->b_proc;
	as = p->p_as;
	reqp = (aio_req_t *)bp->b_forw;
	fd = reqp->aio_req_fd;

	TNF_PROBE_5(aphysio_end, "kaio", /* CSTYLED */,
	    tnf_opaque, bp, bp,
	    tnf_device, device, bp->b_edev,
	    tnf_offset, blkno, btodt(reqp->aio_req_uio.uio_loffset),
	    tnf_size, size, reqp->aio_req_uio.uio_iov->iov_len,
	    tnf_bioflags, rw, (bp->b_flags & (B_READ|B_WRITE)));

	/*
	 * mapout earlier so that more kmem is available when aio is
	 * heavily used. bug #1262082
	 */
	if (bp->b_flags & B_REMAPPED)
		bp_mapout(bp);

	/* decrement fd's ref count by one, now that aio request is done. */
	areleasef(fd, P_FINFO(p));

	aiop = p->p_aio;
	ASSERT(aiop != NULL);

	mutex_enter(&aiop->aio_portq_mutex);
	mutex_enter(&aiop->aio_mutex);
	ASSERT(aiop->aio_pending > 0);
	ASSERT(reqp->aio_req_flags & AIO_PENDING);
	aiop->aio_pending--;
	reqp->aio_req_flags &= ~AIO_PENDING;
	reqp_flags = reqp->aio_req_flags;
	if ((pkevp = reqp->aio_req_portkev) != NULL) {
		/* Event port notification is desired for this transaction */
		if (reqp->aio_req_flags & AIO_CLOSE_PORT) {
			/*
			 * The port is being closed and it is waiting for
			 * pending asynchronous I/O transactions to complete.
			 */
			portevpend = --aiop->aio_portpendcnt;
			aio_deq(&aiop->aio_portpending, reqp);
			aio_enq(&aiop->aio_portq, reqp, 0);
			mutex_exit(&aiop->aio_mutex);
			mutex_exit(&aiop->aio_portq_mutex);
			port_send_event(pkevp);
			if (portevpend == 0)
				cv_broadcast(&aiop->aio_portcv);
			return;
		}

		if (aiop->aio_flags & AIO_CLEANUP) {
			/*
			 * aio_cleanup_thread() is waiting for completion of
			 * transactions.
			 */
			mutex_enter(&as->a_contents);
			aio_deq(&aiop->aio_portpending, reqp);
			aio_enq(&aiop->aio_portcleanupq, reqp, 0);
			cv_signal(&aiop->aio_cleanupcv);
			mutex_exit(&as->a_contents);
			mutex_exit(&aiop->aio_mutex);
			mutex_exit(&aiop->aio_portq_mutex);
			return;
		}

		aio_deq(&aiop->aio_portpending, reqp);
		aio_enq(&aiop->aio_portq, reqp, 0);

		use_port = 1;
	} else {
		/*
		 * when the AIO_CLEANUP flag is enabled for this
		 * process, or when the AIO_POLL bit is set for
		 * this request, special handling is required.
		 * otherwise the request is put onto the doneq.
		 */
		cleanupqflag = (aiop->aio_flags & AIO_CLEANUP);
		pollqflag = (reqp->aio_req_flags & AIO_POLL);
		if (cleanupqflag | pollqflag) {

			if (cleanupqflag)
				mutex_enter(&as->a_contents);

			/*
			 * requests with their AIO_POLL bit set are put
			 * on the pollq, requests with sigevent structures
			 * or with listio heads are put on the notifyq, and
			 * the remaining requests don't require any special
			 * cleanup handling, so they're put onto the default
			 * cleanupq.
			 */
			if (pollqflag)
				aio_enq(&aiop->aio_pollq, reqp, AIO_POLLQ);
			else if (reqp->aio_req_sigqp || reqp->aio_req_lio)
				aio_enq(&aiop->aio_notifyq, reqp, AIO_NOTIFYQ);
			else
				aio_enq(&aiop->aio_cleanupq, reqp,
				    AIO_CLEANUPQ);

			if (cleanupqflag) {
				cv_signal(&aiop->aio_cleanupcv);
				mutex_exit(&as->a_contents);
				mutex_exit(&aiop->aio_mutex);
				mutex_exit(&aiop->aio_portq_mutex);
			} else {
				ASSERT(pollqflag);
				/* block aio_cleanup_exit until we're done */
				aiop->aio_flags |= AIO_DONE_ACTIVE;
				mutex_exit(&aiop->aio_mutex);
				mutex_exit(&aiop->aio_portq_mutex);
				/*
				 * let the cleanup processing happen from an
				 * AST by setting an AST on all threads in
				 * this process.
				 */
				mutex_enter(&p->p_lock);
				set_proc_ast(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&aiop->aio_mutex);
				/* wakeup anybody waiting in aiowait() */
				cv_broadcast(&aiop->aio_waitcv);

				/* wakeup aio_cleanup_exit if needed */
				if (aiop->aio_flags & AIO_CLEANUP)
					cv_signal(&aiop->aio_cleanupcv);
				aiop->aio_flags &= ~AIO_DONE_ACTIVE;
				mutex_exit(&aiop->aio_mutex);
			}
			return;
		}

		/*
		 * save req's sigevent pointer, and check its
		 * value after releasing aio_mutex lock.
		 */
		sigev = reqp->aio_req_sigqp;
		reqp->aio_req_sigqp = NULL;

		/* put request on done queue. */
		aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
	} /* portkevent */

	/*
	 * when list IO notification is enabled, a notification or
	 * signal is sent only when all entries in the list are done.
	 */
	if ((head = reqp->aio_req_lio) != NULL) {
		ASSERT(head->lio_refcnt > 0);
		if (--head->lio_refcnt == 0) {
			/*
			 * save lio's sigevent pointer, and check
			 * its value after releasing aio_mutex lock.
			 */
			lio_sigev = head->lio_sigqp;
			head->lio_sigqp = NULL;
			cv_signal(&head->lio_notify);
			if (head->lio_port >= 0 &&
			    (lio_pkevp = head->lio_portkev) != NULL)
				head->lio_port = -1;
		}
	}

	/*
	 * If AIO_WAITN is set, wake the waiters only when the required
	 * number of I/Os has finished or when all I/Os are done.
	 */
	if (aiop->aio_flags & AIO_WAITN) {
		if (aiop->aio_waitncnt > 0)
			aiop->aio_waitncnt--;
		if (aiop->aio_pending == 0 ||
		    aiop->aio_waitncnt == 0)
			cv_broadcast(&aiop->aio_waitcv);
	} else {
		cv_broadcast(&aiop->aio_waitcv);
	}

	/*
	 * No need to set this flag for pollq, portq, lio requests.
	 * If this is an old Solaris aio request, and the process has
	 * a SIGIO signal handler enabled, then send a SIGIO signal.
	 */
	if (!sigev && !use_port && head == NULL &&
	    (reqp->aio_req_flags & AIO_SOLARIS) &&
	    (func = PTOU(p)->u_signal[SIGIO - 1]) != SIG_DFL &&
	    (func != SIG_IGN)) {
		send_signal = 1;
		reqp->aio_req_flags |= AIO_SIGNALLED;
	}

	mutex_exit(&aiop->aio_mutex);
	mutex_exit(&aiop->aio_portq_mutex);

	/*
	 * Could the cleanup thread be waiting for AIO with locked
	 * resources to finish?
	 * Ideally the cleanup thread should block on cleanupcv in that
	 * case, but there is a window in which it could fail to see a
	 * new aio request that sneaked in.
	 */
	mutex_enter(&as->a_contents);
	if ((reqp_flags & AIO_PAGELOCKDONE) && AS_ISUNMAPWAIT(as))
		cv_broadcast(&as->a_cv);
	mutex_exit(&as->a_contents);

	if (sigev)
		aio_sigev_send(p, sigev);
	else if (send_signal)
		psignal(p, SIGIO);

	if (pkevp)
		port_send_event(pkevp);
	if (lio_sigev)
		aio_sigev_send(p, lio_sigev);
	if (lio_pkevp)
		port_send_event(lio_pkevp);
}

/*
 * Send a queued signal to the specified process.  The sigevent must
 * be non-NULL; callers with no sigevent to deliver skip the call.
 */
static void
aio_sigev_send(proc_t *p, sigqueue_t *sigev)
{
	ASSERT(sigev != NULL);

	mutex_enter(&p->p_lock);
	sigaddqa(p, NULL, sigev);
	mutex_exit(&p->p_lock);
}

/*
 * special case handling for zero length requests. the aio request
 * short circuits the normal completion path since all that's required
 * to complete this request is to copyout a zero to the aio request's
 * return value.
 */
void
aio_zerolen(aio_req_t *reqp)
{

	struct buf *bp = &reqp->aio_req_buf;

	reqp->aio_req_flags |= AIO_ZEROLEN;

	bp->b_forw = (struct buf *)reqp;
	bp->b_proc = curproc;

	bp->b_resid = 0;
	bp->b_flags = 0;

	aio_done(bp);
}

/*
 * unlock pages previously locked by as_pagelock
 */
void
aphysio_unlock(aio_req_t *reqp)
{
	struct buf *bp;
	struct iovec *iov;
	int flags;

	if (reqp->aio_req_flags & AIO_PHYSIODONE)
		return;

	reqp->aio_req_flags |= AIO_PHYSIODONE;

	if (reqp->aio_req_flags & AIO_ZEROLEN)
		return;

	bp = &reqp->aio_req_buf;
	iov = reqp->aio_req_uio.uio_iov;
	flags = (((bp->b_flags & B_READ) == B_READ) ? S_WRITE : S_READ);
	if (reqp->aio_req_flags & AIO_PAGELOCKDONE) {
		as_pageunlock(bp->b_proc->p_as,
		    bp->b_flags & B_SHADOW ? bp->b_shadow : NULL,
		    iov->iov_base, iov->iov_len, flags);
		reqp->aio_req_flags &= ~AIO_PAGELOCKDONE;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_SHADOW);
	bp->b_flags |= B_DONE;
}

/*
 * deletes a request's id from the hash table of outstanding I/O.
 */
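/*
 * The walk keeps nextp pointing at the previous element's
 * aio_hash_next field (initially the hash bucket itself), so the
 * matching entry can be unlinked without a separate "prev" pointer.
 * An entry that is not found is silently ignored.
 */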
static void
aio_hash_delete(aio_t *aiop, struct aio_req_t *reqp)
{
	long index;
	aio_result_t *resultp = reqp->aio_req_resultp;
	aio_req_t *current;
	aio_req_t **nextp;

	index = AIO_HASH(resultp);
	nextp = (aiop->aio_hash + index);
	while ((current = *nextp) != NULL) {
		if (current->aio_req_resultp == resultp) {
			*nextp = current->aio_hash_next;
			return;
		}
		nextp = &current->aio_hash_next;
	}
}

/*
 * Put a list head struct onto its free list.
 */
static void
aio_lio_free(aio_t *aiop, aio_lio_t *head)
{
	ASSERT(MUTEX_HELD(&aiop->aio_mutex));

	if (head->lio_sigqp != NULL)
		kmem_free(head->lio_sigqp, sizeof (sigqueue_t));
	head->lio_next = aiop->aio_lio_free;
	aiop->aio_lio_free = head;
}

/*
 * Put a reqp onto the freelist.
 */
void
aio_req_free(aio_t *aiop, aio_req_t *reqp)
{
	aio_lio_t *liop;

	ASSERT(MUTEX_HELD(&aiop->aio_mutex));

	if (reqp->aio_req_portkev) {
		port_free_event(reqp->aio_req_portkev);
		reqp->aio_req_portkev = NULL;
	}

	if ((liop = reqp->aio_req_lio) != NULL) {
		if (--liop->lio_nent == 0)
			aio_lio_free(aiop, liop);
		reqp->aio_req_lio = NULL;
	}
	if (reqp->aio_req_sigqp != NULL) {
		kmem_free(reqp->aio_req_sigqp, sizeof (sigqueue_t));
		reqp->aio_req_sigqp = NULL;
	}
	reqp->aio_req_next = aiop->aio_free;
	reqp->aio_req_prev = NULL;
	aiop->aio_free = reqp;
	aiop->aio_outstanding--;
	if (aiop->aio_outstanding == 0)
		cv_broadcast(&aiop->aio_waitcv);
	aio_hash_delete(aiop, reqp);
}

/*
 * Put a reqp onto the freelist.
 */
void
aio_req_free_port(aio_t *aiop, aio_req_t *reqp)
{
	ASSERT(MUTEX_HELD(&aiop->aio_mutex));

	reqp->aio_req_next = aiop->aio_free;
	reqp->aio_req_prev = NULL;
	aiop->aio_free = reqp;
	aiop->aio_outstanding--;
	aio_hash_delete(aiop, reqp);
}


/*
 * Verify the integrity of a queue.
 */
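/*
 * When entry_present is non-NULL it must appear on the queue exactly
 * once; when entry_missing is non-NULL it must not appear at all.
 * The forward and back pointers of every element are also checked
 * for consistency.
 */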
#if defined(DEBUG)
static void
aio_verify_queue(aio_req_t *head,
	aio_req_t *entry_present, aio_req_t *entry_missing)
{
	aio_req_t *reqp;
	int found = 0;
	int present = 0;

	if ((reqp = head) != NULL) {
		do {
			ASSERT(reqp->aio_req_prev->aio_req_next == reqp);
			ASSERT(reqp->aio_req_next->aio_req_prev == reqp);
			if (entry_present == reqp)
				found++;
			if (entry_missing == reqp)
				present++;
		} while ((reqp = reqp->aio_req_next) != head);
	}
	ASSERT(entry_present == NULL || found == 1);
	ASSERT(entry_missing == NULL || present == 0);
}
#else
#define	aio_verify_queue(x, y, z)
#endif

/*
 * Put a request onto the tail of a queue.
 */
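/*
 * The queues are circular, doubly-linked lists threaded through the
 * aio_req_next/aio_req_prev fields.  On a non-empty queue the head's
 * aio_req_prev is the tail, so tail insertion requires no list walk.
 */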
void
aio_enq(aio_req_t **qhead, aio_req_t *reqp, int qflg_new)
{
	aio_req_t *head;
	aio_req_t *prev;

	aio_verify_queue(*qhead, NULL, reqp);

	if ((head = *qhead) == NULL) {
		reqp->aio_req_next = reqp;
		reqp->aio_req_prev = reqp;
		*qhead = reqp;
	} else {
		reqp->aio_req_next = head;
		reqp->aio_req_prev = prev = head->aio_req_prev;
		prev->aio_req_next = reqp;
		head->aio_req_prev = reqp;
	}
	reqp->aio_req_flags |= qflg_new;
}

/*
 * Remove a request from its queue.
 */
void
aio_deq(aio_req_t **qhead, aio_req_t *reqp)
{
	aio_verify_queue(*qhead, reqp, NULL);

	if (reqp->aio_req_next == reqp) {
		*qhead = NULL;
	} else {
		reqp->aio_req_prev->aio_req_next = reqp->aio_req_next;
		reqp->aio_req_next->aio_req_prev = reqp->aio_req_prev;
		if (*qhead == reqp)
			*qhead = reqp->aio_req_next;
	}
	reqp->aio_req_next = NULL;
	reqp->aio_req_prev = NULL;
}

/*
 * concatenate a specified queue with the cleanupq. the specified
 * queue is put onto the tail of the cleanupq. each element on the
 * specified queue has its old queue flag cleared and replaced with
 * AIO_CLEANUPQ.
 */
/*ARGSUSED*/
void
aio_cleanupq_concat(aio_t *aiop, aio_req_t *q2, int qflg)
{
	aio_req_t *cleanupqhead, *q2tail;
	aio_req_t *reqp = q2;

	do {
		ASSERT(reqp->aio_req_flags & qflg);
		reqp->aio_req_flags &= ~qflg;
		reqp->aio_req_flags |= AIO_CLEANUPQ;
	} while ((reqp = reqp->aio_req_next) != q2);

	cleanupqhead = aiop->aio_cleanupq;
	if (cleanupqhead == NULL)
		aiop->aio_cleanupq = q2;
	else {
		cleanupqhead->aio_req_prev->aio_req_next = q2;
		q2tail = q2->aio_req_prev;
		q2tail->aio_req_next = cleanupqhead;
		q2->aio_req_prev = cleanupqhead->aio_req_prev;
		cleanupqhead->aio_req_prev = q2tail;
	}
}

/*
 * cleanup aio requests that are on the per-process cleanup, notify,
 * and poll queues, and optionally on the event-port queues.
 */
void
aio_cleanup(int flag)
{
	aio_t *aiop = curproc->p_aio;
	aio_req_t *pollqhead, *cleanupqhead, *notifyqhead;
	aio_req_t *cleanupport;
	aio_req_t *portq = NULL;
	void (*func)();
	int signalled = 0;
	int qflag = 0;
	int exitflg;

	ASSERT(aiop != NULL);

	if (flag == AIO_CLEANUP_EXIT)
		exitflg = AIO_CLEANUP_EXIT;
	else
		exitflg = 0;

	/*
	 * We need to get the aio_cleanupq_mutex because we are calling
	 * aio_cleanup_cleanupq()
	 */
	mutex_enter(&aiop->aio_cleanupq_mutex);
	/*
	 * take all the requests off the cleanupq, the notifyq,
	 * and the pollq.
	 */
	mutex_enter(&aiop->aio_mutex);
	if ((cleanupqhead = aiop->aio_cleanupq) != NULL) {
		aiop->aio_cleanupq = NULL;
		qflag++;
	}
	if ((notifyqhead = aiop->aio_notifyq) != NULL) {
		aiop->aio_notifyq = NULL;
		qflag++;
	}
	if ((pollqhead = aiop->aio_pollq) != NULL) {
		aiop->aio_pollq = NULL;
		qflag++;
	}
	if (flag) {
		if ((portq = aiop->aio_portq) != NULL)
			qflag++;

		if ((cleanupport = aiop->aio_portcleanupq) != NULL) {
			aiop->aio_portcleanupq = NULL;
			qflag++;
		}
	}
	mutex_exit(&aiop->aio_mutex);

	/*
	 * return immediately if cleanupq, pollq, and
	 * notifyq are all empty. someone else must have
	 * emptied them.
	 */
	if (!qflag) {
		mutex_exit(&aiop->aio_cleanupq_mutex);
		return;
	}

	/*
	 * do cleanup for the various queues.
	 */
	if (cleanupqhead)
		signalled = aio_cleanup_cleanupq(aiop, cleanupqhead, exitflg);
	mutex_exit(&aiop->aio_cleanupq_mutex);
	if (notifyqhead)
		signalled = aio_cleanup_notifyq(aiop, notifyqhead, exitflg);
	if (pollqhead)
		aio_cleanup_pollq(aiop, pollqhead, exitflg);
	if (flag && (cleanupport || portq))
		aio_cleanup_portq(aiop, cleanupport, exitflg);

	if (exitflg)
		return;

	/*
	 * If we have an active aio_cleanup_thread it's possible for
	 * this routine to push something on to the done queue after
	 * an aiowait/aiosuspend thread has already decided to block.
	 * This being the case, we need a cv_broadcast here to wake
	 * these threads up. It is simpler and cleaner to do this
	 * broadcast here than in the individual cleanup routines.
	 */

	mutex_enter(&aiop->aio_mutex);
	/*
	 * If there has never been an old Solaris aio request
	 * issued by this process, then do not send a SIGIO signal
	 * (setting signalled here suppresses the psignal() below).
	 */
	if (!(aiop->aio_flags & AIO_SOLARIS_REQ))
		signalled = 1;
	cv_broadcast(&aiop->aio_waitcv);
	mutex_exit(&aiop->aio_mutex);

	/*
	 * Only if the process wasn't already signalled,
	 * determine if a SIGIO signal should be delivered.
	 */
	if (!signalled &&
	    (func = PTOU(curproc)->u_signal[SIGIO - 1]) != SIG_DFL &&
	    func != SIG_IGN)
		psignal(curproc, SIGIO);
}


/*
 * Do cleanup for every element of the port cleanup queue.
 */
static void
aio_cleanup_portq(aio_t *aiop, aio_req_t *cleanupq, int exitflag)
{
	aio_req_t	*reqp;
	aio_req_t	*next;
	aio_req_t	*headp;
	aio_lio_t	*liop;

	/* first check the portq */
	if (exitflag || ((aiop->aio_flags & AIO_CLEANUP_PORT) == 0)) {
		mutex_enter(&aiop->aio_mutex);
		if (aiop->aio_flags & AIO_CLEANUP)
			aiop->aio_flags |= AIO_CLEANUP_PORT;
		mutex_exit(&aiop->aio_mutex);

		/*
		 * It is not allowed to hold locks during aphysio_unlock().
		 * The aio_done() interrupt function will try to acquire
		 * aio_mutex and aio_portq_mutex.  Therefore we disconnect
		 * the portq list from the aiop for the duration of the
		 * aphysio_unlock() loop below.
		 */
		mutex_enter(&aiop->aio_portq_mutex);
		headp = aiop->aio_portq;
		aiop->aio_portq = NULL;
		mutex_exit(&aiop->aio_portq_mutex);
		if ((reqp = headp) != NULL) {
			do {
				next = reqp->aio_req_next;
				aphysio_unlock(reqp);
				if (exitflag) {
					mutex_enter(&aiop->aio_mutex);
					aio_req_free(aiop, reqp);
					mutex_exit(&aiop->aio_mutex);
				}
			} while ((reqp = next) != headp);
		}

		if (headp != NULL && exitflag == 0) {
			/* move unlocked requests back to the port queue */
			aio_req_t *newq;

			mutex_enter(&aiop->aio_portq_mutex);
			if ((newq = aiop->aio_portq) != NULL) {
				aio_req_t *headprev = headp->aio_req_prev;
				aio_req_t *newqprev = newq->aio_req_prev;

				headp->aio_req_prev = newqprev;
				newq->aio_req_prev = headprev;
				headprev->aio_req_next = newq;
				newqprev->aio_req_next = headp;
			}
			aiop->aio_portq = headp;
			cv_broadcast(&aiop->aio_portcv);
			mutex_exit(&aiop->aio_portq_mutex);
		}
	}

	/* now check the port cleanup queue */
	if ((reqp = cleanupq) == NULL)
		return;
	do {
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		if (exitflag) {
			mutex_enter(&aiop->aio_mutex);
			aio_req_free(aiop, reqp);
			mutex_exit(&aiop->aio_mutex);
		} else {
			mutex_enter(&aiop->aio_portq_mutex);
			aio_enq(&aiop->aio_portq, reqp, 0);
			mutex_exit(&aiop->aio_portq_mutex);
			port_send_event(reqp->aio_req_portkev);
			if ((liop = reqp->aio_req_lio) != NULL) {
				int send_event = 0;

				mutex_enter(&aiop->aio_mutex);
				ASSERT(liop->lio_refcnt > 0);
				if (--liop->lio_refcnt == 0) {
					if (liop->lio_port >= 0 &&
					    liop->lio_portkev) {
						liop->lio_port = -1;
						send_event = 1;
					}
				}
				mutex_exit(&aiop->aio_mutex);
				if (send_event)
					port_send_event(liop->lio_portkev);
			}
		}
	} while ((reqp = next) != cleanupq);
}

/*
 * Do cleanup for every element of the cleanupq.
 */
static int
aio_cleanup_cleanupq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
	aio_req_t *reqp, *next;
	int signalled = 0;

	ASSERT(MUTEX_HELD(&aiop->aio_cleanupq_mutex));

	/*
	 * Since aio_req_done() or aio_req_find() use the HASH list to find
	 * the required requests, they could potentially take away elements
	 * if they are already done (AIO_DONEQ is set).
	 * The aio_cleanupq_mutex protects the queue for the duration of the
	 * loop from aio_req_done() and aio_req_find().
	 */
	if ((reqp = qhead) == NULL)
		return (0);
	do {
		ASSERT(reqp->aio_req_flags & AIO_CLEANUPQ);
		ASSERT(reqp->aio_req_portkev == NULL);
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		mutex_enter(&aiop->aio_mutex);
		if (exitflg)
			aio_req_free(aiop, reqp);
		else
			aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
		if (!exitflg) {
			if (reqp->aio_req_flags & AIO_SIGNALLED)
				signalled++;
			else
				reqp->aio_req_flags |= AIO_SIGNALLED;
		}
		mutex_exit(&aiop->aio_mutex);
	} while ((reqp = next) != qhead);
	return (signalled);
}

/*
 * do cleanup for every element of the notify queue.
 */
static int
aio_cleanup_notifyq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
	aio_req_t *reqp, *next;
	aio_lio_t *liohead;
	sigqueue_t *sigev, *lio_sigev = NULL;
	int signalled = 0;

	if ((reqp = qhead) == NULL)
		return (0);
	do {
		ASSERT(reqp->aio_req_flags & AIO_NOTIFYQ);
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		if (exitflg) {
			mutex_enter(&aiop->aio_mutex);
			aio_req_free(aiop, reqp);
			mutex_exit(&aiop->aio_mutex);
		} else {
			mutex_enter(&aiop->aio_mutex);
			aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
			sigev = reqp->aio_req_sigqp;
			reqp->aio_req_sigqp = NULL;
			if ((liohead = reqp->aio_req_lio) != NULL) {
				ASSERT(liohead->lio_refcnt > 0);
				if (--liohead->lio_refcnt == 0) {
					cv_signal(&liohead->lio_notify);
					lio_sigev = liohead->lio_sigqp;
					liohead->lio_sigqp = NULL;
				}
			}
			mutex_exit(&aiop->aio_mutex);
			if (sigev) {
				signalled++;
				aio_sigev_send(reqp->aio_req_buf.b_proc,
				    sigev);
			}
			if (lio_sigev) {
				signalled++;
				aio_sigev_send(reqp->aio_req_buf.b_proc,
				    lio_sigev);
			}
		}
	} while ((reqp = next) != qhead);

	return (signalled);
}

/*
 * Do cleanup for every element of the poll queue.
 */
static void
aio_cleanup_pollq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
	aio_req_t *reqp, *next;

	/*
	 * As no other threads should be accessing the queue at this point,
	 * it isn't necessary to hold aio_mutex while we traverse its elements.
	 */
	if ((reqp = qhead) == NULL)
		return;
	do {
		ASSERT(reqp->aio_req_flags & AIO_POLLQ);
		next = reqp->aio_req_next;
		aphysio_unlock(reqp);
		if (exitflg) {
			mutex_enter(&aiop->aio_mutex);
			aio_req_free(aiop, reqp);
			mutex_exit(&aiop->aio_mutex);
		} else {
			aio_copyout_result(reqp);
			mutex_enter(&aiop->aio_mutex);
			aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
			mutex_exit(&aiop->aio_mutex);
		}
	} while ((reqp = next) != qhead);
}

10187c478bd9Sstevel@tonic-gate /*
10197c478bd9Sstevel@tonic-gate  * called by exit(). waits for all outstanding kaio to finish
10207c478bd9Sstevel@tonic-gate  * before the kaio resources are freed.
10217c478bd9Sstevel@tonic-gate  */
10227c478bd9Sstevel@tonic-gate void
10237c478bd9Sstevel@tonic-gate aio_cleanup_exit(void)
10247c478bd9Sstevel@tonic-gate {
10257c478bd9Sstevel@tonic-gate 	proc_t *p = curproc;
10267c478bd9Sstevel@tonic-gate 	aio_t *aiop = p->p_aio;
10277c478bd9Sstevel@tonic-gate 	aio_req_t *reqp, *next, *head;
10287c478bd9Sstevel@tonic-gate 	aio_lio_t *nxtlio, *liop;
10297c478bd9Sstevel@tonic-gate 
10307c478bd9Sstevel@tonic-gate 	/*
10317c478bd9Sstevel@tonic-gate 	 * wait for all outstanding kaio to complete. process
10327c478bd9Sstevel@tonic-gate 	 * is now single-threaded; no other kaio requests can
10337c478bd9Sstevel@tonic-gate 	 * happen once aio_pending is zero.
10347c478bd9Sstevel@tonic-gate 	 */
10357c478bd9Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
10367c478bd9Sstevel@tonic-gate 	aiop->aio_flags |= AIO_CLEANUP;
10377c478bd9Sstevel@tonic-gate 	while ((aiop->aio_pending != 0) || (aiop->aio_flags & AIO_DONE_ACTIVE))
10387c478bd9Sstevel@tonic-gate 		cv_wait(&aiop->aio_cleanupcv, &aiop->aio_mutex);
10397c478bd9Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
10407c478bd9Sstevel@tonic-gate 
10417c478bd9Sstevel@tonic-gate 	/* cleanup the cleanup-thread queues. */
10427c478bd9Sstevel@tonic-gate 	aio_cleanup(AIO_CLEANUP_EXIT);
10437c478bd9Sstevel@tonic-gate 
10447c478bd9Sstevel@tonic-gate 	/*
10457c478bd9Sstevel@tonic-gate 	 * Although this process is now single-threaded, we
10467c478bd9Sstevel@tonic-gate 	 * still need to protect ourselves against a race with
10477c478bd9Sstevel@tonic-gate 	 * aio_cleanup_dr_delete_memory().
10487c478bd9Sstevel@tonic-gate 	 */
10497c478bd9Sstevel@tonic-gate 	mutex_enter(&p->p_lock);
10507c478bd9Sstevel@tonic-gate 
10517c478bd9Sstevel@tonic-gate 	/*
10527c478bd9Sstevel@tonic-gate 	 * free up the done queue's resources.
10537c478bd9Sstevel@tonic-gate 	 */
10547c478bd9Sstevel@tonic-gate 	if ((head = aiop->aio_doneq) != NULL) {
105534709573Sraf 		aiop->aio_doneq = NULL;
105634709573Sraf 		reqp = head;
105734709573Sraf 		do {
10587c478bd9Sstevel@tonic-gate 			next = reqp->aio_req_next;
10597c478bd9Sstevel@tonic-gate 			aphysio_unlock(reqp);
10607c478bd9Sstevel@tonic-gate 			kmem_free(reqp, sizeof (struct aio_req_t));
106134709573Sraf 		} while ((reqp = next) != head);
10627c478bd9Sstevel@tonic-gate 	}
10637c478bd9Sstevel@tonic-gate 	/*
10647c478bd9Sstevel@tonic-gate 	 * release aio request freelist.
10657c478bd9Sstevel@tonic-gate 	 */
10667c478bd9Sstevel@tonic-gate 	for (reqp = aiop->aio_free; reqp != NULL; reqp = next) {
10677c478bd9Sstevel@tonic-gate 		next = reqp->aio_req_next;
10687c478bd9Sstevel@tonic-gate 		kmem_free(reqp, sizeof (struct aio_req_t));
10697c478bd9Sstevel@tonic-gate 	}
10707c478bd9Sstevel@tonic-gate 
10717c478bd9Sstevel@tonic-gate 	/*
10727c478bd9Sstevel@tonic-gate 	 * release io list head freelist.
10737c478bd9Sstevel@tonic-gate 	 */
10747c478bd9Sstevel@tonic-gate 	for (liop = aiop->aio_lio_free; liop != NULL; liop = nxtlio) {
10757c478bd9Sstevel@tonic-gate 		nxtlio = liop->lio_next;
10767c478bd9Sstevel@tonic-gate 		kmem_free(liop, sizeof (aio_lio_t));
10777c478bd9Sstevel@tonic-gate 	}
10787c478bd9Sstevel@tonic-gate 
10797c478bd9Sstevel@tonic-gate 	if (aiop->aio_iocb)
10807c478bd9Sstevel@tonic-gate 		kmem_free(aiop->aio_iocb, aiop->aio_iocbsz);
10817c478bd9Sstevel@tonic-gate 
10827c478bd9Sstevel@tonic-gate 	mutex_destroy(&aiop->aio_mutex);
10837c478bd9Sstevel@tonic-gate 	mutex_destroy(&aiop->aio_portq_mutex);
10847c478bd9Sstevel@tonic-gate 	mutex_destroy(&aiop->aio_cleanupq_mutex);
10857c478bd9Sstevel@tonic-gate 	p->p_aio = NULL;
10867c478bd9Sstevel@tonic-gate 	mutex_exit(&p->p_lock);
10877c478bd9Sstevel@tonic-gate 	kmem_free(aiop, sizeof (struct aio));
10887c478bd9Sstevel@tonic-gate }
10897c478bd9Sstevel@tonic-gate 
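/*
 * Illustrative user-level analogue (not part of the original source) of
 * the teardown pattern above: block until the pending count drains to
 * zero, then walk a circular list exactly once with the do/while idiom,
 * freeing each node.  All names (drainq_t, drainq_destroy) are
 * hypothetical, and the list is singly linked here for brevity where the
 * kernel's done queue is doubly linked.
 */
#include <stdlib.h>
#include <pthread.h>

typedef struct dq_node {
	struct dq_node *next;
} dq_node_t;

typedef struct drainq {
	pthread_mutex_t	dq_lock;
	pthread_cond_t	dq_cv;		/* cf. aio_cleanupcv */
	int		dq_pending;	/* cf. aio_pending */
	dq_node_t	*dq_doneq;	/* circular, cf. aio_doneq */
} drainq_t;

void
drainq_destroy(drainq_t *dqp)
{
	dq_node_t *head, *np, *next;

	pthread_mutex_lock(&dqp->dq_lock);
	while (dqp->dq_pending != 0)	/* wait for in-flight requests */
		pthread_cond_wait(&dqp->dq_cv, &dqp->dq_lock);
	head = dqp->dq_doneq;
	dqp->dq_doneq = NULL;
	pthread_mutex_unlock(&dqp->dq_lock);

	if (head != NULL) {
		np = head;
		do {			/* visits every node exactly once */
			next = np->next;
			free(np);
		} while ((np = next) != head);
	}
}
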
10907c478bd9Sstevel@tonic-gate /*
10917c478bd9Sstevel@tonic-gate  * copy out an aio request's result to a user-level result_t buffer.
10927c478bd9Sstevel@tonic-gate  */
10937c478bd9Sstevel@tonic-gate void
10947c478bd9Sstevel@tonic-gate aio_copyout_result(aio_req_t *reqp)
10957c478bd9Sstevel@tonic-gate {
10967c478bd9Sstevel@tonic-gate 	struct buf	*bp;
10977c478bd9Sstevel@tonic-gate 	struct iovec	*iov;
10987c478bd9Sstevel@tonic-gate 	void		*resultp;
10997c478bd9Sstevel@tonic-gate 	int		error;
11007c478bd9Sstevel@tonic-gate 	size_t		retval;
11017c478bd9Sstevel@tonic-gate 
11027c478bd9Sstevel@tonic-gate 	if (reqp->aio_req_flags & AIO_COPYOUTDONE)
11037c478bd9Sstevel@tonic-gate 		return;
11047c478bd9Sstevel@tonic-gate 
11057c478bd9Sstevel@tonic-gate 	reqp->aio_req_flags |= AIO_COPYOUTDONE;
11067c478bd9Sstevel@tonic-gate 
11077c478bd9Sstevel@tonic-gate 	iov = reqp->aio_req_uio.uio_iov;
11087c478bd9Sstevel@tonic-gate 	bp = &reqp->aio_req_buf;
11097c478bd9Sstevel@tonic-gate 	/* "resultp" points to user-level result_t buffer */
11107c478bd9Sstevel@tonic-gate 	resultp = (void *)reqp->aio_req_resultp;
11117c478bd9Sstevel@tonic-gate 	if (bp->b_flags & B_ERROR) {
11127c478bd9Sstevel@tonic-gate 		if (bp->b_error)
11137c478bd9Sstevel@tonic-gate 			error = bp->b_error;
11147c478bd9Sstevel@tonic-gate 		else
11157c478bd9Sstevel@tonic-gate 			error = EIO;
11167c478bd9Sstevel@tonic-gate 		retval = (size_t)-1;
11177c478bd9Sstevel@tonic-gate 	} else {
11187c478bd9Sstevel@tonic-gate 		error = 0;
11197c478bd9Sstevel@tonic-gate 		retval = iov->iov_len - bp->b_resid;
11207c478bd9Sstevel@tonic-gate 	}
11217c478bd9Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
11227c478bd9Sstevel@tonic-gate 	if (get_udatamodel() == DATAMODEL_NATIVE) {
11237c478bd9Sstevel@tonic-gate 		(void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
11247c478bd9Sstevel@tonic-gate 		(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
11257c478bd9Sstevel@tonic-gate 	} else {
11267c478bd9Sstevel@tonic-gate 		(void) suword32(&((aio_result32_t *)resultp)->aio_return,
11277c478bd9Sstevel@tonic-gate 		    (int)retval);
11287c478bd9Sstevel@tonic-gate 		(void) suword32(&((aio_result32_t *)resultp)->aio_errno, error);
11297c478bd9Sstevel@tonic-gate 	}
11307c478bd9Sstevel@tonic-gate #else
11317c478bd9Sstevel@tonic-gate 	(void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
11327c478bd9Sstevel@tonic-gate 	(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
11337c478bd9Sstevel@tonic-gate #endif
11347c478bd9Sstevel@tonic-gate }
11357c478bd9Sstevel@tonic-gate 
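/*
 * Hedged user-level sketch of the consumer side of the copyout above,
 * assuming the Solaris aioread(3AIO)/aiowait(3AIO) interface: the kernel
 * stores the transfer count in aio_return and the error in aio_errno of
 * the caller's aio_result_t.  read_one() is a hypothetical wrapper, and
 * it assumes only one request is outstanding so aiowait()'s return value
 * is ours.
 */
#include <sys/types.h>
#include <sys/asynch.h>
#include <unistd.h>
#include <errno.h>

ssize_t
read_one(int fd, char *buf, size_t len)
{
	aio_result_t res;
	aio_result_t *donep;

	if (aioread(fd, buf, (int)len, 0, SEEK_SET, &res) == -1)
		return (-1);
	donep = aiowait(NULL);		/* block for a completion */
	if (donep == (aio_result_t *)-1)
		return (-1);
	if (donep->aio_return == -1) {	/* matches (size_t)-1 stored above */
		errno = donep->aio_errno;	/* errno copied out above */
		return (-1);
	}
	return (donep->aio_return);	/* bytes transferred */
}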
11367c478bd9Sstevel@tonic-gate 
11377c478bd9Sstevel@tonic-gate void
11387c478bd9Sstevel@tonic-gate aio_copyout_result_port(struct iovec *iov, struct buf *bp, void *resultp)
11397c478bd9Sstevel@tonic-gate {
11407c478bd9Sstevel@tonic-gate 	int errno;
11417c478bd9Sstevel@tonic-gate 	size_t retval;
11427c478bd9Sstevel@tonic-gate 
11437c478bd9Sstevel@tonic-gate 	if (bp->b_flags & B_ERROR) {
11447c478bd9Sstevel@tonic-gate 		if (bp->b_error)
11457c478bd9Sstevel@tonic-gate 			errno = bp->b_error;
11467c478bd9Sstevel@tonic-gate 		else
11477c478bd9Sstevel@tonic-gate 			errno = EIO;
11487c478bd9Sstevel@tonic-gate 		retval = (size_t)-1;
11497c478bd9Sstevel@tonic-gate 	} else {
11507c478bd9Sstevel@tonic-gate 		errno = 0;
11517c478bd9Sstevel@tonic-gate 		retval = iov->iov_len - bp->b_resid;
11527c478bd9Sstevel@tonic-gate 	}
11537c478bd9Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
11547c478bd9Sstevel@tonic-gate 	if (get_udatamodel() == DATAMODEL_NATIVE) {
11557c478bd9Sstevel@tonic-gate 		(void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
11567c478bd9Sstevel@tonic-gate 		(void) suword32(&((aio_result_t *)resultp)->aio_errno, errno);
11577c478bd9Sstevel@tonic-gate 	} else {
11587c478bd9Sstevel@tonic-gate 		(void) suword32(&((aio_result32_t *)resultp)->aio_return,
11597c478bd9Sstevel@tonic-gate 		    (int)retval);
11607c478bd9Sstevel@tonic-gate 		(void) suword32(&((aio_result32_t *)resultp)->aio_errno, errno);
11617c478bd9Sstevel@tonic-gate 	}
11627c478bd9Sstevel@tonic-gate #else
11637c478bd9Sstevel@tonic-gate 	(void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
11647c478bd9Sstevel@tonic-gate 	(void) suword32(&((aio_result_t *)resultp)->aio_errno, errno);
11657c478bd9Sstevel@tonic-gate #endif
11667c478bd9Sstevel@tonic-gate }
11677c478bd9Sstevel@tonic-gate 
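/*
 * Both copyout routines above decode the same buf(9S) error convention:
 * B_ERROR in b_flags marks failure, b_error may hold a specific errno,
 * and EIO is the fallback when b_error was left at zero.  A minimal
 * standalone restatement follows; buf_errno() is a hypothetical name,
 * and the DDI routine geterror(9F) expresses the same rule.
 */
static int
buf_errno(struct buf *bp)
{
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	return (bp->b_error != 0 ? bp->b_error : EIO);
}
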
11687c478bd9Sstevel@tonic-gate /*
11697c478bd9Sstevel@tonic-gate  * Remove a request from aio_portq, the queue of completed port events.
11707c478bd9Sstevel@tonic-gate  */
11717c478bd9Sstevel@tonic-gate 
11727c478bd9Sstevel@tonic-gate void
11737c478bd9Sstevel@tonic-gate aio_req_remove_portq(aio_t *aiop, aio_req_t *reqp)
11747c478bd9Sstevel@tonic-gate {
11757c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_portq_mutex));
11767c478bd9Sstevel@tonic-gate 	while (aiop->aio_portq == NULL) {
11777c478bd9Sstevel@tonic-gate 		/*
11787c478bd9Sstevel@tonic-gate 		 * aio_portq is set to NULL when aio_cleanup_portq()
11797c478bd9Sstevel@tonic-gate 		 * is working with the event queue.
11807c478bd9Sstevel@tonic-gate 		 * The aio_cleanup_thread() uses aio_cleanup_portq()
11817c478bd9Sstevel@tonic-gate 		 * to unlock all AIO buffers with completed transactions.
11827c478bd9Sstevel@tonic-gate 		 * Wait here until aio_cleanup_portq() restores the
11837c478bd9Sstevel@tonic-gate 		 * list of completed transactions in aio_portq.
11847c478bd9Sstevel@tonic-gate 		 */
11857c478bd9Sstevel@tonic-gate 		cv_wait(&aiop->aio_portcv, &aiop->aio_portq_mutex);
11867c478bd9Sstevel@tonic-gate 	}
118734709573Sraf 	aio_deq(&aiop->aio_portq, reqp);
11887c478bd9Sstevel@tonic-gate }
11897c478bd9Sstevel@tonic-gate 
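/*
 * Minimal user-level analogue (all names hypothetical) of the handoff
 * that aio_req_remove_portq() participates in: a cleaner temporarily
 * claims the whole list, consumers wait on a condition variable until
 * the cleaner restores it and wakes them.  The kernel encodes "borrowed"
 * as aio_portq == NULL; an explicit flag is used here for clarity.
 */
#include <pthread.h>
#include <stddef.h>

typedef struct pq_item {
	struct pq_item *next;
} pq_item_t;

static pthread_mutex_t pq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pq_cv = PTHREAD_COND_INITIALIZER;
static pq_item_t *pq_head;
static int pq_borrowed;		/* nonzero while the cleaner owns the list */

/* consumer side: cf. aio_req_remove_portq() */
pq_item_t *
pq_remove_one(void)
{
	pq_item_t *item;

	pthread_mutex_lock(&pq_lock);
	while (pq_borrowed)		/* cleaner is working on the list */
		pthread_cond_wait(&pq_cv, &pq_lock);
	if ((item = pq_head) != NULL)
		pq_head = item->next;
	pthread_mutex_unlock(&pq_lock);
	return (item);
}

/* cleaner side: cf. aio_cleanup_portq() borrowing and restoring aio_portq */
void
pq_clean(void (*work)(pq_item_t *))
{
	pq_item_t *list;

	pthread_mutex_lock(&pq_lock);
	pq_borrowed = 1;
	list = pq_head;
	pq_head = NULL;
	pthread_mutex_unlock(&pq_lock);

	work(list);			/* process entries without the lock */

	pthread_mutex_lock(&pq_lock);
	pq_head = list;
	pq_borrowed = 0;
	pthread_cond_broadcast(&pq_cv);	/* wake all stalled consumers */
	pthread_mutex_unlock(&pq_lock);
}
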
11907c478bd9Sstevel@tonic-gate /* ARGSUSED */
11917c478bd9Sstevel@tonic-gate void
11927c478bd9Sstevel@tonic-gate aio_close_port(void *arg, int port, pid_t pid, int lastclose)
11937c478bd9Sstevel@tonic-gate {
11947c478bd9Sstevel@tonic-gate 	aio_t		*aiop;
11957c478bd9Sstevel@tonic-gate 	aio_req_t 	*reqp;
11967c478bd9Sstevel@tonic-gate 	aio_req_t 	*next;
11977c478bd9Sstevel@tonic-gate 	aio_req_t	*headp;
11987c478bd9Sstevel@tonic-gate 	int		counter;
11997c478bd9Sstevel@tonic-gate 
12007c478bd9Sstevel@tonic-gate 	if (arg == NULL)
12017c478bd9Sstevel@tonic-gate 		aiop = curproc->p_aio;
12027c478bd9Sstevel@tonic-gate 	else
12037c478bd9Sstevel@tonic-gate 		aiop = (aio_t *)arg;
12047c478bd9Sstevel@tonic-gate 
12057c478bd9Sstevel@tonic-gate 	/*
12067c478bd9Sstevel@tonic-gate 	 * The PORT_SOURCE_AIO source is associated by default with every
12077c478bd9Sstevel@tonic-gate 	 * newly created port.
12087c478bd9Sstevel@tonic-gate 	 * If no asynchronous I/O transactions were ever associated with
12097c478bd9Sstevel@tonic-gate 	 * the port, the aiop pointer will still be NULL.
12107c478bd9Sstevel@tonic-gate 	 */
12117c478bd9Sstevel@tonic-gate 	if (aiop == NULL)
12127c478bd9Sstevel@tonic-gate 		return;
12137c478bd9Sstevel@tonic-gate 
12147c478bd9Sstevel@tonic-gate 	/*
12157c478bd9Sstevel@tonic-gate 	 * Within a process, event ports can be used to collect events other
12167c478bd9Sstevel@tonic-gate 	 * than PORT_SOURCE_AIO events. At the same time the process can submit
12177c478bd9Sstevel@tonic-gate 	 * asynchronous I/O transactions that are not associated with the
12187c478bd9Sstevel@tonic-gate 	 * current port.
12197c478bd9Sstevel@tonic-gate 	 * The current process-oriented model of AIO uses a single queue for
12207c478bd9Sstevel@tonic-gate 	 * pending events. On close, the pending queue (the queue of asynchronous
12217c478bd9Sstevel@tonic-gate 	 * I/O transactions using event-port notification) must be scanned
12227c478bd9Sstevel@tonic-gate 	 * to detect and handle pending I/Os using the current port.
12237c478bd9Sstevel@tonic-gate 	 */
12247c478bd9Sstevel@tonic-gate 	mutex_enter(&aiop->aio_portq_mutex);
12257c478bd9Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
122634709573Sraf 	counter = 0;
122734709573Sraf 	if ((headp = aiop->aio_portpending) != NULL) {
122834709573Sraf 		reqp = headp;
122934709573Sraf 		do {
123034709573Sraf 			if (reqp->aio_req_portkev &&
123134709573Sraf 			    reqp->aio_req_port == port) {
12327c478bd9Sstevel@tonic-gate 				reqp->aio_req_flags |= AIO_CLOSE_PORT;
12337c478bd9Sstevel@tonic-gate 				counter++;
12347c478bd9Sstevel@tonic-gate 			}
123534709573Sraf 		} while ((reqp = reqp->aio_req_next) != headp);
12367c478bd9Sstevel@tonic-gate 	}
12377c478bd9Sstevel@tonic-gate 	if (counter == 0) {
12387c478bd9Sstevel@tonic-gate 		/* no AIOs pending */
12397c478bd9Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
12407c478bd9Sstevel@tonic-gate 		mutex_exit(&aiop->aio_portq_mutex);
12417c478bd9Sstevel@tonic-gate 		return;
12427c478bd9Sstevel@tonic-gate 	}
12437c478bd9Sstevel@tonic-gate 	aiop->aio_portpendcnt += counter;
1244f7ccf9b3Spraks 	mutex_exit(&aiop->aio_mutex);
12457c478bd9Sstevel@tonic-gate 	while (aiop->aio_portpendcnt)
1246f7ccf9b3Spraks 		cv_wait(&aiop->aio_portcv, &aiop->aio_portq_mutex);
12477c478bd9Sstevel@tonic-gate 
12487c478bd9Sstevel@tonic-gate 	/*
12497c478bd9Sstevel@tonic-gate 	 * all pending AIOs are completed.
12507c478bd9Sstevel@tonic-gate 	 * check port doneq
12517c478bd9Sstevel@tonic-gate 	 */
12527c478bd9Sstevel@tonic-gate 	headp = NULL;
125334709573Sraf 	if ((reqp = aiop->aio_portq) != NULL) {
125434709573Sraf 		do {
12557c478bd9Sstevel@tonic-gate 			next = reqp->aio_req_next;
12567c478bd9Sstevel@tonic-gate 			if (reqp->aio_req_port == port) {
125734709573Sraf 				/* dequeue request and discard event */
12587c478bd9Sstevel@tonic-gate 				aio_req_remove_portq(aiop, reqp);
12597c478bd9Sstevel@tonic-gate 				port_free_event(reqp->aio_req_portkev);
12607c478bd9Sstevel@tonic-gate 				/* put request in temporary queue */
12617c478bd9Sstevel@tonic-gate 				reqp->aio_req_next = headp;
12627c478bd9Sstevel@tonic-gate 				headp = reqp;
12637c478bd9Sstevel@tonic-gate 			}
126434709573Sraf 		} while ((reqp = next) != aiop->aio_portq);
12657c478bd9Sstevel@tonic-gate 	}
12667c478bd9Sstevel@tonic-gate 	mutex_exit(&aiop->aio_portq_mutex);
12677c478bd9Sstevel@tonic-gate 
12687c478bd9Sstevel@tonic-gate 	/* headp points to the list of requests to be discarded */
12697c478bd9Sstevel@tonic-gate 	for (reqp = headp; reqp != NULL; reqp = next) {
12707c478bd9Sstevel@tonic-gate 		next = reqp->aio_req_next;
12717c478bd9Sstevel@tonic-gate 		aphysio_unlock(reqp);
12727c478bd9Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
12737c478bd9Sstevel@tonic-gate 		aio_req_free_port(aiop, reqp);
12747c478bd9Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
12757c478bd9Sstevel@tonic-gate 	}
12767c478bd9Sstevel@tonic-gate 
12777c478bd9Sstevel@tonic-gate 	if (aiop->aio_flags & AIO_CLEANUP)
12787c478bd9Sstevel@tonic-gate 		cv_broadcast(&aiop->aio_waitcv);
12797c478bd9Sstevel@tonic-gate }
12807c478bd9Sstevel@tonic-gate 
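/*
 * Hedged user-level sketch of the notification style these queues serve:
 * a POSIX aio request tagged with SIGEV_PORT delivers a PORT_SOURCE_AIO
 * event that port_get(3C) retrieves; closing such a port is what drives
 * aio_close_port() above.  read_via_port() is a hypothetical wrapper and
 * error handling is trimmed to the essentials.
 */
#include <port.h>
#include <aio.h>
#include <string.h>
#include <unistd.h>

ssize_t
read_via_port(int fd, void *buf, size_t len)
{
	struct aiocb cb;
	port_notify_t pn;
	port_event_t pe;
	int port;

	if ((port = port_create()) == -1)
		return (-1);
	(void) memset(&cb, 0, sizeof (cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	pn.portnfy_port = port;			/* deliver to this port */
	pn.portnfy_user = NULL;
	cb.aio_sigevent.sigev_notify = SIGEV_PORT;
	cb.aio_sigevent.sigev_value.sival_ptr = &pn;

	if (aio_read(&cb) != 0 || port_get(port, &pe, NULL) != 0) {
		(void) close(port);
		return (-1);
	}
	/* for PORT_SOURCE_AIO, portev_object points at the aiocb */
	(void) close(port);
	return (aio_return((struct aiocb *)pe.portev_object));
}
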
12817c478bd9Sstevel@tonic-gate /*
12827c478bd9Sstevel@tonic-gate  * aio_cleanup_dr_delete_memory is used by dr's delete_memory_thread
1283b0b27ce6Spraks  * to kick-start the aio_cleanup_thread for the given process to do the
1284b0b27ce6Spraks  * necessary cleanup.
1285b0b27ce6Spraks  * This is needed so that delete_memory_thread can obtain writer locks
1286b0b27ce6Spraks  * on pages that need to be relocated during a dr memory delete operation;
1287b0b27ce6Spraks  * otherwise a deadly embrace (deadlock) may occur.
12887c478bd9Sstevel@tonic-gate  */
12897c478bd9Sstevel@tonic-gate int
12907c478bd9Sstevel@tonic-gate aio_cleanup_dr_delete_memory(proc_t *procp)
12917c478bd9Sstevel@tonic-gate {
12927c478bd9Sstevel@tonic-gate 	struct aio *aiop = procp->p_aio;
1293b0b27ce6Spraks 	struct as *as = procp->p_as;
1294b0b27ce6Spraks 	int ret = 0;
12957c478bd9Sstevel@tonic-gate 
12967c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&procp->p_lock));
12977c478bd9Sstevel@tonic-gate 
1298b0b27ce6Spraks 	mutex_enter(&as->a_contents);
1299b0b27ce6Spraks 
1300b0b27ce6Spraks 	if (aiop != NULL) {
1301b0b27ce6Spraks 		aiop->aio_rqclnup = 1;
1302b0b27ce6Spraks 		cv_broadcast(&as->a_cv);
1303b0b27ce6Spraks 		ret = 1;
1304b0b27ce6Spraks 	}
1305b0b27ce6Spraks 	mutex_exit(&as->a_contents);
1306b0b27ce6Spraks 	return (ret);
13077c478bd9Sstevel@tonic-gate }
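
/*
 * Hedged sketch (hypothetical helper, not part of this file) of the
 * waiter side of the handshake above: the cleanup thread re-checks
 * aio_rqclnup while holding as->a_contents before sleeping on as->a_cv,
 * so the cv_broadcast() issued by aio_cleanup_dr_delete_memory() cannot
 * be missed.  Exit conditions and the actual cleanup work are elided.
 */
static void
aio_wait_for_cleanup_request(proc_t *procp)
{
	struct aio *aiop = procp->p_aio;
	struct as *as = procp->p_as;

	mutex_enter(&as->a_contents);
	while (aiop->aio_rqclnup == 0)
		cv_wait(&as->a_cv, &as->a_contents);
	aiop->aio_rqclnup = 0;		/* consume the request */
	mutex_exit(&as->a_contents);
	/* ... the deferred queue cleanup would run here ... */
}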
1308