xref: /illumos-gate/usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.c (revision 2d6eb4a5e0a47d30189497241345dc5466bb68ab)
1507c3241Smlf /*
2507c3241Smlf  * CDDL HEADER START
3507c3241Smlf  *
4507c3241Smlf  * The contents of this file are subject to the terms of the
5507c3241Smlf  * Common Development and Distribution License (the "License").
6507c3241Smlf  * You may not use this file except in compliance with the License.
7507c3241Smlf  *
8507c3241Smlf  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9507c3241Smlf  * or http://www.opensolaris.org/os/licensing.
10507c3241Smlf  * See the License for the specific language governing permissions
11507c3241Smlf  * and limitations under the License.
12507c3241Smlf  *
13507c3241Smlf  * When distributing Covered Code, include this CDDL HEADER in each
14507c3241Smlf  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15507c3241Smlf  * If applicable, add the following below this CDDL HEADER, with the
16507c3241Smlf  * fields enclosed by brackets "[]" replaced with your own identifying
17507c3241Smlf  * information: Portions Copyright [yyyy] [name of copyright owner]
18507c3241Smlf  *
19507c3241Smlf  * CDDL HEADER END
20507c3241Smlf  */
21507c3241Smlf 
22507c3241Smlf /*
23*903a11ebSrh87107  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24507c3241Smlf  * Use is subject to license terms.
25507c3241Smlf  */
26507c3241Smlf 
27507c3241Smlf #include <sys/types.h>
28507c3241Smlf #include <sys/kmem.h>
29507c3241Smlf #include <sys/note.h>
30507c3241Smlf 
31507c3241Smlf #include "ghd.h"
32507c3241Smlf 
33507c3241Smlf 
34507c3241Smlf 
/*
 * ghd_target_init()
 *
 *	Allocate and initialize a per-target-instance structure (gtgt_t)
 *	for (target, lun) on the HBA described by cccp, and link it onto
 *	the matching per-device structure (gdev_t), creating that gdev_t
 *	if this is the first instance attached for the device.
 *
 *	The gtgt_t is allocated with tgt_private_size extra trailing bytes;
 *	gt_tgt_private points at that area for the target driver's use.
 *	The caller owns the returned gtgt_t and releases it via
 *	ghd_target_free().
 *
 *	Locking: acquires ccc_hba_mutex then ccc_waitq_mutex; both are
 *	released (and any pending waitq work processed) by the trailing
 *	call to ghd_waitq_process_and_mutex_exit().
 *
 *	hba_dip is unused (see ARGUNUSED below).
 */
/*ARGSUSED*/
gtgt_t *
ghd_target_init(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		size_t		 tgt_private_size,
		void		*hba_private,
		ushort_t	 target,
		uchar_t		 lun)
{
	_NOTE(ARGUNUSED(hba_dip))
	gtgt_t	*gtgtp;
	size_t	 size = sizeof (*gtgtp) + tgt_private_size;
	gdev_t	*gdevp;
	ulong_t	 maxactive;

	/* KM_SLEEP: may block, so this must not be called from interrupt */
	gtgtp = kmem_zalloc(size, KM_SLEEP);

	/*
	 * initialize the per instance structure
	 */

	/* private area immediately follows the gtgt_t itself */
	gtgtp->gt_tgt_private = (void *)(gtgtp + 1);
	gtgtp->gt_size = size;
	gtgtp->gt_hba_private = hba_private;
	gtgtp->gt_target = target;
	gtgtp->gt_lun = lun;
	gtgtp->gt_ccc = cccp;

	/*
	 * set the queue's maxactive to 1 if
	 * property not specified on target or hba devinfo node
	 */
	maxactive = ddi_getprop(DDI_DEV_T_ANY, tgt_dip, 0, "ghd-maxactive", 1);
	gtgtp->gt_maxactive = maxactive;

	/* initialize the linked list pointers */
	GTGT_INIT(gtgtp);

	/*
	 * grab both mutexes so the queue structures
	 * stay stable while adding this instance to the linked lists
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Search the HBA's linked list of device structures.
	 *
	 * If this device is already attached then link this instance
	 * to the existing per-device-structure on the ccc_devs list.
	 *
	 */
	gdevp = CCCP2GDEVP(cccp);
	while (gdevp != NULL) {
		if (gdevp->gd_target == target && gdevp->gd_lun == lun) {
			GDBG_WAITQ(("ghd_target_init(%d,%d) found gdevp 0x%p"
			    " gtgtp 0x%p max %lu\n", target, lun,
			    (void *)gdevp, (void *)gtgtp, maxactive));

			goto foundit;
		}
		gdevp = GDEV_NEXTP(gdevp);
	}

	/*
	 * Not found. This is the first instance for this device.
	 */


	/* allocate the per-device-structure */

	gdevp = kmem_zalloc(sizeof (*gdevp), KM_SLEEP);
	gdevp->gd_target = target;
	gdevp->gd_lun = lun;

	/*
	 * link this second level queue to the HBA's first
	 * level queue
	 */
	GDEV_QATTACH(gdevp, cccp, maxactive);

	GDBG_WAITQ(("ghd_target_init(%d,%d) new gdevp 0x%p gtgtp 0x%p"
	    " max %lu\n", target, lun, (void *)gdevp, (void *)gtgtp,
	    maxactive));

foundit:

	/* save the ptr to the per device structure */
	gtgtp->gt_gdevp = gdevp;

	/* Add the per instance structure to the per device list  */
	GTGT_ATTACH(gtgtp, gdevp);

	/* processes the waitq and drops both mutexes */
	ghd_waitq_process_and_mutex_exit(cccp);

	return (gtgtp);
}
133507c3241Smlf 
/*
 * ghd_target_free()
 *
 *	Detach and free a per-target-instance structure (gtgt_t) previously
 *	returned by ghd_target_init().  If this was the last instance
 *	attached to its per-device structure (gdev_t), the gdev_t is
 *	detached from the HBA's device list and freed too.  If exactly one
 *	instance remains, the device queue's maxactive throttle is restored
 *	from that surviving instance's saved value.
 *
 *	hba_dip and tgt_dip are unused (see ARGUNUSED below).
 *
 *	Locking: acquires ccc_hba_mutex then ccc_waitq_mutex; both are
 *	released by the trailing ghd_waitq_process_and_mutex_exit().
 */
/*ARGSUSED*/
void
ghd_target_free(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		gtgt_t		*gtgtp)
{
	_NOTE(ARGUNUSED(hba_dip,tgt_dip))

	gdev_t	*gdevp = gtgtp->gt_gdevp;

	GDBG_WAITQ(("ghd_target_free(%d,%d) gdevp-0x%p gtgtp 0x%p\n",
	    gtgtp->gt_target, gtgtp->gt_lun, (void *)gdevp, (void *)gtgtp));

	/*
	 * grab both mutexes so the queue structures
	 * stay stable while deleting this instance
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	ASSERT(gdevp->gd_ninstances > 0);

	/*
	 * remove this per-instance structure from the device list and
	 * free the memory
	 */
	GTGT_DEATTACH(gtgtp, gdevp);
	kmem_free((caddr_t)gtgtp, gtgtp->gt_size);

	if (gdevp->gd_ninstances == 1) {
		GDBG_WAITQ(("ghd_target_free: N=1 gdevp 0x%p\n",
		    (void *)gdevp));
		/*
		 * If there's now just one instance left attached to this
		 * device then reset the queue's max active value
		 * from that instance's saved value.
		 */
		gtgtp = GDEVP2GTGTP(gdevp);
		GDEV_MAXACTIVE(gdevp) = gtgtp->gt_maxactive;

	} else if (gdevp->gd_ninstances == 0) {
		/* else no instances left */
		GDBG_WAITQ(("ghd_target_free: N=0 gdevp 0x%p\n",
		    (void *)gdevp));

		/* detach this per-dev-structure from the HBA's dev list */
		GDEV_QDETACH(gdevp, cccp);
		kmem_free(gdevp, sizeof (*gdevp));

	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		/* leave maxactive set to 1 */
		GDBG_WAITQ(("ghd_target_free: N>1 gdevp 0x%p\n",
		    (void *)gdevp));
	}
#endif

	/* processes the waitq and drops both mutexes */
	ghd_waitq_process_and_mutex_exit(cccp);
}
195507c3241Smlf 
/*
 * ghd_waitq_shuffle_up()
 *
 *	Promote as many queued requests as allowed from the per-device
 *	(second level) wait queue onto the HBA's (first level) wait queue.
 *	For each promoted request, GDEV_NACTIVE is bumped and the command's
 *	cmd_waitq_level is incremented to record how far up the queue
 *	hierarchy it has progressed (ghd_waitq_delete() uses that level to
 *	undo the accounting).
 *
 *	Stops when:
 *	  - the device's maxactive throttle is reached, or
 *	  - multiple target instances share this device and a request is
 *	    already active (requests are single-threaded in that case
 *	    because the instances may have conflicting throttles), or
 *	  - the device wait queue is empty.
 *
 *	Locking: caller must hold ccc_waitq_mutex (asserted below).
 */
void
ghd_waitq_shuffle_up(ccc_t *cccp, gdev_t *gdevp)
{
	gcmd_t	*gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_shuffle_up: cccp 0x%p gdevp 0x%p N %ld "
	    "max %ld\n", (void *)cccp, (void *)gdevp, GDEV_NACTIVE(gdevp),
	    GDEV_MAXACTIVE(gdevp)));
	for (;;) {
		/*
		 * Now check the device wait queue throttle to see if I can
		 * shuffle up a request to the HBA wait queue.
		 */
		if (GDEV_NACTIVE(gdevp) >= GDEV_MAXACTIVE(gdevp)) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: N>MAX gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * single thread requests while multiple instances
		 * because the different target drives might have
		 * conflicting maxactive throttles.
		 */
		if (gdevp->gd_ninstances > 1 && GDEV_NACTIVE(gdevp) > 0) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: multi gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * promote the topmost request from the device queue to
		 * the HBA queue.
		 */
		if ((gcmdp = L2_remove_head(&GDEV_QHEAD(gdevp))) == NULL) {
			/* the device is empty so we're done */
			GDBG_WAITQ(("ghd_waitq_shuffle_up: MT gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}
		L2_add(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
		GDEV_NACTIVE(gdevp)++;
		/* record progress up the queue hierarchy (1 -> 2) */
		gcmdp->cmd_waitq_level++;
		GDBG_WAITQ(("ghd_waitq_shuffle_up: gdevp 0x%p gcmdp 0x%p\n",
		    (void *)gdevp, (void *)gcmdp));
	}
}
245507c3241Smlf 
246507c3241Smlf 
/*
 * ghd_waitq_delete()
 *
 *	Remove a request from whichever wait queue it currently sits on
 *	and roll back the queue-activity counters for the levels it had
 *	reached, then shuffle up replacement requests into the freed slots.
 *
 *	cmd_waitq_level encodes how far the request progressed:
 *	  0 - never queued; nothing to undo
 *	  1 - on the per-device waitq; unlink only
 *	  2 - promoted to the HBA waitq; unlink and drop GDEV_NACTIVE
 *	  3 - started on the HBA; drop both GDEV_NACTIVE and GHBA_NACTIVE
 *
 *	The L2_delete() calls at levels 1 and 2 are safe even if the
 *	request was already unlinked (deleting twice is harmless), which
 *	covers the early-timeout/early-abort cases.
 *
 *	Locking: caller must hold ccc_hba_mutex (asserted); this routine
 *	acquires and releases ccc_waitq_mutex internally.
 */
void
ghd_waitq_delete(ccc_t *cccp, gcmd_t *gcmdp)
{
	gtgt_t	*gtgtp = GCMDP2GTGTP(gcmdp);
	gdev_t	*gdevp = gtgtp->gt_gdevp;
#if defined(GHD_DEBUG) || defined(__lint)
	Q_t	*qp = &gdevp->gd_waitq;
#endif

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Adjust all queue counters. If this request is being aborted
	 * it might only have made it to the target queue. Otherwise,
	 * both the target and hba queue have to be adjusted when a
	 * request is completed normally. The cmd_waitq_level value
	 * indicates which queue counters need to be adjusted. It's
	 * incremented as the request progresses up the queues.
	 */
	switch (gcmdp->cmd_waitq_level) {
	case 0:
		break;
	case 1:
		/*
		 * If this is an early-timeout, or early-abort, the request
		 * is still linked onto a waitq. Remove it now. If it's
		 * an active request and no longer on the waitq then calling
		 * L2_delete a second time does no harm.
		 */
		L2_delete(&gcmdp->cmd_q);
		break;

	case 2:
		L2_delete(&gcmdp->cmd_q);
#if defined(GHD_DEBUG) || defined(__lint)
		/* counter underflow would indicate corrupted accounting */
		if (GDEV_NACTIVE(gdevp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		break;

	case 3:
		/* it's an active or completed command */
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0 || GHBA_NACTIVE(cccp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		GHBA_NACTIVE(cccp)--;
		break;

	default:
		/* this shouldn't happen */
#if defined(GHD_DEBUG) || defined(__lint)
		debug_enter("\n\nGHD WAITQ LEVEL > 3\n\n");
#endif
		break;
	}

	GDBG_WAITQ(("ghd_waitq_delete: gcmdp 0x%p qp 0x%p level %ld\n",
	    (void *)gcmdp, (void *)qp, gcmdp->cmd_waitq_level));


	/*
	 * There's probably now more room in the HBA queue. Move
	 * up as many requests as possible.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	mutex_exit(&cccp->ccc_waitq_mutex);
}
319507c3241Smlf 
320507c3241Smlf 
/*
 * ghd_waitq_process_and_mutex_hold()
 *
 *	Drain the HBA's first-level wait queue, handing each request to the
 *	HBA driver via the ccc_hba_start callback until the queue empties,
 *	the HBA's maxactive throttle is hit, the queue is held or frozen,
 *	or the HBA rejects a request (which is re-queued at the head).
 *
 *	Returns TRUE if at least one request was successfully started,
 *	FALSE otherwise.
 *
 *	Locking: caller must hold both ccc_hba_mutex and ccc_waitq_mutex;
 *	both are still held on return (asserted on entry and exit).  Note
 *	that ccc_waitq_mutex is dropped around each ccc_hba_start call and
 *	reacquired afterward, so the waitq can change across that call.
 */
int
ghd_waitq_process_and_mutex_hold(ccc_t *cccp)
{
	gcmd_t	*gcmdp;
	int	 rc = FALSE;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	for (;;) {
		if (L2_EMPTY(&GHBA_QHEAD(cccp))) {
			/* return if the list is empty */
			GDBG_WAITQ(("ghd_waitq_proc: MT cccp 0x%p qp 0x%p\n",
			    (void *)cccp, (void *)&cccp->ccc_waitq));
			break;
		}
		if (GHBA_NACTIVE(cccp) >= GHBA_MAXACTIVE(cccp)) {
			/* return if the HBA is too active */
			GDBG_WAITQ(("ghd_waitq_proc: N>M cccp 0x%p qp 0x%p"
			    " N %ld max %ld\n", (void *)cccp,
			    (void *)&cccp->ccc_waitq,
			    GHBA_NACTIVE(cccp),
			    GHBA_MAXACTIVE(cccp)));
			break;
		}

		/*
		 * bail out if the wait queue has been
		 * "held" by the HBA driver
		 */
		if (cccp->ccc_waitq_held) {
			GDBG_WAITQ(("ghd_waitq_proc: held"));
			return (rc);
		}

		if (cccp->ccc_waitq_frozen) {

			clock_t lbolt, delay_in_hz, time_to_wait;

			/* freeze delay is kept in milliseconds */
			delay_in_hz =
			    drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);

			lbolt = ddi_get_lbolt();
			time_to_wait = delay_in_hz -
			    (lbolt - cccp->ccc_waitq_freezetime);

			if (time_to_wait > 0) {
				/*
				 * stay frozen; we'll be called again
				 * by ghd_timeout_softintr()
				 */
				GDBG_WAITQ(("ghd_waitq_proc: frozen"));
				return (rc);
			} else {
				/* unfreeze and continue */
				GDBG_WAITQ(("ghd_waitq_proc: unfreezing"));
				cccp->ccc_waitq_freezetime = 0;
				cccp->ccc_waitq_freezedelay = 0;
				cccp->ccc_waitq_frozen = 0;
			}
		}

		gcmdp = (gcmd_t *)L2_remove_head(&GHBA_QHEAD(cccp));
		GHBA_NACTIVE(cccp)++;
		/* record progress up the queue hierarchy (2 -> 3) */
		gcmdp->cmd_waitq_level++;
		/* drop waitq mutex before calling into the HBA driver */
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Start up the next I/O request
		 */
		ASSERT(gcmdp != NULL);
		gcmdp->cmd_state = GCMD_STATE_ACTIVE;
		if (!(*cccp->ccc_hba_start)(cccp->ccc_hba_handle, gcmdp)) {
			/* if the HBA rejected the request, requeue it */
			gcmdp->cmd_state = GCMD_STATE_WAITQ;
			mutex_enter(&cccp->ccc_waitq_mutex);
			/* undo the accounting done above */
			GHBA_NACTIVE(cccp)--;
			gcmdp->cmd_waitq_level--;
			L2_add_head(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
			GDBG_WAITQ(("ghd_waitq_proc: busy cccp 0x%p gcmdp 0x%p"
			    " handle 0x%p\n", (void *)cccp, (void *)gcmdp,
			    cccp->ccc_hba_handle));
			break;
		}
		rc = TRUE;
		mutex_enter(&cccp->ccc_waitq_mutex);
		GDBG_WAITQ(("ghd_waitq_proc: ++ cccp 0x%p gcmdp 0x%p N %ld\n",
		    (void *)cccp, (void *)gcmdp, GHBA_NACTIVE(cccp)));
	}
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
	return (rc);
}
414507c3241Smlf 
/*
 * ghd_waitq_process_and_mutex_exit()
 *
 *	Convenience wrapper: process the HBA wait queue via
 *	ghd_waitq_process_and_mutex_hold(), then release both mutexes.
 *	The return value of the processing step is deliberately ignored.
 *
 *	Locking: caller must hold both ccc_hba_mutex and ccc_waitq_mutex
 *	(asserted); both are released before returning.
 */
void
ghd_waitq_process_and_mutex_exit(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_process_and_mutex_exit: cccp 0x%p\n",
	    (void *)cccp));

	(void) ghd_waitq_process_and_mutex_hold(cccp);

	/*
	 * Release the mutexes in the opposite order that they
	 * were acquired to prevent requests queued by
	 * ghd_transport() from getting hung up in the wait queue.
	 */
	mutex_exit(&cccp->ccc_hba_mutex);
	mutex_exit(&cccp->ccc_waitq_mutex);
}
434