xref: /illumos-gate/usr/src/uts/intel/io/dktp/hba/ghd/ghd.c (revision bea83d026ee1bd1b2a2419e1d0232f107a5d7d9b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/kmem.h>
31 #include <sys/debug.h>
32 #include <sys/scsi/scsi.h>
33 
34 #include "ghd.h"
35 
/* ghd_poll() function codes: */
typedef enum {
	GHD_POLL_REQUEST,	/* wait for a specific request */
	GHD_POLL_DEVICE,	/* wait for a specific device to idle */
	GHD_POLL_ALL		/* wait for the whole bus to idle */
} gpoll_t;

/*
 * Local functions:
 */
static	gcmd_t	*ghd_doneq_get(ccc_t *cccp);
static	void	 ghd_doneq_pollmode_enter(ccc_t *cccp);
static	void	 ghd_doneq_pollmode_exit(ccc_t *cccp);
static	uint_t	 ghd_doneq_process(caddr_t arg);
static	void	 ghd_do_reset_notify_callbacks(ccc_t *cccp);

static	int	 ghd_poll(ccc_t *cccp, gpoll_t polltype, ulong_t polltime,
			gcmd_t *poll_gcmdp, gtgt_t *gtgtp, void *intr_status);


/*
 * Local configuration variables
 *
 * Timeouts passed as the "polltime" argument to ghd_poll(), which
 * compares them against a delta of ddi_get_lbolt() values.
 * NOTE(review): that makes the unit clock ticks, not seconds; 5 ticks
 * is very short on a 100Hz kernel clock -- confirm intended units.
 */

ulong_t	ghd_tran_abort_timeout = 5;
ulong_t	ghd_tran_abort_lun_timeout = 5;
ulong_t	ghd_tran_reset_target_timeout = 5;
ulong_t	ghd_tran_reset_bus_timeout = 5;
64 
/*
 * ghd_doneq_init()
 *
 *	Set up the done queue: initialize the list head, register the
 *	low-priority soft interrupt that drains it (ghd_doneq_process),
 *	and create the doneq mutex at the softint's interrupt level.
 *
 *	Returns TRUE on success, FALSE if the softintr could not be added
 *	(in which case neither the softintr nor the mutex exist).
 */
static int
ghd_doneq_init(ccc_t *cccp)
{
	ddi_iblock_cookie_t iblock;

	L2_INIT(&cccp->ccc_doneq);
	/*
	 * Start in pollmode so ghd_doneq_process() no-ops if the
	 * softintr fires before setup completes; pollmode is cleared
	 * by ghd_doneq_pollmode_exit() below.
	 */
	cccp->ccc_hba_pollmode = TRUE;

	if (ddi_add_softintr(cccp->ccc_hba_dip, DDI_SOFTINT_LOW,
	    &cccp->ccc_doneq_softid, &iblock, NULL,
	    ghd_doneq_process, (caddr_t)cccp) != DDI_SUCCESS) {
		GDBG_ERROR(("ghd_doneq_init: add softintr failed cccp 0x%p\n",
		    (void *)cccp));
		return (FALSE);
	}

	/* doneq mutex must block the softintr; use its iblock cookie */
	mutex_init(&cccp->ccc_doneq_mutex, NULL, MUTEX_DRIVER, iblock);
	ghd_doneq_pollmode_exit(cccp);
	return (TRUE);
}
85 
86 /*
87  * ghd_complete():
88  *
89  *	The HBA driver calls this entry point when it's completely
90  *	done processing a request.
91  *
92  *	See the GHD_COMPLETE_INLINE() macro in ghd.h for the actual code.
93  */
94 
void
ghd_complete(ccc_t *cccp, gcmd_t *gcmdp)
{
	/* caller (the HBA driver) must hold the HBA mutex */
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	GHD_COMPLETE_INLINE(cccp, gcmdp);
}
101 
102 
103 /*
104  * ghd_doneq_put_head():
105  *
106  *	Mark the request done and prepend it to the doneq.
107  *	See the GHD_DONEQ_PUT_HEAD_INLINE() macros in ghd.h for
108  *	the actual code.
109  */
void
ghd_doneq_put_head(ccc_t *cccp, gcmd_t *gcmdp)
{
	/* out-of-line wrapper for HBA drivers; see ghd.h for the macro */
	GHD_DONEQ_PUT_HEAD_INLINE(cccp, gcmdp)
}
115 
116 /*
117  * ghd_doneq_put_tail():
118  *
119  *	Mark the request done and append it to the doneq.
120  *	See the GHD_DONEQ_PUT_TAIL_INLINE() macros in ghd.h for
121  *	the actual code.
122  */
void
ghd_doneq_put_tail(ccc_t *cccp, gcmd_t *gcmdp)
{
	/* out-of-line wrapper for HBA drivers; see ghd.h for the macro */
	GHD_DONEQ_PUT_TAIL_INLINE(cccp, gcmdp)
}
128 
129 static gcmd_t	*
130 ghd_doneq_get(ccc_t *cccp)
131 {
132 	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
133 	gcmd_t	 *gcmdp;
134 
135 	mutex_enter(doneq_mutexp);
136 	if ((gcmdp = L2_next(&cccp->ccc_doneq)) != NULL)
137 		L2_delete(&gcmdp->cmd_q);
138 	mutex_exit(doneq_mutexp);
139 	return (gcmdp);
140 }
141 
142 
143 static void
144 ghd_doneq_pollmode_enter(ccc_t *cccp)
145 {
146 	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
147 
148 	mutex_enter(doneq_mutexp);
149 	cccp->ccc_hba_pollmode = TRUE;
150 	mutex_exit(doneq_mutexp);
151 }
152 
153 
154 static void
155 ghd_doneq_pollmode_exit(ccc_t *cccp)
156 {
157 	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
158 
159 	mutex_enter(doneq_mutexp);
160 	cccp->ccc_hba_pollmode = FALSE;
161 	mutex_exit(doneq_mutexp);
162 
163 	/* trigger software interrupt for the completion callbacks */
164 	if (!L2_EMPTY(&cccp->ccc_doneq))
165 		ddi_trigger_softintr(cccp->ccc_doneq_softid);
166 }
167 
168 
169 /* ***************************************************************** */
170 
171 /*
172  *
173  * ghd_doneq_process()
174  *
175  *	This function is called directly from the software interrupt
176  *	handler.
177  *
178  *	The doneq is protected by a separate mutex than the
179  *	HBA mutex in order to avoid mutex contention on MP systems.
180  *
181  */
182 
/*
 * Soft interrupt handler: drain the done queue, invoking the HBA's
 * completion function for each request.  Returns DDI_INTR_UNCLAIMED
 * always (see the "notyet" block below).
 *
 * Locking: the doneq mutex is acquired at the top of each iteration
 * and — note — is still HELD when either "break" is taken; the single
 * mutex_exit() after the loop releases it.  It is dropped before the
 * completion callback because the callback can re-enter ghd_transport().
 */
static uint_t
ghd_doneq_process(caddr_t arg)
{
	ccc_t *cccp =	(ccc_t *)arg;
	kmutex_t	*doneq_mutexp;
	gcmd_t		*gcmdp;
	int			rc = DDI_INTR_UNCLAIMED;

	doneq_mutexp = &cccp->ccc_doneq_mutex;

	for (;;) {
		mutex_enter(doneq_mutexp);
		/* skip if FLAG_NOINTR request in progress */
		if (cccp->ccc_hba_pollmode)
			break;
		/* pop the first one from the done Q */
		if ((gcmdp = L2_next(&cccp->ccc_doneq)) == NULL)
			break;
		L2_delete(&gcmdp->cmd_q);

		if (gcmdp->cmd_flags & GCMDFLG_RESET_NOTIFY) {
			/* special request; processed here and discarded */
			/* (queued by ghd_trigger_reset_notify()) */
			ghd_do_reset_notify_callbacks(cccp);
			ghd_gcmd_free(gcmdp);
			mutex_exit(doneq_mutexp);
			continue;
		}

		/*
		 * drop the mutex since completion
		 * function can re-enter the top half via
		 * ghd_transport()
		 */
		mutex_exit(doneq_mutexp);
		gcmdp->cmd_state = GCMD_STATE_IDLE;
		(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, TRUE);
#ifdef notyet
		/* I don't think this is ever necessary */
		rc = DDI_INTR_CLAIMED;
#endif
	}
	/* reached via "break" above with the doneq mutex still held */
	mutex_exit(doneq_mutexp);
	return (rc);
}
227 
228 static void
229 ghd_do_reset_notify_callbacks(ccc_t *cccp)
230 {
231 	ghd_reset_notify_list_t *rnp;
232 	L2el_t *rnl = &cccp->ccc_reset_notify_list;
233 
234 	ASSERT(mutex_owned(&cccp->ccc_doneq_mutex));
235 
236 	/* lock the reset notify list while we operate on it */
237 	mutex_enter(&cccp->ccc_reset_notify_mutex);
238 
239 	for (rnp = (ghd_reset_notify_list_t *)L2_next(rnl);
240 	    rnp != NULL;
241 	    rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
242 
243 		/* don't call if HBA driver didn't set it */
244 		if (cccp->ccc_hba_reset_notify_callback) {
245 			(*cccp->ccc_hba_reset_notify_callback)(rnp->gtgtp,
246 			    rnp->callback, rnp->arg);
247 		}
248 	}
249 	mutex_exit(&cccp->ccc_reset_notify_mutex);
250 }
251 
252 
253 /* ***************************************************************** */
254 
255 
256 /*
257  * ghd_register()
258  *
259  *	Do the usual interrupt handler setup stuff.
260  *
261  *	Also, set up three mutexes: the wait queue mutex, the HBA
262  *	mutex, and the done queue mutex. The permitted locking
263  *	orders are:
264  *
265  *		1. enter(waitq)
266  *		2. enter(activel)
267  *		3. enter(doneq)
268  *		4. enter(HBA) then enter(activel)
269  *		5. enter(HBA) then enter(doneq)
270  *		6. enter(HBA) then enter(waitq)
271  *		7. enter(waitq) then tryenter(HBA)
272  *
273  *	Note: cases 6 and 7 won't deadlock because case 7 is always
274  *	mutex_tryenter() call.
275  *
276  */
277 
278 
279 int
280 ghd_register(char *labelp,
281 	ccc_t	*cccp,
282 	dev_info_t *dip,
283 	int	inumber,
284 	void	*hba_handle,
285 	int	(*ccballoc)(gtgt_t *, gcmd_t *, int, int, int, int),
286 	void	(*ccbfree)(gcmd_t *),
287 	void	(*sg_func)(gcmd_t *, ddi_dma_cookie_t *, int, int),
288 	int	(*hba_start)(void *, gcmd_t *),
289 	void    (*hba_complete)(void *, gcmd_t *, int),
290 	uint_t	(*int_handler)(caddr_t),
291 	int	(*get_status)(void *, void *),
292 	void	(*process_intr)(void *, void *),
293 	int	(*timeout_func)(void *, gcmd_t *, gtgt_t *, gact_t, int),
294 	tmr_t	*tmrp,
295 	void 	(*hba_reset_notify_callback)(gtgt_t *,
296 			void (*)(caddr_t), caddr_t))
297 {
298 
299 	cccp->ccc_label = labelp;
300 	cccp->ccc_hba_dip = dip;
301 	cccp->ccc_ccballoc = ccballoc;
302 	cccp->ccc_ccbfree = ccbfree;
303 	cccp->ccc_sg_func = sg_func;
304 	cccp->ccc_hba_start = hba_start;
305 	cccp->ccc_hba_complete = hba_complete;
306 	cccp->ccc_process_intr = process_intr;
307 	cccp->ccc_get_status = get_status;
308 	cccp->ccc_hba_handle = hba_handle;
309 	cccp->ccc_hba_reset_notify_callback = hba_reset_notify_callback;
310 
311 	/* initialize the HBA's list headers */
312 	CCCP_INIT(cccp);
313 
314 	if (ddi_get_iblock_cookie(dip, inumber, &cccp->ccc_iblock)
315 	    != DDI_SUCCESS) {
316 
317 		return (FALSE);
318 	}
319 
320 	mutex_init(&cccp->ccc_hba_mutex, NULL, MUTEX_DRIVER, cccp->ccc_iblock);
321 
322 	mutex_init(&cccp->ccc_waitq_mutex, NULL, MUTEX_DRIVER,
323 	    cccp->ccc_iblock);
324 
325 	mutex_init(&cccp->ccc_reset_notify_mutex, NULL, MUTEX_DRIVER,
326 	    cccp->ccc_iblock);
327 
328 	/* Establish interrupt handler */
329 	if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
330 	    int_handler, (caddr_t)hba_handle) != DDI_SUCCESS) {
331 		mutex_destroy(&cccp->ccc_hba_mutex);
332 		mutex_destroy(&cccp->ccc_waitq_mutex);
333 		mutex_destroy(&cccp->ccc_reset_notify_mutex);
334 
335 		return (FALSE);
336 	}
337 
338 	if (ghd_timer_attach(cccp, tmrp, timeout_func) == FALSE) {
339 		ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
340 		mutex_destroy(&cccp->ccc_hba_mutex);
341 		mutex_destroy(&cccp->ccc_waitq_mutex);
342 		mutex_destroy(&cccp->ccc_reset_notify_mutex);
343 
344 		return (FALSE);
345 	}
346 
347 	if (ghd_doneq_init(cccp)) {
348 
349 		return (TRUE);
350 	}
351 
352 	/*
353 	 * ghd_doneq_init() returned error:
354 	 */
355 
356 	ghd_timer_detach(cccp);
357 	ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
358 	mutex_destroy(&cccp->ccc_hba_mutex);
359 	mutex_destroy(&cccp->ccc_waitq_mutex);
360 	mutex_destroy(&cccp->ccc_reset_notify_mutex);
361 
362 	return (FALSE);
363 
364 }
365 
366 
367 void
368 ghd_unregister(ccc_t *cccp)
369 {
370 	ghd_timer_detach(cccp);
371 	ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
372 	ddi_remove_softintr(cccp->ccc_doneq_softid);
373 	mutex_destroy(&cccp->ccc_hba_mutex);
374 	mutex_destroy(&cccp->ccc_waitq_mutex);
375 	mutex_destroy(&cccp->ccc_doneq_mutex);
376 }
377 
378 
379 
/*
 * ghd_intr()
 *
 *	Hardware interrupt handler entry point (HBA drivers route their
 *	handler here).  Repeatedly asks the HBA driver whether there is
 *	interrupt status pending (ccc_get_status) and processes it
 *	(ccc_process_intr), then restarts requests from the wait queue.
 *
 *	Returns DDI_INTR_CLAIMED if any status was processed, else
 *	DDI_INTR_UNCLAIMED.
 *
 *	Locking: the HBA mutex is held for the whole loop; the waitq
 *	mutex is taken per iteration.  On exit the HBA mutex is dropped
 *	BEFORE the waitq mutex (see the comment at the bottom).
 */
int
ghd_intr(ccc_t *cccp, void *intr_status)
{
	int (*statfunc)(void *, void *) = cccp->ccc_get_status;
	void (*processfunc)(void *, void *) = cccp->ccc_process_intr;
	kmutex_t *waitq_mutexp = &cccp->ccc_waitq_mutex;
	kmutex_t *hba_mutexp = &cccp->ccc_hba_mutex;
	void		  *handle = cccp->ccc_hba_handle;
	int		   rc = DDI_INTR_UNCLAIMED;
	int		   more;


	mutex_enter(hba_mutexp);

	GDBG_INTR(("ghd_intr(): cccp=0x%p status=0x%p\n",
		cccp, intr_status));

	for (;;) {
		more = FALSE;

		/* process the interrupt status */
		while ((*statfunc)(handle, intr_status)) {
			(*processfunc)(handle, intr_status);
			rc = DDI_INTR_CLAIMED;
			more = TRUE;
		}
		mutex_enter(waitq_mutexp);
		/* nonzero means it restarted work; loop to pick up status */
		if (ghd_waitq_process_and_mutex_hold(cccp)) {
			ASSERT(mutex_owned(hba_mutexp));
			mutex_exit(waitq_mutexp);
			continue;
		}
		/* status was seen this pass; poll the hardware once more */
		if (more) {
			mutex_exit(waitq_mutexp);
			continue;
		}
		GDBG_INTR(("ghd_intr(): done cccp=0x%p status=0x%p rc %d\n",
			cccp, intr_status, rc));
		/*
		 * Release the mutexes in the opposite order that they
		 * were acquired to prevent requests queued by
		 * ghd_transport() from getting hung up in the wait queue.
		 */
		mutex_exit(hba_mutexp);
		mutex_exit(waitq_mutexp);
		return (rc);
	}
}
428 
/*
 * ghd_poll()
 *
 *	Busy-wait (1 msec per iteration) until the wait condition given
 *	by "polltype" is satisfied or "polltime" lbolt ticks elapse
 *	(polltime == 0 means wait forever):
 *
 *	  GHD_POLL_REQUEST  - poll_gcmdp appears on the done queue
 *	  GHD_POLL_DEVICE   - gtgtp's device has no active requests
 *	  GHD_POLL_ALL      - the whole HBA has no active requests
 *
 *	Each iteration polls the hardware for status, restarts waitq
 *	requests, and services one pending timeout.  Other requests that
 *	complete while polling are parked on a local FIFO and re-queued
 *	on the doneq afterwards so completion-callback order is kept.
 *
 *	Returns TRUE if the condition was met, FALSE on timeout.
 *	Caller must hold the HBA mutex and be in doneq pollmode.
 */
static int
ghd_poll(ccc_t	*cccp,
	gpoll_t	 polltype,
	ulong_t	 polltime,
	gcmd_t	*poll_gcmdp,
	gtgt_t	*gtgtp,
	void	*intr_status)
{
	gcmd_t	*gcmdp;
	L2el_t	 gcmd_hold_queue;
	int	 got_it = FALSE;
	clock_t	 start_lbolt;
	clock_t	 current_lbolt;


	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	L2_INIT(&gcmd_hold_queue);

	/* Que hora es? */
	start_lbolt = ddi_get_lbolt();

	/* unqueue and save all CMD/CCBs until I find the right one */
	while (!got_it) {

		/* Give up yet? */
		current_lbolt = ddi_get_lbolt();
		if (polltime && (current_lbolt - start_lbolt >= polltime))
			break;

		/*
		 * delay 1 msec each time around the loop (this is an
		 * arbitrary delay value, any value should work) except
		 * zero because some devices don't like being polled too
		 * fast and it saturates the bus on an MP system.
		 */
		drv_usecwait(1000);

		/*
		 * check for any new device status
		 */
		if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
			(*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
			    intr_status);

		/*
		 * If something completed then try to start the
		 * next request from the wait queue. Don't release
		 * the HBA mutex because I don't know whether my
		 * request(s) is/are on the done queue yet.
		 */
		mutex_enter(&cccp->ccc_waitq_mutex);
		(void) ghd_waitq_process_and_mutex_hold(cccp);
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Process the first of any timed-out requests.
		 */
		ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);

		/*
		 * Unqueue all the completed requests, look for mine
		 */
		while (gcmdp = ghd_doneq_get(cccp)) {
			/*
			 * If we got one and it's my request, then
			 * we're done.
			 */
			if (gcmdp == poll_gcmdp) {
				poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
				got_it = TRUE;
				continue;
			}
			/* fifo queue the other cmds on my local list */
			L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
		}


		/*
		 * Check whether we're done yet.
		 */
		switch (polltype) {
		case GHD_POLL_DEVICE:
			/*
			 * wait for everything queued on a specific device
			 */
			if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_ALL:
			/*
			 * if waiting for all outstanding requests and
			 * if active list is now empty then exit
			 */
			if (GHBA_NACTIVE(cccp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_REQUEST:
			/* handled by the doneq scan above */
			break;

		}
	}

	if (L2_EMPTY(&gcmd_hold_queue)) {
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
		ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
		return (got_it);
	}

	/*
	 * copy the local gcmd_hold_queue back to the doneq so
	 * that the order of completion callbacks is preserved
	 */
	while (gcmdp = L2_next(&gcmd_hold_queue)) {
		L2_delete(&gcmdp->cmd_q);
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	return (got_it);
}
552 
553 
554 /*
555  * ghd_tran_abort()
556  *
557  *	Abort specific command on a target.
558  *
559  */
560 
561 int
562 ghd_tran_abort(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp, void *intr_status)
563 {
564 	gact_t	 action;
565 	int	 rc;
566 
567 	/*
568 	 * call the driver's abort_cmd function
569 	 */
570 
571 	mutex_enter(&cccp->ccc_hba_mutex);
572 	ghd_doneq_pollmode_enter(cccp);
573 
574 	switch (gcmdp->cmd_state) {
575 	case GCMD_STATE_WAITQ:
576 		/* not yet started */
577 		action = GACTION_EARLY_ABORT;
578 		break;
579 
580 	case GCMD_STATE_ACTIVE:
581 		/* in progress */
582 		action = GACTION_ABORT_CMD;
583 		break;
584 
585 	default:
586 		/* everything else, probably already being aborted */
587 		rc = FALSE;
588 		goto exit;
589 	}
590 
591 	/* stop the timer and remove it from the active list */
592 	GHD_TIMER_STOP(cccp, gcmdp);
593 
594 	/* start a new timer and send out the abort command */
595 	ghd_timer_newstate(cccp, gcmdp, gtgtp, action, GHD_TGTREQ);
596 
597 	/* wait for the abort to complete */
598 	if (rc = ghd_poll(cccp, GHD_POLL_REQUEST, ghd_tran_abort_timeout,
599 	    gcmdp, gtgtp, intr_status)) {
600 		gcmdp->cmd_state = GCMD_STATE_DONEQ;
601 		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
602 	}
603 
604 exit:
605 	ghd_doneq_pollmode_exit(cccp);
606 
607 	mutex_enter(&cccp->ccc_waitq_mutex);
608 	ghd_waitq_process_and_mutex_exit(cccp);
609 
610 	return (rc);
611 }
612 
613 
614 /*
615  * ghd_tran_abort_lun()
616  *
617  *	Abort all commands on a specific target.
618  *
619  */
620 
621 int
622 ghd_tran_abort_lun(ccc_t *cccp,	gtgt_t *gtgtp, void *intr_status)
623 {
624 	int	 rc;
625 
626 	/*
627 	 * call the HBA driver's abort_device function
628 	 */
629 
630 	mutex_enter(&cccp->ccc_hba_mutex);
631 	ghd_doneq_pollmode_enter(cccp);
632 
633 	/* send out the abort device request */
634 	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_ABORT_DEV, GHD_TGTREQ);
635 
636 	/* wait for the device to go idle */
637 	rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_abort_lun_timeout,
638 		NULL, gtgtp, intr_status);
639 
640 	ghd_doneq_pollmode_exit(cccp);
641 
642 	mutex_enter(&cccp->ccc_waitq_mutex);
643 	ghd_waitq_process_and_mutex_exit(cccp);
644 
645 	return (rc);
646 }
647 
648 
649 
650 /*
651  * ghd_tran_reset_target()
652  *
653  *	reset the target device
654  *
655  *
656  */
657 
658 int
659 ghd_tran_reset_target(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
660 {
661 	int rc = TRUE;
662 
663 
664 	mutex_enter(&cccp->ccc_hba_mutex);
665 	ghd_doneq_pollmode_enter(cccp);
666 
667 	/* send out the device reset request */
668 	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_TARGET, GHD_TGTREQ);
669 
670 	/* wait for the device to reset */
671 	rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_reset_target_timeout,
672 		NULL, gtgtp, intr_status);
673 
674 	ghd_doneq_pollmode_exit(cccp);
675 
676 	mutex_enter(&cccp->ccc_waitq_mutex);
677 	ghd_waitq_process_and_mutex_exit(cccp);
678 
679 	return (rc);
680 }
681 
682 
683 
684 /*
685  * ghd_tran_reset_bus()
686  *
687  *	reset the scsi bus
688  *
689  */
690 
691 int
692 ghd_tran_reset_bus(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
693 {
694 	int	rc;
695 
696 	mutex_enter(&cccp->ccc_hba_mutex);
697 	ghd_doneq_pollmode_enter(cccp);
698 
699 	/* send out the bus reset request */
700 	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_BUS, GHD_TGTREQ);
701 
702 	/*
703 	 * Wait for all active requests on this HBA to complete
704 	 */
705 	rc = ghd_poll(cccp, GHD_POLL_ALL, ghd_tran_reset_bus_timeout,
706 		NULL, NULL, intr_status);
707 
708 
709 	ghd_doneq_pollmode_exit(cccp);
710 
711 	mutex_enter(&cccp->ccc_waitq_mutex);
712 	ghd_waitq_process_and_mutex_exit(cccp);
713 
714 	return (rc);
715 }
716 
717 
/*
 * ghd_transport()
 *
 *	Queue a request for transport to the device and, if "polled"
 *	(FLAG_NOINTR), busy-wait for it to complete before returning.
 *
 *	Always returns TRAN_ACCEPT.  Caller must hold neither the HBA
 *	nor the waitq mutex (asserted below).
 */
int
ghd_transport(ccc_t	*cccp,
		gcmd_t	*gcmdp,
		gtgt_t	*gtgtp,
		ulong_t	 timeout,
		int	 polled,
		void	*intr_status)
{
	gdev_t	*gdevp = gtgtp->gt_gdevp;

	ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

	if (polled) {
		/*
		 * Grab the HBA mutex so no other requests are started
		 * until after this one completes.
		 */
		mutex_enter(&cccp->ccc_hba_mutex);

		GDBG_START(("ghd_transport: polled"
			" cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
				cccp, gdevp, gtgtp, gcmdp));

		/*
		 * Lock the doneq so no other thread flushes the Q.
		 */
		ghd_doneq_pollmode_enter(cccp);
	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		GDBG_START(("ghd_transport: non-polled"
			" cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
				cccp, gdevp, gtgtp, gcmdp));
	}
#endif
	/*
	 * add this request to the tail of the waitq
	 */
	gcmdp->cmd_waitq_level = 1;
	mutex_enter(&cccp->ccc_waitq_mutex);
	L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);

	/*
	 * Add this request to the packet timer active list and start its
	 * abort timer.
	 */
	gcmdp->cmd_state = GCMD_STATE_WAITQ;
	ghd_timer_start(cccp, gcmdp, timeout);


	/*
	 * Check the device wait queue throttle and perhaps move
	 * some requests to the end of the HBA wait queue.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	if (!polled) {
		/*
		 * See if the HBA mutex is available but use the
		 * tryenter so I don't deadlock.
		 */
		if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
			/* The HBA mutex isn't available */
			/* (ghd_intr()/another thread will start the I/O) */
			GDBG_START(("ghd_transport: !mutex cccp 0x%p\n", cccp));
			mutex_exit(&cccp->ccc_waitq_mutex);
			return (TRAN_ACCEPT);
		}
		GDBG_START(("ghd_transport: got mutex cccp 0x%p\n", cccp));

		/*
		 * start as many requests as possible from the head
		 * of the HBA wait queue
		 */

		ghd_waitq_process_and_mutex_exit(cccp);

		ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

		return (TRAN_ACCEPT);
	}


	/*
	 * If polled mode (FLAG_NOINTR specified in scsi_pkt flags),
	 * then ghd_poll() waits until the request completes or times out
	 * before returning.
	 */

	mutex_exit(&cccp->ccc_waitq_mutex);
	(void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	/* call HBA's completion function but don't do callback to target */
	(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);

	GDBG_START(("ghd_transport: polled done cccp 0x%p\n", cccp));
	return (TRAN_ACCEPT);
}
821 
822 int ghd_reset_notify(ccc_t 	*cccp,
823 			gtgt_t *gtgtp,
824 			int 	flag,
825 			void 	(*callback)(caddr_t),
826 			caddr_t arg)
827 {
828 	ghd_reset_notify_list_t *rnp;
829 	int rc = FALSE;
830 
831 	switch (flag) {
832 
833 	case SCSI_RESET_NOTIFY:
834 
835 		rnp = (ghd_reset_notify_list_t *)kmem_zalloc(sizeof (*rnp),
836 		    KM_SLEEP);
837 		rnp->gtgtp = gtgtp;
838 		rnp->callback = callback;
839 		rnp->arg = arg;
840 
841 		mutex_enter(&cccp->ccc_reset_notify_mutex);
842 		L2_add(&cccp->ccc_reset_notify_list, &rnp->l2_link,
843 		    (void *)rnp);
844 		mutex_exit(&cccp->ccc_reset_notify_mutex);
845 
846 		rc = TRUE;
847 
848 		break;
849 
850 	case SCSI_RESET_CANCEL:
851 
852 		mutex_enter(&cccp->ccc_reset_notify_mutex);
853 		for (rnp = (ghd_reset_notify_list_t *)
854 			L2_next(&cccp->ccc_reset_notify_list);
855 		    rnp != NULL;
856 		    rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
857 			if (rnp->gtgtp == gtgtp &&
858 			    rnp->callback == callback &&
859 			    rnp->arg == arg) {
860 				L2_delete(&rnp->l2_link);
861 				kmem_free(rnp, sizeof (*rnp));
862 				rc = TRUE;
863 			}
864 		}
865 		mutex_exit(&cccp->ccc_reset_notify_mutex);
866 		break;
867 
868 	default:
869 		rc = FALSE;
870 		break;
871 	}
872 
873 	return (rc);
874 }
875 
876 /*
877  * freeze the HBA waitq output (see ghd_waitq_process_and_mutex_hold),
878  * presumably because of a SCSI reset, for delay milliseconds.
879  */
880 
881 void
882 ghd_freeze_waitq(ccc_t *cccp, int delay)
883 {
884 	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
885 
886 	/* freeze the waitq for delay milliseconds */
887 
888 	mutex_enter(&cccp->ccc_waitq_mutex);
889 	cccp->ccc_waitq_freezetime = ddi_get_lbolt();
890 	cccp->ccc_waitq_freezedelay = delay;
891 	cccp->ccc_waitq_frozen = 1;
892 	mutex_exit(&cccp->ccc_waitq_mutex);
893 }
894 
895 void
896 ghd_queue_hold(ccc_t *cccp)
897 {
898 	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
899 
900 	mutex_enter(&cccp->ccc_waitq_mutex);
901 	cccp->ccc_waitq_held = 1;
902 	mutex_exit(&cccp->ccc_waitq_mutex);
903 }
904 
905 void
906 ghd_queue_unhold(ccc_t *cccp)
907 {
908 	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
909 
910 	mutex_enter(&cccp->ccc_waitq_mutex);
911 	cccp->ccc_waitq_held = 0;
912 	mutex_exit(&cccp->ccc_waitq_mutex);
913 }
914 
915 
916 
917 /*
918  * Trigger previously-registered reset notifications
919  */
920 
921 void
922 ghd_trigger_reset_notify(ccc_t *cccp)
923 {
924 	gcmd_t *gcmdp;
925 
926 	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
927 
928 	/* create magic doneq entry */
929 
930 	gcmdp = ghd_gcmd_alloc((gtgt_t *)NULL, 0, TRUE);
931 	gcmdp->cmd_flags = GCMDFLG_RESET_NOTIFY;
932 
933 	/* put at head of doneq so it's processed ASAP */
934 
935 	GHD_DONEQ_PUT_HEAD(cccp, gcmdp);
936 }
937