/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/scsi/scsi.h>

#include "ghd.h"

/* ghd_poll() function codes: */
typedef enum {
	GHD_POLL_REQUEST,	/* wait for a specific request */
	GHD_POLL_DEVICE,	/* wait for a specific device to idle */
	GHD_POLL_ALL		/* wait for the whole bus to idle */
} gpoll_t;

/*
 * Local functions:
 */
static	gcmd_t	*ghd_doneq_get(ccc_t *cccp);
static	void	 ghd_doneq_pollmode_enter(ccc_t *cccp);
static	void	 ghd_doneq_pollmode_exit(ccc_t *cccp);
static	uint_t	 ghd_doneq_process(caddr_t arg);
static	void	 ghd_do_reset_notify_callbacks(ccc_t *cccp);

static	int	 ghd_poll(ccc_t *cccp, gpoll_t polltype, ulong_t polltime,
			gcmd_t *poll_gcmdp, gtgt_t *gtgtp, void *intr_status);


/*
 * Local configuration variables
 */
#define	DEFAULT_GHD_TIMEOUT    50000	/* Amount of time to poll (50 ms) */

ulong_t	ghd_tran_abort_timeout = DEFAULT_GHD_TIMEOUT;
ulong_t	ghd_tran_abort_lun_timeout = DEFAULT_GHD_TIMEOUT;
ulong_t	ghd_tran_reset_target_timeout = DEFAULT_GHD_TIMEOUT;
ulong_t	ghd_tran_reset_bus_timeout = DEFAULT_GHD_TIMEOUT;

static int
ghd_doneq_init(ccc_t *cccp)
{
	ddi_iblock_cookie_t iblock;

	L2_INIT(&cccp->ccc_doneq);
	cccp->ccc_hba_pollmode = TRUE;

	if (ddi_add_softintr(cccp->ccc_hba_dip, DDI_SOFTINT_LOW,
	    &cccp->ccc_doneq_softid, &iblock, NULL,
	    ghd_doneq_process, (caddr_t)cccp) != DDI_SUCCESS) {
		GDBG_ERROR(("ghd_doneq_init: add softintr failed cccp 0x%p\n",
		    (void *)cccp));
		return (FALSE);
	}

	mutex_init(&cccp->ccc_doneq_mutex, NULL, MUTEX_DRIVER, iblock);
	ghd_doneq_pollmode_exit(cccp);
	return (TRUE);
}

/*
 * ghd_complete():
 *
 *	The HBA driver calls this entry point when it's completely
 *	done processing a request.
 *
 *	See the GHD_COMPLETE_INLINE() macro in ghd.h for the actual code.
 */

void
ghd_complete(ccc_t *cccp, gcmd_t *gcmdp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	GHD_COMPLETE_INLINE(cccp, gcmdp);
}
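
/*
 * Illustrative sketch (not part of the original source): an HBA driver
 * normally calls ghd_complete() from its process_intr callback, with the
 * HBA mutex already held by the framework, once the controller reports
 * that a CCB has finished.  The xxx_* names below are hypothetical:
 *
 *	static void
 *	xxx_process_intr(void *hba_handle, void *intr_status)
 *	{
 *		struct xxx_softp *softp = hba_handle;
 *		struct xxx_ccb *ccbp = xxx_pop_completed(softp, intr_status);
 *
 *		if (ccbp != NULL)
 *			ghd_complete(&softp->sc_ccc, ccbp->ccb_gcmdp);
 *	}
 */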


/*
 * ghd_doneq_put_head():
 *
 *	Mark the request done and prepend it to the doneq.
 *	See the GHD_DONEQ_PUT_HEAD_INLINE() macro in ghd.h for
 *	the actual code.
 */
void
ghd_doneq_put_head(ccc_t *cccp, gcmd_t *gcmdp)
{
	GHD_DONEQ_PUT_HEAD_INLINE(cccp, gcmdp)
}

/*
 * ghd_doneq_put_tail():
 *
 *	Mark the request done and append it to the doneq.
 *	See the GHD_DONEQ_PUT_TAIL_INLINE() macro in ghd.h for
 *	the actual code.
 */
void
ghd_doneq_put_tail(ccc_t *cccp, gcmd_t *gcmdp)
{
	GHD_DONEQ_PUT_TAIL_INLINE(cccp, gcmdp)
}

static gcmd_t	*
ghd_doneq_get(ccc_t *cccp)
{
	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
	gcmd_t	 *gcmdp;

	mutex_enter(doneq_mutexp);
	if ((gcmdp = L2_next(&cccp->ccc_doneq)) != NULL)
		L2_delete(&gcmdp->cmd_q);
	mutex_exit(doneq_mutexp);
	return (gcmdp);
}

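/*
 * ghd_doneq_pollmode_enter() and ghd_doneq_pollmode_exit() bracket a
 * FLAG_NOINTR (polled) request.  While ccc_hba_pollmode is TRUE the
 * soft-interrupt handler leaves the doneq alone so that ghd_poll() can
 * drain it directly; on exit, any requests that accumulated on the doneq
 * are handed back to the soft interrupt (or processed directly if the
 * system is panicking).
 */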
static void
ghd_doneq_pollmode_enter(ccc_t *cccp)
{
	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;

	mutex_enter(doneq_mutexp);
	cccp->ccc_hba_pollmode = TRUE;
	mutex_exit(doneq_mutexp);
}


static void
ghd_doneq_pollmode_exit(ccc_t *cccp)
{
	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;

	mutex_enter(doneq_mutexp);
	cccp->ccc_hba_pollmode = FALSE;
	mutex_exit(doneq_mutexp);

	/* trigger software interrupt for the completion callbacks */
	if (!L2_EMPTY(&cccp->ccc_doneq)) {
		/*
		 * If we are panicking we should just call the completion
		 * function directly as we cannot use soft interrupts
		 * or timeouts during panic.
		 */
		if (!ddi_in_panic())
			ddi_trigger_softintr(cccp->ccc_doneq_softid);
		else
			(void) ghd_doneq_process((caddr_t)cccp);
	}
}


/* ***************************************************************** */

/*
 *
 * ghd_doneq_process()
 *
 *	This function is called directly from the software interrupt
 *	handler.
 *
 *	The doneq is protected by a mutex separate from the
 *	HBA mutex in order to avoid mutex contention on MP systems.
 *
 */

static uint_t
ghd_doneq_process(caddr_t arg)
{
	ccc_t		*cccp = (ccc_t *)arg;
	kmutex_t	*doneq_mutexp;
	gcmd_t		*gcmdp;
	int		rc = DDI_INTR_UNCLAIMED;

	doneq_mutexp = &cccp->ccc_doneq_mutex;

	for (;;) {
		mutex_enter(doneq_mutexp);
		/* skip if FLAG_NOINTR request in progress */
		if (cccp->ccc_hba_pollmode)
			break;
		/* pop the first one from the done Q */
		if ((gcmdp = L2_next(&cccp->ccc_doneq)) == NULL)
			break;
		L2_delete(&gcmdp->cmd_q);

		if (gcmdp->cmd_flags & GCMDFLG_RESET_NOTIFY) {
			/* special request; processed here and discarded */
			ghd_do_reset_notify_callbacks(cccp);
			ghd_gcmd_free(gcmdp);
			mutex_exit(doneq_mutexp);
			continue;
		}

		/*
		 * drop the mutex since the completion
		 * function can re-enter the top half via
		 * ghd_transport()
		 */
		mutex_exit(doneq_mutexp);
		gcmdp->cmd_state = GCMD_STATE_IDLE;
		(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, TRUE);
#ifdef notyet
		/* I don't think this is ever necessary */
		rc = DDI_INTR_CLAIMED;
#endif
	}
	mutex_exit(doneq_mutexp);
	return (rc);
}

static void
ghd_do_reset_notify_callbacks(ccc_t *cccp)
{
	ghd_reset_notify_list_t *rnp;
	L2el_t *rnl = &cccp->ccc_reset_notify_list;

	ASSERT(mutex_owned(&cccp->ccc_doneq_mutex));

	/* lock the reset notify list while we operate on it */
	mutex_enter(&cccp->ccc_reset_notify_mutex);

	for (rnp = (ghd_reset_notify_list_t *)L2_next(rnl);
	    rnp != NULL;
	    rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {

		/* don't call if HBA driver didn't set it */
		if (cccp->ccc_hba_reset_notify_callback) {
			(*cccp->ccc_hba_reset_notify_callback)(rnp->gtgtp,
			    rnp->callback, rnp->arg);
		}
	}
	mutex_exit(&cccp->ccc_reset_notify_mutex);
}


/* ***************************************************************** */


/*
 * ghd_register()
 *
 *	Do the usual interrupt handler setup stuff.
 *
 *	Also, set up three mutexes: the wait queue mutex, the HBA
 *	mutex, and the done queue mutex. The permitted locking
 *	orders are:
 *
 *		1. enter(waitq)
 *		2. enter(activel)
 *		3. enter(doneq)
 *		4. enter(HBA) then enter(activel)
 *		5. enter(HBA) then enter(doneq)
 *		6. enter(HBA) then enter(waitq)
 *		7. enter(waitq) then tryenter(HBA)
 *
 *	Note: cases 6 and 7 won't deadlock because case 7 is always a
 *	mutex_tryenter() call.
 *
 */

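/*
 * Illustrative sketch (not part of the original source): an HBA driver
 * typically calls ghd_register() from its attach(9E) routine, passing its
 * per-instance ccc_t and callback vector.  The xxx_* names below are
 * hypothetical:
 *
 *	if (!ghd_register("xxx", &softp->sc_ccc, dip, 0, softp,
 *	    xxx_ccballoc, xxx_ccbfree, xxx_sg_func, xxx_hba_start,
 *	    xxx_hba_complete, xxx_int_handler, xxx_get_status,
 *	    xxx_process_intr, xxx_timeout_func, &xxx_timer_conf,
 *	    xxx_reset_notify_callback))
 *		return (DDI_FAILURE);
 *
 * On success the framework owns the hardware interrupt handler, the
 * packet timer, and the doneq soft interrupt; ghd_unregister() undoes
 * all of that at detach(9E) time.
 */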
int
ghd_register(char *labelp,
	ccc_t	*cccp,
	dev_info_t *dip,
	int	inumber,
	void	*hba_handle,
	int	(*ccballoc)(gtgt_t *, gcmd_t *, int, int, int, int),
	void	(*ccbfree)(gcmd_t *),
	void	(*sg_func)(gcmd_t *, ddi_dma_cookie_t *, int, int),
	int	(*hba_start)(void *, gcmd_t *),
	void	(*hba_complete)(void *, gcmd_t *, int),
	uint_t	(*int_handler)(caddr_t),
	int	(*get_status)(void *, void *),
	void	(*process_intr)(void *, void *),
	int	(*timeout_func)(void *, gcmd_t *, gtgt_t *, gact_t, int),
	tmr_t	*tmrp,
	void	(*hba_reset_notify_callback)(gtgt_t *,
			void (*)(caddr_t), caddr_t))
{

	cccp->ccc_label = labelp;
	cccp->ccc_hba_dip = dip;
	cccp->ccc_ccballoc = ccballoc;
	cccp->ccc_ccbfree = ccbfree;
	cccp->ccc_sg_func = sg_func;
	cccp->ccc_hba_start = hba_start;
	cccp->ccc_hba_complete = hba_complete;
	cccp->ccc_process_intr = process_intr;
	cccp->ccc_get_status = get_status;
	cccp->ccc_hba_handle = hba_handle;
	cccp->ccc_hba_reset_notify_callback = hba_reset_notify_callback;

	/* initialize the HBA's list headers */
	CCCP_INIT(cccp);

	if (ddi_get_iblock_cookie(dip, inumber, &cccp->ccc_iblock)
	    != DDI_SUCCESS) {

		return (FALSE);
	}

	mutex_init(&cccp->ccc_hba_mutex, NULL, MUTEX_DRIVER, cccp->ccc_iblock);

	mutex_init(&cccp->ccc_waitq_mutex, NULL, MUTEX_DRIVER,
	    cccp->ccc_iblock);

	mutex_init(&cccp->ccc_reset_notify_mutex, NULL, MUTEX_DRIVER,
	    cccp->ccc_iblock);

	/* Establish interrupt handler */
	if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
	    int_handler, (caddr_t)hba_handle) != DDI_SUCCESS) {
		mutex_destroy(&cccp->ccc_hba_mutex);
		mutex_destroy(&cccp->ccc_waitq_mutex);
		mutex_destroy(&cccp->ccc_reset_notify_mutex);

		return (FALSE);
	}

	if (ghd_timer_attach(cccp, tmrp, timeout_func) == FALSE) {
		ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
		mutex_destroy(&cccp->ccc_hba_mutex);
		mutex_destroy(&cccp->ccc_waitq_mutex);
		mutex_destroy(&cccp->ccc_reset_notify_mutex);

		return (FALSE);
	}

	if (ghd_doneq_init(cccp)) {

		return (TRUE);
	}

	/*
	 * ghd_doneq_init() failed; undo the setup done above:
	 */

	ghd_timer_detach(cccp);
	ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
	mutex_destroy(&cccp->ccc_hba_mutex);
	mutex_destroy(&cccp->ccc_waitq_mutex);
	mutex_destroy(&cccp->ccc_reset_notify_mutex);

	return (FALSE);

}


void
ghd_unregister(ccc_t *cccp)
{
	ghd_timer_detach(cccp);
	ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
	ddi_remove_softintr(cccp->ccc_doneq_softid);
	mutex_destroy(&cccp->ccc_hba_mutex);
	mutex_destroy(&cccp->ccc_waitq_mutex);
	mutex_destroy(&cccp->ccc_doneq_mutex);
}




int
ghd_intr(ccc_t *cccp, void *intr_status)
{
	int (*statfunc)(void *, void *) = cccp->ccc_get_status;
	void (*processfunc)(void *, void *) = cccp->ccc_process_intr;
	kmutex_t *waitq_mutexp = &cccp->ccc_waitq_mutex;
	kmutex_t *hba_mutexp = &cccp->ccc_hba_mutex;
	void		*handle = cccp->ccc_hba_handle;
	int		rc = DDI_INTR_UNCLAIMED;
	int		more;


	mutex_enter(hba_mutexp);

	GDBG_INTR(("ghd_intr(): cccp=0x%p status=0x%p\n",
	    (void *)cccp, intr_status));

	for (;;) {
		more = FALSE;

		/* process the interrupt status */
		while ((*statfunc)(handle, intr_status)) {
			(*processfunc)(handle, intr_status);
			rc = DDI_INTR_CLAIMED;
			more = TRUE;
		}
		mutex_enter(waitq_mutexp);
		if (ghd_waitq_process_and_mutex_hold(cccp)) {
			ASSERT(mutex_owned(hba_mutexp));
			mutex_exit(waitq_mutexp);
			continue;
		}
		if (more) {
			mutex_exit(waitq_mutexp);
			continue;
		}
		GDBG_INTR(("ghd_intr(): done cccp=0x%p status=0x%p rc %d\n",
		    (void *)cccp, intr_status, rc));
		/*
		 * Release the mutexes in the opposite order that they
		 * were acquired to prevent requests queued by
		 * ghd_transport() from getting hung up in the wait queue.
		 */
		mutex_exit(hba_mutexp);
		mutex_exit(waitq_mutexp);
		return (rc);
	}
}
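
/*
 * Illustrative sketch (not part of the original source): the int_handler
 * passed to ghd_register() is installed as the hardware interrupt handler
 * with hba_handle as its argument, and usually just lets ghd_intr() drive
 * the get_status/process_intr callbacks.  The xxx_* names are
 * hypothetical:
 *
 *	static uint_t
 *	xxx_int_handler(caddr_t arg)
 *	{
 *		struct xxx_softp *softp = (struct xxx_softp *)arg;
 *		struct xxx_intr_status status;
 *
 *		return (ghd_intr(&softp->sc_ccc, &status));
 *	}
 */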

static int
ghd_poll(ccc_t	*cccp,
	gpoll_t	 polltype,
	ulong_t	 polltime,
	gcmd_t	*poll_gcmdp,
	gtgt_t	*gtgtp,
	void	*intr_status)
{
	gcmd_t	*gcmdp;
	L2el_t	 gcmd_hold_queue;
	int	 got_it = FALSE;
	clock_t  poll_lbolt;
	clock_t	 start_lbolt;
	clock_t	 current_lbolt;


	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	L2_INIT(&gcmd_hold_queue);

	/* What time is it? */
	poll_lbolt = drv_usectohz((clock_t)polltime);
	start_lbolt = ddi_get_lbolt();

	/* unqueue and save all CMD/CCBs until I find the right one */
	while (!got_it) {

		/* Give up yet? */
		current_lbolt = ddi_get_lbolt();
		if (poll_lbolt && (current_lbolt - start_lbolt >= poll_lbolt))
			break;

		/*
		 * Delay 1 msec each time around the loop (the value is
		 * arbitrary; any nonzero delay should work) because some
		 * devices don't like being polled too fast and polling
		 * with no delay saturates the bus on an MP system.
		 */
		drv_usecwait(1000);

		/*
		 * check for any new device status
		 */
		if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
			(*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
			    intr_status);

		/*
		 * If something completed then try to start the
		 * next request from the wait queue. Don't release
		 * the HBA mutex because I don't know whether my
		 * request(s) are on the done queue yet.
		 */
		mutex_enter(&cccp->ccc_waitq_mutex);
		(void) ghd_waitq_process_and_mutex_hold(cccp);
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Process the first of any timed-out requests.
		 */
		ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);

		/*
		 * Unqueue all the completed requests, looking for mine.
		 */
		while ((gcmdp = ghd_doneq_get(cccp)) != NULL) {
			/*
			 * If we got one and it's my request, then
			 * we're done.
			 */
			if (gcmdp == poll_gcmdp) {
				poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
				got_it = TRUE;
				continue;
			}
			/* FIFO queue the other cmds on my local list */
			L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
		}


		/*
		 * Check whether we're done yet.
		 */
		switch (polltype) {
		case GHD_POLL_DEVICE:
			/*
			 * wait for everything queued on a specific device
			 */
			if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_ALL:
			/*
			 * if waiting for all outstanding requests and
			 * the active list is now empty then exit
			 */
			if (GHBA_NACTIVE(cccp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_REQUEST:
			break;

		}
	}

	if (L2_EMPTY(&gcmd_hold_queue)) {
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
		ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
		return (got_it);
	}

	/*
	 * copy the local gcmd_hold_queue back to the doneq so
	 * that the order of completion callbacks is preserved
	 */
	while ((gcmdp = L2_next(&gcmd_hold_queue)) != NULL) {
		L2_delete(&gcmdp->cmd_q);
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	return (got_it);
}


/*
 * ghd_tran_abort()
 *
 *	Abort a specific command on a target.
 *
 */

int
ghd_tran_abort(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp, void *intr_status)
{
	gact_t	 action;
	int	 rc;

	/*
	 * call the driver's abort_cmd function
	 */

	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	switch (gcmdp->cmd_state) {
	case GCMD_STATE_WAITQ:
		/* not yet started */
		action = GACTION_EARLY_ABORT;
		break;

	case GCMD_STATE_ACTIVE:
		/* in progress */
		action = GACTION_ABORT_CMD;
		break;

	default:
		/* everything else, probably already being aborted */
		rc = FALSE;
		goto exit;
	}

	/* stop the timer and remove it from the active list */
	GHD_TIMER_STOP(cccp, gcmdp);

	/* start a new timer and send out the abort command */
	ghd_timer_newstate(cccp, gcmdp, gtgtp, action, GHD_TGTREQ);

	/* wait for the abort to complete */
	if ((rc = ghd_poll(cccp, GHD_POLL_REQUEST, ghd_tran_abort_timeout,
	    gcmdp, gtgtp, intr_status)) != FALSE) {
		gcmdp->cmd_state = GCMD_STATE_DONEQ;
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

exit:
	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}


/*
 * ghd_tran_abort_lun()
 *
 *	Abort all commands on a specific logical unit.
 *
 */

int
ghd_tran_abort_lun(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
	int	 rc;

	/*
	 * call the HBA driver's abort_device function
	 */

	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	/* send out the abort device request */
	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_ABORT_DEV, GHD_TGTREQ);

	/* wait for the device to go idle */
	rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_abort_lun_timeout,
	    NULL, gtgtp, intr_status);

	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}




/*
 * ghd_tran_reset_target()
 *
 *	reset the target device
 *
 */

int
ghd_tran_reset_target(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
	int rc = TRUE;


	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	/* send out the device reset request */
	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_TARGET, GHD_TGTREQ);

	/* wait for the device to reset */
	rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_reset_target_timeout,
	    NULL, gtgtp, intr_status);

	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}




/*
 * ghd_tran_reset_bus()
 *
 *	reset the SCSI bus
 *
 */

int
ghd_tran_reset_bus(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
	int	rc;

	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	/* send out the bus reset request */
	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_BUS, GHD_TGTREQ);

	/*
	 * Wait for all active requests on this HBA to complete
	 */
	rc = ghd_poll(cccp, GHD_POLL_ALL, ghd_tran_reset_bus_timeout,
	    NULL, NULL, intr_status);


	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}
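
/*
 * Illustrative sketch (not part of the original source): the ghd_tran_*
 * helpers above are usually wired straight into an HBA driver's
 * scsi_hba_tran(9S) entry points, e.g. a tran_reset(9E) implementation
 * along these lines (the xxx_* names and the intr-status buffer are
 * hypothetical):
 *
 *	static int
 *	xxx_tran_reset(struct scsi_address *ap, int level)
 *	{
 *		struct xxx_softp *softp = xxx_addr_to_softp(ap);
 *		gtgt_t *gtgtp = xxx_addr_to_gtgt(ap);
 *		struct xxx_intr_status status;
 *
 *		if (level == RESET_TARGET)
 *			return (ghd_tran_reset_target(&softp->sc_ccc, gtgtp,
 *			    &status));
 *		return (ghd_tran_reset_bus(&softp->sc_ccc, gtgtp, &status));
 *	}
 */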


int
ghd_transport(ccc_t	*cccp,
		gcmd_t	*gcmdp,
		gtgt_t	*gtgtp,
		ulong_t	 timeout,
		int	 polled,
		void	*intr_status)
{
	gdev_t	*gdevp = gtgtp->gt_gdevp;

	ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

	if (polled) {
		/*
		 * Grab the HBA mutex so no other requests are started
		 * until after this one completes.
		 */
		mutex_enter(&cccp->ccc_hba_mutex);

		GDBG_START(("ghd_transport: polled"
		    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
		    (void *)cccp, (void *)gdevp, (void *)gtgtp, (void *)gcmdp));

		/*
		 * Lock the doneq so no other thread flushes the Q.
		 */
		ghd_doneq_pollmode_enter(cccp);
	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		GDBG_START(("ghd_transport: non-polled"
		    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
		    (void *)cccp, (void *)gdevp, (void *)gtgtp, (void *)gcmdp));
	}
#endif
	/*
	 * add this request to the tail of the waitq
	 */
	gcmdp->cmd_waitq_level = 1;
	mutex_enter(&cccp->ccc_waitq_mutex);
	L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);

	/*
	 * Add this request to the packet timer active list and start its
	 * abort timer.
	 */
	gcmdp->cmd_state = GCMD_STATE_WAITQ;
	ghd_timer_start(cccp, gcmdp, timeout);


	/*
	 * Check the device wait queue throttle and perhaps move
	 * some requests to the end of the HBA wait queue.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	if (!polled) {
		/*
		 * See if the HBA mutex is available but use the
		 * tryenter so I don't deadlock.
		 */
		if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
			/* The HBA mutex isn't available */
			GDBG_START(("ghd_transport: !mutex cccp 0x%p\n",
			    (void *)cccp));
			mutex_exit(&cccp->ccc_waitq_mutex);
			return (TRAN_ACCEPT);
		}
		GDBG_START(("ghd_transport: got mutex cccp 0x%p\n",
		    (void *)cccp));

		/*
		 * start as many requests as possible from the head
		 * of the HBA wait queue
		 */

		ghd_waitq_process_and_mutex_exit(cccp);

		ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

		return (TRAN_ACCEPT);
	}


	/*
	 * If polled mode (FLAG_NOINTR specified in scsi_pkt flags),
	 * then ghd_poll() waits until the request completes or times out
	 * before returning.
	 */

	mutex_exit(&cccp->ccc_waitq_mutex);
	(void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	/* call HBA's completion function but don't do callback to target */
	(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);

	GDBG_START(("ghd_transport: polled done cccp 0x%p\n", (void *)cccp));
	return (TRAN_ACCEPT);
}
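
/*
 * Illustrative sketch (not part of the original source): ghd_transport()
 * typically backs an HBA driver's tran_start(9E) entry point.  The xxx_*
 * helper names are hypothetical, and the timeout units follow whatever
 * convention the driver's ghd_timer configuration expects:
 *
 *	static int
 *	xxx_tran_start(struct scsi_address *ap, struct scsi_pkt *pktp)
 *	{
 *		struct xxx_softp *softp = xxx_addr_to_softp(ap);
 *		gcmd_t *gcmdp = xxx_pkt_to_gcmd(pktp);
 *		gtgt_t *gtgtp = xxx_pkt_to_gtgt(pktp);
 *		struct xxx_intr_status status;
 *
 *		return (ghd_transport(&softp->sc_ccc, gcmdp, gtgtp,
 *		    (ulong_t)pktp->pkt_time, (pktp->pkt_flags & FLAG_NOINTR),
 *		    &status));
 *	}
 */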

int
ghd_reset_notify(ccc_t	*cccp,
			gtgt_t	*gtgtp,
			int	flag,
			void	(*callback)(caddr_t),
			caddr_t	arg)
{
	ghd_reset_notify_list_t *rnp;
	ghd_reset_notify_list_t *nrnp;
	int rc = FALSE;

	switch (flag) {

	case SCSI_RESET_NOTIFY:

		rnp = (ghd_reset_notify_list_t *)kmem_zalloc(sizeof (*rnp),
		    KM_SLEEP);
		rnp->gtgtp = gtgtp;
		rnp->callback = callback;
		rnp->arg = arg;

		mutex_enter(&cccp->ccc_reset_notify_mutex);
		L2_add(&cccp->ccc_reset_notify_list, &rnp->l2_link,
		    (void *)rnp);
		mutex_exit(&cccp->ccc_reset_notify_mutex);

		rc = TRUE;

		break;

	case SCSI_RESET_CANCEL:

		mutex_enter(&cccp->ccc_reset_notify_mutex);
		for (rnp = (ghd_reset_notify_list_t *)
		    L2_next(&cccp->ccc_reset_notify_list);
		    rnp != NULL;
		    rnp = nrnp) {
			/* save the next element before possibly freeing rnp */
			nrnp = (ghd_reset_notify_list_t *)
			    L2_next(&rnp->l2_link);
			if (rnp->gtgtp == gtgtp &&
			    rnp->callback == callback &&
			    rnp->arg == arg) {
				L2_delete(&rnp->l2_link);
				kmem_free(rnp, sizeof (*rnp));
				rc = TRUE;
			}
		}
		mutex_exit(&cccp->ccc_reset_notify_mutex);
		break;

	default:
		rc = FALSE;
		break;
	}

	return (rc);
}
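
/*
 * Illustrative sketch (not part of the original source): ghd_reset_notify()
 * maps directly onto a tran_reset_notify(9E) entry point; the xxx_* names
 * are hypothetical:
 *
 *	static int
 *	xxx_tran_reset_notify(struct scsi_address *ap, int flag,
 *	    void (*callback)(caddr_t), caddr_t arg)
 *	{
 *		struct xxx_softp *softp = xxx_addr_to_softp(ap);
 *		gtgt_t *gtgtp = xxx_addr_to_gtgt(ap);
 *
 *		return (ghd_reset_notify(&softp->sc_ccc, gtgtp, flag,
 *		    callback, arg));
 *	}
 */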

/*
 * freeze the HBA waitq output (see ghd_waitq_process_and_mutex_hold),
 * presumably because of a SCSI reset, for delay milliseconds.
 */

void
ghd_freeze_waitq(ccc_t *cccp, int delay)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	/* freeze the waitq for delay milliseconds */

	mutex_enter(&cccp->ccc_waitq_mutex);
	cccp->ccc_waitq_freezetime = ddi_get_lbolt();
	cccp->ccc_waitq_freezedelay = delay;
	cccp->ccc_waitq_frozen = 1;
	mutex_exit(&cccp->ccc_waitq_mutex);
}

void
ghd_queue_hold(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	mutex_enter(&cccp->ccc_waitq_mutex);
	cccp->ccc_waitq_held = 1;
	mutex_exit(&cccp->ccc_waitq_mutex);
}

void
ghd_queue_unhold(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	mutex_enter(&cccp->ccc_waitq_mutex);
	cccp->ccc_waitq_held = 0;
	mutex_exit(&cccp->ccc_waitq_mutex);
}



/*
 * Trigger previously-registered reset notifications
 */

void
ghd_trigger_reset_notify(ccc_t *cccp)
{
	gcmd_t *gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	/* create magic doneq entry */

	gcmdp = ghd_gcmd_alloc((gtgt_t *)NULL, 0, TRUE);
	gcmdp->cmd_flags = GCMDFLG_RESET_NOTIFY;

	/* put at head of doneq so it's processed ASAP */

	GHD_DONEQ_PUT_HEAD(cccp, gcmdp);
}
950