/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"

#include <sys/callout.h>
#include <sys/kthread.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int mpt_setwidth(struct mpt_softc *, int, int);
static int mpt_setsync(struct mpt_softc *, int, int, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_bus_reset(struct mpt_softc *, int /*sleep_ok*/);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static int mpt_scsi_send_tmf(struct mpt_softc *, u_int /*type*/,
			     u_int /*flags*/, u_int /*channel*/,
			     u_int /*target*/, u_int /*lun*/,
			     u_int /*abort_ctx*/, int /*sleep_ok*/);
static void mpt_recover_commands(struct mpt_softc *mpt);

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	/*
	 * Only attach to nodes that support the initiator
	 * role or have RAID physical devices that need
	 * CAM pass-thru support.
	 */
	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0))
		return (0);
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPTLOCK_2_CAMLOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	mpt->bus = 0;
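	/* Clamp our queue depth to the credits the IOC actually granted. */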
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0)
		goto cleanup;
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0)
		goto cleanup;

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Mark the request as free even though it is not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
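	/* One untagged and up to maxq tagged transactions per device. */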
	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL
	 || mpt->ioc_page2->MaxPhysDisks == 0)
		return (0);

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	CAMLOCK_2_MPTLOCK(mpt);
	return (0);
cleanup:
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_cam_detach(mpt);
	return (error);
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used after a system crash to dump core onto the
 * swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	MPT_LOCK(mpt);
	mpt_intr(mpt);
	MPT_UNLOCK(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
#ifdef NOTYET
	mpt = mpt_find_softc(mpt);
	if (mpt == NULL)
		return;
#else
	mpt = ccb->ccb_h.ccb_mpt_ptr;
#endif

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "Request %p:%u timed out.\n", req, req->serno);
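	/* Hand the request to the recovery thread for abort processing. */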
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments, builds the SGL for the SCSI IO command,
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	bus_dmasync_op_t op;
	MSG_SCSI_IO_REQUEST *mpt_req;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_req = req->req_vbuf;
	mpt_off = req->req_vbuf;

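	/* Refuse requests with more DMA segments than the adapter supports. */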
	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM)
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			ccb->ccb_h.status = CAM_DEV_QFRZN;
			if (error == EFBIG) {
				ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			} else
				ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)&mpt_req->SGL) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) &mpt_req->SGL;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		goto out;
	}

	mpt_req->DataLength = ccb->csio.dxfer_len;
	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		op = BUS_DMASYNC_PREREAD;
	} else {
		op = BUS_DMASYNC_PREWRITE;
	}
	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill in all but the last of the MPT_NSGL_FIRST
	 * slots with SIMPLE64 elements and use the final slot for a
	 * CHAIN64 element that points at the rest.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

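	/* Fill the SGL slots embedded in the request frame itself. */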
	se = (SGE_SIMPLE64 *) &mpt_req->SGL;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		bzero(se, sizeof (*se));
		se->Address.Low = dm_segs->ds_addr;
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
				MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
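	/* ChainOffset is expressed in 32-bit words, hence the shift by 2. */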
	mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		bzero(ce, sizeof (SGE_CHAIN64));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
		}
		ce->Address.Low = (uint32_t) chain_list_addr;
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The chain length is the size, in bytes, of this
			 * frame's segment list plus the trailing chain
			 * element.
			 *
			 * The next chain descriptor offset is the length,
			 * in 32-bit words, of the segment list alone.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			bzero(se, sizeof (*se));
			se->Address.Low = dm_segs->ds_addr;
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    ((uint64_t)dm_segs->ds_addr) >> 32;
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
					MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq = mpt_get_request(mpt, FALSE);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			mpt_req = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * One last check to see whether CAM has aborted this CCB.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	CAMLOCK_2_MPTLOCK(mpt);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		ccb->ccb_h.timeout_ch =
			timeout(mpt_timeout, (caddr_t)ccb,
				(ccb->ccb_h.timeout * hz) / 1000);
	} else {
		callout_handle_init(&ccb->ccb_h.timeout_ch);
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_scsi_io_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	CAMLOCK_2_MPTLOCK(mpt);
	/* Get a request structure off the free list */
	if ((req = mpt_get_request(mpt, /*sleep_ok*/FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		MPTLOCK_2_CAMLOCK(mpt);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	MPTLOCK_2_CAMLOCK(mpt);

#if 0
	COWWWWW
	if (raid_passthru) {
		status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
		     request_t *req)
	}
#endif

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	bzero(mpt_req, sizeof *mpt_req);

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru)
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;

	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
		(csio->sense_len < MPT_SENSE_SIZE) ?
		 csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = ccb->ccb_h.target_id;
	/*
	 * XXX Assumes Single level, Single byte, CAM LUN type.
	 */
	mpt_req->LUN[1] = ccb->ccb_h.target_lun;

	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc)
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		else
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
	}

	if (mpt->is_fc == 0 && mpt->is_sas == 0) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}

	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = csio->dxfer_len;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * If we have any data to send with this command,
	 * map it into bus space.
	 */

	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
			/*
			 * We've been given a pointer to a single buffer.
			 */
			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
				/*
				 * Virtual address that needs to be translated
				 * into one or more physical address ranges.
				 */
				int error;

				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    mpt_execute_req, req, 0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(mpt->sim, 1);
					ccbh->status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				mpt_execute_req(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could easily be supported, but such lists
			 * are not currently generated by the CAM subsystem,
			 * so there is no point in supporting it right now.
			 */
			struct bus_dma_segment *segs;
			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
				mpt_execute_req(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				mpt_execute_req(req, segs, csio->sglist_cnt, 0);
			}
		}
	} else {
		mpt_execute_req(req, NULL, 0, 0);
	}
}

static int
mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
{
	int   error;
	u_int status;

	error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    /*bus*/0, /*target_id*/0, /*target_lun*/0, /*abort_ctx*/0,
	    sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, /*time_ms*/5000);

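	/* Snapshot the TMF status before marking the request free again. */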
	status = mpt->tmf_req->IOCStatus;
	mpt->tmf_req->state = REQ_STATE_FREE;
	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: reset timed out. "
			"Resetting controller.\n");
		mpt_reset(mpt, /*reinit*/TRUE);
		return (ETIMEDOUT);
	} else if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF status 0x%x. "
			"Resetting controller.\n", status);
		mpt_reset(mpt, /*reinit*/TRUE);
		return (EIO);
	}
	return (0);
}

static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
	      MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_ALWAYS, "mpt_cam_event: 0x%x\n",
	    msg->Event & 0xFF);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset */
		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
		    (msg->Data[0] >> 8) & 0xff);
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset */
		mpt_prt(mpt, "Ext Bus Reset\n");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do
		 */
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
/*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: LinkState: %s\n",
		    (msg->Data[1] >> 8) & 0xff,
		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
			    "(Loop Initialization)\n",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xF7:
				if ((msg->Data[0] & 0xff) == 0xF7) {
					printf("Device needs AL_PA\n");
				} else {
					printf("Device %02x doesn't like "
					    "FC performance\n",
					    msg->Data[0] & 0xFF);
				}
				break;
			case 0xF8:
				if ((msg->Data[0] & 0xff) == 0xF7) {
					printf("Device had loop failure at its "
					    "receiver prior to acquiring "
					    "AL_PA\n");
				} else {
					printf("Device %02x detected loop "
					    "failure at its receiver\n",
					    msg->Data[0] & 0xFF);
				}
				break;
			default:
				printf("Device %02x requests that device "
				    "%02x reset itself\n",
				    msg->Data[0] & 0xFF,
				    (msg->Data[0] >> 8) & 0xFF);
				break;
			}
			break;
		case 0x02:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPE(%02x,%02x) (Loop Port Enable)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
			break;
		case 0x03:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
			break;
		default:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
			    "FC event (%02x %02x %02x)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 16) & 0xff, /* Event */
			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;
	case MPI_EVENT_EVENT_CHANGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		/*
		 * Devices are attachin'.....
		 */
		mpt_prt(mpt,
		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
		break;
	default:
		return (/*handled*/0);
	}
	return (/*handled*/1);
}

/*
 * Reply path for all SCSI I/O requests, called from our
 * interrupt handler by extracting our handler index from
 * the MsgContext field of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error.  All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
 */
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
		       MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_IO_REQUEST *scsi_req;
	union ccb *ccb;

	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
	ccb = req->ccb;
	if (ccb == NULL) {
		mpt_prt(mpt, "Completion without CCB. State %#x, Func %#x\n",
			req->state, scsi_req->Function);
		mpt_print_scsi_io_request(scsi_req);
		return (/*free_reply*/TRUE);
	}

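	/* The request has completed; cancel its watchdog timeout. */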
	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}

	if (reply_frame == NULL) {
		/*
		 * Context only reply, completion
		 * without error status.
		 */
		ccb->csio.resid = 0;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		ccb->csio.scsi_status = SCSI_STATUS_OK;
	} else {
		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
	}

	if (mpt->outofbeer) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		mpt->outofbeer = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	MPTLOCK_2_CAMLOCK(mpt);
	if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
	 && scsi_req->CDB[0] == INQUIRY
	 && (scsi_req->CDB[1] & SI_EVPD) == 0) {
		struct scsi_inquiry_data *inq;

		/*
		 * Fake out the device type so that only the
		 * pass-thru device will attach.
		 */
		inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
		inq->device &= ~0x1F;
		inq->device |= T_NODEVICE;
	}
	xpt_done(ccb);
	CAMLOCK_2_MPTLOCK(mpt);
	if ((req->state & REQ_STATE_TIMEDOUT) == 0)
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	else
		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
		mpt_free_request(mpt, req);
		return (/*free_reply*/TRUE);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	wakeup(req);
	return (/*free_reply*/TRUE);
}

static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
			   MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
	uint16_t		  status;

	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));

	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;

	/* Record status of TMF for any waiters. */
	req->IOCStatus = tmf_reply->IOCStatus;
	status = le16toh(tmf_reply->IOCStatus);
	mpt_lprt(mpt,
	    (status == MPI_IOCSTATUS_SUCCESS)? MPT_PRT_DEBUG : MPT_PRT_ERROR,
	    "TMF Complete: req %p:%u status 0x%x\n", req, req->serno, status);
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		req->state |= REQ_STATE_DONE;
		wakeup(req);
	} else
		mpt->tmf_req->state = REQ_STATE_FREE;

	return (/*free_reply*/TRUE);
}

/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * The pending list is already run down by
	 * the generic handler.  Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
				   MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}

/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
 */
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
			     MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	MSG_SCSI_IO_REPLY *scsi_io_reply;
	u_int ioc_status;
	u_int sstate;
	u_int loginfo;

	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
		("MPT SCSI I/O Handler called with incorrect reply type"));
	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
		("MPT SCSI I/O Handler called with continuation reply"));

	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
	ioc_status = le16toh(scsi_io_reply->IOCStatus);
	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
	ioc_status &= MPI_IOCSTATUS_MASK;
	sstate = scsi_io_reply->SCSIState;

	ccb = req->ccb;
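	/* Residual is the requested length less what the IOC actually moved. */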
	ccb->csio.resid =
	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		ccb->csio.sense_resid =
		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
	}

	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
XXXX
		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		 */
	}

	switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * XXX
		 * Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
			/*
			 * Status was never returned for this transaction.
			 */
			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;
	case MPI_IOCSTATUS_INVALID_STATE:
		/*
		 * The IOC has been reset.  Emulate a bus reset.
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		mpt_freeze_ccb(ccb);

	return (/*free_reply*/TRUE);
}

static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	mpt_softc *mpt;
	struct	ccb_trans_settings *cts;
	u_int	tgt;
	int	raid_passthru;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	raid_passthru = (sim == mpt->phydisk_sim);

	tgt = ccb->ccb_h.target_id;
	if (raid_passthru
	 && ccb->ccb_h.func_code != XPT_PATH_INQ
	 && ccb->ccb_h.func_code != XPT_RESET_BUS) {
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
	}

	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32-byte message type */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(sim, ccb);
		break;

	case XPT_RESET_BUS:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
		if (!raid_passthru) {
			CAMLOCK_2_MPTLOCK(mpt);
			(void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
			MPTLOCK_2_CAMLOCK(mpt);
		}
		/*
		 * mpt_bus_reset is always successful in that it
		 * will fall back to a hard reset should a bus
		 * reset attempt fail.
		 */
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		xpt_done(ccb);
		break;

	case XPT_ABORT:
		/*
		 * XXX: Need to implement
		 */
		ccb->ccb_h.status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;

#ifdef	CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
		cts = &ccb->cts;
		if (!IS_CURRENT_SETTINGS(cts)) {
			mpt_prt(mpt, "Attempt to set User settings\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		if (mpt->is_fc == 0 && mpt->is_sas == 0) {
			uint8_t dval = 0;
			u_int period = 0, offset = 0;
#ifndef	CAM_NEW_TRAN_CODE
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				dval |= DP_DISC_ENABLE;
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				dval |= DP_TQING_ENABLE;
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				if (cts->bus_width)
					dval |= DP_WIDE;
				else
					dval |= DP_NARROW;
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
				dval |= DP_SYNC;
				period = cts->sync_period;
				offset = cts->sync_offset;
			}
#else
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;

			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					dval |= DP_DISC_ENABLE;
				else
					dval |= DP_DISC_DISABL;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					dval |= DP_TQING_ENABLE;
				else
					dval |= DP_TQING_DISABL;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
					dval |= DP_WIDE;
				else
					dval |= DP_NARROW;
			}

			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
			    (spi->sync_period && spi->sync_offset)) {
				dval |= DP_SYNC;
				period = spi->sync_period;
				offset = spi->sync_offset;
			}
#endif
			CAMLOCK_2_MPTLOCK(mpt);
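			/*
			 * Latch the per-target disconnect and tag-queueing
			 * choices in the softc's bitmasks.
			 */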
			if (dval & DP_DISC_ENABLE) {
				mpt->mpt_disc_enable |= (1 << tgt);
			} else if (dval & DP_DISC_DISABL) {
				mpt->mpt_disc_enable &= ~(1 << tgt);
			}
			if (dval & DP_TQING_ENABLE) {
				mpt->mpt_tag_enable |= (1 << tgt);
			} else if (dval & DP_TQING_DISABL) {
				mpt->mpt_tag_enable &= ~(1 << tgt);
			}
			if (dval & DP_WIDTH) {
				if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
					mpt_prt(mpt, "Set width failed!\n");
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
					MPTLOCK_2_CAMLOCK(mpt);
					xpt_done(ccb);
					break;
				}
			}
			if (dval & DP_SYNC) {
				if (mpt_setsync(mpt, tgt, period, offset)) {
					mpt_prt(mpt, "Set sync failed!\n");
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
					MPTLOCK_2_CAMLOCK(mpt);
					xpt_done(ccb);
					break;
				}
			}
			MPTLOCK_2_CAMLOCK(mpt);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
				 "SET tgt %d flags %x period %x off %x\n",
				 tgt, dval, period, offset);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		cts = &ccb->cts;
		if (mpt->is_fc) {
#ifndef	CAM_NEW_TRAN_CODE
			/*
			 * A lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
#else
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;

			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;

			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
			/* XXX: need a port database for each target */
#endif
		} else if (mpt->is_sas) {
#ifndef	CAM_NEW_TRAN_CODE
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
#else
			struct ccb_trans_settings_sas *sas =
			    &cts->xport_specific.sas;

			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_3;
			cts->transport = XPORT_SAS;
			cts->transport_version = 0;

			sas->valid = CTS_SAS_VALID_SPEED;
			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
#endif
		} else {
#ifdef	CAM_NEW_TRAN_CODE
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;
#endif
			uint8_t dval, pval, oval;
			int rv;

			/*
			 * We aren't going off of Port PAGE2 params for
			 * tagged queuing or disconnect capabilities
			 * for current settings. For goal settings,
			 * we assert all capabilities- we've had some
			 * problems with reading NVRAM data.
			 */
			if (IS_CURRENT_SETTINGS(cts)) {
				CONFIG_PAGE_SCSI_DEVICE_0 tmp;
				dval = 0;

				tmp = mpt->mpt_dev_page0[tgt];
				CAMLOCK_2_MPTLOCK(mpt);
				rv = mpt_read_cur_cfg_page(mpt, tgt,
							   &tmp.Header,
							   sizeof(tmp),
							   /*sleep_ok*/FALSE,
							   /*timeout_ms*/5000);
				if (rv) {
					mpt_prt(mpt,
					    "cannot get target %d DP0\n", tgt);
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
					 "SPI Tgt %d Page 0: NParms %x "
					 "Information %x\n", tgt,
					 tmp.NegotiatedParameters,
					 tmp.Information);
				MPTLOCK_2_CAMLOCK(mpt);

				if (tmp.NegotiatedParameters &
				    MPI_SCSIDEVPAGE0_NP_WIDE)
					dval |= DP_WIDE;

				if (mpt->mpt_disc_enable & (1 << tgt)) {
					dval |= DP_DISC_ENABLE;
				}
				if (mpt->mpt_tag_enable & (1 << tgt)) {
					dval |= DP_TQING_ENABLE;
				}
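				/*
				 * The negotiated sync offset is in byte 2 and
				 * the period factor in byte 1 of
				 * NegotiatedParameters.
				 */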
1652 				oval = (tmp.NegotiatedParameters >> 16) & 0xff;
1653 				pval = (tmp.NegotiatedParameters >>  8) & 0xff;
1654 			} else {
1655 				/*
1656 				 * XXX: Fix wrt NVRAM someday. Attempts
1657 				 * XXX: to read port page2 device data
1658 				 * XXX: just returns zero in these areas.
1659 				 */
1660 				dval = DP_WIDE|DP_DISC|DP_TQING;
1661 				oval = (mpt->mpt_port_page0.Capabilities >> 16);
1662 				pval = (mpt->mpt_port_page0.Capabilities >>  8);
1663 			}
1664 #ifndef	CAM_NEW_TRAN_CODE
1665 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1666 			if (dval & DP_DISC_ENABLE) {
1667 				cts->flags |= CCB_TRANS_DISC_ENB;
1668 			}
1669 			if (dval & DP_TQING_ENABLE) {
1670 				cts->flags |= CCB_TRANS_TAG_ENB;
1671 			}
1672 			if (dval & DP_WIDE) {
1673 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1674 			} else {
1675 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1676 			}
1677 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
1678 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1679 			if (oval) {
1680 				cts->sync_period = pval;
1681 				cts->sync_offset = oval;
1682 				cts->valid |=
1683 				    CCB_TRANS_SYNC_RATE_VALID |
1684 				    CCB_TRANS_SYNC_OFFSET_VALID;
1685 			}
1686 #else
1687 			cts->protocol = PROTO_SCSI;
1688 			cts->protocol_version = SCSI_REV_2;
1689 			cts->transport = XPORT_SPI;
1690 			cts->transport_version = 2;
1691 
1692 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1693 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
1694 			if (dval & DP_DISC_ENABLE) {
1695 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
1696 			}
1697 			if (dval & DP_TQING_ENABLE) {
1698 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1699 			}
1700 			if (oval && pval) {
1701 				spi->sync_offset = oval;
1702 				spi->sync_period = pval;
1703 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1704 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1705 			}
1706 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
1707 			if (dval & DP_WIDE) {
1708 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1709 			} else {
1710 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1711 			}
1712 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
1713 				scsi->valid = CTS_SCSI_VALID_TQ;
1714 				spi->valid |= CTS_SPI_VALID_DISC;
1715 			} else {
1716 				scsi->valid = 0;
1717 			}
1718 #endif
1719 			mpt_lprt(mpt, MPT_PRT_DEBUG,
1720 				 "GET %s tgt %d flags %x period %x offset %x\n",
1721 				 IS_CURRENT_SETTINGS(cts) ?
1722 				 "ACTIVE" : "NVRAM",
1723 				 tgt, dval, pval, oval);
1724 		}
1725 		ccb->ccb_h.status = CAM_REQ_CMP;
1726 		xpt_done(ccb);
1727 		break;
1728 
1729 	case XPT_CALC_GEOMETRY:
1730 	{
1731 		struct ccb_calc_geometry *ccg;
1732 
1733 		ccg = &ccb->ccg;
1734 		if (ccg->block_size == 0) {
1735 			ccb->ccb_h.status = CAM_REQ_INVALID;
1736 			xpt_done(ccb);
1737 			break;
1738 		}
1739 
1740 		mpt_calc_geometry(ccg, /*extended*/1);
1741 		xpt_done(ccb);
1742 		break;
1743 	}
1744 	case XPT_PATH_INQ:		/* Path routing inquiry */
1745 	{
1746 		struct ccb_pathinq *cpi = &ccb->cpi;
1747 
1748 		cpi->version_num = 1;
1749 		cpi->target_sprt = 0;
1750 		cpi->hba_eng_cnt = 0;
1751 		cpi->max_lun = 7;
1752 		cpi->bus_id = cam_sim_bus(sim);
1753 		/* XXX Report base speed more accurately for FC/SAS, etc.*/
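		/*
		 * Note that base_transfer_speed is expressed in KB/s,
		 * so the values below correspond to ~100 MB/s (1Gb FC),
		 * ~300 MB/s (3Gb SAS) and 3.3 MB/s (async narrow SPI).
		 */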
1754 		if (raid_passthru) {
1755 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
1756 			cpi->hba_misc = PIM_NOBUSRESET;
1757 			cpi->initiator_id = cpi->max_target + 1;
1758 			cpi->hba_inquiry = PI_TAG_ABLE;
1759 			if (mpt->is_fc) {
1760 				cpi->base_transfer_speed = 100000;
1761 			} else if (mpt->is_sas) {
1762 				cpi->base_transfer_speed = 300000;
1763 			} else {
1764 				cpi->base_transfer_speed = 3300;
1765 				cpi->hba_inquiry |=
1766 				    PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1767 			}
1768 		} else if (mpt->is_fc) {
1769 /* XXX SHOULD BE BASED UPON IOC FACTS XXX */
1770 			cpi->max_target = 255;
1771 			cpi->hba_misc = PIM_NOBUSRESET;
1772 			cpi->initiator_id = cpi->max_target + 1;
1773 			cpi->base_transfer_speed = 100000;
1774 			cpi->hba_inquiry = PI_TAG_ABLE;
1775 		} else if (mpt->is_sas) {
1776 			cpi->max_target = 63;	/* XXX */
1777 			cpi->hba_misc = PIM_NOBUSRESET;
1778 			cpi->initiator_id = cpi->max_target;
1779 			cpi->base_transfer_speed = 300000;
1780 			cpi->hba_inquiry = PI_TAG_ABLE;
1781 		} else {
1782 			cpi->initiator_id = mpt->mpt_ini_id;
1783 			cpi->base_transfer_speed = 3300;
1784 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1785 			cpi->hba_misc = 0;
1786 			cpi->max_target = 15;
1787 		}
1788 
1789 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1790 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
1791 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1792 		cpi->unit_number = cam_sim_unit(sim);
1793 		cpi->ccb_h.status = CAM_REQ_CMP;
1794 		xpt_done(ccb);
1795 		break;
1796 	}
1797 	default:
1798 		ccb->ccb_h.status = CAM_REQ_INVALID;
1799 		xpt_done(ccb);
1800 		break;
1801 	}
1802 }
1803 
1804 static int
1805 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
1806 {
1807 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
1808 	int rv;
1809 
1810 	tmp = mpt->mpt_dev_page1[tgt];
1811 	if (onoff) {
1812 		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1813 	} else {
1814 		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1815 	}
1816 	rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
1817 				    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1818 	if (rv) {
1819 		mpt_prt(mpt, "mpt_setwidth: write cur page failed\n");
1820 		return (-1);
1821 	}
1822 	rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
1823 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1824 	if (rv) {
1825 		mpt_prt(mpt, "mpt_setwidth: read cur page failed\n");
1826 		return (-1);
1827 	}
1828 	mpt->mpt_dev_page1[tgt] = tmp;
1829 	mpt_lprt(mpt, MPT_PRT_DEBUG,
1830 		 "SPI Target %d Page 1: RequestedParameters %x Config %x\n",
1831 		 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1832 		 mpt->mpt_dev_page1[tgt].Configuration);
1833 	return (0);
1834 }
1835 
1836 static int
1837 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
1838 {
1839 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
1840 	int rv;
1841 
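	/*
	 * Start from the cached device page 1, clear any previously
	 * requested sync period/offset and the DT/QAS/IU protocol bits,
	 * and rebuild the request from what Port Page 0 says the port
	 * itself supports.
	 */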
1842 	tmp = mpt->mpt_dev_page1[tgt];
1843 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
1844 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
1845 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
1846 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
1847 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
1848 	/*
1849 	 * XXX: For now, we ignore the requested period and offset.
1850 	 */
1851 	if (period && offset) {
1852 		int factor, max_off, np;
1853 		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1854 		max_off = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1855 		np = 0;
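		/*
		 * A smaller period factor means a faster rate.  The code
		 * below enables DT for factors under 0xa, and QAS and IU
		 * (packetized transfer features) for factors under 0x9
		 * as well.
		 */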
1856 		if (factor < 0x9) {
1857 			np |= MPI_SCSIDEVPAGE1_RP_QAS;
1858 			np |= MPI_SCSIDEVPAGE1_RP_IU;
1859 		}
1860 		if (factor < 0xa) {
1861 			np |= MPI_SCSIDEVPAGE1_RP_DT;
1862 		}
1863 		np |= (factor << 8) | (max_off << 16);
1864 		tmp.RequestedParameters |= np;
1865 	}
1866 	rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
1867 				    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1868 	if (rv) {
1869 		mpt_prt(mpt, "mpt_setsync: write cur page failed\n");
1870 		return (-1);
1871 	}
1872 	rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
1873 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1874 	if (rv) {
1875 		mpt_prt(mpt, "mpt_setsync: read cur page failed\n");
1876 		return (-1);
1877 	}
1878 	mpt->mpt_dev_page1[tgt] = tmp;
1879 	mpt_lprt(mpt, MPT_PRT_DEBUG,
1880 		 "SPI Target %d Page 1: RParams %x Config %x\n",
1881 		 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1882 		 mpt->mpt_dev_page1[tgt].Configuration);
1883 	return (0);
1884 }
1885 
1886 static void
1887 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
1888 {
1889 #if __FreeBSD_version >= 500000
1890 	cam_calc_geometry(ccg, extended);
1891 #else
1892 	uint32_t size_mb;
1893 	uint32_t secs_per_cylinder;
1894 
1895 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
1896 	if (size_mb > 1024 && extended) {
1897 		ccg->heads = 255;
1898 		ccg->secs_per_track = 63;
1899 	} else {
1900 		ccg->heads = 64;
1901 		ccg->secs_per_track = 32;
1902 	}
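	/*
	 * Example: a 2 GiB volume of 512-byte blocks is 4194304 blocks
	 * (2048 MB), so the extended translation applies: 255 heads *
	 * 63 sectors = 16065 sectors per cylinder, for
	 * 4194304 / 16065 = 261 cylinders.
	 */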
1903 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1904 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1905 	ccg->ccb_h.status = CAM_REQ_CMP;
1906 #endif
1907 }
1908 
1909 /****************************** Timeout Recovery ******************************/
1910 static int
1911 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
1912 {
1913 	int error;
1914 
1915 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
1916 	    &mpt->recovery_thread, /*flags*/0,
1917 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
1918 	return (error);
1919 }
1920 
1921 /*
1922  * Lock is not held on entry.
1923  */
1924 static void
1925 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
1926 {
1927 
1928 	MPT_LOCK(mpt);
1929 	if (mpt->recovery_thread == NULL) {
1930 		MPT_UNLOCK(mpt);
1931 		return;
1932 	}
1933 	mpt->shutdwn_recovery = 1;
1934 	wakeup(mpt);
1935 	/*
1936 	 * Sleep on a slightly different location
1937 	 * for this interlock just for added safety.
1938 	 */
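	/*
	 * mpt_recovery_thread() clears mpt->recovery_thread and then
	 * calls wakeup(&mpt->recovery_thread), which is the channel
	 * slept on here.
	 */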
1939 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
1940 	MPT_UNLOCK(mpt);
1941 }
1942 
1943 static void
1944 mpt_recovery_thread(void *arg)
1945 {
1946 	struct mpt_softc *mpt;
1947 
1948 #if __FreeBSD_version >= 500000
1949 	mtx_lock(&Giant);
1950 #endif
1951 	mpt = (struct mpt_softc *)arg;
1952 	MPT_LOCK(mpt);
1953 	for (;;) {
1954 
1955 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0
1956 		 && mpt->shutdwn_recovery == 0)
1957 			mpt_sleep(mpt, mpt, PUSER, "idle", 0);
1958 
1959 		if (mpt->shutdwn_recovery != 0)
1960 			break;
1961 
1962 		MPT_UNLOCK(mpt);
1963 		mpt_recover_commands(mpt);
1964 		MPT_LOCK(mpt);
1965 	}
1966 	mpt->recovery_thread = NULL;
1967 	wakeup(&mpt->recovery_thread);
1968 	MPT_UNLOCK(mpt);
1969 #if __FreeBSD_version >= 500000
1970 	mtx_unlock(&Giant);
1971 #endif
1972 	kthread_exit(0);
1973 }
1974 
1975 static int
1976 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type,
1977 		  u_int flags, u_int channel, u_int target, u_int lun,
1978 		  u_int abort_ctx, int sleep_ok)
1979 {
1980 	MSG_SCSI_TASK_MGMT *tmf_req;
1981 	int		    error;
1982 
1983 	/*
1984 	 * Wait for any current TMF request to complete.
1985 	 * We're only allowed to issue one TMF at a time.
1986 	 */
1987 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_MASK,
1988 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
1989 	if (error != 0) {
1990 		mpt_reset(mpt, /*reinit*/TRUE);
1991 		return (ETIMEDOUT);
1992 	}
1993 
1994 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
1995 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
1996 
1997 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
1998 	bzero(tmf_req, sizeof(*tmf_req));
1999 	tmf_req->TargetID = target;
2000 	tmf_req->Bus = channel;
2001 	tmf_req->ChainOffset = 0;
2002 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
2003 	tmf_req->Reserved = 0;
2004 	tmf_req->TaskType = type;
2005 	tmf_req->Reserved1 = 0;
2006 	tmf_req->MsgFlags = flags;
2007 	tmf_req->MsgContext =
2008 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
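	/*
	 * The MPI LUN field is an 8-byte SAM LUN; putting the LUN in
	 * byte 1 yields single-level peripheral-device addressing,
	 * which assumes the LUN fits in one byte.
	 */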
2009 	bzero(&tmf_req->LUN, sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
2010 	tmf_req->LUN[1] = lun;
2011 	tmf_req->TaskMsgContext = abort_ctx;
2012 
2013 	mpt_lprt(mpt, MPT_PRT_INFO,
2014 		 "Issuing TMF %p with MsgContext of 0x%x\n", tmf_req,
2015 		 tmf_req->MsgContext);
2016 	if (mpt->verbose > MPT_PRT_DEBUG)
2017 		mpt_print_request(tmf_req);
2018 
2019 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
2020 	if (error != 0)
2021 		mpt_reset(mpt, /*reinit*/TRUE);
2022 	return (error);
2023 }
2024 
2025 /*
2026  * When a command times out, it is placed on the request_timeout_list
2027  * and we wake our recovery thread.  The MPT-Fusion architecture supports
2028  * only a single TMF operation at a time, so we serially abort/bdr, etc.,
2029  * the timed-out transactions.  The next TMF is issued either by the
2030  * completion handler of the current TMF waking our recovery thread,
2031  * or the TMF timeout handler causing a hard reset sequence.
2032  */
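/*
 * In outline: the per-command timeout handler queues the request on
 * request_timeout_list and wakes the recovery thread, which calls
 * mpt_recover_commands() below; that in turn issues one ABORT_TASK TMF
 * per timed-out request and falls back to a bus reset (or a controller
 * reset if the TMF itself fails) when the abort does not succeed.
 */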
2033 static void
2034 mpt_recover_commands(struct mpt_softc *mpt)
2035 {
2036 	request_t	   *req;
2037 	union ccb	   *ccb;
2038 	int		    error;
2039 
2040 	MPT_LOCK(mpt);
2041 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
2042 		/*
2043 		 * No work to do- leave.
2044 		 */
2045 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
2046 		MPT_UNLOCK(mpt);
2047 		return;
2048 	}
2049 
2050 	/*
2051 	 * Flush any commands whose completion coincides with their timeout.
2052 	 */
2053 	mpt_intr(mpt);
2054 
2055 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
2056 		/*
2057 		 * The timedout commands have already
2058 		 * completed.  This typically means
2059 		 * that either the timeout value was on
2060 		 * the hairy edge of what the device
2061 		 * requires or - more likely - interrupts
2062 		 * are not happening.
2063 		 */
2064 		mpt_prt(mpt, "Timed-out requests already complete. "
2065 		    "Interrupts may not be functioning.\n");
2066 		mpt_enable_ints(mpt);
2067 		MPT_UNLOCK(mpt);
2068 		return;
2069 	}
2070 
2071 	/*
2072 	 * We have no visibility into the current state of the
2073 	 * controller, so attempt to abort the commands in the
2074 	 * order they timed-out.
2075 	 */
2076 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
2077 		u_int status;
2078 		u_int32_t serno = req->serno;
2079 
2080 		mpt_prt(mpt, "Attempting to Abort Req %p:%u\n", req, serno);
2081 		ccb = req->ccb;
2082 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
2083 		error = mpt_scsi_send_tmf(mpt,
2084 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2085 		    /*MsgFlags*/0, mpt->bus, ccb->ccb_h.target_id,
2086 		    ccb->ccb_h.target_lun,
2087 		    htole32(req->index | scsi_io_handler_id), /*sleep_ok*/TRUE);
2088 
2089 		if (error != 0) {
2090 			mpt_prt(mpt, "Abort Req %p:%u failed to start TMF\n",
2091 			    req, serno);
2092 			/*
2093 			 * mpt_scsi_send_tmf hard resets on failure, so no
2094 			 * need to do so here.  Our queue should be emptied
2095 			 * by the hard reset.
2096 			 */
2097 			continue;
2098 		}
2099 
2100 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2101 		    REQ_STATE_DONE, /*sleep_ok*/TRUE, /*time_ms*/500);
2102 
2103 		status = mpt->tmf_req->IOCStatus;
2104 		if (error != 0) {
2105 
2106 			/*
2107 			 * If we've errored out and the transaction is still
2108 			 * pending, reset the controller.
2109 			 */
2110 			mpt_prt(mpt, "Abort Req %p:%u timed-out. "
2111 			    "Resetting controller\n", req, serno);
2112 			mpt_reset(mpt, /*reinit*/TRUE);
2113 			continue;
2114 		}
2115 
2116 		/*
2117 		 * TMF is complete.
2118 		 */
2119 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2120 		mpt->tmf_req->state = REQ_STATE_FREE;
2121 		if ((status & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
2122 			continue;
2123 
2124 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2125 			 "Abort Req %p:%u failed "
2126 			 "with status 0x%x.  Resetting bus.\n",
2127 			 req, serno, status);
2128 
2129 		/*
2130 		 * If the abort attempt fails for any reason, reset the bus.
2131 		 * We should find all of the timed-out commands on our
2132 		 * list are in the done state after this completes.
2133 		 */
2134 		mpt_bus_reset(mpt, /*sleep_ok*/TRUE);
2135 	}
2136 
2137 	MPT_UNLOCK(mpt);
2138 }
2139