/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"

#include <sys/callout.h>
#include <sys/kthread.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int mpt_setwidth(struct mpt_softc *, int, int);
static int mpt_setsync(struct mpt_softc *, int, int, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_bus_reset(struct mpt_softc *, int /*sleep_ok*/);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static int mpt_scsi_send_tmf(struct mpt_softc *, u_int /*type*/,
			     u_int /*flags*/, u_int /*channel*/,
			     u_int /*target*/, u_int /*lun*/,
			     u_int /*abort_ctx*/, int /*sleep_ok*/);
static void mpt_recover_commands(struct mpt_softc *mpt);

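/*
 * Reply handler ids for normal SCSI I/O and for task management
 * functions.  mpt_register_handler() fills these in at attach time; the
 * id is OR'd into each request's MsgContext so that the interrupt path
 * can demultiplex replies back to the proper handler.
 */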
static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	/*
	 * Only attach to nodes that support the initiator
	 * role or have RAID physical devices that need
	 * CAM pass-thru support.
	 */
	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0))
		return (0);
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPTLOCK_2_CAMLOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	mpt->bus = 0;
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0)
		goto cleanup;
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0)
		goto cleanup;

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL
	 || mpt->ioc_page2->MaxPhysDisks == 0)
		return (0);

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	CAMLOCK_2_MPTLOCK(mpt);
	return (0);
cleanup:
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_cam_detach(mpt);
	return (error);
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used when interrupts are unavailable, e.g. after a
 * system crash while dumping core onto the swap device: it services
 * the adapter by calling the interrupt handler directly.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	MPT_LOCK(mpt);
	mpt_intr(mpt);
	MPT_UNLOCK(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
#ifdef NOTYET
	mpt = mpt_find_softc(mpt);
	if (mpt == NULL)
		return;
#else
	mpt = ccb->ccb_h.ccb_mpt_ptr;
#endif

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "Request %p Timed out.\n", req);
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments, builds the SGL for the SCSI I/O
 * command, and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req;
	union ccb *ccb;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	SGE_SIMPLE32 *se;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_req = req->req_vbuf;

	if (error == 0 && nseg > MPT_SGL_MAX) {
		error = EFBIG;
	}

	if (error != 0) {
		if (error != EFBIG)
			mpt_prt(mpt, "bus_dmamap_load returned %d\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			ccb->ccb_h.status = CAM_DEV_QFRZN;
			if (error == EFBIG)
				ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
			else
				ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

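	/*
	 * If the segment list does not fit in the SGL space of the
	 * request frame itself, build the first portion inline and then
	 * chain additional SGE lists within the same request buffer.
	 */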
	if (nseg > MPT_NSGL_FIRST(mpt)) {
		int i, nleft = nseg;
		uint32_t flags;
		bus_dmasync_op_t op;
		SGE_CHAIN32 *ce;

		mpt_req->DataLength = ccb->csio.dxfer_len;
		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) {
			uint32_t tf;

			bzero(se, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (i == MPT_NSGL_FIRST(mpt) - 2) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			nleft -= 1;
		}

		/*
		 * Tell the IOC where to find the first chain element.
		 * The offset is expressed in 32-bit words, hence the
		 * shift by two.
		 */
		mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;

		/*
		 * Until we're finished with all segments...
		 */
		while (nleft) {
			int ntodo;
			/*
			 * Construct the chain element that points to the
			 * next segment.
			 */
			ce = (SGE_CHAIN32 *) se++;
			if (nleft > MPT_NSGL(mpt)) {
				ntodo = MPT_NSGL(mpt) - 1;
				ce->NextChainOffset = (MPT_RQSL(mpt) -
				    sizeof (SGE_SIMPLE32)) >> 2;
				ce->Length = MPT_NSGL(mpt) *
				    sizeof (SGE_SIMPLE32);
			} else {
				ntodo = nleft;
				ce->NextChainOffset = 0;
				ce->Length = ntodo * sizeof (SGE_SIMPLE32);
			}
			ce->Address = req->req_pbuf +
			    ((char *)se - (char *)mpt_req);
			ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
			for (i = 0; i < ntodo; i++, se++, dm_segs++) {
				uint32_t tf;

				bzero(se, sizeof (*se));
				se->Address = dm_segs->ds_addr;
				MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
				tf = flags;
				if (i == ntodo - 1) {
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
					if (ce->NextChainOffset == 0) {
						tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
					}
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft -= 1;
			}
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
			bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		}
	} else if (nseg > 0) {
		int i;
		uint32_t flags;
		bus_dmasync_op_t op;

		mpt_req->DataLength = ccb->csio.dxfer_len;
		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

		/* Copy the segments into our SG list */
		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		for (i = 0; i < nseg; i++, se++, dm_segs++) {
			uint32_t tf;

			bzero(se, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (i == nseg - 1) {
				tf |=
				    MPI_SGE_FLAGS_LAST_ELEMENT |
				    MPI_SGE_FLAGS_END_OF_BUFFER |
				    MPI_SGE_FLAGS_END_OF_LIST;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
			bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		}
	} else {
		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		/*
		 * No data to transfer so we just make a single simple SGL
		 * with zero length.
		 */
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	/*
	 * One last check that CAM has not aborted this CCB before
	 * we queue it.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	CAMLOCK_2_MPTLOCK(mpt);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		ccb->ccb_h.timeout_ch =
			timeout(mpt_timeout, (caddr_t)ccb,
				(ccb->ccb_h.timeout * hz) / 1000);
	} else {
		callout_handle_init(&ccb->ccb_h.timeout_ch);
	}
	if (mpt->verbose >= MPT_PRT_DEBUG)
		mpt_print_scsi_io_request(mpt_req);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

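/*
 * Start a CAM SCSI I/O CCB: allocate a request, build the MPI SCSI I/O
 * request frame, and hand any data buffer to bus_dma so that
 * mpt_execute_req() can construct the SGL and send the command.
 */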
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	CAMLOCK_2_MPTLOCK(mpt);
	/* Get a request structure off the free list */
	if ((req = mpt_get_request(mpt, /*sleep_ok*/FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		MPTLOCK_2_CAMLOCK(mpt);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	MPTLOCK_2_CAMLOCK(mpt);

#if 0
	/*
	 * XXX: Incomplete: quiesce the RAID disk before issuing the
	 * XXX: pass-through request.
	 */
	if (raid_passthru) {
		status = mpt_raid_quiesce_disk(mpt,
		    mpt->raid_disks + ccb->ccb_h.target_id, req);
	}
#endif

	/*
	 * Link the ccb and the request structure so that we can find
	 * one given the other.
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	bzero(mpt_req, sizeof *mpt_req);

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru)
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;

	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
		(csio->sense_len < MPT_SENSE_SIZE) ?
		 csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = ccb->ccb_h.target_id;
	/*
	 * XXX Assumes Single level, Single byte, CAM LUN type.
	 */
	mpt_req->LUN[1] = ccb->ccb_h.target_lun;

	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc)
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		else
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
	}

	if (mpt->is_fc == 0) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}

	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = csio->dxfer_len;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * If we have any data to send with this command,
	 * map it into bus space.
	 */

	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
			/*
			 * We've been given a pointer to a single buffer.
			 */
			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
				/*
				 * Virtual address that needs to be
				 * translated into one or more physical
				 * address ranges.
				 */
				int error;

				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    mpt_execute_req, req, 0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(mpt->sim, 1);
					ccbh->status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				mpt_execute_req(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.  This case
			 * could easily be supported, but such lists are not
			 * currently generated by the CAM subsystem, so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *segs;
			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
				mpt_execute_req(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				mpt_execute_req(req, segs, csio->sglist_cnt,
				    (csio->sglist_cnt < MPT_SGL_MAX)?
				    0 : EFBIG);
			}
		}
	} else {
		mpt_execute_req(req, NULL, 0, 0);
	}
}

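/*
 * Reset the SCSI bus (or, on Fibre Channel, issue a LIP) by sending a
 * bus reset task management function and waiting for the IOC to
 * complete it.
 */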
static int
mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
{
	int   error;
	u_int status;

	error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    /*bus*/0, /*target_id*/0, /*target_lun*/0, /*abort_ctx*/0,
	    sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, /*time_ms*/5000);

	status = mpt->tmf_req->IOCStatus;
	mpt->tmf_req->state = REQ_STATE_FREE;
	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
			"Resetting controller.\n");
		mpt_reset(mpt, /*reinit*/TRUE);
		return (ETIMEDOUT);
	} else if ((status & MPI_IOCSTATUS_MASK) != MPI_SCSI_STATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF Status %d. "
			"Resetting controller.\n", status);
		mpt_reset(mpt, /*reinit*/TRUE);
		return (EIO);
	}
	return (0);
}

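/*
 * Decode and report asynchronous event notifications from the IOC,
 * informing the XPT of bus resets.  Returns non-zero if the event was
 * handled.
 */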
static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
	      MSG_EVENT_NOTIFY_REPLY *msg)
{
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset */
		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
		    (msg->Data[0] >> 8) & 0xff);
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset */
		mpt_prt(mpt, "Ext Bus Reset\n");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do
		 */
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
/*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: LinkState: %s\n",
		    (msg->Data[1] >> 8) & 0xff,
		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
			    "(Loop Initialization)\n",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xF7:
				if ((msg->Data[0] & 0xff) == 0xF7) {
					printf("Device needs AL_PA\n");
				} else {
					printf("Device %02x doesn't like "
					    "FC performance\n",
					    msg->Data[0] & 0xFF);
				}
				break;
			case 0xF8:
				if ((msg->Data[0] & 0xff) == 0xF7) {
					printf("Device had loop failure at its "
					    "receiver prior to acquiring "
					    "AL_PA\n");
				} else {
					printf("Device %02x detected loop "
					    "failure at its receiver\n",
					    msg->Data[0] & 0xFF);
				}
				break;
			default:
				printf("Device %02x requests that device "
				    "%02x reset itself\n",
				    msg->Data[0] & 0xFF,
				    (msg->Data[0] >> 8) & 0xFF);
				break;
			}
			break;
		case 0x02:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPE(%02x,%02x) (Loop Port Enable)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
			break;
		case 0x03:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
			break;
		default:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
			    "FC event (%02x %02x %02x)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 16) & 0xff, /* Event */
			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;
	default:
		return (/*handled*/0);
	}
	return (/*handled*/1);
}

/*
 * Reply path for all SCSI I/O requests, called from our
 * interrupt handler by extracting our handler index from
 * the MsgContext field of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error.  All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
 */
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
		       MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_IO_REQUEST *scsi_req;
	union ccb *ccb;

	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
	ccb = req->ccb;
	if (ccb == NULL) {
		mpt_prt(mpt, "Completion without CCB. Flags %#x, Func %#x\n",
			req->state, scsi_req->Function);
		mpt_print_scsi_io_request(scsi_req);
		return (/*free_reply*/TRUE);
	}

	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}

	if (reply_frame == NULL) {
		/*
		 * Context only reply, completion
		 * without error status.
		 */
		ccb->csio.resid = 0;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		ccb->csio.scsi_status = SCSI_STATUS_OK;
	} else {
		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
	}

	if (mpt->outofbeer) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		mpt->outofbeer = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	MPTLOCK_2_CAMLOCK(mpt);
	if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
	 && scsi_req->CDB[0] == INQUIRY
	 && (scsi_req->CDB[1] & SI_EVPD) == 0) {
		struct scsi_inquiry_data *inq;

		/*
		 * Fake out the device type so that only the
		 * pass-thru device will attach.
		 */
		inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
		inq->device &= ~0x1F;
		inq->device |= T_NODEVICE;
	}
	xpt_done(ccb);
	CAMLOCK_2_MPTLOCK(mpt);
	if ((req->state & REQ_STATE_TIMEDOUT) == 0)
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	else
		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
		mpt_free_request(mpt, req);
		return (/*free_reply*/TRUE);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	wakeup(req);
	return (/*free_reply*/TRUE);
}

static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
			   MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
	u_int			  status;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF Complete: req %p, reply %p\n",
		 req, reply_frame);
	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));

	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;

	/* Record status of TMF for any waiters. */
	req->IOCStatus = tmf_reply->IOCStatus;
	status = le16toh(tmf_reply->IOCStatus);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF Complete: status 0x%x\n", status);
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		req->state |= REQ_STATE_DONE;
		wakeup(req);
	} else
		mpt->tmf_req->state = REQ_STATE_FREE;

	return (/*free_reply*/TRUE);
}

/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * The pending list is already run down by
	 * the generic handler.  Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
				   MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}

/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
 */
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
			     MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	MSG_SCSI_IO_REPLY *scsi_io_reply;
	u_int ioc_status;
	u_int sstate;
	u_int loginfo;

	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
		("MPT SCSI I/O Handler called with incorrect reply type"));
	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
		("MPT SCSI I/O Handler called with continuation reply"));

	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
	ioc_status = le16toh(scsi_io_reply->IOCStatus);
	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
	ioc_status &= MPI_IOCSTATUS_MASK;
	sstate = scsi_io_reply->SCSIState;

	ccb = req->ccb;
	ccb->csio.resid =
	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		ccb->csio.sense_resid =
		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
	}

	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
XXXX
		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		 */
	}

	switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * XXX
		 * Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
			/*
			 * Status was never returned for this transaction.
			 */
			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;
	case MPI_IOCSTATUS_INVALID_STATE:
		/*
		 * The IOC has been reset.  Emulate a bus reset.
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		mpt_freeze_ccb(ccb);

	return (/*free_reply*/TRUE);
}

static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	mpt_softc *mpt;
	struct	ccb_trans_settings *cts;
	u_int	tgt;
	int	raid_passthru;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	raid_passthru = (sim == mpt->phydisk_sim);

	tgt = ccb->ccb_h.target_id;
	if (raid_passthru
	 && ccb->ccb_h.func_code != XPT_PATH_INQ
	 && ccb->ccb_h.func_code != XPT_RESET_BUS) {
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
	}

	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32byte message type */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(sim, ccb);
		break;

	case XPT_RESET_BUS:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
		if (!raid_passthru) {
			CAMLOCK_2_MPTLOCK(mpt);
			(void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
			MPTLOCK_2_CAMLOCK(mpt);
		}
		/*
		 * mpt_bus_reset is always successful in that it
		 * will fall back to a hard reset should a bus
		 * reset attempt fail.
		 */
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		xpt_done(ccb);
		break;

	case XPT_ABORT:
		/*
		 * XXX: Need to implement
		 */
		ccb->ccb_h.status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;

#ifdef	CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
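
/*
 * Local flag bits used to accumulate the transfer-setting changes
 * requested by a CCB before applying them to the hardware.
 */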
#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
		cts = &ccb->cts;
		if (!IS_CURRENT_SETTINGS(cts)) {
			mpt_prt(mpt, "Attempt to set User settings\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		if (mpt->is_fc == 0) {
			uint8_t dval = 0;
			u_int period = 0, offset = 0;
#ifndef	CAM_NEW_TRAN_CODE
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				dval |= DP_DISC_ENABLE;
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				dval |= DP_TQING_ENABLE;
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				if (cts->bus_width)
					dval |= DP_WIDE;
				else
					dval |= DP_NARROW;
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
				dval |= DP_SYNC;
				period = cts->sync_period;
				offset = cts->sync_offset;
			}
#else
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;

			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					dval |= DP_DISC_ENABLE;
				else
					dval |= DP_DISC_DISABL;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					dval |= DP_TQING_ENABLE;
				else
					dval |= DP_TQING_DISABL;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
					dval |= DP_WIDE;
				else
					dval |= DP_NARROW;
			}

			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
			    (spi->sync_period && spi->sync_offset)) {
				dval |= DP_SYNC;
				period = spi->sync_period;
				offset = spi->sync_offset;
			}
#endif
			CAMLOCK_2_MPTLOCK(mpt);
			if (dval & DP_DISC_ENABLE) {
				mpt->mpt_disc_enable |= (1 << tgt);
			} else if (dval & DP_DISC_DISABL) {
				mpt->mpt_disc_enable &= ~(1 << tgt);
			}
			if (dval & DP_TQING_ENABLE) {
				mpt->mpt_tag_enable |= (1 << tgt);
			} else if (dval & DP_TQING_DISABL) {
				mpt->mpt_tag_enable &= ~(1 << tgt);
			}
			if (dval & DP_WIDTH) {
				if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
					mpt_prt(mpt, "Set width Failed!\n");
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
					MPTLOCK_2_CAMLOCK(mpt);
					xpt_done(ccb);
					break;
				}
			}
			if (dval & DP_SYNC) {
				if (mpt_setsync(mpt, tgt, period, offset)) {
					mpt_prt(mpt, "Set sync Failed!\n");
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
					MPTLOCK_2_CAMLOCK(mpt);
					xpt_done(ccb);
					break;
				}
			}
			MPTLOCK_2_CAMLOCK(mpt);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
				 "SET tgt %d flags %x period %x off %x\n",
				 tgt, dval, period, offset);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		cts = &ccb->cts;
		if (mpt->is_fc) {
#ifndef	CAM_NEW_TRAN_CODE
			/*
			 * a lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
#else
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;

			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;

			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
			/* XXX: need a port database for each target */
#endif
		} else {
#ifdef	CAM_NEW_TRAN_CODE
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;
#endif
			uint8_t dval, pval, oval;
			int rv;

			/*
			 * We aren't going off of Port PAGE2 params for
			 * tagged queuing or disconnect capabilities
			 * for current settings.  For goal settings,
			 * we assert all capabilities; we've had some
			 * problems with reading NVRAM data.
			 */
			if (IS_CURRENT_SETTINGS(cts)) {
				CONFIG_PAGE_SCSI_DEVICE_0 tmp;
				dval = 0;

				tmp = mpt->mpt_dev_page0[tgt];
				CAMLOCK_2_MPTLOCK(mpt);
				rv = mpt_read_cur_cfg_page(mpt, tgt,
							   &tmp.Header,
							   sizeof(tmp),
							   /*sleep_ok*/FALSE,
							   /*timeout_ms*/5000);
				if (rv) {
					mpt_prt(mpt,
					    "cannot get target %d DP0\n", tgt);
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
					 "SPI Tgt %d Page 0: NParms %x "
					 "Information %x\n", tgt,
					 tmp.NegotiatedParameters,
					 tmp.Information);
				MPTLOCK_2_CAMLOCK(mpt);

				if (tmp.NegotiatedParameters &
				    MPI_SCSIDEVPAGE0_NP_WIDE)
					dval |= DP_WIDE;

				if (mpt->mpt_disc_enable & (1 << tgt)) {
					dval |= DP_DISC_ENABLE;
				}
				if (mpt->mpt_tag_enable & (1 << tgt)) {
					dval |= DP_TQING_ENABLE;
				}
				oval = (tmp.NegotiatedParameters >> 16) & 0xff;
				pval = (tmp.NegotiatedParameters >>  8) & 0xff;
			} else {
				/*
				 * XXX: Fix wrt NVRAM someday. Attempts
				 * XXX: to read port page2 device data
				 * XXX: just returns zero in these areas.
				 */
				dval = DP_WIDE|DP_DISC|DP_TQING;
				oval = (mpt->mpt_port_page0.Capabilities >> 16);
				pval = (mpt->mpt_port_page0.Capabilities >>  8);
			}
#ifndef	CAM_NEW_TRAN_CODE
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
			if (dval & DP_DISC_ENABLE) {
				cts->flags |= CCB_TRANS_DISC_ENB;
			}
			if (dval & DP_TQING_ENABLE) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			if (dval & DP_WIDE) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			if (oval) {
				cts->sync_period = pval;
				cts->sync_offset = oval;
				cts->valid |=
				    CCB_TRANS_SYNC_RATE_VALID |
				    CCB_TRANS_SYNC_OFFSET_VALID;
			}
#else
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			if (dval & DP_DISC_ENABLE) {
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			}
			if (dval & DP_TQING_ENABLE) {
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
			}
			if (oval && pval) {
				spi->sync_offset = oval;
				spi->sync_period = pval;
				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			}
			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
			if (dval & DP_WIDE) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				scsi->valid = CTS_SCSI_VALID_TQ;
				spi->valid |= CTS_SPI_VALID_DISC;
			} else {
				scsi->valid = 0;
			}
#endif
			mpt_lprt(mpt, MPT_PRT_DEBUG,
				 "GET %s tgt %d flags %x period %x offset %x\n",
				 IS_CURRENT_SETTINGS(cts)
			       ? "ACTIVE" : "NVRAM",
				 tgt, dval, pval, oval);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		mpt_calc_geometry(ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_lun = 7;
		cpi->bus_id = cam_sim_bus(sim);
		/* XXX Report base speed more accurately for FC/SAS, etc.*/
		if (raid_passthru) {
			cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->hba_inquiry = PI_TAG_ABLE;
			if (mpt->is_fc) {
				cpi->base_transfer_speed = 100000;
			} else {
				cpi->base_transfer_speed = 3300;
				cpi->hba_inquiry |=
				    PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			}
		} else if (mpt->is_fc) {
			cpi->max_target = 255;
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			cpi->initiator_id = mpt->mpt_ini_id;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->hba_misc = 0;
			cpi->max_target = 15;
		}

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

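/*
 * Update the wide-negotiation bit in SPI device page 1 for a target,
 * then read the page back to confirm the setting took effect.
 */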
static int
mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
{
	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
	int rv;

	tmp = mpt->mpt_dev_page1[tgt];
	if (onoff) {
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	} else {
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
	}
	rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
				    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "mpt_setwidth: write cur page failed\n");
		return (-1);
	}
	rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "mpt_setwidth: read cur page failed\n");
		return (-1);
	}
	mpt->mpt_dev_page1[tgt] = tmp;
	mpt_lprt(mpt, MPT_PRT_DEBUG,
		 "SPI Target %d Page 1: RequestedParameters %x Config %x\n",
		 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
		 mpt->mpt_dev_page1[tgt].Configuration);
	return (0);
}

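/*
 * Update the synchronous transfer parameters in SPI device page 1 for
 * a target.  The factor and offset actually programmed are taken from
 * the port's capabilities (port page 0) rather than the caller's
 * values; DT, QAS and IU are enabled based on the minimum sync factor
 * the port supports.
 */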
static int
mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
{
	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
	int rv;

	tmp = mpt->mpt_dev_page1[tgt];
	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
	/*
	 * XXX: For now, we're ignoring specific settings
	 */
	if (period && offset) {
		int factor, offset, np;
		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			np |= MPI_SCSIDEVPAGE1_RP_QAS;
			np |= MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}
	rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
				    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "mpt_setsync: write cur page failed\n");
		return (-1);
	}
	rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
				   /*sleep_ok*/FALSE, /*timeout_ms*/500);
	if (rv) {
		mpt_prt(mpt, "mpt_setsync: read cur page failed\n");
		return (-1);
	}
	mpt->mpt_dev_page1[tgt] = tmp;
	mpt_lprt(mpt, MPT_PRT_DEBUG,
		 "SPI Target %d Page 1: RParams %x Config %x\n",
		 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
		 mpt->mpt_dev_page1[tgt].Configuration);
	return (0);
}

static void
mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
{
#if __FreeBSD_version >= 500000
	cam_calc_geometry(ccg, extended);
#else
	uint32_t size_mb;
	uint32_t secs_per_cylinder;

	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
	if (size_mb > 1024 && extended) {
		ccg->heads = 255;
		ccg->secs_per_track = 63;
	} else {
		ccg->heads = 64;
		ccg->secs_per_track = 32;
	}
	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
	ccg->ccb_h.status = CAM_REQ_CMP;
#endif
}

/****************************** Timeout Recovery ******************************/
static int
mpt_spawn_recovery_thread(struct mpt_softc *mpt)
{
	int error;

	error = mpt_kthread_create(mpt_recovery_thread, mpt,
	    &mpt->recovery_thread, /*flags*/0,
	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
	return (error);
}

/*
 * Lock is not held on entry.
 */
static void
mpt_terminate_recovery_thread(struct mpt_softc *mpt)
{

	MPT_LOCK(mpt);
	if (mpt->recovery_thread == NULL) {
		MPT_UNLOCK(mpt);
		return;
	}
	mpt->shutdwn_recovery = 1;
	wakeup(mpt);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
	MPT_UNLOCK(mpt);
}

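/*
 * Body of the recovery thread: sleep until timed-out requests appear on
 * the timeout list (or shutdown is requested), then run
 * mpt_recover_commands() to abort them.
 */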
static void
mpt_recovery_thread(void *arg)
{
	struct mpt_softc *mpt;

#if __FreeBSD_version >= 500000
	mtx_lock(&Giant);
#endif
	mpt = (struct mpt_softc *)arg;
	MPT_LOCK(mpt);
	for (;;) {
		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0
		 && mpt->shutdwn_recovery == 0)
			mpt_sleep(mpt, mpt, PUSER, "idle", 0);

		if (mpt->shutdwn_recovery != 0)
			break;

		MPT_UNLOCK(mpt);
		mpt_recover_commands(mpt);
		MPT_LOCK(mpt);
	}
	mpt->recovery_thread = NULL;
	wakeup(&mpt->recovery_thread);
	MPT_UNLOCK(mpt);
#if __FreeBSD_version >= 500000
	mtx_unlock(&Giant);
#endif
	kthread_exit(0);
}

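/*
 * Issue a SCSI task management function using the dedicated TMF
 * request.  Only one TMF may be outstanding at a time; any failure to
 * issue the TMF results in a hard reset of the controller.
 */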
static int
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type,
		  u_int flags, u_int channel, u_int target, u_int lun,
		  u_int abort_ctx, int sleep_ok)
{
	MSG_SCSI_TASK_MGMT *tmf_req;
	int		    error;

	/*
	 * Wait for any current TMF request to complete.
	 * We're only allowed to issue one TMF at a time.
	 */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_MASK,
	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
	if (error != 0) {
		mpt_reset(mpt, /*reinit*/TRUE);
		return (ETIMEDOUT);
	}

	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);

	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
	bzero(tmf_req, sizeof(*tmf_req));
	tmf_req->TargetID = target;
	tmf_req->Bus = channel;
	tmf_req->ChainOffset = 0;
	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	tmf_req->Reserved = 0;
	tmf_req->TaskType = type;
	tmf_req->Reserved1 = 0;
	tmf_req->MsgFlags = flags;
	tmf_req->MsgContext =
	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
	bzero(&tmf_req->LUN, sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
	tmf_req->LUN[1] = lun;
	tmf_req->TaskMsgContext = abort_ctx;

	mpt_lprt(mpt, MPT_PRT_DEBUG,
		 "Issuing TMF %p with MsgContext of 0x%x\n", tmf_req,
		 tmf_req->MsgContext);
	if (mpt->verbose > MPT_PRT_DEBUG)
		mpt_print_request(tmf_req);

	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
	if (error != 0)
		mpt_reset(mpt, /*reinit*/TRUE);
	return (error);
}

/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread.  The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc,
 * the timed-out transactions.  The next TMF is issued either by the
 * completion handler of the current TMF waking our recovery thread,
 * or the TMF timeout handler causing a hard reset sequence.
 */
static void
mpt_recover_commands(struct mpt_softc *mpt)
{
	request_t	   *req;
	union ccb	   *ccb;
	int		    error;

	MPT_LOCK(mpt);

	/*
	 * Flush any commands whose completion coincides
	 * with their timeout.
	 */
	mpt_intr(mpt);

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		mpt_prt(mpt, "Timedout requests already complete. "
		    "Interrupts may not be functioning.\n");
		MPT_UNLOCK(mpt);
		return;
	}

	/*
	 * We have no visibility into the current state of the
	 * controller, so attempt to abort the commands in the
	 * order they timed-out.
	 */
	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
		u_int status;

		mpt_prt(mpt, "Attempting to Abort Req %p\n", req);

		ccb = req->ccb;
		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
		error = mpt_scsi_send_tmf(mpt,
		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		    /*MsgFlags*/0, mpt->bus, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun,
		    htole32(req->index | scsi_io_handler_id), /*sleep_ok*/TRUE);

		if (error != 0) {
			/*
			 * mpt_scsi_send_tmf hard resets on failure, so no
			 * need to do so here.  Our queue should be emptied
			 * by the hard reset.
			 */
			continue;
		}

		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
		    REQ_STATE_DONE, /*sleep_ok*/TRUE, /*time_ms*/5000);

		status = mpt->tmf_req->IOCStatus;
		if (error != 0) {
			/*
			 * If we've errored out and the transaction is still
			 * pending, reset the controller.
			 */
			mpt_prt(mpt, "mpt_recover_commands: Abort timed-out. "
				"Resetting controller.\n");
			mpt_reset(mpt, /*reinit*/TRUE);
			continue;
		}

		/*
		 * TMF is complete.
		 */
		mpt->tmf_req->state = REQ_STATE_FREE;
		if ((status & MPI_IOCSTATUS_MASK) == MPI_SCSI_STATUS_SUCCESS)
			continue;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "mpt_recover_commands: Abort Failed "
			 "with status 0x%x.  Resetting bus.\n", status);

		/*
		 * If the abort attempt fails for any reason, reset the bus.
		 * We should find all of the timed-out commands on our
		 * list are in the done state after this completes.
		 */
		mpt_bus_reset(mpt, /*sleep_ok*/TRUE);
	}

	MPT_UNLOCK(mpt);
}