xref: /freebsd/sys/dev/mps/mps_sas.c (revision 4906cdc8c5d161f74ab36bb5792ac0706d182c11)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
#define MPSSAS_DISCOVERY_TIMEOUT	20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI CDB opcode (one row per 16 opcodes).  A nonzero entry
 * is the EEDP flag value to OR into the SCSIIO request for that opcode,
 * e.g. 0x28 READ(10) -> PRO_R, 0x2A WRITE(10) -> PRO_W,
 * 0x88 READ(16) -> PRO_R, 0x8A WRITE(16) -> PRO_W, 0xA8/0xAA the (12)
 * variants, and the WRITE AND VERIFY / VERIFY opcodes -> PRO_W/PRO_V.
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
115 
MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");

/* Forward declarations for the file-local SAS transport glue below. */
static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
    struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
			       uint64_t sasaddr);
static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
static void mpssas_async(void *callback_arg, uint32_t code,
			 struct cam_path *path, void *arg);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
			      struct ccb_getdev *cgd);
static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
#endif
static int mpssas_send_portenable(struct mps_softc *sc);
static void mpssas_portenable_complete(struct mps_softc *sc,
    struct mps_command *cm);
149 
150 struct mpssas_target *
151 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
152 {
153 	struct mpssas_target *target;
154 	int i;
155 
156 	for (i = start; i < sassc->maxtargets; i++) {
157 		target = &sassc->targets[i];
158 		if (target->handle == handle)
159 			return (target);
160 	}
161 
162 	return (NULL);
163 }
164 
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166  * commands before device handles have been found by discovery.  Since
167  * discovery involves reading config pages and possibly sending commands,
168  * discovery actions may continue even after we receive the end of discovery
169  * event, so refcount discovery actions instead of assuming we can unfreeze
170  * the simq when we get the event.
171  */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/* Refcounting only applies while initial discovery is in progress. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			/* Also delay boot until discovery settles. */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
191 
192 void
193 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
194 {
195 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
196 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
197 		xpt_release_simq(sassc->sim, 1);
198 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
199 	}
200 }
201 
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/* Refcounting only applies while initial discovery is in progress. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			/* Pairs with xpt_hold_boot() in the increment path. */
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
226 
227 /* LSI's firmware requires us to stop sending commands when we're doing task
228  * management, so refcount the TMs and keep the simq frozen when any are in
229  * use.
230  */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	MPS_FUNCTRACE(sc);
	/* Task management requests come from the high-priority pool. */
	tm = mps_alloc_high_priority_command(sc);
	if (tm != NULL) {
		/*
		 * First outstanding TM freezes the simq: the firmware
		 * requires that no normal I/O be issued while task
		 * management is in flight (see comment above).
		 */
		if (sc->sassc->tm_count++ == 0) {
			mps_dprint(sc, MPS_RECOVERY,
			    "%s freezing simq\n", __func__);
			xpt_freeze_simq(sc->sassc->sim, 1);
		}
		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
		    sc->sassc->tm_count);
	}
	return tm;
}
249 
/*
 * Release a task-management command obtained from mpssas_alloc_tm() and
 * drop the TM refcount, unfreezing the simq when the last TM completes.
 * Safe to call with tm == NULL (no-op).
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	mps_dprint(sc, MPS_TRACE, "%s", __func__);
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	/* Return the command to the high-priority free pool. */
	mps_free_high_priority_command(sc, tm);
}
270 
271 void
272 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
273 {
274 	struct mpssas_softc *sassc = sc->sassc;
275 	path_id_t pathid;
276 	target_id_t targetid;
277 	union ccb *ccb;
278 
279 	MPS_FUNCTRACE(sc);
280 	pathid = cam_sim_path(sassc->sim);
281 	if (targ == NULL)
282 		targetid = CAM_TARGET_WILDCARD;
283 	else
284 		targetid = targ - sassc->targets;
285 
286 	/*
287 	 * Allocate a CCB and schedule a rescan.
288 	 */
289 	ccb = xpt_alloc_ccb_nowait();
290 	if (ccb == NULL) {
291 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
292 		return;
293 	}
294 
295 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
298 		xpt_free_ccb(ccb);
299 		return;
300 	}
301 
302 	if (targetid == CAM_TARGET_WILDCARD)
303 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 	else
305 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
306 
307 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
308 	xpt_rescan(ccb);
309 }
310 
/*
 * Format and emit a debug message for a command, prefixed with the
 * command's CAM path (or the SIM identity when no CCB is attached) and
 * its SMID.  No-op unless the requested debug level is enabled.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed backing store; output beyond sizeof(str) is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		/* For SCSI I/O, also include the CDB and transfer length. */
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by SIM/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
356 
357 
358 static void
359 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
360 {
361 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362 	struct mpssas_target *targ;
363 	uint16_t handle;
364 
365 	MPS_FUNCTRACE(sc);
366 
367 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
369 	targ = tm->cm_targ;
370 
371 	if (reply == NULL) {
372 		/* XXX retry the remove after the diag reset completes? */
373 		mps_dprint(sc, MPS_FAULT,
374 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
375 		mpssas_free_tm(sc, tm);
376 		return;
377 	}
378 
379 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
380 		mps_dprint(sc, MPS_FAULT,
381 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
382 		   reply->IOCStatus, handle);
383 		mpssas_free_tm(sc, tm);
384 		return;
385 	}
386 
387 	mps_dprint(sc, MPS_XINFO,
388 	    "Reset aborted %u commands\n", reply->TerminationCount);
389 	mps_free_reply(sc, tm->cm_reply_data);
390 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391 
392 	mps_dprint(sc, MPS_XINFO,
393 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
394 
395 	/*
396 	 * Don't clear target if remove fails because things will get confusing.
397 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 	 * this target id if possible, and so we can assign the same target id
399 	 * to this device if it comes back in the future.
400 	 */
401 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
402 		targ = tm->cm_targ;
403 		targ->handle = 0x0;
404 		targ->encl_handle = 0x0;
405 		targ->encl_slot = 0x0;
406 		targ->exp_dev_handle = 0x0;
407 		targ->phy_num = 0x0;
408 		targ->linkrate = 0x0;
409 		targ->devinfo = 0x0;
410 		targ->flags = 0x0;
411 	}
412 
413 	mpssas_free_tm(sc, tm);
414 }
415 
416 
417 /*
418  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
419  * Otherwise Volume Delete is same as Bare Drive Removal.
420  */
421 void
422 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
423 {
424 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
425 	struct mps_softc *sc;
426 	struct mps_command *cm;
427 	struct mpssas_target *targ = NULL;
428 
429 	MPS_FUNCTRACE(sassc->sc);
430 	sc = sassc->sc;
431 
432 #ifdef WD_SUPPORT
433 	/*
434 	 * If this is a WD controller, determine if the disk should be exposed
435 	 * to the OS or not.  If disk should be exposed, return from this
436 	 * function without doing anything.
437 	 */
438 	if (sc->WD_available && (sc->WD_hide_expose ==
439 	    MPS_WD_EXPOSE_ALWAYS)) {
440 		return;
441 	}
442 #endif //WD_SUPPORT
443 
444 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
445 	if (targ == NULL) {
446 		/* FIXME: what is the action? */
447 		/* We don't know about this device? */
448 		mps_dprint(sc, MPS_ERROR,
449 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
450 		return;
451 	}
452 
453 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
454 
455 	cm = mpssas_alloc_tm(sc);
456 	if (cm == NULL) {
457 		mps_dprint(sc, MPS_ERROR,
458 		    "%s: command alloc failure\n", __func__);
459 		return;
460 	}
461 
462 	mpssas_rescan_target(sc, targ);
463 
464 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
465 	req->DevHandle = targ->handle;
466 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
467 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
468 
469 	/* SAS Hard Link Reset / SATA Link Reset */
470 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
471 
472 	cm->cm_targ = targ;
473 	cm->cm_data = NULL;
474 	cm->cm_desc.HighPriority.RequestFlags =
475 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
476 	cm->cm_complete = mpssas_remove_volume;
477 	cm->cm_complete_data = (void *)(uintptr_t)handle;
478 	mps_map_command(sc, cm);
479 }
480 
481 /*
482  * The MPT2 firmware performs debounce on the link to avoid transient link
483  * errors and false removals.  When it does decide that link has been lost
484  * and a device need to go away, it expects that the host will perform a
485  * target reset and then an op remove.  The reset has the side-effect of
486  * aborting any outstanding requests for the device, which is required for
487  * the op-remove to succeed.  It's not clear if the host should check for
488  * the device coming back alive after the reset.
489  */
/*
 * Begin removing the device with firmware handle 'handle': mark the
 * target as in-removal, schedule a CAM rescan, and send a target-reset
 * TM.  The reset aborts outstanding I/O (required before the op-remove
 * issued from mpssas_remove_device() can succeed).
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* High-priority TM; failure here just defers the removal. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
538 
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * On success, re-uses the same command to send the SAS_OP_REMOVE_DEVICE
 * I/O-unit control request (completing in mpssas_remove_complete()), then
 * fails back any commands still queued on the target.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before checking. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * The reset aborted everything in flight; complete any commands
	 * still linked to the target as DEV_NOT_THERE.  Note that 'tm'
	 * is re-used here as the iteration variable, not the TM above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
613 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, clears the target's firmware
 * state and frees its LUN list; on failure, leaves the target intact
 * so the id isn't accidentally reused.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the per-LUN records attached to this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
680 
681 static int
682 mpssas_register_events(struct mps_softc *sc)
683 {
684 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
685 
686 	bzero(events, 16);
687 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
688 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
689 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
690 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
691 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
692 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
693 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
694 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
695 	setbit(events, MPI2_EVENT_IR_VOLUME);
696 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
697 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
698 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
699 
700 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
701 	    &sc->sassc->mpssas_eh);
702 
703 	return (0);
704 }
705 
/*
 * Attach the CAM/SAS transport layer: allocate the softc and target
 * array, create the SIM and event taskqueue, register the SCSI bus,
 * freeze the simq until discovery completes, and register for async
 * and firmware events.  Returns 0 on success or an errno; on failure
 * mps_detach_sas() unwinds any partial setup.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	/* M_WAITOK allocation; the NULL check below is belt-and-braces. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request so CAM can't overcommit. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
837 
/*
 * Tear down the CAM/SAS layer: deregister events, drain the event
 * taskqueue (lock unheld to avoid deadlock), undo any startup simq
 * freeze, deregister the bus/SIM, and free the target and LUN storage.
 * Safe to call on a partially-attached softc (used by the attach error
 * path).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the startup freeze taken by mpssas_startup_increment(). */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free every LUN record hanging off every target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
898 
899 void
900 mpssas_discovery_end(struct mpssas_softc *sassc)
901 {
902 	struct mps_softc *sc = sassc->sc;
903 
904 	MPS_FUNCTRACE(sc);
905 
906 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
907 		callout_stop(&sassc->discovery_callout);
908 
909 }
910 
/*
 * CAM SIM action entry point: dispatch a CCB to the appropriate
 * handler.  Called with the softc mutex held.  CCBs handled inline
 * fall through to xpt_done(); SCSI/SMP I/O and device resets return
 * early because their completion paths call xpt_done() themselves.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Describe the (faked single-bus) SAS HBA to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target link rate and tagged-queueing support. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device currently at this target id. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the firmware link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* Completion path calls xpt_done(). */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		/* Completion path calls xpt_done(). */
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		/* Completion path calls xpt_done(). */
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1035 
1036 static void
1037 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1038     target_id_t target_id, lun_id_t lun_id)
1039 {
1040 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1041 	struct cam_path *path;
1042 
1043 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1044 	    ac_code, target_id, (uintmax_t)lun_id);
1045 
1046 	if (xpt_create_path(&path, NULL,
1047 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1048 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1049 			   "notification\n");
1050 		return;
1051 	}
1052 
1053 	xpt_async(ac_code, path, NULL);
1054 	xpt_free_path(path);
1055 }
1056 
/*
 * Complete every active command with a NULL reply after a diag reset.
 * In-flight commands can no longer complete through the hardware, so
 * each one is finished here: its completion handler is run, or its
 * sleeping waiter is woken, and the active I/O count is rebalanced.
 * Must be called with the softc mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): starts at 1 — slot 0 is presumably reserved; confirm
	 * against the command-array setup in mps.c. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Mark polled commands complete so their pollers stop
		 * waiting (assumes pollers test MPS_CM_FLAGS_COMPLETE). */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Commands with a sleeping waiter need an explicit wakeup. */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active I/O counter consistent; clamp at zero. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1110 
1111 void
1112 mpssas_handle_reinit(struct mps_softc *sc)
1113 {
1114 	int i;
1115 
1116 	/* Go back into startup mode and freeze the simq, so that CAM
1117 	 * doesn't send any commands until after we've rediscovered all
1118 	 * targets and found the proper device handles for them.
1119 	 *
1120 	 * After the reset, portenable will trigger discovery, and after all
1121 	 * discovery-related activities have finished, the simq will be
1122 	 * released.
1123 	 */
1124 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1125 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1126 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1127 	mpssas_startup_increment(sc->sassc);
1128 
1129 	/* notify CAM of a bus reset */
1130 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1131 	    CAM_LUN_WILDCARD);
1132 
1133 	/* complete and cleanup after all outstanding commands */
1134 	mpssas_complete_all_commands(sc);
1135 
1136 	mps_dprint(sc, MPS_INIT,
1137 	    "%s startup %u tm %u after command completion\n",
1138 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1139 
1140 	/* zero all the target handles, since they may change after the
1141 	 * reset, and we have to rediscover all the targets and use the new
1142 	 * handles.
1143 	 */
1144 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1145 		if (sc->sassc->targets[i].outstanding != 0)
1146 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1147 			    i, sc->sassc->targets[i].outstanding);
1148 		sc->sassc->targets[i].handle = 0x0;
1149 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1150 		sc->sassc->targets[i].outstanding = 0;
1151 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1152 	}
1153 }
1154 
1155 static void
1156 mpssas_tm_timeout(void *data)
1157 {
1158 	struct mps_command *tm = data;
1159 	struct mps_softc *sc = tm->cm_sc;
1160 
1161 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1162 
1163 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1164 	    "task mgmt %p timed out\n", tm);
1165 	mps_reinit(sc);
1166 }
1167 
1168 static void
1169 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1170 {
1171 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1172 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1173 	unsigned int cm_count = 0;
1174 	struct mps_command *cm;
1175 	struct mpssas_target *targ;
1176 
1177 	callout_stop(&tm->cm_callout);
1178 
1179 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1180 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1181 	targ = tm->cm_targ;
1182 
1183 	/*
1184 	 * Currently there should be no way we can hit this case.  It only
1185 	 * happens when we have a failure to allocate chain frames, and
1186 	 * task management commands don't have S/G lists.
1187 	 * XXXSL So should it be an assertion?
1188 	 */
1189 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1190 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1191 			   "This should not happen!\n", __func__, tm->cm_flags);
1192 		mpssas_free_tm(sc, tm);
1193 		return;
1194 	}
1195 
1196 	if (reply == NULL) {
1197 		mpssas_log_command(tm, MPS_RECOVERY,
1198 		    "NULL reset reply for tm %p\n", tm);
1199 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1200 			/* this completion was due to a reset, just cleanup */
1201 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1202 			targ->tm = NULL;
1203 			mpssas_free_tm(sc, tm);
1204 		}
1205 		else {
1206 			/* we should have gotten a reply. */
1207 			mps_reinit(sc);
1208 		}
1209 		return;
1210 	}
1211 
1212 	mpssas_log_command(tm, MPS_RECOVERY,
1213 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1214 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1215 	    le32toh(reply->TerminationCount));
1216 
1217 	/* See if there are any outstanding commands for this LUN.
1218 	 * This could be made more efficient by using a per-LU data
1219 	 * structure of some sort.
1220 	 */
1221 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1222 		if (cm->cm_lun == tm->cm_lun)
1223 			cm_count++;
1224 	}
1225 
1226 	if (cm_count == 0) {
1227 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1228 		    "logical unit %u finished recovery after reset\n",
1229 		    tm->cm_lun, tm);
1230 
1231 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1232 		    tm->cm_lun);
1233 
1234 		/* we've finished recovery for this logical unit.  check and
1235 		 * see if some other logical unit has a timedout command
1236 		 * that needs to be processed.
1237 		 */
1238 		cm = TAILQ_FIRST(&targ->timedout_commands);
1239 		if (cm) {
1240 			mpssas_send_abort(sc, tm, cm);
1241 		}
1242 		else {
1243 			targ->tm = NULL;
1244 			mpssas_free_tm(sc, tm);
1245 		}
1246 	}
1247 	else {
1248 		/* if we still have commands for this LUN, the reset
1249 		 * effectively failed, regardless of the status reported.
1250 		 * Escalate to a target reset.
1251 		 */
1252 		mpssas_log_command(tm, MPS_RECOVERY,
1253 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1254 		    tm, cm_count);
1255 		mpssas_send_reset(sc, tm,
1256 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1257 	}
1258 }
1259 
/*
 * Completion handler for a TARGET RESET task management request.  If the
 * target has no outstanding commands, recovery is finished: the reset is
 * announced to CAM and the TM is released.  Otherwise the reset
 * effectively failed and the driver escalates to a full controller
 * reinit — there is no higher per-target recovery step.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* The reset attempt is over, one way or the other. */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1332 
/* Seconds to wait for a reset TM reply before mpssas_tm_timeout() fires. */
#define MPS_RESET_TIMEOUT 30

/*
 * Build and issue a SCSI task management reset of the given 'type'
 * (logical unit reset or target reset) using the TM frame 'tm'.  The
 * matching completion handler is installed and an escalation timeout
 * armed before the command is mapped.  Returns -1 for a missing device
 * handle or unknown type, otherwise the mps_map_command() result.
 */
static int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TMs carry no data and go on the high-priority request queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* If no reply arrives in time, mpssas_tm_timeout() reinits the IOC. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1391 
1392 
/*
 * Completion handler for an ABORT TASK task management request.  Walks
 * the target's timed-out command list: if empty, recovery is done; if
 * the head is a different command, the abort succeeded and the next
 * abort is issued; if the head is still the aborted command, the abort
 * failed and recovery escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1474 
1475 #define MPS_ABORT_TIMEOUT 5
1476 
1477 static int
1478 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1479 {
1480 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1481 	struct mpssas_target *targ;
1482 	int err;
1483 
1484 	targ = cm->cm_targ;
1485 	if (targ->handle == 0) {
1486 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1487 		    __func__, cm->cm_ccb->ccb_h.target_id);
1488 		return -1;
1489 	}
1490 
1491 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1492 	    "Aborting command %p\n", cm);
1493 
1494 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1495 	req->DevHandle = htole16(targ->handle);
1496 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1497 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1498 
1499 	/* XXX Need to handle invalid LUNs */
1500 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1501 
1502 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1503 
1504 	tm->cm_data = NULL;
1505 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1506 	tm->cm_complete = mpssas_abort_complete;
1507 	tm->cm_complete_data = (void *)tm;
1508 	tm->cm_targ = cm->cm_targ;
1509 	tm->cm_lun = cm->cm_lun;
1510 
1511 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1512 	    mpssas_tm_timeout, tm);
1513 
1514 	targ->aborts++;
1515 
1516 	err = mps_map_command(sc, tm);
1517 	if (err)
1518 		mpssas_log_command(tm, MPS_RECOVERY,
1519 		    "error %d sending abort for cm %p SMID %u\n",
1520 		    err, cm, req->TaskMID);
1521 	return err;
1522 }
1523 
1524 
1525 static void
1526 mpssas_scsiio_timeout(void *data)
1527 {
1528 	struct mps_softc *sc;
1529 	struct mps_command *cm;
1530 	struct mpssas_target *targ;
1531 
1532 	cm = (struct mps_command *)data;
1533 	sc = cm->cm_sc;
1534 
1535 	MPS_FUNCTRACE(sc);
1536 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1537 
1538 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1539 
1540 	/*
1541 	 * Run the interrupt handler to make sure it's not pending.  This
1542 	 * isn't perfect because the command could have already completed
1543 	 * and been re-used, though this is unlikely.
1544 	 */
1545 	mps_intr_locked(sc);
1546 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1547 		mpssas_log_command(cm, MPS_XINFO,
1548 		    "SCSI command %p almost timed out\n", cm);
1549 		return;
1550 	}
1551 
1552 	if (cm->cm_ccb == NULL) {
1553 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1554 		return;
1555 	}
1556 
1557 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1558 	    cm, cm->cm_ccb);
1559 
1560 	targ = cm->cm_targ;
1561 	targ->timeouts++;
1562 
1563 	/* XXX first, check the firmware state, to see if it's still
1564 	 * operational.  if not, do a diag reset.
1565 	 */
1566 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1567 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1568 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1569 
1570 	if (targ->tm != NULL) {
1571 		/* target already in recovery, just queue up another
1572 		 * timedout command to be processed later.
1573 		 */
1574 		mps_dprint(sc, MPS_RECOVERY,
1575 		    "queued timedout cm %p for processing by tm %p\n",
1576 		    cm, targ->tm);
1577 	}
1578 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1579 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1580 		    cm, targ->tm);
1581 
1582 		/* start recovery by aborting the first timedout command */
1583 		mpssas_send_abort(sc, targ->tm, cm);
1584 	}
1585 	else {
1586 		/* XXX queue this target up for recovery once a TM becomes
1587 		 * available.  The firmware only has a limited number of
1588 		 * HighPriority credits for the high priority requests used
1589 		 * for task management, and we ran out.
1590 		 *
1591 		 * Isilon: don't worry about this for now, since we have
1592 		 * more credits than disks in an enclosure, and limit
1593 		 * ourselves to one TM per target for recovery.
1594 		 */
1595 		mps_dprint(sc, MPS_RECOVERY,
1596 		    "timedout cm %p failed to allocate a tm\n", cm);
1597 	}
1598 
1599 }
1600 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, build an MPI2 SCSI IO
 * request frame (data direction, tagging, CDB copy, and optional
 * EEDP/protection setup), arm the per-command timeout, and submit the
 * command via mps_map_command().  On any validation failure the CCB is
 * completed immediately with an appropriate status.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means the target is gone or was never discovered. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members do not accept plain SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command frame, or a diag reset in progress: freeze the
	 * simq and ask CAM to requeue this CCB later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request frame from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB in from either the pointer or the inline storage. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		/* lun is NULL here when no matching LUN was found above. */
		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* NOTE(review): MPS_CM_FLAGS_USE_CCB appears to make the
		 * mapping code take the data buffer from the CCB itself —
		 * confirm against mps_map_command(). */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* Arm the per-command timeout before issuing the request. */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1856 
1857 static void
1858 mps_response_code(struct mps_softc *sc, u8 response_code)
1859 {
1860         char *desc;
1861 
1862         switch (response_code) {
1863         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1864                 desc = "task management request completed";
1865                 break;
1866         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1867                 desc = "invalid frame";
1868                 break;
1869         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1870                 desc = "task management request not supported";
1871                 break;
1872         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1873                 desc = "task management request failed";
1874                 break;
1875         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1876                 desc = "task management request succeeded";
1877                 break;
1878         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1879                 desc = "invalid lun";
1880                 break;
1881         case 0xA:
1882                 desc = "overlapped tag attempted";
1883                 break;
1884         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1885                 desc = "task queued, however not sent to target";
1886                 break;
1887         default:
1888                 desc = "unknown";
1889                 break;
1890         }
1891 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1892                 response_code, desc);
1893 }
/**
 * mps_sc_failed_io_info - translate a non-successful SCSI_IO reply into
 * human-readable log output
 */
/*
 * Decode the IOC status, SCSI status, and SCSI state from a failed
 * SCSI_IO reply into human-readable strings and log them on the
 * MPS_XINFO debug channel.  Also prints sense data (when autosense is
 * valid and MPS_XINFO debugging is enabled) and decodes the SAS
 * response code when response info is present.  Logging only; the CCB
 * is not modified.
 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/*
	 * The state description is built with strcat below into the
	 * per-softc scratch buffer; presumably serialized by the softc
	 * mutex held by the completion path -- TODO confirm.
	 */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* Suppress output for this specific firmware log-info code. */
	if (log_info == 0x31170000)
		return;

	/* Map the masked IOC status to a description. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Map the SCSI status byte to a description. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of the SCSI state bits that are
	 * set.  When no bits are set, point at a literal " " instead so
	 * the printf below still has something to show.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the sense buffer only when valid and XINFO debugging is on. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode byte 0 of the response info (the SAS response code). */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_response_code(sc,response_bytes[0]);
	}
}
2047 
/*
 * Completion handler for XPT_SCSI_IO commands.  Syncs and unloads the
 * data DMA map, updates per-target accounting, handles the fast path
 * (no reply from the IOC), translates the MPI2 reply IOCStatus into a
 * CAM status, copies autosense data, performs TLR re-negotiation for
 * SSP sequential-access devices, reissues failed direct-drive I/O to
 * the IR volume, and finally returns the CCB to CAM via xpt_done().
 * Called with the softc mutex held (asserted below).
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed one way or another; cancel its timeout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Update per-target accounting and drop the command from its queue. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that happen during timeout/TM recovery or reset. */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the IOC status into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy out autosense data, clamped to both buffer sizes. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* alloc_len comes from INQUIRY CDB bytes 3-4 (BE16),
			 * adjusted for how much data was actually returned. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Log details of the reply (MPS_XINFO debug output only). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	/* Freeze the device queue on any error so ordering is preserved
	 * through error recovery. */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2418 
2419 /* All Request reached here are Endian safe */
2420 static void
2421 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2422     union ccb *ccb) {
2423 	pMpi2SCSIIORequest_t	pIO_req;
2424 	struct mps_softc	*sc = sassc->sc;
2425 	uint64_t		virtLBA;
2426 	uint32_t		physLBA, stripe_offset, stripe_unit;
2427 	uint32_t		io_size, column;
2428 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2429 
2430 	/*
2431 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2432 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2433 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2434 	 * bit different than the 10/16 CDBs, handle them separately.
2435 	 */
2436 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2437 	CDB = pIO_req->CDB.CDB32;
2438 
2439 	/*
2440 	 * Handle 6 byte CDBs.
2441 	 */
2442 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2443 	    (CDB[0] == WRITE_6))) {
2444 		/*
2445 		 * Get the transfer size in blocks.
2446 		 */
2447 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2448 
2449 		/*
2450 		 * Get virtual LBA given in the CDB.
2451 		 */
2452 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2453 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2454 
2455 		/*
2456 		 * Check that LBA range for I/O does not exceed volume's
2457 		 * MaxLBA.
2458 		 */
2459 		if ((virtLBA + (uint64_t)io_size - 1) <=
2460 		    sc->DD_max_lba) {
2461 			/*
2462 			 * Check if the I/O crosses a stripe boundary.  If not,
2463 			 * translate the virtual LBA to a physical LBA and set
2464 			 * the DevHandle for the PhysDisk to be used.  If it
2465 			 * does cross a boundry, do normal I/O.  To get the
2466 			 * right DevHandle to use, get the map number for the
2467 			 * column, then use that map number to look up the
2468 			 * DevHandle of the PhysDisk.
2469 			 */
2470 			stripe_offset = (uint32_t)virtLBA &
2471 			    (sc->DD_stripe_size - 1);
2472 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2473 				physLBA = (uint32_t)virtLBA >>
2474 				    sc->DD_stripe_exponent;
2475 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2476 				column = physLBA % sc->DD_num_phys_disks;
2477 				pIO_req->DevHandle =
2478 				    htole16(sc->DD_column_map[column].dev_handle);
2479 				/* ???? Is this endian safe*/
2480 				cm->cm_desc.SCSIIO.DevHandle =
2481 				    pIO_req->DevHandle;
2482 
2483 				physLBA = (stripe_unit <<
2484 				    sc->DD_stripe_exponent) + stripe_offset;
2485 				ptrLBA = &pIO_req->CDB.CDB32[1];
2486 				physLBA_byte = (uint8_t)(physLBA >> 16);
2487 				*ptrLBA = physLBA_byte;
2488 				ptrLBA = &pIO_req->CDB.CDB32[2];
2489 				physLBA_byte = (uint8_t)(physLBA >> 8);
2490 				*ptrLBA = physLBA_byte;
2491 				ptrLBA = &pIO_req->CDB.CDB32[3];
2492 				physLBA_byte = (uint8_t)physLBA;
2493 				*ptrLBA = physLBA_byte;
2494 
2495 				/*
2496 				 * Set flag that Direct Drive I/O is
2497 				 * being done.
2498 				 */
2499 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2500 			}
2501 		}
2502 		return;
2503 	}
2504 
2505 	/*
2506 	 * Handle 10, 12 or 16 byte CDBs.
2507 	 */
2508 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2509 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2510 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2511 	    (CDB[0] == WRITE_12))) {
2512 		/*
2513 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2514 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2515 		 * the else section.  10-byte and 12-byte CDB's are OK.
2516 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2517 		 * ready to accept 12byte CDB for Direct IOs.
2518 		 */
2519 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2520 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2521 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2522 			/*
2523 			 * Get the transfer size in blocks.
2524 			 */
2525 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2526 
2527 			/*
2528 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2529 			 * LBA in the CDB depending on command.
2530 			 */
2531 			lba_idx = ((CDB[0] == READ_12) ||
2532 				(CDB[0] == WRITE_12) ||
2533 				(CDB[0] == READ_10) ||
2534 				(CDB[0] == WRITE_10))? 2 : 6;
2535 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2536 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2537 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2538 			    (uint64_t)CDB[lba_idx + 3];
2539 
2540 			/*
2541 			 * Check that LBA range for I/O does not exceed volume's
2542 			 * MaxLBA.
2543 			 */
2544 			if ((virtLBA + (uint64_t)io_size - 1) <=
2545 			    sc->DD_max_lba) {
2546 				/*
2547 				 * Check if the I/O crosses a stripe boundary.
2548 				 * If not, translate the virtual LBA to a
2549 				 * physical LBA and set the DevHandle for the
2550 				 * PhysDisk to be used.  If it does cross a
2551 				 * boundry, do normal I/O.  To get the right
2552 				 * DevHandle to use, get the map number for the
2553 				 * column, then use that map number to look up
2554 				 * the DevHandle of the PhysDisk.
2555 				 */
2556 				stripe_offset = (uint32_t)virtLBA &
2557 				    (sc->DD_stripe_size - 1);
2558 				if ((stripe_offset + io_size) <=
2559 				    sc->DD_stripe_size) {
2560 					physLBA = (uint32_t)virtLBA >>
2561 					    sc->DD_stripe_exponent;
2562 					stripe_unit = physLBA /
2563 					    sc->DD_num_phys_disks;
2564 					column = physLBA %
2565 					    sc->DD_num_phys_disks;
2566 					pIO_req->DevHandle =
2567 					    htole16(sc->DD_column_map[column].
2568 					    dev_handle);
2569 					cm->cm_desc.SCSIIO.DevHandle =
2570 					    pIO_req->DevHandle;
2571 
2572 					physLBA = (stripe_unit <<
2573 					    sc->DD_stripe_exponent) +
2574 					    stripe_offset;
2575 					ptrLBA =
2576 					    &pIO_req->CDB.CDB32[lba_idx];
2577 					physLBA_byte = (uint8_t)(physLBA >> 24);
2578 					*ptrLBA = physLBA_byte;
2579 					ptrLBA =
2580 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2581 					physLBA_byte = (uint8_t)(physLBA >> 16);
2582 					*ptrLBA = physLBA_byte;
2583 					ptrLBA =
2584 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2585 					physLBA_byte = (uint8_t)(physLBA >> 8);
2586 					*ptrLBA = physLBA_byte;
2587 					ptrLBA =
2588 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2589 					physLBA_byte = (uint8_t)physLBA;
2590 					*ptrLBA = physLBA_byte;
2591 
2592 					/*
2593 					 * Set flag that Direct Drive I/O is
2594 					 * being done.
2595 					 */
2596 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2597 				}
2598 			}
2599 		} else {
2600 			/*
2601 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2602 			 * 0.  Get the transfer size in blocks.
2603 			 */
2604 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2605 
2606 			/*
2607 			 * Get virtual LBA.
2608 			 */
2609 			virtLBA = ((uint64_t)CDB[2] << 54) |
2610 			    ((uint64_t)CDB[3] << 48) |
2611 			    ((uint64_t)CDB[4] << 40) |
2612 			    ((uint64_t)CDB[5] << 32) |
2613 			    ((uint64_t)CDB[6] << 24) |
2614 			    ((uint64_t)CDB[7] << 16) |
2615 			    ((uint64_t)CDB[8] << 8) |
2616 			    (uint64_t)CDB[9];
2617 
2618 			/*
2619 			 * Check that LBA range for I/O does not exceed volume's
2620 			 * MaxLBA.
2621 			 */
2622 			if ((virtLBA + (uint64_t)io_size - 1) <=
2623 			    sc->DD_max_lba) {
2624 				/*
2625 				 * Check if the I/O crosses a stripe boundary.
2626 				 * If not, translate the virtual LBA to a
2627 				 * physical LBA and set the DevHandle for the
2628 				 * PhysDisk to be used.  If it does cross a
2629 				 * boundry, do normal I/O.  To get the right
2630 				 * DevHandle to use, get the map number for the
2631 				 * column, then use that map number to look up
2632 				 * the DevHandle of the PhysDisk.
2633 				 */
2634 				stripe_offset = (uint32_t)virtLBA &
2635 				    (sc->DD_stripe_size - 1);
2636 				if ((stripe_offset + io_size) <=
2637 				    sc->DD_stripe_size) {
2638 					physLBA = (uint32_t)(virtLBA >>
2639 					    sc->DD_stripe_exponent);
2640 					stripe_unit = physLBA /
2641 					    sc->DD_num_phys_disks;
2642 					column = physLBA %
2643 					    sc->DD_num_phys_disks;
2644 					pIO_req->DevHandle =
2645 					    htole16(sc->DD_column_map[column].
2646 					    dev_handle);
2647 					cm->cm_desc.SCSIIO.DevHandle =
2648 					    pIO_req->DevHandle;
2649 
2650 					physLBA = (stripe_unit <<
2651 					    sc->DD_stripe_exponent) +
2652 					    stripe_offset;
2653 
2654 					/*
2655 					 * Set upper 4 bytes of LBA to 0.  We
2656 					 * assume that the phys disks are less
2657 					 * than 2 TB's in size.  Then, set the
2658 					 * lower 4 bytes.
2659 					 */
2660 					pIO_req->CDB.CDB32[2] = 0;
2661 					pIO_req->CDB.CDB32[3] = 0;
2662 					pIO_req->CDB.CDB32[4] = 0;
2663 					pIO_req->CDB.CDB32[5] = 0;
2664 					ptrLBA = &pIO_req->CDB.CDB32[6];
2665 					physLBA_byte = (uint8_t)(physLBA >> 24);
2666 					*ptrLBA = physLBA_byte;
2667 					ptrLBA = &pIO_req->CDB.CDB32[7];
2668 					physLBA_byte = (uint8_t)(physLBA >> 16);
2669 					*ptrLBA = physLBA_byte;
2670 					ptrLBA = &pIO_req->CDB.CDB32[8];
2671 					physLBA_byte = (uint8_t)(physLBA >> 8);
2672 					*ptrLBA = physLBA_byte;
2673 					ptrLBA = &pIO_req->CDB.CDB32[9];
2674 					physLBA_byte = (uint8_t)physLBA;
2675 					*ptrLBA = physLBA_byte;
2676 
2677 					/*
2678 					 * Set flag that Direct Drive I/O is
2679 					 * being done.
2680 					 */
2681 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2682 				}
2683 			}
2684 		}
2685 	}
2686 }
2687 
2688 #if __FreeBSD_version >= 900026
2689 static void
2690 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2691 {
2692 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2693 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2694 	uint64_t sasaddr;
2695 	union ccb *ccb;
2696 
2697 	ccb = cm->cm_complete_data;
2698 
2699 	/*
2700 	 * Currently there should be no way we can hit this case.  It only
2701 	 * happens when we have a failure to allocate chain frames, and SMP
2702 	 * commands require two S/G elements only.  That should be handled
2703 	 * in the standard request size.
2704 	 */
2705 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2706 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2707 			   __func__, cm->cm_flags);
2708 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2709 		goto bailout;
2710         }
2711 
2712 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2713 	if (rpl == NULL) {
2714 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2715 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2716 		goto bailout;
2717 	}
2718 
2719 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2720 	sasaddr = le32toh(req->SASAddress.Low);
2721 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2722 
2723 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2724 	    MPI2_IOCSTATUS_SUCCESS ||
2725 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2726 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2727 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2728 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2729 		goto bailout;
2730 	}
2731 
2732 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2733 		   "%#jx completed successfully\n", __func__,
2734 		   (uintmax_t)sasaddr);
2735 
2736 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2737 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2738 	else
2739 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2740 
2741 bailout:
2742 	/*
2743 	 * We sync in both directions because we had DMAs in the S/G list
2744 	 * in both directions.
2745 	 */
2746 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2747 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2748 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2749 	mps_free_command(sc, cm);
2750 	xpt_done(ccb);
2751 }
2752 
/*
 * Build and send an SMP passthrough request to the SMP target at
 * 'sasaddr'.  The CCB's request and response buffers are described by a
 * two-element uio so that a single mps_map_command()/busdma load covers
 * the bidirectional transfer; completion is handled asynchronously in
 * mpssas_smpio_complete().  On any setup failure the CCB is completed
 * immediately with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry list; ds_addr holds a KVA here. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2925 
/*
 * Handle an XPT_SMP_IO CCB.  Resolve the SAS address the SMP request
 * should be routed to -- either the target itself (if it contains an
 * embedded SMP target) or the expander it hangs off of -- and hand the
 * CCB to mpssas_send_smpcmd().  On any resolution failure the CCB is
 * completed here with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * Normal case: use the parent device info/address cached
		 * on the target itself during discovery.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3061 #endif //__FreeBSD_version >= 900026
3062 
3063 static void
3064 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3065 {
3066 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3067 	struct mps_softc *sc;
3068 	struct mps_command *tm;
3069 	struct mpssas_target *targ;
3070 
3071 	MPS_FUNCTRACE(sassc->sc);
3072 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3073 
3074 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3075 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3076 	     ccb->ccb_h.target_id));
3077 	sc = sassc->sc;
3078 	tm = mps_alloc_command(sc);
3079 	if (tm == NULL) {
3080 		mps_dprint(sc, MPS_ERROR,
3081 		    "command alloc failure in mpssas_action_resetdev\n");
3082 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3083 		xpt_done(ccb);
3084 		return;
3085 	}
3086 
3087 	targ = &sassc->targets[ccb->ccb_h.target_id];
3088 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3089 	req->DevHandle = htole16(targ->handle);
3090 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3091 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3092 
3093 	/* SAS Hard Link Reset / SATA Link Reset */
3094 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3095 
3096 	tm->cm_data = NULL;
3097 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3098 	tm->cm_complete = mpssas_resetdev_complete;
3099 	tm->cm_complete_data = ccb;
3100 	tm->cm_targ = targ;
3101 	mps_map_command(sc, tm);
3102 }
3103 
/*
 * Completion handler for the Target Reset task management request sent
 * by mpssas_action_resetdev().  Translates the TM response code into a
 * CAM status, announces the bus device reset to CAM on success, then
 * frees the TM command and completes the original CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode is a single-byte field in the MPI2
	 * task management reply, so the le32toh() calls below are no-ops
	 * on little-endian hosts; on big-endian hosts they would garble
	 * the logged value (the == comparison against TM_COMPLETE, which
	 * is zero, is unaffected by a byte swap).  Confirm against the
	 * MPI2 headers before changing.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Let CAM peripherals know a device reset occurred. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3151 
3152 static void
3153 mpssas_poll(struct cam_sim *sim)
3154 {
3155 	struct mpssas_softc *sassc;
3156 
3157 	sassc = cam_sim_softc(sim);
3158 
3159 	if (sassc->sc->mps_debug & MPS_TRACE) {
3160 		/* frequent debug messages during a panic just slow
3161 		 * everything down too much.
3162 		 */
3163 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3164 		sassc->sc->mps_debug &= ~MPS_TRACE;
3165 	}
3166 
3167 	mps_intr_locked(sassc->sc);
3168 }
3169 
/*
 * Asynchronous CAM event callback.  The only event of interest is the
 * advertised-info change for long read capacity data (or, on older CAM
 * stacks, AC_FOUND_DEVICE), which is used to track whether each LUN is
 * formatted for EEDP protection information.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN in the target's list, creating it if new. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data via
		 * XPT_DEV_ADVINFO.  The result is consumed immediately
		 * after xpt_action() returns, so a stack CCB and buffer
		 * are used here.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * SRC16_PROT_EN in the read capacity data means the LUN
		 * is formatted with protection information; record the
		 * logical block size alongside for EEDP-aware I/O.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older CAM: probe EEDP capability at device-found time. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3269 
3270 #if (__FreeBSD_version < 901503) || \
3271     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3272 static void
3273 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3274 		  struct ccb_getdev *cgd)
3275 {
3276 	struct mpssas_softc *sassc = sc->sassc;
3277 	struct ccb_scsiio *csio;
3278 	struct scsi_read_capacity_16 *scsi_cmd;
3279 	struct scsi_read_capacity_eedp *rcap_buf;
3280 	path_id_t pathid;
3281 	target_id_t targetid;
3282 	lun_id_t lunid;
3283 	union ccb *ccb;
3284 	struct cam_path *local_path;
3285 	struct mpssas_target *target;
3286 	struct mpssas_lun *lun;
3287 	uint8_t	found_lun;
3288 	char path_str[64];
3289 
3290 	sassc = sc->sassc;
3291 	pathid = cam_sim_path(sassc->sim);
3292 	targetid = xpt_path_target_id(path);
3293 	lunid = xpt_path_lun_id(path);
3294 
3295 	KASSERT(targetid < sassc->maxtargets,
3296 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3297 	     targetid));
3298 	target = &sassc->targets[targetid];
3299 	if (target->handle == 0x0)
3300 		return;
3301 
3302 	/*
3303 	 * Determine if the device is EEDP capable.
3304 	 *
3305 	 * If this flag is set in the inquiry data,
3306 	 * the device supports protection information,
3307 	 * and must support the 16 byte read
3308 	 * capacity command, otherwise continue without
3309 	 * sending read cap 16
3310 	 */
3311 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3312 		return;
3313 
3314 	/*
3315 	 * Issue a READ CAPACITY 16 command.  This info
3316 	 * is used to determine if the LUN is formatted
3317 	 * for EEDP support.
3318 	 */
3319 	ccb = xpt_alloc_ccb_nowait();
3320 	if (ccb == NULL) {
3321 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3322 		    "for EEDP support.\n");
3323 		return;
3324 	}
3325 
3326 	if (xpt_create_path(&local_path, xpt_periph,
3327 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3328 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3329 		    "path for EEDP support\n");
3330 		xpt_free_ccb(ccb);
3331 		return;
3332 	}
3333 
3334 	/*
3335 	 * If LUN is already in list, don't create a new
3336 	 * one.
3337 	 */
3338 	found_lun = FALSE;
3339 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3340 		if (lun->lun_id == lunid) {
3341 			found_lun = TRUE;
3342 			break;
3343 		}
3344 	}
3345 	if (!found_lun) {
3346 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3347 		    M_NOWAIT | M_ZERO);
3348 		if (lun == NULL) {
3349 			mps_dprint(sc, MPS_ERROR,
3350 			    "Unable to alloc LUN for EEDP support.\n");
3351 			xpt_free_path(local_path);
3352 			xpt_free_ccb(ccb);
3353 			return;
3354 		}
3355 		lun->lun_id = lunid;
3356 		SLIST_INSERT_HEAD(&target->luns, lun,
3357 		    lun_link);
3358 	}
3359 
3360 	xpt_path_string(local_path, path_str, sizeof(path_str));
3361 
3362 	/*
3363 	 * If this is a SATA direct-access end device,
3364 	 * mark it so that a SCSI StartStopUnit command
3365 	 * will be sent to it when the driver is being
3366 	 * shutdown.
3367 	 */
3368 	if ((cgd.inq_data.device == T_DIRECT) &&
3369 		(target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3370 		((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3371 		MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3372 		lun->stop_at_shutdown = TRUE;
3373 	}
3374 
3375 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3376 	    path_str, target->handle);
3377 
3378 	/*
3379 	 * Issue a READ CAPACITY 16 command for the LUN.
3380 	 * The mpssas_read_cap_done function will load
3381 	 * the read cap info into the LUN struct.
3382 	 */
3383 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3384 	    M_MPT2, M_NOWAIT | M_ZERO);
3385 	if (rcap_buf == NULL) {
3386 		mps_dprint(sc, MPS_FAULT,
3387 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3388 		xpt_free_path(ccb->ccb_h.path);
3389 		xpt_free_ccb(ccb);
3390 		return;
3391 	}
3392 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3393 	csio = &ccb->csio;
3394 	csio->ccb_h.func_code = XPT_SCSI_IO;
3395 	csio->ccb_h.flags = CAM_DIR_IN;
3396 	csio->ccb_h.retry_count = 4;
3397 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3398 	csio->ccb_h.timeout = 60000;
3399 	csio->data_ptr = (uint8_t *)rcap_buf;
3400 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3401 	csio->sense_len = MPS_SENSE_LEN;
3402 	csio->cdb_len = sizeof(*scsi_cmd);
3403 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3404 
3405 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3406 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3407 	scsi_cmd->opcode = 0x9E;
3408 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3409 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3410 
3411 	ccb->ccb_h.ppriv_ptr1 = sassc;
3412 	xpt_action(ccb);
3413 }
3414 
/*
 * Completion handler for the internally-generated READ CAPACITY 16
 * issued by mpssas_check_eedp().  Records the LUN's EEDP formatting
 * state, then frees the data buffer, the path, and the CCB allocated by
 * the caller.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq here because this SCSI
	 * command was generated internally by the driver, not by a CAM
	 * peripheral.  This is currently the only place the driver issues
	 * a SCSI command internally; any future internal commands must
	 * release the devq themselves as well, since they will not pass
	 * back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte indicates protection is enabled. */
		if (rcap_buf->protect & 0x01) {
			/*
			 * NOTE(review): the %d conversions assume
			 * target_lun/target_id fit in an int -- true for
			 * the FreeBSD versions this branch compiles on,
			 * but confirm if lun_id_t is ever widened.
			 */
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3485 #endif /* (__FreeBSD_version < 901503) || \
3486           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3487 
3488 int
3489 mpssas_startup(struct mps_softc *sc)
3490 {
3491 
3492 	/*
3493 	 * Send the port enable message and set the wait_for_port_enable flag.
3494 	 * This flag helps to keep the simq frozen until all discovery events
3495 	 * are processed.
3496 	 */
3497 	sc->wait_for_port_enable = 1;
3498 	mpssas_send_portenable(sc);
3499 	return (0);
3500 }
3501 
3502 static int
3503 mpssas_send_portenable(struct mps_softc *sc)
3504 {
3505 	MPI2_PORT_ENABLE_REQUEST *request;
3506 	struct mps_command *cm;
3507 
3508 	MPS_FUNCTRACE(sc);
3509 
3510 	if ((cm = mps_alloc_command(sc)) == NULL)
3511 		return (EBUSY);
3512 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3513 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3514 	request->MsgFlags = 0;
3515 	request->VP_ID = 0;
3516 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3517 	cm->cm_complete = mpssas_portenable_complete;
3518 	cm->cm_data = NULL;
3519 	cm->cm_sge = NULL;
3520 
3521 	mps_map_command(sc, cm);
3522 	mps_dprint(sc, MPS_XINFO,
3523 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3524 	    cm, cm->cm_req, cm->cm_complete);
3525 	return (0);
3526 }
3527 
3528 static void
3529 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3530 {
3531 	MPI2_PORT_ENABLE_REPLY *reply;
3532 	struct mpssas_softc *sassc;
3533 
3534 	MPS_FUNCTRACE(sc);
3535 	sassc = sc->sassc;
3536 
3537 	/*
3538 	 * Currently there should be no way we can hit this case.  It only
3539 	 * happens when we have a failure to allocate chain frames, and
3540 	 * port enable commands don't have S/G lists.
3541 	 */
3542 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3543 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3544 			   "This should not happen!\n", __func__, cm->cm_flags);
3545 	}
3546 
3547 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3548 	if (reply == NULL)
3549 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3550 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3551 	    MPI2_IOCSTATUS_SUCCESS)
3552 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3553 
3554 	mps_free_command(sc, cm);
3555 	if (sc->mps_ich.ich_arg != NULL) {
3556 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3557 		config_intrhook_disestablish(&sc->mps_ich);
3558 		sc->mps_ich.ich_arg = NULL;
3559 	}
3560 
3561 	/*
3562 	 * Get WarpDrive info after discovery is complete but before the scan
3563 	 * starts.  At this point, all devices are ready to be exposed to the
3564 	 * OS.  If devices should be hidden instead, take them out of the
3565 	 * 'targets' array before the scan.  The devinfo for a disk will have
3566 	 * some info and a volume's will be 0.  Use that to remove disks.
3567 	 */
3568 	mps_wd_config_pages(sc);
3569 
3570 	/*
3571 	 * Done waiting for port enable to complete.  Decrement the refcount.
3572 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3573 	 * take place.  Since the simq was explicitly frozen before port
3574 	 * enable, it must be explicitly released here to keep the
3575 	 * freeze/release count in sync.
3576 	 */
3577 	sc->wait_for_port_enable = 0;
3578 	sc->port_enable_complete = 1;
3579 	wakeup(&sc->port_enable_complete);
3580 	mpssas_startup_decrement(sassc);
3581 }
3582 
3583 int
3584 mpssas_check_id(struct mpssas_softc *sassc, int id)
3585 {
3586 	struct mps_softc *sc = sassc->sc;
3587 	char *ids;
3588 	char *name;
3589 
3590 	ids = &sc->exclude_ids[0];
3591 	while((name = strsep(&ids, ",")) != NULL) {
3592 		if (name[0] == '\0')
3593 			continue;
3594 		if (strtol(name, NULL, 0) == (long)id)
3595 			return (1);
3596 	}
3597 
3598 	return (0);
3599 }
3600 
3601 void
3602 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3603 {
3604 	struct mpssas_softc *sassc;
3605 	struct mpssas_lun *lun, *lun_tmp;
3606 	struct mpssas_target *targ;
3607 	int i;
3608 
3609 	sassc = sc->sassc;
3610 	/*
3611 	 * The number of targets is based on IOC Facts, so free all of
3612 	 * the allocated LUNs for each target and then the target buffer
3613 	 * itself.
3614 	 */
3615 	for (i=0; i< maxtargets; i++) {
3616 		targ = &sassc->targets[i];
3617 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3618 			free(lun, M_MPT2);
3619 		}
3620 	}
3621 	free(sassc->targets, M_MPT2);
3622 
3623 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3624 	    M_MPT2, M_WAITOK|M_ZERO);
3625 	if (!sassc->targets) {
3626 		panic("%s failed to alloc targets with error %d\n",
3627 		    __func__, ENOMEM);
3628 	}
3629 }
3630