xref: /freebsd/sys/dev/mps/mps_sas.c (revision a98ff317388a00b992f1bf8404dee596f9383f5e)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
88 #define MPSSAS_DISCOVERY_TIMEOUT	20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90 
91 /*
92  * static array to check SCSI OpCode for EEDP protection bits
93  */
94 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI opcode (one row per 16 opcodes, so row N
 * covers opcodes 0xN0-0xNF).  A nonzero entry gives the EEDP flags the
 * firmware should apply for that command: PRO_R (check/remove protection
 * info on reads), PRO_W (insert on writes), PRO_V (insert, for verify ops).
 * Nonzero rows correspond to the standard READ/WRITE/WRITE AND VERIFY/
 * VERIFY/WRITE SAME opcodes in the 10-, 12-, and 16-byte CDB families.
 */
97 static uint8_t op_code_prot[256] = {
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, /* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x41 WRITE SAME(10) */
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, /* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
107 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x93 WRITE SAME(16) */
108 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, /* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
109 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
114 };
115 
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117 
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126     struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 			       uint64_t sasaddr);
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
173 void
174 mpssas_startup_increment(struct mpssas_softc *sassc)
175 {
176 	MPS_FUNCTRACE(sassc->sc);
177 
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INIT,
182 			    "%s freezing simq\n", __func__);
183 			xpt_freeze_simq(sassc->sim, 1);
184 		}
185 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
186 		    sassc->startup_refcount);
187 	}
188 }
189 
190 void
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
192 {
193 	MPS_FUNCTRACE(sassc->sc);
194 
195 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
196 		if (--sassc->startup_refcount == 0) {
197 			/* finished all discovery-related actions, release
198 			 * the simq and rescan for the latest topology.
199 			 */
200 			mps_dprint(sassc->sc, MPS_INIT,
201 			    "%s releasing simq\n", __func__);
202 			sassc->flags &= ~MPSSAS_IN_STARTUP;
203 #if __FreeBSD_version >= 1000039
204 			xpt_release_boot();
205 #else
206 			xpt_release_simq(sassc->sim, 1);
207 			mpssas_rescan_target(sassc->sc, NULL);
208 #endif
209 		}
210 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
211 		    sassc->startup_refcount);
212 	}
213 }
214 
215 /* LSI's firmware requires us to stop sending commands when we're doing task
216  * management, so refcount the TMs and keep the simq frozen when any are in
217  * use.
218  */
219 struct mps_command *
220 mpssas_alloc_tm(struct mps_softc *sc)
221 {
222 	struct mps_command *tm;
223 
224 	MPS_FUNCTRACE(sc);
225 	tm = mps_alloc_high_priority_command(sc);
226 	if (tm != NULL) {
227 		if (sc->sassc->tm_count++ == 0) {
228 			mps_dprint(sc, MPS_RECOVERY,
229 			    "%s freezing simq\n", __func__);
230 			xpt_freeze_simq(sc->sassc->sim, 1);
231 		}
232 		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
233 		    sc->sassc->tm_count);
234 	}
235 	return tm;
236 }
237 
/*
 * Release a task-management command obtained from mpssas_alloc_tm().
 * Dropping the last outstanding TM reference releases the simq that
 * mpssas_alloc_tm() froze.  Passing NULL is a no-op.
 */
238 void
239 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
240 {
241 	mps_dprint(sc, MPS_TRACE, "%s", __func__);
242 	if (tm == NULL)
243 		return;
244 
245 	/* if there are no TMs in use, we can release the simq.  We use our
246 	 * own refcount so that it's easier for a diag reset to cleanup and
247 	 * release the simq.
248 	 */
249 	if (--sc->sassc->tm_count == 0) {
250 		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
251 		xpt_release_simq(sc->sassc->sim, 1);
252 	}
253 	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
254 	    sc->sassc->tm_count);
255 
256 	mps_free_high_priority_command(sc, tm);
257 }
258 
259 void
260 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
261 {
262 	struct mpssas_softc *sassc = sc->sassc;
263 	path_id_t pathid;
264 	target_id_t targetid;
265 	union ccb *ccb;
266 
267 	MPS_FUNCTRACE(sc);
268 	pathid = cam_sim_path(sassc->sim);
269 	if (targ == NULL)
270 		targetid = CAM_TARGET_WILDCARD;
271 	else
272 		targetid = targ - sassc->targets;
273 
274 	/*
275 	 * Allocate a CCB and schedule a rescan.
276 	 */
277 	ccb = xpt_alloc_ccb_nowait();
278 	if (ccb == NULL) {
279 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
280 		return;
281 	}
282 
283 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
284 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
285 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
286 		xpt_free_ccb(ccb);
287 		return;
288 	}
289 
290 	if (targetid == CAM_TARGET_WILDCARD)
291 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
292 	else
293 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
294 
295 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
296 	xpt_rescan(ccb);
297 }
298 
/*
 * printf-style logging helper that prefixes the message with command
 * context: the CAM path (and CDB/length for SCSI I/O) when a CCB is
 * attached, or a synthesized "(noperiph:...)" tag otherwise, followed
 * by the request SMID.  The formatted line is emitted through
 * mps_dprint_field() at the given debug 'level'.  NULL 'cm' is a no-op.
 */
299 static void
300 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
301 {
302 	struct sbuf sb;
303 	va_list ap;
304 	char str[192];
305 	char path_str[64];
306 
307 	if (cm == NULL)
308 		return;
309 
	/* Fixed-size sbuf backed by 'str'; output beyond 192 bytes is dropped. */
310 	sbuf_new(&sb, str, sizeof(str), 0);
311 
312 	va_start(ap, fmt);
313 
314 	if (cm->cm_ccb != NULL) {
315 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
316 				sizeof(path_str));
317 		sbuf_cat(&sb, path_str);
318 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
319 			scsi_command_string(&cm->cm_ccb->csio, &sb);
320 			sbuf_printf(&sb, "length %d ",
321 				    cm->cm_ccb->csio.dxfer_len);
322 		}
323 	}
324 	else {
		/* No CCB: identify the command by sim/bus/target/lun instead. */
325 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
326 		    cam_sim_name(cm->cm_sc->sassc->sim),
327 		    cam_sim_unit(cm->cm_sc->sassc->sim),
328 		    cam_sim_bus(cm->cm_sc->sassc->sim),
329 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
330 		    cm->cm_lun);
331 	}
332 
333 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
334 	sbuf_vprintf(&sb, fmt, ap);
335 	sbuf_finish(&sb);
336 	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
337 
338 	va_end(ap);
339 }
340 
341 
342 static void
343 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
344 {
345 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
346 	struct mpssas_target *targ;
347 	uint16_t handle;
348 
349 	MPS_FUNCTRACE(sc);
350 
351 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
352 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
353 	targ = tm->cm_targ;
354 
355 	if (reply == NULL) {
356 		/* XXX retry the remove after the diag reset completes? */
357 		mps_dprint(sc, MPS_FAULT,
358 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
359 		mpssas_free_tm(sc, tm);
360 		return;
361 	}
362 
363 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
364 		mps_dprint(sc, MPS_FAULT,
365 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
366 		   reply->IOCStatus, handle);
367 		mpssas_free_tm(sc, tm);
368 		return;
369 	}
370 
371 	mps_dprint(sc, MPS_XINFO,
372 	    "Reset aborted %u commands\n", reply->TerminationCount);
373 	mps_free_reply(sc, tm->cm_reply_data);
374 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
375 
376 	mps_dprint(sc, MPS_XINFO,
377 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
378 
379 	/*
380 	 * Don't clear target if remove fails because things will get confusing.
381 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
382 	 * this target id if possible, and so we can assign the same target id
383 	 * to this device if it comes back in the future.
384 	 */
385 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
386 		targ = tm->cm_targ;
387 		targ->handle = 0x0;
388 		targ->encl_handle = 0x0;
389 		targ->encl_slot = 0x0;
390 		targ->exp_dev_handle = 0x0;
391 		targ->phy_num = 0x0;
392 		targ->linkrate = 0x0;
393 		targ->devinfo = 0x0;
394 		targ->flags = 0x0;
395 	}
396 
397 	mpssas_free_tm(sc, tm);
398 }
399 
400 
401 /*
402  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
403  * Otherwise Volume Delete is same as Bare Drive Removal.
404  */
405 void
406 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
407 {
408 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
409 	struct mps_softc *sc;
410 	struct mps_command *cm;
411 	struct mpssas_target *targ = NULL;
412 
413 	MPS_FUNCTRACE(sassc->sc);
414 	sc = sassc->sc;
415 
416 #ifdef WD_SUPPORT
417 	/*
418 	 * If this is a WD controller, determine if the disk should be exposed
419 	 * to the OS or not.  If disk should be exposed, return from this
420 	 * function without doing anything.
421 	 */
422 	if (sc->WD_available && (sc->WD_hide_expose ==
423 	    MPS_WD_EXPOSE_ALWAYS)) {
424 		return;
425 	}
426 #endif //WD_SUPPORT
427 
428 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
429 	if (targ == NULL) {
430 		/* FIXME: what is the action? */
431 		/* We don't know about this device? */
432 		mps_dprint(sc, MPS_ERROR,
433 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
434 		return;
435 	}
436 
437 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
438 
439 	cm = mpssas_alloc_tm(sc);
440 	if (cm == NULL) {
441 		mps_dprint(sc, MPS_ERROR,
442 		    "%s: command alloc failure\n", __func__);
443 		return;
444 	}
445 
446 	mpssas_rescan_target(sc, targ);
447 
448 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
449 	req->DevHandle = targ->handle;
450 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
451 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
452 
453 	/* SAS Hard Link Reset / SATA Link Reset */
454 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
455 
456 	cm->cm_targ = targ;
457 	cm->cm_data = NULL;
458 	cm->cm_desc.HighPriority.RequestFlags =
459 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
460 	cm->cm_complete = mpssas_remove_volume;
461 	cm->cm_complete_data = (void *)(uintptr_t)handle;
462 	mps_map_command(sc, cm);
463 }
464 
465 /*
466  * The MPT2 firmware performs debounce on the link to avoid transient link
467  * errors and false removals.  When it does decide that link has been lost
468  * and a device need to go away, it expects that the host will perform a
469  * target reset and then an op remove.  The reset has the side-effect of
470  * aborting any outstanding requests for the device, which is required for
471  * the op-remove to succeed.  It's not clear if the host should check for
472  * the device coming back alive after the reset.
473  */
/*
 * Begin removal of the device with firmware handle 'handle': mark the
 * target as in-removal, send a target-reset TM (which aborts any
 * outstanding I/O for the device), and schedule a rescan.  The op-remove
 * itself is issued from the TM completion handler, mpssas_remove_device().
 */
474 void
475 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
476 {
477 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
478 	struct mps_softc *sc;
479 	struct mps_command *cm;
480 	struct mpssas_target *targ = NULL;
481 
482 	MPS_FUNCTRACE(sassc->sc);
483 
484 	sc = sassc->sc;
485 
486 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
487 	if (targ == NULL) {
488 		/* FIXME: what is the action? */
489 		/* We don't know about this device? */
490 		mps_dprint(sc, MPS_ERROR,
491 		    "%s : invalid handle 0x%x \n", __func__, handle);
492 		return;
493 	}
494 
495 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
496 
	/* TM allocation also freezes the simq while the reset is in flight. */
497 	cm = mpssas_alloc_tm(sc);
498 	if (cm == NULL) {
		/* NOTE(review): removal is silently dropped on alloc failure. */
499 		mps_dprint(sc, MPS_ERROR,
500 		    "%s: command alloc failure\n", __func__);
501 		return;
502 	}
503 
504 	mpssas_rescan_target(sc, targ);
505 
	/* Build the target-reset TM; request frames are little-endian. */
506 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
507 	memset(req, 0, sizeof(*req));
508 	req->DevHandle = htole16(targ->handle);
509 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
510 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
511 
512 	/* SAS Hard Link Reset / SATA Link Reset */
513 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
514 
515 	cm->cm_targ = targ;
516 	cm->cm_data = NULL;
517 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
518 	cm->cm_complete = mpssas_remove_device;
519 	cm->cm_complete_data = (void *)(uintptr_t)handle;
520 	mps_map_command(sc, cm);
521 }
522 
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  On a successful reset, the same command is
 * recycled into a SAS IO Unit Control REMOVE_DEVICE request (completing
 * in mpssas_remove_complete()), and any commands still queued on the
 * target are failed with CAM_DEV_NOT_THERE.
 */
523 static void
524 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
525 {
526 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
527 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
528 	struct mpssas_target *targ;
529 	struct mps_command *next_cm;
530 	uint16_t handle;
531 
532 	MPS_FUNCTRACE(sc);
533 
534 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
535 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
536 	targ = tm->cm_targ;
537 
538 	/*
539 	 * Currently there should be no way we can hit this case.  It only
540 	 * happens when we have a failure to allocate chain frames, and
541 	 * task management commands don't have S/G lists.
542 	 */
543 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
544 		mps_dprint(sc, MPS_ERROR,
545 		    "%s: cm_flags = %#x for remove of handle %#04x! "
546 		    "This should not happen!\n", __func__, tm->cm_flags,
547 		    handle);
548 		mpssas_free_tm(sc, tm);
549 		return;
550 	}
551 
552 	if (reply == NULL) {
553 		/* XXX retry the remove after the diag reset completes? */
554 		mps_dprint(sc, MPS_FAULT,
555 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
556 		mpssas_free_tm(sc, tm);
557 		return;
558 	}
559 
	/* Reply frames are little-endian; swap before comparing. */
560 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
561 		mps_dprint(sc, MPS_FAULT,
562 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
563 		   le16toh(reply->IOCStatus), handle);
564 		mpssas_free_tm(sc, tm);
565 		return;
566 	}
567 
568 	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
569 	    le32toh(reply->TerminationCount));
570 	mps_free_reply(sc, tm->cm_reply_data);
571 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
572 
573 	/* Reuse the existing command */
574 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
575 	memset(req, 0, sizeof(*req));
576 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
577 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
578 	req->DevHandle = htole16(handle);
579 	tm->cm_data = NULL;
580 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
581 	tm->cm_complete = mpssas_remove_complete;
582 	tm->cm_complete_data = (void *)(uintptr_t)handle;
583 
584 	mps_map_command(sc, tm);
585 
586 	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
587 		   targ->tid, handle);
	/*
	 * Fail any commands still queued on the target; 'tm' is reused as
	 * the loop iterator here, the TM itself is already in flight above.
	 */
588 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
589 		union ccb *ccb;
590 
591 		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
592 		ccb = tm->cm_complete_data;
593 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
594 		mpssas_scsiio_complete(sc, tm);
595 	}
596 }
597 
/*
 * Completion handler for the REMOVE_DEVICE IO Unit Control request sent
 * by mpssas_remove_device().  On success, clears the target's bookkeeping
 * and frees its LUN list so the slot reads as empty.
 */
598 static void
599 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
600 {
601 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
602 	uint16_t handle;
603 	struct mpssas_target *targ;
604 	struct mpssas_lun *lun;
605 
606 	MPS_FUNCTRACE(sc);
607 
608 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
609 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
610 
611 	/*
612 	 * Currently there should be no way we can hit this case.  It only
613 	 * happens when we have a failure to allocate chain frames, and
614 	 * task management commands don't have S/G lists.
615 	 */
616 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
617 		mps_dprint(sc, MPS_XINFO,
618 			   "%s: cm_flags = %#x for remove of handle %#04x! "
619 			   "This should not happen!\n", __func__, tm->cm_flags,
620 			   handle);
621 		mpssas_free_tm(sc, tm);
622 		return;
623 	}
624 
625 	if (reply == NULL) {
626 		/* most likely a chip reset */
627 		mps_dprint(sc, MPS_FAULT,
628 		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
629 		mpssas_free_tm(sc, tm);
630 		return;
631 	}
632 
633 	mps_dprint(sc, MPS_XINFO,
634 	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
635 	    handle, le16toh(reply->IOCStatus));
636 
637 	/*
638 	 * Don't clear target if remove fails because things will get confusing.
639 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
640 	 * this target id if possible, and so we can assign the same target id
641 	 * to this device if it comes back in the future.
642 	 */
643 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
644 		targ = tm->cm_targ;
645 		targ->handle = 0x0;
646 		targ->encl_handle = 0x0;
647 		targ->encl_slot = 0x0;
648 		targ->exp_dev_handle = 0x0;
649 		targ->phy_num = 0x0;
650 		targ->linkrate = 0x0;
651 		targ->devinfo = 0x0;
652 		targ->flags = 0x0;
653 
		/* Free every LUN record hanging off the target. */
654 		while(!SLIST_EMPTY(&targ->luns)) {
655 			lun = SLIST_FIRST(&targ->luns);
656 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
657 			free(lun, M_MPT2);
658 		}
659 	}
660 
661 
662 	mpssas_free_tm(sc, tm);
663 }
664 
665 static int
666 mpssas_register_events(struct mps_softc *sc)
667 {
668 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
669 
670 	bzero(events, 16);
671 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
672 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
673 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
674 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
675 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
676 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
677 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
678 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
679 	setbit(events, MPI2_EVENT_IR_VOLUME);
680 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
681 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
682 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
683 
684 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
685 	    &sc->sassc->mpssas_eh);
686 
687 	return (0);
688 }
689 
/*
 * Attach the CAM/SAS layer: allocate the per-controller SAS softc and
 * target array, create the simq and SIM, start the firmware-event
 * taskqueue, register the bus with CAM, hold off boot until discovery
 * completes, and register async/firmware event handlers.  Returns 0 on
 * success or an errno; on failure mps_detach_sas() undoes partial setup.
 */
690 int
691 mps_attach_sas(struct mps_softc *sc)
692 {
693 	struct mpssas_softc *sassc;
694 	cam_status status;
695 	int unit, error = 0;
696 
697 	MPS_FUNCTRACE(sc);
698 
	/* NOTE(review): M_WAITOK malloc cannot return NULL, so these two
	 * NULL checks are defensive dead code — confirm before removing. */
699 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
700 	if(!sassc) {
701 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
702 		__func__, __LINE__);
703 		return (ENOMEM);
704 	}
705 	sassc->targets = malloc(sizeof(struct mpssas_target) *
706 	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
707 	if(!sassc->targets) {
708 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
709 		__func__, __LINE__);
710 		free(sassc, M_MPT2);
711 		return (ENOMEM);
712 	}
713 	sc->sassc = sassc;
714 	sassc->sc = sc;
715 
	/* One simq/SIM slot per firmware request credit. */
716 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
717 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
718 		error = ENOMEM;
719 		goto out;
720 	}
721 
722 	unit = device_get_unit(sc->mps_dev);
723 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
724 	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
725 	if (sassc->sim == NULL) {
726 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
727 		error = EINVAL;
728 		goto out;
729 	}
730 
731 	TAILQ_INIT(&sassc->ev_queue);
732 
733 	/* Initialize taskqueue for Event Handling */
734 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
735 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
736 	    taskqueue_thread_enqueue, &sassc->ev_tq);
737 
738 	/* Run the task queue with lowest priority */
739 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
740 	    device_get_nameunit(sc->mps_dev));
741 
742 	mps_lock(sc);
743 
744 	/*
745 	 * XXX There should be a bus for every port on the adapter, but since
746 	 * we're just going to fake the topology for now, we'll pretend that
747 	 * everything is just a target on a single bus.
748 	 */
749 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
750 		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
751 		    error);
752 		mps_unlock(sc);
753 		goto out;
754 	}
755 
756 	/*
757 	 * Assume that discovery events will start right away.
758 	 *
759 	 * Hold off boot until discovery is complete.
760 	 */
761 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
762 #if __FreeBSD_version >= 1000039
763 	xpt_hold_boot();
764 #else
765 	xpt_freeze_simq(sassc->sim, 1);
766 #endif
767 	sc->sassc->startup_refcount = 0;
768 
769 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
770 	sassc->discovery_timeouts = 0;
771 
772 	sassc->tm_count = 0;
773 
774 	/*
775 	 * Register for async events so we can determine the EEDP
776 	 * capabilities of devices.
777 	 */
778 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
779 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
780 	    CAM_LUN_WILDCARD);
781 	if (status != CAM_REQ_CMP) {
782 		mps_printf(sc, "Error %#x creating sim path\n", status);
783 		sassc->path = NULL;
784 	} else {
785 		int event;
786 
787 #if (__FreeBSD_version >= 1000006) || \
788     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
789 		event = AC_ADVINFO_CHANGED;
790 #else
791 		event = AC_FOUND_DEVICE;
792 #endif
793 		status = xpt_register_async(event, mpssas_async, sc,
794 					    sassc->path);
795 		if (status != CAM_REQ_CMP) {
796 			mps_dprint(sc, MPS_ERROR,
797 			    "Error %#x registering async handler for "
798 			    "AC_ADVINFO_CHANGED events\n", status);
799 			xpt_free_path(sassc->path);
800 			sassc->path = NULL;
801 		}
802 	}
803 	if (status != CAM_REQ_CMP) {
804 		/*
805 		 * EEDP use is the exception, not the rule.
806 		 * Warn the user, but do not fail to attach.
807 		 */
808 		mps_printf(sc, "EEDP capabilities disabled.\n");
809 	}
810 
811 	mps_unlock(sc);
812 
813 	mpssas_register_events(sc);
814 out:
815 	if (error)
816 		mps_detach_sas(sc);
817 	return (error);
818 }
819 
/*
 * Tear down the CAM/SAS layer: deregister event handlers, drain the
 * taskqueue (lock unheld to avoid deadlock), unwind CAM registration,
 * free per-target LUN lists, and release the softc.  Safe to call on a
 * partially-attached instance (every resource is checked before use).
 * Always returns 0.
 */
820 int
821 mps_detach_sas(struct mps_softc *sc)
822 {
823 	struct mpssas_softc *sassc;
824 	struct mpssas_lun *lun, *lun_tmp;
825 	struct mpssas_target *targ;
826 	int i;
827 
828 	MPS_FUNCTRACE(sc);
829 
830 	if (sc->sassc == NULL)
831 		return (0);
832 
833 	sassc = sc->sassc;
834 	mps_deregister_events(sc, sassc->mpssas_eh);
835 
836 	/*
837 	 * Drain and free the event handling taskqueue with the lock
838 	 * unheld so that any parallel processing tasks drain properly
839 	 * without deadlocking.
840 	 */
841 	if (sassc->ev_tq != NULL)
842 		taskqueue_free(sassc->ev_tq);
843 
844 	/* Make sure CAM doesn't wedge if we had to bail out early. */
845 	mps_lock(sc);
846 
847 	/* Deregister our async handler */
848 	if (sassc->path != NULL) {
		/* Event code 0 clears the async callback registration. */
849 		xpt_register_async(0, mpssas_async, sc, sassc->path);
850 		xpt_free_path(sassc->path);
851 		sassc->path = NULL;
852 	}
853 
	/* Startup never finished: balance the attach-time simq freeze. */
854 	if (sassc->flags & MPSSAS_IN_STARTUP)
855 		xpt_release_simq(sassc->sim, 1);
856 
857 	if (sassc->sim != NULL) {
858 		xpt_bus_deregister(cam_sim_path(sassc->sim));
859 		cam_sim_free(sassc->sim, FALSE);
860 	}
861 
862 	sassc->flags |= MPSSAS_SHUTDOWN;
863 	mps_unlock(sc);
864 
865 	if (sassc->devq != NULL)
866 		cam_simq_free(sassc->devq);
867 
	/* Free any LUN records still attached to the targets. */
868 	for(i=0; i< sc->facts->MaxTargets ;i++) {
869 		targ = &sassc->targets[i];
870 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
871 			free(lun, M_MPT2);
872 		}
873 	}
874 	free(sassc->targets, M_MPT2);
875 	free(sassc, M_MPT2);
876 	sc->sassc = NULL;
877 
878 	return (0);
879 }
880 
881 void
882 mpssas_discovery_end(struct mpssas_softc *sassc)
883 {
884 	struct mps_softc *sc = sassc->sc;
885 
886 	MPS_FUNCTRACE(sc);
887 
888 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
889 		callout_stop(&sassc->discovery_callout);
890 
891 }
892 
/*
 * Callout handler armed while discovery is in progress.  If discovery
 * has not completed, poll the hardware (in case interrupts are broken)
 * and re-arm, up to MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts; after that,
 * give up and force discovery to end so the boot can proceed.
 */
893 static void
894 mpssas_discovery_timeout(void *data)
895 {
896 	struct mpssas_softc *sassc = data;
897 	struct mps_softc *sc;
898 
899 	sc = sassc->sc;
900 	MPS_FUNCTRACE(sc);
901 
902 	mps_lock(sc);
903 	mps_dprint(sc, MPS_INFO,
904 	    "Timeout waiting for discovery, interrupts may not be working!\n");
905 	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
906 
907 	/* Poll the hardware for events in case interrupts aren't working */
908 	mps_intr_locked(sc);
909 
910 	mps_dprint(sassc->sc, MPS_INFO,
911 	    "Finished polling after discovery timeout at %d\n", ticks);
912 
	/* Polling may have delivered the end-of-discovery event. */
913 	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
914 		mpssas_discovery_end(sassc);
915 	} else {
916 		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
917 			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
918 			callout_reset(&sassc->discovery_callout,
919 			    MPSSAS_DISCOVERY_TIMEOUT * hz,
920 			    mpssas_discovery_timeout, sassc);
921 			sassc->discovery_timeouts++;
922 		} else {
923 			mps_dprint(sassc->sc, MPS_FAULT,
924 			    "Discovery timed out, continuing.\n");
925 			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
926 			mpssas_discovery_end(sassc);
927 		}
928 	}
929 
930 	mps_unlock(sc);
931 }
932 
933 static void
934 mpssas_action(struct cam_sim *sim, union ccb *ccb)
935 {
936 	struct mpssas_softc *sassc;
937 
938 	sassc = cam_sim_softc(sim);
939 
940 	MPS_FUNCTRACE(sassc->sc);
941 	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
942 	    ccb->ccb_h.func_code);
943 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
944 
945 	switch (ccb->ccb_h.func_code) {
946 	case XPT_PATH_INQ:
947 	{
948 		struct ccb_pathinq *cpi = &ccb->cpi;
949 
950 		cpi->version_num = 1;
951 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
952 		cpi->target_sprt = 0;
953 #if __FreeBSD_version >= 1000039
954 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
955 #else
956 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
957 #endif
958 		cpi->hba_eng_cnt = 0;
959 		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
960 		cpi->max_lun = 255;
961 		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
962 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
963 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
964 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
965 		cpi->unit_number = cam_sim_unit(sim);
966 		cpi->bus_id = cam_sim_bus(sim);
967 		cpi->base_transfer_speed = 150000;
968 		cpi->transport = XPORT_SAS;
969 		cpi->transport_version = 0;
970 		cpi->protocol = PROTO_SCSI;
971 		cpi->protocol_version = SCSI_REV_SPC;
972 #if __FreeBSD_version >= 800001
973 		/*
974 		 * XXX KDM where does this number come from?
975 		 */
976 		cpi->maxio = 256 * 1024;
977 #endif
978 		cpi->ccb_h.status = CAM_REQ_CMP;
979 		break;
980 	}
981 	case XPT_GET_TRAN_SETTINGS:
982 	{
983 		struct ccb_trans_settings	*cts;
984 		struct ccb_trans_settings_sas	*sas;
985 		struct ccb_trans_settings_scsi	*scsi;
986 		struct mpssas_target *targ;
987 
988 		cts = &ccb->cts;
989 		sas = &cts->xport_specific.sas;
990 		scsi = &cts->proto_specific.scsi;
991 
992 		targ = &sassc->targets[cts->ccb_h.target_id];
993 		if (targ->handle == 0x0) {
994 			cts->ccb_h.status = CAM_SEL_TIMEOUT;
995 			break;
996 		}
997 
998 		cts->protocol_version = SCSI_REV_SPC2;
999 		cts->transport = XPORT_SAS;
1000 		cts->transport_version = 0;
1001 
1002 		sas->valid = CTS_SAS_VALID_SPEED;
1003 		switch (targ->linkrate) {
1004 		case 0x08:
1005 			sas->bitrate = 150000;
1006 			break;
1007 		case 0x09:
1008 			sas->bitrate = 300000;
1009 			break;
1010 		case 0x0a:
1011 			sas->bitrate = 600000;
1012 			break;
1013 		default:
1014 			sas->valid = 0;
1015 		}
1016 
1017 		cts->protocol = PROTO_SCSI;
1018 		scsi->valid = CTS_SCSI_VALID_TQ;
1019 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1020 
1021 		cts->ccb_h.status = CAM_REQ_CMP;
1022 		break;
1023 	}
1024 	case XPT_CALC_GEOMETRY:
1025 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1026 		ccb->ccb_h.status = CAM_REQ_CMP;
1027 		break;
1028 	case XPT_RESET_DEV:
1029 		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1030 		mpssas_action_resetdev(sassc, ccb);
1031 		return;
1032 	case XPT_RESET_BUS:
1033 	case XPT_ABORT:
1034 	case XPT_TERM_IO:
1035 		mps_dprint(sassc->sc, MPS_XINFO,
1036 		    "mpssas_action faking success for abort or reset\n");
1037 		ccb->ccb_h.status = CAM_REQ_CMP;
1038 		break;
1039 	case XPT_SCSI_IO:
1040 		mpssas_action_scsiio(sassc, ccb);
1041 		return;
1042 #if __FreeBSD_version >= 900026
1043 	case XPT_SMP_IO:
1044 		mpssas_action_smpio(sassc, ccb);
1045 		return;
1046 #endif
1047 	default:
1048 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1049 		break;
1050 	}
1051 	xpt_done(ccb);
1052 
1053 }
1054 
1055 static void
1056 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1057     target_id_t target_id, lun_id_t lun_id)
1058 {
1059 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1060 	struct cam_path *path;
1061 
1062 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1063 	    ac_code, target_id, lun_id);
1064 
1065 	if (xpt_create_path(&path, NULL,
1066 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1067 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1068 			   "notification\n");
1069 		return;
1070 	}
1071 
1072 	xpt_async(ac_code, path, NULL);
1073 	xpt_free_path(path);
1074 }
1075 
/*
 * Force-complete every in-flight command after a firmware diag reset.
 * No replies will ever arrive for these commands, so each one is
 * completed with cm_reply set to NULL; completion handlers and sleepers
 * must cope with a NULL reply.  Any command that is neither completed,
 * woken, nor free is logged as an anomaly.
 * Called with the mps mutex held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): loop starts at 1 — command slot 0 appears to be
	 * reserved; confirm against the command allocation in mps.c. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on MPS_CM_FLAGS_COMPLETE rather than
		 * sleeping, so mark them complete directly. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Synchronous submitters sleep on the command itself. */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1121 
/*
 * Bring the SAS layer back to its pre-discovery state after a firmware
 * diag reset: freeze the simq, tell CAM the bus was reset, flush every
 * outstanding command, and invalidate all cached device handles so that
 * rediscovery (triggered later by port enable) can assign fresh ones.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sc->sassc->sim, 1);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/*
	 * The simq was explicitly frozen above, so set the refcount to 0.
	 * The simq will be explicitly released after port enable completes.
	 */
	sc->sassc->startup_refcount = 0;

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		/* Outstanding commands here indicate something was not
		 * cleaned up by mpssas_complete_all_commands() above. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1171 
1172 static void
1173 mpssas_tm_timeout(void *data)
1174 {
1175 	struct mps_command *tm = data;
1176 	struct mps_softc *sc = tm->cm_sc;
1177 
1178 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1179 
1180 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1181 	    "task mgmt %p timed out\n", tm);
1182 	mps_reinit(sc);
1183 }
1184 
1185 static void
1186 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1187 {
1188 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1189 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1190 	unsigned int cm_count = 0;
1191 	struct mps_command *cm;
1192 	struct mpssas_target *targ;
1193 
1194 	callout_stop(&tm->cm_callout);
1195 
1196 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1197 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1198 	targ = tm->cm_targ;
1199 
1200 	/*
1201 	 * Currently there should be no way we can hit this case.  It only
1202 	 * happens when we have a failure to allocate chain frames, and
1203 	 * task management commands don't have S/G lists.
1204 	 * XXXSL So should it be an assertion?
1205 	 */
1206 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1207 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1208 			   "This should not happen!\n", __func__, tm->cm_flags);
1209 		mpssas_free_tm(sc, tm);
1210 		return;
1211 	}
1212 
1213 	if (reply == NULL) {
1214 		mpssas_log_command(tm, MPS_RECOVERY,
1215 		    "NULL reset reply for tm %p\n", tm);
1216 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1217 			/* this completion was due to a reset, just cleanup */
1218 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1219 			targ->tm = NULL;
1220 			mpssas_free_tm(sc, tm);
1221 		}
1222 		else {
1223 			/* we should have gotten a reply. */
1224 			mps_reinit(sc);
1225 		}
1226 		return;
1227 	}
1228 
1229 	mpssas_log_command(tm, MPS_RECOVERY,
1230 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1231 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1232 	    le32toh(reply->TerminationCount));
1233 
1234 	/* See if there are any outstanding commands for this LUN.
1235 	 * This could be made more efficient by using a per-LU data
1236 	 * structure of some sort.
1237 	 */
1238 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1239 		if (cm->cm_lun == tm->cm_lun)
1240 			cm_count++;
1241 	}
1242 
1243 	if (cm_count == 0) {
1244 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1245 		    "logical unit %u finished recovery after reset\n",
1246 		    tm->cm_lun, tm);
1247 
1248 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1249 		    tm->cm_lun);
1250 
1251 		/* we've finished recovery for this logical unit.  check and
1252 		 * see if some other logical unit has a timedout command
1253 		 * that needs to be processed.
1254 		 */
1255 		cm = TAILQ_FIRST(&targ->timedout_commands);
1256 		if (cm) {
1257 			mpssas_send_abort(sc, tm, cm);
1258 		}
1259 		else {
1260 			targ->tm = NULL;
1261 			mpssas_free_tm(sc, tm);
1262 		}
1263 	}
1264 	else {
1265 		/* if we still have commands for this LUN, the reset
1266 		 * effectively failed, regardless of the status reported.
1267 		 * Escalate to a target reset.
1268 		 */
1269 		mpssas_log_command(tm, MPS_RECOVERY,
1270 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1271 		    tm, cm_count);
1272 		mpssas_send_reset(sc, tm,
1273 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1274 	}
1275 }
1276 
/*
 * Completion handler for a target reset TM.  If the target has no
 * outstanding commands, recovery is complete: announce the reset to CAM
 * and free the TM.  If commands survived the target reset, the reset
 * effectively failed and the only escalation left is a full controller
 * reinit.  A NULL reply is only expected when a diag reset completed
 * the command; anything else also forces a reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so disarm the recovery watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1349 
1350 #define MPS_RESET_TIMEOUT 30
1351 
1352 static int
1353 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1354 {
1355 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1356 	struct mpssas_target *target;
1357 	int err;
1358 
1359 	target = tm->cm_targ;
1360 	if (target->handle == 0) {
1361 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1362 		    __func__, target->tid);
1363 		return -1;
1364 	}
1365 
1366 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1367 	req->DevHandle = htole16(target->handle);
1368 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1369 	req->TaskType = type;
1370 
1371 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1372 		/* XXX Need to handle invalid LUNs */
1373 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1374 		tm->cm_targ->logical_unit_resets++;
1375 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1376 		    "sending logical unit reset\n");
1377 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1378 	}
1379 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1380 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1381 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1382 		tm->cm_targ->target_resets++;
1383 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1384 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1385 		    "sending target reset\n");
1386 		tm->cm_complete = mpssas_target_reset_complete;
1387 	}
1388 	else {
1389 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1390 		return -1;
1391 	}
1392 
1393 	tm->cm_data = NULL;
1394 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1395 	tm->cm_complete_data = (void *)tm;
1396 
1397 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1398 	    mpssas_tm_timeout, tm);
1399 
1400 	err = mps_map_command(sc, tm);
1401 	if (err)
1402 		mpssas_log_command(tm, MPS_RECOVERY,
1403 		    "error %d sending reset type %u\n",
1404 		    err, type);
1405 
1406 	return err;
1407 }
1408 
1409 
/*
 * Completion handler for an ABORT_TASK TM.  The abort is judged by
 * whether the timed-out command itself completed: if the target's
 * timedout list is empty, recovery is done; if the head of the list is
 * a different command, this abort worked and the next one is issued;
 * if the same command is still at the head, the abort failed and we
 * escalate to a logical unit reset.  A NULL reply is only expected
 * when a diag reset completed the command.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so disarm the recovery watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* If the aborted command completed, mpssas_scsiio_complete has
	 * removed it from the timedout list by now. */
	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1491 
1492 #define MPS_ABORT_TIMEOUT 5
1493 
1494 static int
1495 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1496 {
1497 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1498 	struct mpssas_target *targ;
1499 	int err;
1500 
1501 	targ = cm->cm_targ;
1502 	if (targ->handle == 0) {
1503 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1504 		    __func__, cm->cm_ccb->ccb_h.target_id);
1505 		return -1;
1506 	}
1507 
1508 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1509 	    "Aborting command %p\n", cm);
1510 
1511 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1512 	req->DevHandle = htole16(targ->handle);
1513 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1514 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1515 
1516 	/* XXX Need to handle invalid LUNs */
1517 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1518 
1519 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1520 
1521 	tm->cm_data = NULL;
1522 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1523 	tm->cm_complete = mpssas_abort_complete;
1524 	tm->cm_complete_data = (void *)tm;
1525 	tm->cm_targ = cm->cm_targ;
1526 	tm->cm_lun = cm->cm_lun;
1527 
1528 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1529 	    mpssas_tm_timeout, tm);
1530 
1531 	targ->aborts++;
1532 
1533 	err = mps_map_command(sc, tm);
1534 	if (err)
1535 		mpssas_log_command(tm, MPS_RECOVERY,
1536 		    "error %d sending abort for cm %p SMID %u\n",
1537 		    err, cm, req->TaskMID);
1538 	return err;
1539 }
1540 
1541 
1542 static void
1543 mpssas_scsiio_timeout(void *data)
1544 {
1545 	struct mps_softc *sc;
1546 	struct mps_command *cm;
1547 	struct mpssas_target *targ;
1548 
1549 	cm = (struct mps_command *)data;
1550 	sc = cm->cm_sc;
1551 
1552 	MPS_FUNCTRACE(sc);
1553 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1554 
1555 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1556 
1557 	/*
1558 	 * Run the interrupt handler to make sure it's not pending.  This
1559 	 * isn't perfect because the command could have already completed
1560 	 * and been re-used, though this is unlikely.
1561 	 */
1562 	mps_intr_locked(sc);
1563 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1564 		mpssas_log_command(cm, MPS_XINFO,
1565 		    "SCSI command %p almost timed out\n", cm);
1566 		return;
1567 	}
1568 
1569 	if (cm->cm_ccb == NULL) {
1570 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1571 		return;
1572 	}
1573 
1574 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1575 	    cm, cm->cm_ccb);
1576 
1577 	targ = cm->cm_targ;
1578 	targ->timeouts++;
1579 
1580 	/* XXX first, check the firmware state, to see if it's still
1581 	 * operational.  if not, do a diag reset.
1582 	 */
1583 
1584 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1585 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1586 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1587 
1588 	if (targ->tm != NULL) {
1589 		/* target already in recovery, just queue up another
1590 		 * timedout command to be processed later.
1591 		 */
1592 		mps_dprint(sc, MPS_RECOVERY,
1593 		    "queued timedout cm %p for processing by tm %p\n",
1594 		    cm, targ->tm);
1595 	}
1596 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1597 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1598 		    cm, targ->tm);
1599 
1600 		/* start recovery by aborting the first timedout command */
1601 		mpssas_send_abort(sc, targ->tm, cm);
1602 	}
1603 	else {
1604 		/* XXX queue this target up for recovery once a TM becomes
1605 		 * available.  The firmware only has a limited number of
1606 		 * HighPriority credits for the high priority requests used
1607 		 * for task management, and we ran out.
1608 		 *
1609 		 * Isilon: don't worry about this for now, since we have
1610 		 * more credits than disks in an enclosure, and limit
1611 		 * ourselves to one TM per target for recovery.
1612 		 */
1613 		mps_dprint(sc, MPS_RECOVERY,
1614 		    "timedout cm %p failed to allocate a tm\n", cm);
1615 	}
1616 
1617 }
1618 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate the CCB into an MPI2 SCSI_IO request
 * (direction, tagging, CDB, optional EEDP protection fields), arm the
 * per-command timeout, and submit it to the firmware.  Completion is
 * handled asynchronously by mpssas_scsiio_complete.
 * Called with the mps mutex held (asserted below).
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is gone (or never existed). */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not addressable via plain SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Out of commands: freeze the simq and ask CAM to requeue. */
	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI2 SCSI I/O request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* The CDB may be inline in the CCB or referenced by pointer. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		/* lun is NULL here when the loop found no matching LUN. */
		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of a 16-byte CDB,
				 * byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data is mapped later from the CCB during busdma setup. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* ccb_h.timeout is in milliseconds; the callout ticks in hz. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1868 
1869 static void
1870 mps_response_code(struct mps_softc *sc, u8 response_code)
1871 {
1872         char *desc;
1873 
1874         switch (response_code) {
1875         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1876                 desc = "task management request completed";
1877                 break;
1878         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1879                 desc = "invalid frame";
1880                 break;
1881         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1882                 desc = "task management request not supported";
1883                 break;
1884         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1885                 desc = "task management request failed";
1886                 break;
1887         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1888                 desc = "task management request succeeded";
1889                 break;
1890         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1891                 desc = "invalid lun";
1892                 break;
1893         case 0xA:
1894                 desc = "overlapped tag attempted";
1895                 break;
1896         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1897                 desc = "task queued, however not sent to target";
1898                 break;
1899         default:
1900                 desc = "unknown";
1901                 break;
1902         }
1903 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1904                 response_code, desc);
1905 }
1906 /**
1907  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1908  */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* SCSI state flag names are accumulated in the softc scratch buffer. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * NOTE(review): this specific IOC log info code is deliberately
	 * suppressed -- presumably a known/noisy condition; confirm against
	 * the LSI IOC log info documentation.
	 */
	if (log_info == 0x31170000)
		return;

	/* Decode the masked IOCStatus into a human-readable string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Decode the SCSI status byte into a human-readable string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of the SCSIState flag names in the
	 * scratch buffer.  When no flags are set, point at a literal single
	 * space instead; none of the strcat branches below can execute in
	 * that case, so the string literal is never written to.
	 * Assumes sc->tmp_string is large enough to hold every flag name
	 * concatenated -- TODO confirm its size.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the autosense data when extra-info debugging is enabled. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode the SAS response code only when the reply marks it valid. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_response_code(sc,response_bytes[0]);
	}
}
2059 
2060 static void
2061 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2062 {
2063 	MPI2_SCSI_IO_REPLY *rep;
2064 	union ccb *ccb;
2065 	struct ccb_scsiio *csio;
2066 	struct mpssas_softc *sassc;
2067 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2068 	u8 *TLR_bits, TLR_on;
2069 	int dir = 0, i;
2070 	u16 alloc_len;
2071 
2072 	MPS_FUNCTRACE(sc);
2073 	mps_dprint(sc, MPS_TRACE,
2074 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2075 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2076 	    cm->cm_targ->outstanding);
2077 
2078 	callout_stop(&cm->cm_callout);
2079 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2080 
2081 	sassc = sc->sassc;
2082 	ccb = cm->cm_complete_data;
2083 	csio = &ccb->csio;
2084 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2085 	/*
2086 	 * XXX KDM if the chain allocation fails, does it matter if we do
2087 	 * the sync and unload here?  It is simpler to do it in every case,
2088 	 * assuming it doesn't cause problems.
2089 	 */
2090 	if (cm->cm_data != NULL) {
2091 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2092 			dir = BUS_DMASYNC_POSTREAD;
2093 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2094 			dir = BUS_DMASYNC_POSTWRITE;
2095 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2096 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2097 	}
2098 
2099 	cm->cm_targ->completed++;
2100 	cm->cm_targ->outstanding--;
2101 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2102 	ccb->ccb_h.status |= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2103 
2104 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2105 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2106 		if (cm->cm_reply != NULL)
2107 			mpssas_log_command(cm, MPS_RECOVERY,
2108 			    "completed timedout cm %p ccb %p during recovery "
2109 			    "ioc %x scsi %x state %x xfer %u\n",
2110 			    cm, cm->cm_ccb,
2111 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2112 			    le32toh(rep->TransferCount));
2113 		else
2114 			mpssas_log_command(cm, MPS_RECOVERY,
2115 			    "completed timedout cm %p ccb %p during recovery\n",
2116 			    cm, cm->cm_ccb);
2117 	} else if (cm->cm_targ->tm != NULL) {
2118 		if (cm->cm_reply != NULL)
2119 			mpssas_log_command(cm, MPS_RECOVERY,
2120 			    "completed cm %p ccb %p during recovery "
2121 			    "ioc %x scsi %x state %x xfer %u\n",
2122 			    cm, cm->cm_ccb,
2123 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2124 			    le32toh(rep->TransferCount));
2125 		else
2126 			mpssas_log_command(cm, MPS_RECOVERY,
2127 			    "completed cm %p ccb %p during recovery\n",
2128 			    cm, cm->cm_ccb);
2129 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2130 		mpssas_log_command(cm, MPS_RECOVERY,
2131 		    "reset completed cm %p ccb %p\n",
2132 		    cm, cm->cm_ccb);
2133 	}
2134 
2135 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2136 		/*
2137 		 * We ran into an error after we tried to map the command,
2138 		 * so we're getting a callback without queueing the command
2139 		 * to the hardware.  So we set the status here, and it will
2140 		 * be retained below.  We'll go through the "fast path",
2141 		 * because there can be no reply when we haven't actually
2142 		 * gone out to the hardware.
2143 		 */
2144 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2145 
2146 		/*
2147 		 * Currently the only error included in the mask is
2148 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2149 		 * chain frames.  We need to freeze the queue until we get
2150 		 * a command that completed without this error, which will
2151 		 * hopefully have some chain frames attached that we can
2152 		 * use.  If we wanted to get smarter about it, we would
2153 		 * only unfreeze the queue in this condition when we're
2154 		 * sure that we're getting some chain frames back.  That's
2155 		 * probably unnecessary.
2156 		 */
2157 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2158 			xpt_freeze_simq(sassc->sim, 1);
2159 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2160 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2161 				   "freezing SIM queue\n");
2162 		}
2163 	}
2164 
2165 	/* Take the fast path to completion */
2166 	if (cm->cm_reply == NULL) {
2167 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2168 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2169 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2170 			else {
2171 				ccb->ccb_h.status = CAM_REQ_CMP;
2172 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2173 			}
2174 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2175 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2176 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2177 				mps_dprint(sc, MPS_XINFO,
2178 				    "Unfreezing SIM queue\n");
2179 			}
2180 		}
2181 
2182 		/*
2183 		 * There are two scenarios where the status won't be
2184 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2185 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2186 		 */
2187 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2188 			/*
2189 			 * Freeze the dev queue so that commands are
2190 			 * executed in the correct order with after error
2191 			 * recovery.
2192 			 */
2193 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2194 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2195 		}
2196 		mps_free_command(sc, cm);
2197 		xpt_done(ccb);
2198 		return;
2199 	}
2200 
2201 	mpssas_log_command(cm, MPS_XINFO,
2202 	    "ioc %x scsi %x state %x xfer %u\n",
2203 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2204 	    le32toh(rep->TransferCount));
2205 
2206 	/*
2207 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2208 	 * Volume if an error occurred (normal I/O retry).  Use the original
2209 	 * CCB, but set a flag that this will be a retry so that it's sent to
2210 	 * the original volume.  Free the command but reuse the CCB.
2211 	 */
2212 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2213 		mps_free_command(sc, cm);
2214 		ccb->ccb_h.status = MPS_WD_RETRY;
2215 		mpssas_action_scsiio(sassc, ccb);
2216 		return;
2217 	}
2218 
2219 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2220 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2221 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2222 		/* FALLTHROUGH */
2223 	case MPI2_IOCSTATUS_SUCCESS:
2224 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2225 
2226 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2227 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2228 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2229 
2230 		/* Completion failed at the transport level. */
2231 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2232 		    MPI2_SCSI_STATE_TERMINATED)) {
2233 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2234 			break;
2235 		}
2236 
2237 		/* In a modern packetized environment, an autosense failure
2238 		 * implies that there's not much else that can be done to
2239 		 * recover the command.
2240 		 */
2241 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2242 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2243 			break;
2244 		}
2245 
2246 		/*
2247 		 * CAM doesn't care about SAS Response Info data, but if this is
2248 		 * the state check if TLR should be done.  If not, clear the
2249 		 * TLR_bits for the target.
2250 		 */
2251 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2252 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2253 		    MPS_SCSI_RI_INVALID_FRAME)) {
2254 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2255 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2256 		}
2257 
2258 		/*
2259 		 * Intentionally override the normal SCSI status reporting
2260 		 * for these two cases.  These are likely to happen in a
2261 		 * multi-initiator environment, and we want to make sure that
2262 		 * CAM retries these commands rather than fail them.
2263 		 */
2264 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2265 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2266 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2267 			break;
2268 		}
2269 
2270 		/* Handle normal status and sense */
2271 		csio->scsi_status = rep->SCSIStatus;
2272 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2273 			ccb->ccb_h.status = CAM_REQ_CMP;
2274 		else
2275 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2276 
2277 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2278 			int sense_len, returned_sense_len;
2279 
2280 			returned_sense_len = min(le32toh(rep->SenseCount),
2281 			    sizeof(struct scsi_sense_data));
2282 			if (returned_sense_len < ccb->csio.sense_len)
2283 				ccb->csio.sense_resid = ccb->csio.sense_len -
2284 					returned_sense_len;
2285 			else
2286 				ccb->csio.sense_resid = 0;
2287 
2288 			sense_len = min(returned_sense_len,
2289 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2290 			bzero(&ccb->csio.sense_data,
2291 			      sizeof(ccb->csio.sense_data));
2292 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2293 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2294 		}
2295 
2296 		/*
2297 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2298 		 * and it's page code 0 (Supported Page List), and there is
2299 		 * inquiry data, and this is for a sequential access device, and
2300 		 * the device is an SSP target, and TLR is supported by the
2301 		 * controller, turn the TLR_bits value ON if page 0x90 is
2302 		 * supported.
2303 		 */
2304 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2305 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2306 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2307 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2308 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2309 		    T_SEQUENTIAL) && (sc->control_TLR) &&
2310 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2311 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2312 			vpd_list = (struct scsi_vpd_supported_page_list *)
2313 			    csio->data_ptr;
2314 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2315 			    TLR_bits;
2316 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2317 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2318 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2319 			    csio->cdb_io.cdb_bytes[4];
2320 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2321 				if (vpd_list->list[i] == 0x90) {
2322 					*TLR_bits = TLR_on;
2323 					break;
2324 				}
2325 			}
2326 		}
2327 		break;
2328 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2329 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2330 		/*
2331 		 * If devinfo is 0 this will be a volume.  In that case don't
2332 		 * tell CAM that the volume is not there.  We want volumes to
2333 		 * be enumerated until they are deleted/removed, not just
2334 		 * failed.
2335 		 */
2336 		if (cm->cm_targ->devinfo == 0)
2337 			ccb->ccb_h.status = CAM_REQ_CMP;
2338 		else
2339 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2340 		break;
2341 	case MPI2_IOCSTATUS_INVALID_SGL:
2342 		mps_print_scsiio_cmd(sc, cm);
2343 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2344 		break;
2345 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2346 		/*
2347 		 * This is one of the responses that comes back when an I/O
2348 		 * has been aborted.  If it is because of a timeout that we
2349 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2350 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2351 		 * command is the same (it gets retried, subject to the
2352 		 * retry counter), the only difference is what gets printed
2353 		 * on the console.
2354 		 */
2355 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2356 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2357 		else
2358 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2359 		break;
2360 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2361 		/* resid is ignored for this condition */
2362 		csio->resid = 0;
2363 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2364 		break;
2365 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2366 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2367 		/*
2368 		 * Since these are generally external (i.e. hopefully
2369 		 * transient transport-related) errors, retry these without
2370 		 * decrementing the retry count.
2371 		 */
2372 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2373 		mpssas_log_command(cm, MPS_INFO,
2374 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2375 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2376 		    le32toh(rep->TransferCount));
2377 		break;
2378 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2379 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2380 	case MPI2_IOCSTATUS_INVALID_VPID:
2381 	case MPI2_IOCSTATUS_INVALID_FIELD:
2382 	case MPI2_IOCSTATUS_INVALID_STATE:
2383 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2384 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2385 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2386 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2387 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2388 	default:
2389 		mpssas_log_command(cm, MPS_XINFO,
2390 		    "completed ioc %x scsi %x state %x xfer %u\n",
2391 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2392 		    le32toh(rep->TransferCount));
2393 		csio->resid = cm->cm_length;
2394 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2395 		break;
2396 	}
2397 
2398 	mps_sc_failed_io_info(sc,csio,rep);
2399 
2400 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2401 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2402 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2403 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2404 		    "unfreezing SIM queue\n");
2405 	}
2406 
2407 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2408 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2409 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2410 	}
2411 
2412 	mps_free_command(sc, cm);
2413 	xpt_done(ccb);
2414 }
2415 
2416 /* All Request reached here are Endian safe */
2417 static void
2418 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2419     union ccb *ccb) {
2420 	pMpi2SCSIIORequest_t	pIO_req;
2421 	struct mps_softc	*sc = sassc->sc;
2422 	uint64_t		virtLBA;
2423 	uint32_t		physLBA, stripe_offset, stripe_unit;
2424 	uint32_t		io_size, column;
2425 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2426 
2427 	/*
2428 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2429 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2430 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2431 	 * bit different than the 10/16 CDBs, handle them separately.
2432 	 */
2433 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2434 	CDB = pIO_req->CDB.CDB32;
2435 
2436 	/*
2437 	 * Handle 6 byte CDBs.
2438 	 */
2439 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2440 	    (CDB[0] == WRITE_6))) {
2441 		/*
2442 		 * Get the transfer size in blocks.
2443 		 */
2444 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2445 
2446 		/*
2447 		 * Get virtual LBA given in the CDB.
2448 		 */
2449 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2450 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2451 
2452 		/*
2453 		 * Check that LBA range for I/O does not exceed volume's
2454 		 * MaxLBA.
2455 		 */
2456 		if ((virtLBA + (uint64_t)io_size - 1) <=
2457 		    sc->DD_max_lba) {
2458 			/*
2459 			 * Check if the I/O crosses a stripe boundary.  If not,
2460 			 * translate the virtual LBA to a physical LBA and set
2461 			 * the DevHandle for the PhysDisk to be used.  If it
2462 			 * does cross a boundry, do normal I/O.  To get the
2463 			 * right DevHandle to use, get the map number for the
2464 			 * column, then use that map number to look up the
2465 			 * DevHandle of the PhysDisk.
2466 			 */
2467 			stripe_offset = (uint32_t)virtLBA &
2468 			    (sc->DD_stripe_size - 1);
2469 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2470 				physLBA = (uint32_t)virtLBA >>
2471 				    sc->DD_stripe_exponent;
2472 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2473 				column = physLBA % sc->DD_num_phys_disks;
2474 				pIO_req->DevHandle =
2475 				    htole16(sc->DD_column_map[column].dev_handle);
2476 				/* ???? Is this endian safe*/
2477 				cm->cm_desc.SCSIIO.DevHandle =
2478 				    pIO_req->DevHandle;
2479 
2480 				physLBA = (stripe_unit <<
2481 				    sc->DD_stripe_exponent) + stripe_offset;
2482 				ptrLBA = &pIO_req->CDB.CDB32[1];
2483 				physLBA_byte = (uint8_t)(physLBA >> 16);
2484 				*ptrLBA = physLBA_byte;
2485 				ptrLBA = &pIO_req->CDB.CDB32[2];
2486 				physLBA_byte = (uint8_t)(physLBA >> 8);
2487 				*ptrLBA = physLBA_byte;
2488 				ptrLBA = &pIO_req->CDB.CDB32[3];
2489 				physLBA_byte = (uint8_t)physLBA;
2490 				*ptrLBA = physLBA_byte;
2491 
2492 				/*
2493 				 * Set flag that Direct Drive I/O is
2494 				 * being done.
2495 				 */
2496 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2497 			}
2498 		}
2499 		return;
2500 	}
2501 
2502 	/*
2503 	 * Handle 10, 12 or 16 byte CDBs.
2504 	 */
2505 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2506 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2507 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2508 	    (CDB[0] == WRITE_12))) {
2509 		/*
2510 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2511 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2512 		 * the else section.  10-byte and 12-byte CDB's are OK.
2513 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2514 		 * ready to accept 12byte CDB for Direct IOs.
2515 		 */
2516 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2517 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2518 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2519 			/*
2520 			 * Get the transfer size in blocks.
2521 			 */
2522 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2523 
2524 			/*
2525 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2526 			 * LBA in the CDB depending on command.
2527 			 */
2528 			lba_idx = ((CDB[0] == READ_12) ||
2529 				(CDB[0] == WRITE_12) ||
2530 				(CDB[0] == READ_10) ||
2531 				(CDB[0] == WRITE_10))? 2 : 6;
2532 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2533 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2534 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2535 			    (uint64_t)CDB[lba_idx + 3];
2536 
2537 			/*
2538 			 * Check that LBA range for I/O does not exceed volume's
2539 			 * MaxLBA.
2540 			 */
2541 			if ((virtLBA + (uint64_t)io_size - 1) <=
2542 			    sc->DD_max_lba) {
2543 				/*
2544 				 * Check if the I/O crosses a stripe boundary.
2545 				 * If not, translate the virtual LBA to a
2546 				 * physical LBA and set the DevHandle for the
2547 				 * PhysDisk to be used.  If it does cross a
2548 				 * boundry, do normal I/O.  To get the right
2549 				 * DevHandle to use, get the map number for the
2550 				 * column, then use that map number to look up
2551 				 * the DevHandle of the PhysDisk.
2552 				 */
2553 				stripe_offset = (uint32_t)virtLBA &
2554 				    (sc->DD_stripe_size - 1);
2555 				if ((stripe_offset + io_size) <=
2556 				    sc->DD_stripe_size) {
2557 					physLBA = (uint32_t)virtLBA >>
2558 					    sc->DD_stripe_exponent;
2559 					stripe_unit = physLBA /
2560 					    sc->DD_num_phys_disks;
2561 					column = physLBA %
2562 					    sc->DD_num_phys_disks;
2563 					pIO_req->DevHandle =
2564 					    htole16(sc->DD_column_map[column].
2565 					    dev_handle);
2566 					cm->cm_desc.SCSIIO.DevHandle =
2567 					    pIO_req->DevHandle;
2568 
2569 					physLBA = (stripe_unit <<
2570 					    sc->DD_stripe_exponent) +
2571 					    stripe_offset;
2572 					ptrLBA =
2573 					    &pIO_req->CDB.CDB32[lba_idx];
2574 					physLBA_byte = (uint8_t)(physLBA >> 24);
2575 					*ptrLBA = physLBA_byte;
2576 					ptrLBA =
2577 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2578 					physLBA_byte = (uint8_t)(physLBA >> 16);
2579 					*ptrLBA = physLBA_byte;
2580 					ptrLBA =
2581 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2582 					physLBA_byte = (uint8_t)(physLBA >> 8);
2583 					*ptrLBA = physLBA_byte;
2584 					ptrLBA =
2585 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2586 					physLBA_byte = (uint8_t)physLBA;
2587 					*ptrLBA = physLBA_byte;
2588 
2589 					/*
2590 					 * Set flag that Direct Drive I/O is
2591 					 * being done.
2592 					 */
2593 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2594 				}
2595 			}
2596 		} else {
2597 			/*
2598 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2599 			 * 0.  Get the transfer size in blocks.
2600 			 */
2601 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2602 
2603 			/*
2604 			 * Get virtual LBA.
2605 			 */
2606 			virtLBA = ((uint64_t)CDB[2] << 54) |
2607 			    ((uint64_t)CDB[3] << 48) |
2608 			    ((uint64_t)CDB[4] << 40) |
2609 			    ((uint64_t)CDB[5] << 32) |
2610 			    ((uint64_t)CDB[6] << 24) |
2611 			    ((uint64_t)CDB[7] << 16) |
2612 			    ((uint64_t)CDB[8] << 8) |
2613 			    (uint64_t)CDB[9];
2614 
2615 			/*
2616 			 * Check that LBA range for I/O does not exceed volume's
2617 			 * MaxLBA.
2618 			 */
2619 			if ((virtLBA + (uint64_t)io_size - 1) <=
2620 			    sc->DD_max_lba) {
2621 				/*
2622 				 * Check if the I/O crosses a stripe boundary.
2623 				 * If not, translate the virtual LBA to a
2624 				 * physical LBA and set the DevHandle for the
2625 				 * PhysDisk to be used.  If it does cross a
2626 				 * boundry, do normal I/O.  To get the right
2627 				 * DevHandle to use, get the map number for the
2628 				 * column, then use that map number to look up
2629 				 * the DevHandle of the PhysDisk.
2630 				 */
2631 				stripe_offset = (uint32_t)virtLBA &
2632 				    (sc->DD_stripe_size - 1);
2633 				if ((stripe_offset + io_size) <=
2634 				    sc->DD_stripe_size) {
2635 					physLBA = (uint32_t)(virtLBA >>
2636 					    sc->DD_stripe_exponent);
2637 					stripe_unit = physLBA /
2638 					    sc->DD_num_phys_disks;
2639 					column = physLBA %
2640 					    sc->DD_num_phys_disks;
2641 					pIO_req->DevHandle =
2642 					    htole16(sc->DD_column_map[column].
2643 					    dev_handle);
2644 					cm->cm_desc.SCSIIO.DevHandle =
2645 					    pIO_req->DevHandle;
2646 
2647 					physLBA = (stripe_unit <<
2648 					    sc->DD_stripe_exponent) +
2649 					    stripe_offset;
2650 
2651 					/*
2652 					 * Set upper 4 bytes of LBA to 0.  We
2653 					 * assume that the phys disks are less
2654 					 * than 2 TB's in size.  Then, set the
2655 					 * lower 4 bytes.
2656 					 */
2657 					pIO_req->CDB.CDB32[2] = 0;
2658 					pIO_req->CDB.CDB32[3] = 0;
2659 					pIO_req->CDB.CDB32[4] = 0;
2660 					pIO_req->CDB.CDB32[5] = 0;
2661 					ptrLBA = &pIO_req->CDB.CDB32[6];
2662 					physLBA_byte = (uint8_t)(physLBA >> 24);
2663 					*ptrLBA = physLBA_byte;
2664 					ptrLBA = &pIO_req->CDB.CDB32[7];
2665 					physLBA_byte = (uint8_t)(physLBA >> 16);
2666 					*ptrLBA = physLBA_byte;
2667 					ptrLBA = &pIO_req->CDB.CDB32[8];
2668 					physLBA_byte = (uint8_t)(physLBA >> 8);
2669 					*ptrLBA = physLBA_byte;
2670 					ptrLBA = &pIO_req->CDB.CDB32[9];
2671 					physLBA_byte = (uint8_t)physLBA;
2672 					*ptrLBA = physLBA_byte;
2673 
2674 					/*
2675 					 * Set flag that Direct Drive I/O is
2676 					 * being done.
2677 					 */
2678 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2679 				}
2680 			}
2681 		}
2682 	}
2683 }
2684 
2685 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands.  Validates the reply,
 * translates IOCStatus/SASStatus and the SMP frame result into a CAM
 * status, then (always, via the bailout path) syncs and unloads the DMA
 * map, frees the command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request's two 32-bit halves. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Both the IOC status and the SAS-level status must indicate success. */
	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame holds the function result code. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2749 
2750 static void
2751 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2752 {
2753 	struct mps_command *cm;
2754 	uint8_t *request, *response;
2755 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2756 	struct mps_softc *sc;
2757 	struct sglist *sg;
2758 	int error;
2759 
2760 	sc = sassc->sc;
2761 	sg = NULL;
2762 	error = 0;
2763 
2764 	/*
2765 	 * XXX We don't yet support physical addresses here.
2766 	 */
2767 	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2768 	case CAM_DATA_PADDR:
2769 	case CAM_DATA_SG_PADDR:
2770 		mps_dprint(sc, MPS_ERROR,
2771 			   "%s: physical addresses not supported\n", __func__);
2772 		ccb->ccb_h.status = CAM_REQ_INVALID;
2773 		xpt_done(ccb);
2774 		return;
2775 	case CAM_DATA_SG:
2776 		/*
2777 		 * The chip does not support more than one buffer for the
2778 		 * request or response.
2779 		 */
2780 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2781 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2782 			mps_dprint(sc, MPS_ERROR,
2783 				   "%s: multiple request or response "
2784 				   "buffer segments not supported for SMP\n",
2785 				   __func__);
2786 			ccb->ccb_h.status = CAM_REQ_INVALID;
2787 			xpt_done(ccb);
2788 			return;
2789 		}
2790 
2791 		/*
2792 		 * The CAM_SCATTER_VALID flag was originally implemented
2793 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2794 		 * We have two.  So, just take that flag to mean that we
2795 		 * might have S/G lists, and look at the S/G segment count
2796 		 * to figure out whether that is the case for each individual
2797 		 * buffer.
2798 		 */
2799 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2800 			bus_dma_segment_t *req_sg;
2801 
2802 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2803 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2804 		} else
2805 			request = ccb->smpio.smp_request;
2806 
2807 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2808 			bus_dma_segment_t *rsp_sg;
2809 
2810 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2811 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2812 		} else
2813 			response = ccb->smpio.smp_response;
2814 		break;
2815 	case CAM_DATA_VADDR:
2816 		request = ccb->smpio.smp_request;
2817 		response = ccb->smpio.smp_response;
2818 		break;
2819 	default:
2820 		ccb->ccb_h.status = CAM_REQ_INVALID;
2821 		xpt_done(ccb);
2822 		return;
2823 	}
2824 
2825 	cm = mps_alloc_command(sc);
2826 	if (cm == NULL) {
2827 		mps_dprint(sc, MPS_ERROR,
2828 		    "%s: cannot allocate command\n", __func__);
2829 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2830 		xpt_done(ccb);
2831 		return;
2832 	}
2833 
2834 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2835 	bzero(req, sizeof(*req));
2836 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2837 
2838 	/* Allow the chip to use any route to this SAS address. */
2839 	req->PhysicalPort = 0xff;
2840 
2841 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2842 	req->SGLFlags =
2843 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2844 
2845 	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2846 	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2847 
2848 	mpi_init_sge(cm, req, &req->SGL);
2849 
2850 	/*
2851 	 * Set up a uio to pass into mps_map_command().  This allows us to
2852 	 * do one map command, and one busdma call in there.
2853 	 */
2854 	cm->cm_uio.uio_iov = cm->cm_iovec;
2855 	cm->cm_uio.uio_iovcnt = 2;
2856 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2857 
2858 	/*
2859 	 * The read/write flag isn't used by busdma, but set it just in
2860 	 * case.  This isn't exactly accurate, either, since we're going in
2861 	 * both directions.
2862 	 */
2863 	cm->cm_uio.uio_rw = UIO_WRITE;
2864 
2865 	cm->cm_iovec[0].iov_base = request;
2866 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2867 	cm->cm_iovec[1].iov_base = response;
2868 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2869 
2870 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2871 			       cm->cm_iovec[1].iov_len;
2872 
2873 	/*
2874 	 * Trigger a warning message in mps_data_cb() for the user if we
2875 	 * wind up exceeding two S/G segments.  The chip expects one
2876 	 * segment for the request and another for the response.
2877 	 */
2878 	cm->cm_max_segs = 2;
2879 
2880 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2881 	cm->cm_complete = mpssas_smpio_complete;
2882 	cm->cm_complete_data = ccb;
2883 
2884 	/*
2885 	 * Tell the mapping code that we're using a uio, and that this is
2886 	 * an SMP passthrough request.  There is a little special-case
2887 	 * logic there (in mps_data_cb()) to handle the bidirectional
2888 	 * transfer.
2889 	 */
2890 	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2891 			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2892 
2893 	/* The chip data format is little endian. */
2894 	req->SASAddress.High = htole32(sasaddr >> 32);
2895 	req->SASAddress.Low = htole32(sasaddr);
2896 
2897 	/*
2898 	 * XXX Note that we don't have a timeout/abort mechanism here.
2899 	 * From the manual, it looks like task management requests only
2900 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2901 	 * have a mechanism to retry requests in the event of a chip reset
2902 	 * at least.  Hopefully the chip will insure that any errors short
2903 	 * of that are relayed back to the driver.
2904 	 */
2905 	error = mps_map_command(sc, cm);
2906 	if ((error != 0) && (error != EINPROGRESS)) {
2907 		mps_dprint(sc, MPS_ERROR,
2908 			   "%s: error %d returned from mps_map_command()\n",
2909 			   __func__, error);
2910 		goto bailout_error;
2911 	}
2912 
2913 	return;
2914 
2915 bailout_error:
2916 	mps_free_command(sc, cm);
2917 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2918 	xpt_done(ccb);
2919 	return;
2920 
2921 }
2922 
/*
 * Handle an SMP I/O CCB: figure out which SAS address should receive the
 * SMP request -- either the target itself, if it contains an embedded SMP
 * target, or the target's parent (normally the expander it is attached
 * to) -- and then hand the CCB off to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		/* A parent handle of 0 means we can't route the request. */
		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		/* The parent must itself be an SMP target (an expander). */
		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * New probe code: the parent's devinfo and SAS address are
		 * cached directly in the target structure.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	/* Give up if we still have no destination for the SMP request. */
	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3056 #endif //__FreeBSD_version >= 900026
3057 
3058 static void
3059 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3060 {
3061 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3062 	struct mps_softc *sc;
3063 	struct mps_command *tm;
3064 	struct mpssas_target *targ;
3065 
3066 	MPS_FUNCTRACE(sassc->sc);
3067 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3068 
3069 	sc = sassc->sc;
3070 	tm = mps_alloc_command(sc);
3071 	if (tm == NULL) {
3072 		mps_dprint(sc, MPS_ERROR,
3073 		    "comand alloc failure in mpssas_action_resetdev\n");
3074 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3075 		xpt_done(ccb);
3076 		return;
3077 	}
3078 
3079 	targ = &sassc->targets[ccb->ccb_h.target_id];
3080 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3081 	req->DevHandle = htole16(targ->handle);
3082 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3083 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3084 
3085 	/* SAS Hard Link Reset / SATA Link Reset */
3086 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3087 
3088 	tm->cm_data = NULL;
3089 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3090 	tm->cm_complete = mpssas_resetdev_complete;
3091 	tm->cm_complete_data = ccb;
3092 	tm->cm_targ = targ;
3093 	mps_map_command(sc, tm);
3094 }
3095 
3096 static void
3097 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3098 {
3099 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3100 	union ccb *ccb;
3101 
3102 	MPS_FUNCTRACE(sc);
3103 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3104 
3105 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3106 	ccb = tm->cm_complete_data;
3107 
3108 	/*
3109 	 * Currently there should be no way we can hit this case.  It only
3110 	 * happens when we have a failure to allocate chain frames, and
3111 	 * task management commands don't have S/G lists.
3112 	 */
3113 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3114 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3115 
3116 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3117 
3118 		mps_dprint(sc, MPS_ERROR,
3119 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3120 			   "This should not happen!\n", __func__, tm->cm_flags,
3121 			   req->DevHandle);
3122 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3123 		goto bailout;
3124 	}
3125 
3126 	mps_dprint(sc, MPS_XINFO,
3127 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3128 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3129 
3130 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3131 		ccb->ccb_h.status = CAM_REQ_CMP;
3132 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3133 		    CAM_LUN_WILDCARD);
3134 	}
3135 	else
3136 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3137 
3138 bailout:
3139 
3140 	mpssas_free_tm(sc, tm);
3141 	xpt_done(ccb);
3142 }
3143 
3144 static void
3145 mpssas_poll(struct cam_sim *sim)
3146 {
3147 	struct mpssas_softc *sassc;
3148 
3149 	sassc = cam_sim_softc(sim);
3150 
3151 	if (sassc->sc->mps_debug & MPS_TRACE) {
3152 		/* frequent debug messages during a panic just slow
3153 		 * everything down too much.
3154 		 */
3155 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3156 		sassc->sc->mps_debug &= ~MPS_TRACE;
3157 	}
3158 
3159 	mps_intr_locked(sassc->sc);
3160 }
3161 
/*
 * CAM async event callback.  On CAM stacks that provide
 * AC_ADVINFO_CHANGED, this watches for changes to the cached read
 * capacity data and updates the per-LUN EEDP state accordingly; on
 * older stacks it probes each newly found device for EEDP support via
 * mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* See if we already track this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we have seen this LUN; add it to the target. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Retrieve the transport layer's cached read capacity data
		 * for this path with an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * If the protection bit is set, record that this LUN is
		 * EEDP formatted along with its logical block length;
		 * otherwise clear the EEDP state.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Probe the newly discovered device for EEDP support. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3258 
3259 #if (__FreeBSD_version < 901503) || \
3260     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3261 static void
3262 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3263 		  struct ccb_getdev *cgd)
3264 {
3265 	struct mpssas_softc *sassc = sc->sassc;
3266 	struct ccb_scsiio *csio;
3267 	struct scsi_read_capacity_16 *scsi_cmd;
3268 	struct scsi_read_capacity_eedp *rcap_buf;
3269 	path_id_t pathid;
3270 	target_id_t targetid;
3271 	lun_id_t lunid;
3272 	union ccb *ccb;
3273 	struct cam_path *local_path;
3274 	struct mpssas_target *target;
3275 	struct mpssas_lun *lun;
3276 	uint8_t	found_lun;
3277 	char path_str[64];
3278 
3279 	sassc = sc->sassc;
3280 	pathid = cam_sim_path(sassc->sim);
3281 	targetid = xpt_path_target_id(path);
3282 	lunid = xpt_path_lun_id(path);
3283 
3284 	target = &sassc->targets[targetid];
3285 	if (target->handle == 0x0)
3286 		return;
3287 
3288 	/*
3289 	 * Determine if the device is EEDP capable.
3290 	 *
3291 	 * If this flag is set in the inquiry data,
3292 	 * the device supports protection information,
3293 	 * and must support the 16 byte read
3294 	 * capacity command, otherwise continue without
3295 	 * sending read cap 16
3296 	 */
3297 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3298 		return;
3299 
3300 	/*
3301 	 * Issue a READ CAPACITY 16 command.  This info
3302 	 * is used to determine if the LUN is formatted
3303 	 * for EEDP support.
3304 	 */
3305 	ccb = xpt_alloc_ccb_nowait();
3306 	if (ccb == NULL) {
3307 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3308 		    "for EEDP support.\n");
3309 		return;
3310 	}
3311 
3312 	if (xpt_create_path(&local_path, xpt_periph,
3313 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3314 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3315 		    "path for EEDP support\n");
3316 		xpt_free_ccb(ccb);
3317 		return;
3318 	}
3319 
3320 	/*
3321 	 * If LUN is already in list, don't create a new
3322 	 * one.
3323 	 */
3324 	found_lun = FALSE;
3325 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3326 		if (lun->lun_id == lunid) {
3327 			found_lun = TRUE;
3328 			break;
3329 		}
3330 	}
3331 	if (!found_lun) {
3332 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3333 		    M_NOWAIT | M_ZERO);
3334 		if (lun == NULL) {
3335 			mps_dprint(sc, MPS_ERROR,
3336 			    "Unable to alloc LUN for EEDP support.\n");
3337 			xpt_free_path(local_path);
3338 			xpt_free_ccb(ccb);
3339 			return;
3340 		}
3341 		lun->lun_id = lunid;
3342 		SLIST_INSERT_HEAD(&target->luns, lun,
3343 		    lun_link);
3344 	}
3345 
3346 	xpt_path_string(local_path, path_str, sizeof(path_str));
3347 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3348 	    path_str, target->handle);
3349 
3350 	/*
3351 	 * Issue a READ CAPACITY 16 command for the LUN.
3352 	 * The mpssas_read_cap_done function will load
3353 	 * the read cap info into the LUN struct.
3354 	 */
3355 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3356 	    M_MPT2, M_NOWAIT | M_ZERO);
3357 	if (rcap_buf == NULL) {
3358 		mps_dprint(sc, MPS_FAULT,
3359 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3360 		xpt_free_path(ccb->ccb_h.path);
3361 		xpt_free_ccb(ccb);
3362 		return;
3363 	}
3364 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3365 	csio = &ccb->csio;
3366 	csio->ccb_h.func_code = XPT_SCSI_IO;
3367 	csio->ccb_h.flags = CAM_DIR_IN;
3368 	csio->ccb_h.retry_count = 4;
3369 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3370 	csio->ccb_h.timeout = 60000;
3371 	csio->data_ptr = (uint8_t *)rcap_buf;
3372 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3373 	csio->sense_len = MPS_SENSE_LEN;
3374 	csio->cdb_len = sizeof(*scsi_cmd);
3375 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3376 
3377 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3378 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3379 	scsi_cmd->opcode = 0x9E;
3380 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3381 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3382 
3383 	ccb->ccb_h.ppriv_ptr1 = sassc;
3384 	xpt_action(ccb);
3385 }
3386 
/*
 * Completion callback for the internally generated READ CAPACITY(16)
 * issued by mpssas_check_eedp().  Records the LUN's EEDP state and block
 * size in the target's LUN list, then frees the data buffer, path, and
 * CCB that mpssas_check_eedp() allocated.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq here because this SCSI
	 * command was generated internally by the driver rather than by a
	 * CAM peripheral.  This is currently the only place the driver
	 * issues a SCSI command internally; if more such commands are
	 * added in the future, they must release the devq themselves too,
	 * since internally generated commands never return to cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Low bit of the protect byte flags a PI-formatted LUN. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3454 #endif /* (__FreeBSD_version < 901503) || \
3455           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3456 
3457 int
3458 mpssas_startup(struct mps_softc *sc)
3459 {
3460 	struct mpssas_softc *sassc;
3461 
3462 	/*
3463 	 * Send the port enable message and set the wait_for_port_enable flag.
3464 	 * This flag helps to keep the simq frozen until all discovery events
3465 	 * are processed.
3466 	 */
3467 	sassc = sc->sassc;
3468 	mpssas_startup_increment(sassc);
3469 	sc->wait_for_port_enable = 1;
3470 	mpssas_send_portenable(sc);
3471 	return (0);
3472 }
3473 
3474 static int
3475 mpssas_send_portenable(struct mps_softc *sc)
3476 {
3477 	MPI2_PORT_ENABLE_REQUEST *request;
3478 	struct mps_command *cm;
3479 
3480 	MPS_FUNCTRACE(sc);
3481 
3482 	if ((cm = mps_alloc_command(sc)) == NULL)
3483 		return (EBUSY);
3484 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3485 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3486 	request->MsgFlags = 0;
3487 	request->VP_ID = 0;
3488 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3489 	cm->cm_complete = mpssas_portenable_complete;
3490 	cm->cm_data = NULL;
3491 	cm->cm_sge = NULL;
3492 
3493 	mps_map_command(sc, cm);
3494 	mps_dprint(sc, MPS_XINFO,
3495 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3496 	    cm, cm->cm_req, cm->cm_complete);
3497 	return (0);
3498 }
3499 
3500 static void
3501 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3502 {
3503 	MPI2_PORT_ENABLE_REPLY *reply;
3504 	struct mpssas_softc *sassc;
3505 
3506 	MPS_FUNCTRACE(sc);
3507 	sassc = sc->sassc;
3508 
3509 	/*
3510 	 * Currently there should be no way we can hit this case.  It only
3511 	 * happens when we have a failure to allocate chain frames, and
3512 	 * port enable commands don't have S/G lists.
3513 	 */
3514 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3515 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3516 			   "This should not happen!\n", __func__, cm->cm_flags);
3517 	}
3518 
3519 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3520 	if (reply == NULL)
3521 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3522 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3523 	    MPI2_IOCSTATUS_SUCCESS)
3524 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3525 
3526 	mps_free_command(sc, cm);
3527 	if (sc->mps_ich.ich_arg != NULL) {
3528 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3529 		config_intrhook_disestablish(&sc->mps_ich);
3530 		sc->mps_ich.ich_arg = NULL;
3531 	}
3532 
3533 	/*
3534 	 * Get WarpDrive info after discovery is complete but before the scan
3535 	 * starts.  At this point, all devices are ready to be exposed to the
3536 	 * OS.  If devices should be hidden instead, take them out of the
3537 	 * 'targets' array before the scan.  The devinfo for a disk will have
3538 	 * some info and a volume's will be 0.  Use that to remove disks.
3539 	 */
3540 	mps_wd_config_pages(sc);
3541 
3542 	/*
3543 	 * Done waiting for port enable to complete.  Decrement the refcount.
3544 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3545 	 * take place.  Since the simq was explicitly frozen before port
3546 	 * enable, it must be explicitly released here to keep the
3547 	 * freeze/release count in sync.
3548 	 */
3549 	sc->wait_for_port_enable = 0;
3550 	sc->port_enable_complete = 1;
3551 	wakeup(&sc->port_enable_complete);
3552 	mpssas_startup_decrement(sassc);
3553 	xpt_release_simq(sassc->sim, 1);
3554 }
3555 
3556