xref: /freebsd/sys/dev/mps/mps_sas.c (revision 36c53d67007eda24e9d672ca09d30672547fce6c)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
88 #define MPSSAS_DISCOVERY_TIMEOUT	20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the full 8-bit CDB opcode; each initializer row covers 16
 * opcodes, so row N holds opcodes 0xN0-0xNF.  A non-zero entry is the
 * MPI2 EEDP flag value to apply for that opcode; zero means the opcode
 * gets no EEDP handling.  Opcode names below are per SBC/SPC.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 */
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE&VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 */
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 */
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE&VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE&VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0 */
};
115 
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117 
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126     struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 			       uint64_t sasaddr);
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/*
	 * Reference-count discovery-related activity.  The count is only
	 * maintained while MPSSAS_IN_STARTUP is set (attach / diag reset).
	 * The first increment freezes the simq (and, on CAM versions that
	 * support it, holds the boot process) so that commands are not
	 * failed before discovery has found the device handles.
	 */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192 
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/*
	 * Counterpart of mpssas_startup_increment().  When the last
	 * discovery reference is dropped, clear MPSSAS_IN_STARTUP, release
	 * the simq, and either release the boot hold (newer CAM) or kick a
	 * wildcard rescan so CAM picks up the discovered topology.
	 */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
217 
218 /* LSI's firmware requires us to stop sending commands when we're doing task
219  * management, so refcount the TMs and keep the simq frozen when any are in
220  * use.
221  */
222 struct mps_command *
223 mpssas_alloc_tm(struct mps_softc *sc)
224 {
225 	struct mps_command *tm;
226 
227 	MPS_FUNCTRACE(sc);
228 	tm = mps_alloc_high_priority_command(sc);
229 	if (tm != NULL) {
230 		if (sc->sassc->tm_count++ == 0) {
231 			mps_dprint(sc, MPS_RECOVERY,
232 			    "%s freezing simq\n", __func__);
233 			xpt_freeze_simq(sc->sassc->sim, 1);
234 		}
235 		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
236 		    sc->sassc->tm_count);
237 	}
238 	return tm;
239 }
240 
/*
 * Return a task-management command to the high-priority pool and drop
 * the TM refcount; the simq is released when the last TM completes.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	mps_dprint(sc, MPS_TRACE, "%s", __func__);
	/* Tolerate NULL so callers may pass a failed allocation. */
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	mps_free_high_priority_command(sc, tm);
}
261 
/*
 * Schedule an asynchronous CAM rescan of one target, or of the whole
 * bus when targ is NULL.  Allocation failures are logged and the rescan
 * is silently dropped (best-effort; a later event may retry).
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target id is the index into the sassc->targets array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target => full-bus scan; otherwise scan just the target. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	/* xpt_rescan() takes ownership of the CCB and completes it. */
	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	xpt_rescan(ccb);
}
301 
/*
 * Debug-log a printf-style message prefixed with identifying detail for
 * the given command: CAM path (or a "noperiph" sim/target/lun tuple when
 * no CCB is attached), the SCSI CDB and transfer length for SCSI I/O,
 * and the command's SMID.  No-op unless 'level' is enabled in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB attached; identify by sim/bus/target/lun instead. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
347 
348 
349 static void
350 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
351 {
352 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
353 	struct mpssas_target *targ;
354 	uint16_t handle;
355 
356 	MPS_FUNCTRACE(sc);
357 
358 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
359 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
360 	targ = tm->cm_targ;
361 
362 	if (reply == NULL) {
363 		/* XXX retry the remove after the diag reset completes? */
364 		mps_dprint(sc, MPS_FAULT,
365 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
366 		mpssas_free_tm(sc, tm);
367 		return;
368 	}
369 
370 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
371 		mps_dprint(sc, MPS_FAULT,
372 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
373 		   reply->IOCStatus, handle);
374 		mpssas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	mps_dprint(sc, MPS_XINFO,
379 	    "Reset aborted %u commands\n", reply->TerminationCount);
380 	mps_free_reply(sc, tm->cm_reply_data);
381 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
385 
386 	/*
387 	 * Don't clear target if remove fails because things will get confusing.
388 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 	 * this target id if possible, and so we can assign the same target id
390 	 * to this device if it comes back in the future.
391 	 */
392 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
393 		targ = tm->cm_targ;
394 		targ->handle = 0x0;
395 		targ->encl_handle = 0x0;
396 		targ->encl_slot = 0x0;
397 		targ->exp_dev_handle = 0x0;
398 		targ->phy_num = 0x0;
399 		targ->linkrate = 0x0;
400 		targ->devinfo = 0x0;
401 		targ->flags = 0x0;
402 	}
403 
404 	mpssas_free_tm(sc, tm);
405 }
406 
407 
408 /*
409  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
410  * Otherwise Volume Delete is same as Bare Drive Removal.
411  */
412 void
413 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
414 {
415 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
416 	struct mps_softc *sc;
417 	struct mps_command *cm;
418 	struct mpssas_target *targ = NULL;
419 
420 	MPS_FUNCTRACE(sassc->sc);
421 	sc = sassc->sc;
422 
423 #ifdef WD_SUPPORT
424 	/*
425 	 * If this is a WD controller, determine if the disk should be exposed
426 	 * to the OS or not.  If disk should be exposed, return from this
427 	 * function without doing anything.
428 	 */
429 	if (sc->WD_available && (sc->WD_hide_expose ==
430 	    MPS_WD_EXPOSE_ALWAYS)) {
431 		return;
432 	}
433 #endif //WD_SUPPORT
434 
435 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
436 	if (targ == NULL) {
437 		/* FIXME: what is the action? */
438 		/* We don't know about this device? */
439 		mps_dprint(sc, MPS_ERROR,
440 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
441 		return;
442 	}
443 
444 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
445 
446 	cm = mpssas_alloc_tm(sc);
447 	if (cm == NULL) {
448 		mps_dprint(sc, MPS_ERROR,
449 		    "%s: command alloc failure\n", __func__);
450 		return;
451 	}
452 
453 	mpssas_rescan_target(sc, targ);
454 
455 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
456 	req->DevHandle = targ->handle;
457 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
458 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
459 
460 	/* SAS Hard Link Reset / SATA Link Reset */
461 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
462 
463 	cm->cm_targ = targ;
464 	cm->cm_data = NULL;
465 	cm->cm_desc.HighPriority.RequestFlags =
466 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
467 	cm->cm_complete = mpssas_remove_volume;
468 	cm->cm_complete_data = (void *)(uintptr_t)handle;
469 	mps_map_command(sc, cm);
470 }
471 
472 /*
473  * The MPT2 firmware performs debounce on the link to avoid transient link
474  * errors and false removals.  When it does decide that link has been lost
475  * and a device need to go away, it expects that the host will perform a
476  * target reset and then an op remove.  The reset has the side-effect of
477  * aborting any outstanding requests for the device, which is required for
478  * the op-remove to succeed.  It's not clear if the host should check for
479  * the device coming back alive after the reset.
480  */
/*
 * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that link has been lost
 * and a device need to go away, it expects that the host will perform a
 * target reset and then an op remove.  The reset has the side-effect of
 * aborting any outstanding requests for the device, which is required for
 * the op-remove to succeed.  It's not clear if the host should check for
 * the device coming back alive after the reset.
 *
 * This function issues the target-reset TM; the op-remove follow-up is
 * done by the completion handler, mpssas_remove_device().
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	/* Tell CAM to rescan so the device's departure is noticed. */
	mpssas_rescan_target(sc, targ);

	/* Request frames are little-endian, hence the htole16(). */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
529 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  If the reset succeeded, the same command is
 * reused to send the SAS_OP_REMOVE_DEVICE IO-unit control request
 * (completing in mpssas_remove_complete()), and any commands still
 * queued on the target are failed with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before use. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * NOTE: 'tm' is re-purposed as the loop cursor here, shadowing the
	 * command that was just mapped above; each iteration fails a
	 * still-queued command on the departing target.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
604 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, clears the target's state and
 * frees its LUN list; on failure the target is left intact (see comment
 * below).  Always frees the TM command.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release the per-LUN bookkeeping for the departed target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
671 
672 static int
673 mpssas_register_events(struct mps_softc *sc)
674 {
675 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
676 
677 	bzero(events, 16);
678 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
679 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
680 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
681 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
682 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
683 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
684 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
685 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
686 	setbit(events, MPI2_EVENT_IR_VOLUME);
687 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
688 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
689 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
690 
691 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
692 	    &sc->sassc->mpssas_eh);
693 
694 	return (0);
695 }
696 
/*
 * Attach the SAS/CAM layer: allocate the sassc softc and target array,
 * create the CAM simq/sim, start the event-handling taskqueue, register
 * the bus with CAM, freeze the simq until discovery completes, and
 * register async + firmware event handlers.  On any failure, undoes all
 * partial setup via mps_detach_sas() and returns the error.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	/*
	 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL, so the
	 * two NULL checks below are dead code — confirm and consider
	 * removing them.
	 */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request slot. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* First increment freezes the simq until discovery finishes. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	/* On partial failure, tear down everything set up above. */
	if (error)
		mps_detach_sas(sc);
	return (error);
}
822 
/*
 * Tear down the SAS/CAM layer: deregister event handlers, drain the
 * taskqueue, detach from CAM, and free the target array and softc.
 * Safe to call on a partially attached instance (each resource is
 * checked before release); no-op if sassc was never allocated.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If discovery never finished, the simq is still frozen; release it. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any remaining per-target LUN bookkeeping. */
	for(i=0; i< sc->facts->MaxTargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
883 
884 void
885 mpssas_discovery_end(struct mpssas_softc *sassc)
886 {
887 	struct mps_softc *sc = sassc->sc;
888 
889 	MPS_FUNCTRACE(sc);
890 
891 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
892 		callout_stop(&sassc->discovery_callout);
893 
894 }
895 
/*
 * Discovery watchdog.  Fires when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds — typically a symptom of broken
 * interrupts.  Polls the hardware manually, then either finishes
 * discovery, re-arms itself (up to MPSSAS_MAX_DISCOVERY_TIMEOUTS
 * times), or gives up and forces discovery to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);

	mps_lock(sc);
	mps_dprint(sc, MPS_INFO,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_dprint(sassc->sc, MPS_INFO,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		/* Polling completed discovery; tidy up. */
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering; re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
935 
936 static void
937 mpssas_action(struct cam_sim *sim, union ccb *ccb)
938 {
939 	struct mpssas_softc *sassc;
940 
941 	sassc = cam_sim_softc(sim);
942 
943 	MPS_FUNCTRACE(sassc->sc);
944 	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
945 	    ccb->ccb_h.func_code);
946 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
947 
948 	switch (ccb->ccb_h.func_code) {
949 	case XPT_PATH_INQ:
950 	{
951 		struct ccb_pathinq *cpi = &ccb->cpi;
952 
953 		cpi->version_num = 1;
954 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
955 		cpi->target_sprt = 0;
956 #if __FreeBSD_version >= 1000039
957 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
958 #else
959 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
960 #endif
961 		cpi->hba_eng_cnt = 0;
962 		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
963 		cpi->max_lun = 255;
964 		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
965 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
966 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
967 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
968 		cpi->unit_number = cam_sim_unit(sim);
969 		cpi->bus_id = cam_sim_bus(sim);
970 		cpi->base_transfer_speed = 150000;
971 		cpi->transport = XPORT_SAS;
972 		cpi->transport_version = 0;
973 		cpi->protocol = PROTO_SCSI;
974 		cpi->protocol_version = SCSI_REV_SPC;
975 #if __FreeBSD_version >= 800001
976 		/*
977 		 * XXX KDM where does this number come from?
978 		 */
979 		cpi->maxio = 256 * 1024;
980 #endif
981 		cpi->ccb_h.status = CAM_REQ_CMP;
982 		break;
983 	}
984 	case XPT_GET_TRAN_SETTINGS:
985 	{
986 		struct ccb_trans_settings	*cts;
987 		struct ccb_trans_settings_sas	*sas;
988 		struct ccb_trans_settings_scsi	*scsi;
989 		struct mpssas_target *targ;
990 
991 		cts = &ccb->cts;
992 		sas = &cts->xport_specific.sas;
993 		scsi = &cts->proto_specific.scsi;
994 
995 		targ = &sassc->targets[cts->ccb_h.target_id];
996 		if (targ->handle == 0x0) {
997 			cts->ccb_h.status = CAM_SEL_TIMEOUT;
998 			break;
999 		}
1000 
1001 		cts->protocol_version = SCSI_REV_SPC2;
1002 		cts->transport = XPORT_SAS;
1003 		cts->transport_version = 0;
1004 
1005 		sas->valid = CTS_SAS_VALID_SPEED;
1006 		switch (targ->linkrate) {
1007 		case 0x08:
1008 			sas->bitrate = 150000;
1009 			break;
1010 		case 0x09:
1011 			sas->bitrate = 300000;
1012 			break;
1013 		case 0x0a:
1014 			sas->bitrate = 600000;
1015 			break;
1016 		default:
1017 			sas->valid = 0;
1018 		}
1019 
1020 		cts->protocol = PROTO_SCSI;
1021 		scsi->valid = CTS_SCSI_VALID_TQ;
1022 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1023 
1024 		cts->ccb_h.status = CAM_REQ_CMP;
1025 		break;
1026 	}
1027 	case XPT_CALC_GEOMETRY:
1028 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1029 		ccb->ccb_h.status = CAM_REQ_CMP;
1030 		break;
1031 	case XPT_RESET_DEV:
1032 		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1033 		mpssas_action_resetdev(sassc, ccb);
1034 		return;
1035 	case XPT_RESET_BUS:
1036 	case XPT_ABORT:
1037 	case XPT_TERM_IO:
1038 		mps_dprint(sassc->sc, MPS_XINFO,
1039 		    "mpssas_action faking success for abort or reset\n");
1040 		ccb->ccb_h.status = CAM_REQ_CMP;
1041 		break;
1042 	case XPT_SCSI_IO:
1043 		mpssas_action_scsiio(sassc, ccb);
1044 		return;
1045 #if __FreeBSD_version >= 900026
1046 	case XPT_SMP_IO:
1047 		mpssas_action_smpio(sassc, ccb);
1048 		return;
1049 #endif
1050 	default:
1051 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1052 		break;
1053 	}
1054 	xpt_done(ccb);
1055 
1056 }
1057 
1058 static void
1059 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1060     target_id_t target_id, lun_id_t lun_id)
1061 {
1062 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1063 	struct cam_path *path;
1064 
1065 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1066 	    ac_code, target_id, (uintmax_t)lun_id);
1067 
1068 	if (xpt_create_path(&path, NULL,
1069 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1070 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1071 			   "notification\n");
1072 		return;
1073 	}
1074 
1075 	xpt_async(ac_code, path, NULL);
1076 	xpt_free_path(path);
1077 }
1078 
1079 static void
1080 mpssas_complete_all_commands(struct mps_softc *sc)
1081 {
1082 	struct mps_command *cm;
1083 	int i;
1084 	int completed;
1085 
1086 	MPS_FUNCTRACE(sc);
1087 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1088 
1089 	/* complete all commands with a NULL reply */
1090 	for (i = 1; i < sc->num_reqs; i++) {
1091 		cm = &sc->commands[i];
1092 		cm->cm_reply = NULL;
1093 		completed = 0;
1094 
1095 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1096 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1097 
1098 		if (cm->cm_complete != NULL) {
1099 			mpssas_log_command(cm, MPS_RECOVERY,
1100 			    "completing cm %p state %x ccb %p for diag reset\n",
1101 			    cm, cm->cm_state, cm->cm_ccb);
1102 
1103 			cm->cm_complete(sc, cm);
1104 			completed = 1;
1105 		}
1106 
1107 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1108 			mpssas_log_command(cm, MPS_RECOVERY,
1109 			    "waking up cm %p state %x ccb %p for diag reset\n",
1110 			    cm, cm->cm_state, cm->cm_ccb);
1111 			wakeup(cm);
1112 			completed = 1;
1113 		}
1114 
1115 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1116 			/* this should never happen, but if it does, log */
1117 			mpssas_log_command(cm, MPS_RECOVERY,
1118 			    "cm %p state %x flags 0x%x ccb %p during diag "
1119 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1120 			    cm->cm_ccb);
1121 		}
1122 	}
1123 }
1124 
/*
 * Recover driver/CAM state after a controller diag reset: re-enter
 * startup/discovery mode, announce a bus reset to CAM, flush every
 * outstanding command, and invalidate all cached target handles so
 * they get rediscovered.  The order of these steps matters.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		/* A nonzero count here means a command leaked past the flush. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1168 
1169 static void
1170 mpssas_tm_timeout(void *data)
1171 {
1172 	struct mps_command *tm = data;
1173 	struct mps_softc *sc = tm->cm_sc;
1174 
1175 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1176 
1177 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1178 	    "task mgmt %p timed out\n", tm);
1179 	mps_reinit(sc);
1180 }
1181 
1182 static void
1183 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1184 {
1185 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1186 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1187 	unsigned int cm_count = 0;
1188 	struct mps_command *cm;
1189 	struct mpssas_target *targ;
1190 
1191 	callout_stop(&tm->cm_callout);
1192 
1193 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1194 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1195 	targ = tm->cm_targ;
1196 
1197 	/*
1198 	 * Currently there should be no way we can hit this case.  It only
1199 	 * happens when we have a failure to allocate chain frames, and
1200 	 * task management commands don't have S/G lists.
1201 	 * XXXSL So should it be an assertion?
1202 	 */
1203 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1204 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1205 			   "This should not happen!\n", __func__, tm->cm_flags);
1206 		mpssas_free_tm(sc, tm);
1207 		return;
1208 	}
1209 
1210 	if (reply == NULL) {
1211 		mpssas_log_command(tm, MPS_RECOVERY,
1212 		    "NULL reset reply for tm %p\n", tm);
1213 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1214 			/* this completion was due to a reset, just cleanup */
1215 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1216 			targ->tm = NULL;
1217 			mpssas_free_tm(sc, tm);
1218 		}
1219 		else {
1220 			/* we should have gotten a reply. */
1221 			mps_reinit(sc);
1222 		}
1223 		return;
1224 	}
1225 
1226 	mpssas_log_command(tm, MPS_RECOVERY,
1227 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1228 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1229 	    le32toh(reply->TerminationCount));
1230 
1231 	/* See if there are any outstanding commands for this LUN.
1232 	 * This could be made more efficient by using a per-LU data
1233 	 * structure of some sort.
1234 	 */
1235 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1236 		if (cm->cm_lun == tm->cm_lun)
1237 			cm_count++;
1238 	}
1239 
1240 	if (cm_count == 0) {
1241 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1242 		    "logical unit %u finished recovery after reset\n",
1243 		    tm->cm_lun, tm);
1244 
1245 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1246 		    tm->cm_lun);
1247 
1248 		/* we've finished recovery for this logical unit.  check and
1249 		 * see if some other logical unit has a timedout command
1250 		 * that needs to be processed.
1251 		 */
1252 		cm = TAILQ_FIRST(&targ->timedout_commands);
1253 		if (cm) {
1254 			mpssas_send_abort(sc, tm, cm);
1255 		}
1256 		else {
1257 			targ->tm = NULL;
1258 			mpssas_free_tm(sc, tm);
1259 		}
1260 	}
1261 	else {
1262 		/* if we still have commands for this LUN, the reset
1263 		 * effectively failed, regardless of the status reported.
1264 		 * Escalate to a target reset.
1265 		 */
1266 		mpssas_log_command(tm, MPS_RECOVERY,
1267 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1268 		    tm, cm_count);
1269 		mpssas_send_reset(sc, tm,
1270 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1271 	}
1272 }
1273 
/*
 * Completion handler for a TARGET_RESET task management request.  If
 * the target has no outstanding commands left, recovery is done and
 * the BDR is announced; otherwise the reset effectively failed and
 * the only remaining escalation is a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1346 
1347 #define MPS_RESET_TIMEOUT 30
1348 
1349 static int
1350 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1351 {
1352 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1353 	struct mpssas_target *target;
1354 	int err;
1355 
1356 	target = tm->cm_targ;
1357 	if (target->handle == 0) {
1358 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1359 		    __func__, target->tid);
1360 		return -1;
1361 	}
1362 
1363 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1364 	req->DevHandle = htole16(target->handle);
1365 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1366 	req->TaskType = type;
1367 
1368 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1369 		/* XXX Need to handle invalid LUNs */
1370 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1371 		tm->cm_targ->logical_unit_resets++;
1372 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1373 		    "sending logical unit reset\n");
1374 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1375 	}
1376 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1377 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1378 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1379 		tm->cm_targ->target_resets++;
1380 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1381 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1382 		    "sending target reset\n");
1383 		tm->cm_complete = mpssas_target_reset_complete;
1384 	}
1385 	else {
1386 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1387 		return -1;
1388 	}
1389 
1390 	tm->cm_data = NULL;
1391 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1392 	tm->cm_complete_data = (void *)tm;
1393 
1394 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1395 	    mpssas_tm_timeout, tm);
1396 
1397 	err = mps_map_command(sc, tm);
1398 	if (err)
1399 		mpssas_log_command(tm, MPS_RECOVERY,
1400 		    "error %d sending reset type %u\n",
1401 		    err, type);
1402 
1403 	return err;
1404 }
1405 
1406 
/*
 * Completion handler for an ABORT_TASK task management request.
 * Decides the next recovery step: done if no timed-out commands
 * remain, abort the next one if the current abort landed, or
 * escalate to a logical unit reset if the aborted command never
 * completed.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1488 
1489 #define MPS_ABORT_TIMEOUT 5
1490 
1491 static int
1492 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1493 {
1494 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1495 	struct mpssas_target *targ;
1496 	int err;
1497 
1498 	targ = cm->cm_targ;
1499 	if (targ->handle == 0) {
1500 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1501 		    __func__, cm->cm_ccb->ccb_h.target_id);
1502 		return -1;
1503 	}
1504 
1505 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1506 	    "Aborting command %p\n", cm);
1507 
1508 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1509 	req->DevHandle = htole16(targ->handle);
1510 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1511 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1512 
1513 	/* XXX Need to handle invalid LUNs */
1514 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1515 
1516 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1517 
1518 	tm->cm_data = NULL;
1519 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1520 	tm->cm_complete = mpssas_abort_complete;
1521 	tm->cm_complete_data = (void *)tm;
1522 	tm->cm_targ = cm->cm_targ;
1523 	tm->cm_lun = cm->cm_lun;
1524 
1525 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1526 	    mpssas_tm_timeout, tm);
1527 
1528 	targ->aborts++;
1529 
1530 	err = mps_map_command(sc, tm);
1531 	if (err)
1532 		mpssas_log_command(tm, MPS_RECOVERY,
1533 		    "error %d sending abort for cm %p SMID %u\n",
1534 		    err, cm, req->TaskMID);
1535 	return err;
1536 }
1537 
1538 
1539 static void
1540 mpssas_scsiio_timeout(void *data)
1541 {
1542 	struct mps_softc *sc;
1543 	struct mps_command *cm;
1544 	struct mpssas_target *targ;
1545 
1546 	cm = (struct mps_command *)data;
1547 	sc = cm->cm_sc;
1548 
1549 	MPS_FUNCTRACE(sc);
1550 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1551 
1552 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1553 
1554 	/*
1555 	 * Run the interrupt handler to make sure it's not pending.  This
1556 	 * isn't perfect because the command could have already completed
1557 	 * and been re-used, though this is unlikely.
1558 	 */
1559 	mps_intr_locked(sc);
1560 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1561 		mpssas_log_command(cm, MPS_XINFO,
1562 		    "SCSI command %p almost timed out\n", cm);
1563 		return;
1564 	}
1565 
1566 	if (cm->cm_ccb == NULL) {
1567 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1568 		return;
1569 	}
1570 
1571 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1572 	    cm, cm->cm_ccb);
1573 
1574 	targ = cm->cm_targ;
1575 	targ->timeouts++;
1576 
1577 	/* XXX first, check the firmware state, to see if it's still
1578 	 * operational.  if not, do a diag reset.
1579 	 */
1580 
1581 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1582 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1583 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1584 
1585 	if (targ->tm != NULL) {
1586 		/* target already in recovery, just queue up another
1587 		 * timedout command to be processed later.
1588 		 */
1589 		mps_dprint(sc, MPS_RECOVERY,
1590 		    "queued timedout cm %p for processing by tm %p\n",
1591 		    cm, targ->tm);
1592 	}
1593 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1594 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1595 		    cm, targ->tm);
1596 
1597 		/* start recovery by aborting the first timedout command */
1598 		mpssas_send_abort(sc, targ->tm, cm);
1599 	}
1600 	else {
1601 		/* XXX queue this target up for recovery once a TM becomes
1602 		 * available.  The firmware only has a limited number of
1603 		 * HighPriority credits for the high priority requests used
1604 		 * for task management, and we ran out.
1605 		 *
1606 		 * Isilon: don't worry about this for now, since we have
1607 		 * more credits than disks in an enclosure, and limit
1608 		 * ourselves to one TM per target for recovery.
1609 		 */
1610 		mps_dprint(sc, MPS_RECOVERY,
1611 		    "timedout cm %p failed to allocate a tm\n", cm);
1612 	}
1613 
1614 }
1615 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI_IO request (direction,
 * tagging, LUN, CDB, optional EEDP protection fields and WD direct-I/O
 * routing), arm the per-command timeout and hand the command to the
 * hardware.  Completion is reported via mpssas_scsiio_complete().
 * Must be called with the softc mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means this target isn't currently mapped. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not addressable via SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of command slots: freeze the simq and ask CAM to requeue;
	 * the queue is released when commands are freed.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI_IO request from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data is mapped later from the CCB by the busdma layer. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* ccb_h.timeout is in milliseconds; convert to ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1865 
1866 static void
1867 mps_response_code(struct mps_softc *sc, u8 response_code)
1868 {
1869         char *desc;
1870 
1871         switch (response_code) {
1872         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1873                 desc = "task management request completed";
1874                 break;
1875         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1876                 desc = "invalid frame";
1877                 break;
1878         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1879                 desc = "task management request not supported";
1880                 break;
1881         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1882                 desc = "task management request failed";
1883                 break;
1884         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1885                 desc = "task management request succeeded";
1886                 break;
1887         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1888                 desc = "invalid lun";
1889                 break;
1890         case 0xA:
1891                 desc = "overlapped tag attempted";
1892                 break;
1893         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1894                 desc = "task queued, however not sent to target";
1895                 break;
1896         default:
1897                 desc = "unknown";
1898                 break;
1899         }
1900 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1901                 response_code, desc);
1902 }
1903 /**
1904  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1905  */
1906 static void
1907 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1908     Mpi2SCSIIOReply_t *mpi_reply)
1909 {
1910 	u32 response_info;
1911 	u8 *response_bytes;
1912 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1913 	    MPI2_IOCSTATUS_MASK;
1914 	u8 scsi_state = mpi_reply->SCSIState;
1915 	u8 scsi_status = mpi_reply->SCSIStatus;
1916 	char *desc_ioc_state = NULL;
1917 	char *desc_scsi_status = NULL;
1918 	char *desc_scsi_state = sc->tmp_string;
1919 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1920 
1921 	if (log_info == 0x31170000)
1922 		return;
1923 
1924 	switch (ioc_status) {
1925 	case MPI2_IOCSTATUS_SUCCESS:
1926 		desc_ioc_state = "success";
1927 		break;
1928 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1929 		desc_ioc_state = "invalid function";
1930 		break;
1931 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1932 		desc_ioc_state = "scsi recovered error";
1933 		break;
1934 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1935 		desc_ioc_state = "scsi invalid dev handle";
1936 		break;
1937 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1938 		desc_ioc_state = "scsi device not there";
1939 		break;
1940 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1941 		desc_ioc_state = "scsi data overrun";
1942 		break;
1943 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1944 		desc_ioc_state = "scsi data underrun";
1945 		break;
1946 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1947 		desc_ioc_state = "scsi io data error";
1948 		break;
1949 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1950 		desc_ioc_state = "scsi protocol error";
1951 		break;
1952 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1953 		desc_ioc_state = "scsi task terminated";
1954 		break;
1955 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1956 		desc_ioc_state = "scsi residual mismatch";
1957 		break;
1958 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1959 		desc_ioc_state = "scsi task mgmt failed";
1960 		break;
1961 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1962 		desc_ioc_state = "scsi ioc terminated";
1963 		break;
1964 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1965 		desc_ioc_state = "scsi ext terminated";
1966 		break;
1967 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1968 		desc_ioc_state = "eedp guard error";
1969 		break;
1970 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1971 		desc_ioc_state = "eedp ref tag error";
1972 		break;
1973 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1974 		desc_ioc_state = "eedp app tag error";
1975 		break;
1976 	default:
1977 		desc_ioc_state = "unknown";
1978 		break;
1979 	}
1980 
1981 	switch (scsi_status) {
1982 	case MPI2_SCSI_STATUS_GOOD:
1983 		desc_scsi_status = "good";
1984 		break;
1985 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1986 		desc_scsi_status = "check condition";
1987 		break;
1988 	case MPI2_SCSI_STATUS_CONDITION_MET:
1989 		desc_scsi_status = "condition met";
1990 		break;
1991 	case MPI2_SCSI_STATUS_BUSY:
1992 		desc_scsi_status = "busy";
1993 		break;
1994 	case MPI2_SCSI_STATUS_INTERMEDIATE:
1995 		desc_scsi_status = "intermediate";
1996 		break;
1997 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1998 		desc_scsi_status = "intermediate condmet";
1999 		break;
2000 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2001 		desc_scsi_status = "reservation conflict";
2002 		break;
2003 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2004 		desc_scsi_status = "command terminated";
2005 		break;
2006 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2007 		desc_scsi_status = "task set full";
2008 		break;
2009 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2010 		desc_scsi_status = "aca active";
2011 		break;
2012 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2013 		desc_scsi_status = "task aborted";
2014 		break;
2015 	default:
2016 		desc_scsi_status = "unknown";
2017 		break;
2018 	}
2019 
2020 	desc_scsi_state[0] = '\0';
2021 	if (!scsi_state)
2022 		desc_scsi_state = " ";
2023 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2024 		strcat(desc_scsi_state, "response info ");
2025 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2026 		strcat(desc_scsi_state, "state terminated ");
2027 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2028 		strcat(desc_scsi_state, "no status ");
2029 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2030 		strcat(desc_scsi_state, "autosense failed ");
2031 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2032 		strcat(desc_scsi_state, "autosense valid ");
2033 
2034 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2035 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2036 	/* We can add more detail about underflow data here
2037 	 * TO-DO
2038 	 * */
2039 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2040 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2041 	    desc_scsi_state, scsi_state);
2042 
2043 	if (sc->mps_debug & MPS_XINFO &&
2044 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2045 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2046 		scsi_sense_print(csio);
2047 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2048 	}
2049 
2050 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2051 		response_info = le32toh(mpi_reply->ResponseInfo);
2052 		response_bytes = (u8 *)&response_info;
2053 		mps_response_code(sc,response_bytes[0]);
2054 	}
2055 }
2056 
/*
 * Completion handler for XPT_SCSI_IO commands.  Translates the MPI2 SCSI IO
 * reply (when present) into CAM status on the CCB, copies autosense data,
 * re-issues failed direct-drive (WarpDrive) I/O to the IR volume, and
 * manages freezing/unfreezing of the SIM and device queues before handing
 * the CCB back to CAM with xpt_done().  Called with the mps mutex held.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed; cancel its timeout callout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target bookkeeping; the command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Log completions that arrive while error recovery (command
	 * timeout, task management, or diag reset) is in progress; these
	 * traces help diagnose recovery problems.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				/* No reply means the I/O succeeded. */
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order with after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	}

	/* Translate the masked IOCStatus from the reply into CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/*
			 * Clamp the sense length to what fits in the CCB,
			 * record any residual, and copy the sense data over.
			 */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
		    T_SEQUENTIAL) && (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* alloc_len is the CDB allocation length (bytes 3-4) */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/* Emit a decoded summary of the reply at MPS_XINFO level. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	/* Freeze the device queue on any error so recovery stays ordered. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2412 
2413 /* All Request reached here are Endian safe */
2414 static void
2415 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2416     union ccb *ccb) {
2417 	pMpi2SCSIIORequest_t	pIO_req;
2418 	struct mps_softc	*sc = sassc->sc;
2419 	uint64_t		virtLBA;
2420 	uint32_t		physLBA, stripe_offset, stripe_unit;
2421 	uint32_t		io_size, column;
2422 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2423 
2424 	/*
2425 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2426 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2427 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2428 	 * bit different than the 10/16 CDBs, handle them separately.
2429 	 */
2430 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2431 	CDB = pIO_req->CDB.CDB32;
2432 
2433 	/*
2434 	 * Handle 6 byte CDBs.
2435 	 */
2436 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2437 	    (CDB[0] == WRITE_6))) {
2438 		/*
2439 		 * Get the transfer size in blocks.
2440 		 */
2441 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2442 
2443 		/*
2444 		 * Get virtual LBA given in the CDB.
2445 		 */
2446 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2447 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2448 
2449 		/*
2450 		 * Check that LBA range for I/O does not exceed volume's
2451 		 * MaxLBA.
2452 		 */
2453 		if ((virtLBA + (uint64_t)io_size - 1) <=
2454 		    sc->DD_max_lba) {
2455 			/*
2456 			 * Check if the I/O crosses a stripe boundary.  If not,
2457 			 * translate the virtual LBA to a physical LBA and set
2458 			 * the DevHandle for the PhysDisk to be used.  If it
2459 			 * does cross a boundry, do normal I/O.  To get the
2460 			 * right DevHandle to use, get the map number for the
2461 			 * column, then use that map number to look up the
2462 			 * DevHandle of the PhysDisk.
2463 			 */
2464 			stripe_offset = (uint32_t)virtLBA &
2465 			    (sc->DD_stripe_size - 1);
2466 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2467 				physLBA = (uint32_t)virtLBA >>
2468 				    sc->DD_stripe_exponent;
2469 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2470 				column = physLBA % sc->DD_num_phys_disks;
2471 				pIO_req->DevHandle =
2472 				    htole16(sc->DD_column_map[column].dev_handle);
2473 				/* ???? Is this endian safe*/
2474 				cm->cm_desc.SCSIIO.DevHandle =
2475 				    pIO_req->DevHandle;
2476 
2477 				physLBA = (stripe_unit <<
2478 				    sc->DD_stripe_exponent) + stripe_offset;
2479 				ptrLBA = &pIO_req->CDB.CDB32[1];
2480 				physLBA_byte = (uint8_t)(physLBA >> 16);
2481 				*ptrLBA = physLBA_byte;
2482 				ptrLBA = &pIO_req->CDB.CDB32[2];
2483 				physLBA_byte = (uint8_t)(physLBA >> 8);
2484 				*ptrLBA = physLBA_byte;
2485 				ptrLBA = &pIO_req->CDB.CDB32[3];
2486 				physLBA_byte = (uint8_t)physLBA;
2487 				*ptrLBA = physLBA_byte;
2488 
2489 				/*
2490 				 * Set flag that Direct Drive I/O is
2491 				 * being done.
2492 				 */
2493 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2494 			}
2495 		}
2496 		return;
2497 	}
2498 
2499 	/*
2500 	 * Handle 10, 12 or 16 byte CDBs.
2501 	 */
2502 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2503 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2504 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2505 	    (CDB[0] == WRITE_12))) {
2506 		/*
2507 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2508 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2509 		 * the else section.  10-byte and 12-byte CDB's are OK.
2510 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2511 		 * ready to accept 12byte CDB for Direct IOs.
2512 		 */
2513 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2514 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2515 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2516 			/*
2517 			 * Get the transfer size in blocks.
2518 			 */
2519 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2520 
2521 			/*
2522 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2523 			 * LBA in the CDB depending on command.
2524 			 */
2525 			lba_idx = ((CDB[0] == READ_12) ||
2526 				(CDB[0] == WRITE_12) ||
2527 				(CDB[0] == READ_10) ||
2528 				(CDB[0] == WRITE_10))? 2 : 6;
2529 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2530 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2531 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2532 			    (uint64_t)CDB[lba_idx + 3];
2533 
2534 			/*
2535 			 * Check that LBA range for I/O does not exceed volume's
2536 			 * MaxLBA.
2537 			 */
2538 			if ((virtLBA + (uint64_t)io_size - 1) <=
2539 			    sc->DD_max_lba) {
2540 				/*
2541 				 * Check if the I/O crosses a stripe boundary.
2542 				 * If not, translate the virtual LBA to a
2543 				 * physical LBA and set the DevHandle for the
2544 				 * PhysDisk to be used.  If it does cross a
2545 				 * boundry, do normal I/O.  To get the right
2546 				 * DevHandle to use, get the map number for the
2547 				 * column, then use that map number to look up
2548 				 * the DevHandle of the PhysDisk.
2549 				 */
2550 				stripe_offset = (uint32_t)virtLBA &
2551 				    (sc->DD_stripe_size - 1);
2552 				if ((stripe_offset + io_size) <=
2553 				    sc->DD_stripe_size) {
2554 					physLBA = (uint32_t)virtLBA >>
2555 					    sc->DD_stripe_exponent;
2556 					stripe_unit = physLBA /
2557 					    sc->DD_num_phys_disks;
2558 					column = physLBA %
2559 					    sc->DD_num_phys_disks;
2560 					pIO_req->DevHandle =
2561 					    htole16(sc->DD_column_map[column].
2562 					    dev_handle);
2563 					cm->cm_desc.SCSIIO.DevHandle =
2564 					    pIO_req->DevHandle;
2565 
2566 					physLBA = (stripe_unit <<
2567 					    sc->DD_stripe_exponent) +
2568 					    stripe_offset;
2569 					ptrLBA =
2570 					    &pIO_req->CDB.CDB32[lba_idx];
2571 					physLBA_byte = (uint8_t)(physLBA >> 24);
2572 					*ptrLBA = physLBA_byte;
2573 					ptrLBA =
2574 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2575 					physLBA_byte = (uint8_t)(physLBA >> 16);
2576 					*ptrLBA = physLBA_byte;
2577 					ptrLBA =
2578 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2579 					physLBA_byte = (uint8_t)(physLBA >> 8);
2580 					*ptrLBA = physLBA_byte;
2581 					ptrLBA =
2582 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2583 					physLBA_byte = (uint8_t)physLBA;
2584 					*ptrLBA = physLBA_byte;
2585 
2586 					/*
2587 					 * Set flag that Direct Drive I/O is
2588 					 * being done.
2589 					 */
2590 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2591 				}
2592 			}
2593 		} else {
2594 			/*
2595 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2596 			 * 0.  Get the transfer size in blocks.
2597 			 */
2598 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2599 
2600 			/*
2601 			 * Get virtual LBA.
2602 			 */
2603 			virtLBA = ((uint64_t)CDB[2] << 54) |
2604 			    ((uint64_t)CDB[3] << 48) |
2605 			    ((uint64_t)CDB[4] << 40) |
2606 			    ((uint64_t)CDB[5] << 32) |
2607 			    ((uint64_t)CDB[6] << 24) |
2608 			    ((uint64_t)CDB[7] << 16) |
2609 			    ((uint64_t)CDB[8] << 8) |
2610 			    (uint64_t)CDB[9];
2611 
2612 			/*
2613 			 * Check that LBA range for I/O does not exceed volume's
2614 			 * MaxLBA.
2615 			 */
2616 			if ((virtLBA + (uint64_t)io_size - 1) <=
2617 			    sc->DD_max_lba) {
2618 				/*
2619 				 * Check if the I/O crosses a stripe boundary.
2620 				 * If not, translate the virtual LBA to a
2621 				 * physical LBA and set the DevHandle for the
2622 				 * PhysDisk to be used.  If it does cross a
2623 				 * boundry, do normal I/O.  To get the right
2624 				 * DevHandle to use, get the map number for the
2625 				 * column, then use that map number to look up
2626 				 * the DevHandle of the PhysDisk.
2627 				 */
2628 				stripe_offset = (uint32_t)virtLBA &
2629 				    (sc->DD_stripe_size - 1);
2630 				if ((stripe_offset + io_size) <=
2631 				    sc->DD_stripe_size) {
2632 					physLBA = (uint32_t)(virtLBA >>
2633 					    sc->DD_stripe_exponent);
2634 					stripe_unit = physLBA /
2635 					    sc->DD_num_phys_disks;
2636 					column = physLBA %
2637 					    sc->DD_num_phys_disks;
2638 					pIO_req->DevHandle =
2639 					    htole16(sc->DD_column_map[column].
2640 					    dev_handle);
2641 					cm->cm_desc.SCSIIO.DevHandle =
2642 					    pIO_req->DevHandle;
2643 
2644 					physLBA = (stripe_unit <<
2645 					    sc->DD_stripe_exponent) +
2646 					    stripe_offset;
2647 
2648 					/*
2649 					 * Set upper 4 bytes of LBA to 0.  We
2650 					 * assume that the phys disks are less
2651 					 * than 2 TB's in size.  Then, set the
2652 					 * lower 4 bytes.
2653 					 */
2654 					pIO_req->CDB.CDB32[2] = 0;
2655 					pIO_req->CDB.CDB32[3] = 0;
2656 					pIO_req->CDB.CDB32[4] = 0;
2657 					pIO_req->CDB.CDB32[5] = 0;
2658 					ptrLBA = &pIO_req->CDB.CDB32[6];
2659 					physLBA_byte = (uint8_t)(physLBA >> 24);
2660 					*ptrLBA = physLBA_byte;
2661 					ptrLBA = &pIO_req->CDB.CDB32[7];
2662 					physLBA_byte = (uint8_t)(physLBA >> 16);
2663 					*ptrLBA = physLBA_byte;
2664 					ptrLBA = &pIO_req->CDB.CDB32[8];
2665 					physLBA_byte = (uint8_t)(physLBA >> 8);
2666 					*ptrLBA = physLBA_byte;
2667 					ptrLBA = &pIO_req->CDB.CDB32[9];
2668 					physLBA_byte = (uint8_t)physLBA;
2669 					*ptrLBA = physLBA_byte;
2670 
2671 					/*
2672 					 * Set flag that Direct Drive I/O is
2673 					 * being done.
2674 					 */
2675 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2676 				}
2677 			}
2678 		}
2679 	}
2680 }
2681 
2682 #if __FreeBSD_version >= 900026
2683 static void
2684 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2685 {
2686 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2687 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2688 	uint64_t sasaddr;
2689 	union ccb *ccb;
2690 
2691 	ccb = cm->cm_complete_data;
2692 
2693 	/*
2694 	 * Currently there should be no way we can hit this case.  It only
2695 	 * happens when we have a failure to allocate chain frames, and SMP
2696 	 * commands require two S/G elements only.  That should be handled
2697 	 * in the standard request size.
2698 	 */
2699 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2700 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2701 			   __func__, cm->cm_flags);
2702 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2703 		goto bailout;
2704         }
2705 
2706 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2707 	if (rpl == NULL) {
2708 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2709 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2710 		goto bailout;
2711 	}
2712 
2713 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2714 	sasaddr = le32toh(req->SASAddress.Low);
2715 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2716 
2717 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2718 	    MPI2_IOCSTATUS_SUCCESS ||
2719 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2720 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2721 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2722 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2723 		goto bailout;
2724 	}
2725 
2726 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2727 		   "%#jx completed successfully\n", __func__,
2728 		   (uintmax_t)sasaddr);
2729 
2730 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2731 		ccb->ccb_h.status = CAM_REQ_CMP;
2732 	else
2733 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2734 
2735 bailout:
2736 	/*
2737 	 * We sync in both directions because we had DMAs in the S/G list
2738 	 * in both directions.
2739 	 */
2740 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2741 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2742 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2743 	mps_free_command(sc, cm);
2744 	xpt_done(ccb);
2745 }
2746 
2747 static void
2748 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2749 {
2750 	struct mps_command *cm;
2751 	uint8_t *request, *response;
2752 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2753 	struct mps_softc *sc;
2754 	struct sglist *sg;
2755 	int error;
2756 
2757 	sc = sassc->sc;
2758 	sg = NULL;
2759 	error = 0;
2760 
2761 	/*
2762 	 * XXX We don't yet support physical addresses here.
2763 	 */
2764 	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2765 	case CAM_DATA_PADDR:
2766 	case CAM_DATA_SG_PADDR:
2767 		mps_dprint(sc, MPS_ERROR,
2768 			   "%s: physical addresses not supported\n", __func__);
2769 		ccb->ccb_h.status = CAM_REQ_INVALID;
2770 		xpt_done(ccb);
2771 		return;
2772 	case CAM_DATA_SG:
2773 		/*
2774 		 * The chip does not support more than one buffer for the
2775 		 * request or response.
2776 		 */
2777 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2778 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2779 			mps_dprint(sc, MPS_ERROR,
2780 				   "%s: multiple request or response "
2781 				   "buffer segments not supported for SMP\n",
2782 				   __func__);
2783 			ccb->ccb_h.status = CAM_REQ_INVALID;
2784 			xpt_done(ccb);
2785 			return;
2786 		}
2787 
2788 		/*
2789 		 * The CAM_SCATTER_VALID flag was originally implemented
2790 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2791 		 * We have two.  So, just take that flag to mean that we
2792 		 * might have S/G lists, and look at the S/G segment count
2793 		 * to figure out whether that is the case for each individual
2794 		 * buffer.
2795 		 */
2796 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2797 			bus_dma_segment_t *req_sg;
2798 
2799 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2800 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2801 		} else
2802 			request = ccb->smpio.smp_request;
2803 
2804 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2805 			bus_dma_segment_t *rsp_sg;
2806 
2807 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2808 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2809 		} else
2810 			response = ccb->smpio.smp_response;
2811 		break;
2812 	case CAM_DATA_VADDR:
2813 		request = ccb->smpio.smp_request;
2814 		response = ccb->smpio.smp_response;
2815 		break;
2816 	default:
2817 		ccb->ccb_h.status = CAM_REQ_INVALID;
2818 		xpt_done(ccb);
2819 		return;
2820 	}
2821 
2822 	cm = mps_alloc_command(sc);
2823 	if (cm == NULL) {
2824 		mps_dprint(sc, MPS_ERROR,
2825 		    "%s: cannot allocate command\n", __func__);
2826 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2827 		xpt_done(ccb);
2828 		return;
2829 	}
2830 
2831 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2832 	bzero(req, sizeof(*req));
2833 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2834 
2835 	/* Allow the chip to use any route to this SAS address. */
2836 	req->PhysicalPort = 0xff;
2837 
2838 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2839 	req->SGLFlags =
2840 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2841 
2842 	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2843 	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2844 
2845 	mpi_init_sge(cm, req, &req->SGL);
2846 
2847 	/*
2848 	 * Set up a uio to pass into mps_map_command().  This allows us to
2849 	 * do one map command, and one busdma call in there.
2850 	 */
2851 	cm->cm_uio.uio_iov = cm->cm_iovec;
2852 	cm->cm_uio.uio_iovcnt = 2;
2853 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2854 
2855 	/*
2856 	 * The read/write flag isn't used by busdma, but set it just in
2857 	 * case.  This isn't exactly accurate, either, since we're going in
2858 	 * both directions.
2859 	 */
2860 	cm->cm_uio.uio_rw = UIO_WRITE;
2861 
2862 	cm->cm_iovec[0].iov_base = request;
2863 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2864 	cm->cm_iovec[1].iov_base = response;
2865 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2866 
2867 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2868 			       cm->cm_iovec[1].iov_len;
2869 
2870 	/*
2871 	 * Trigger a warning message in mps_data_cb() for the user if we
2872 	 * wind up exceeding two S/G segments.  The chip expects one
2873 	 * segment for the request and another for the response.
2874 	 */
2875 	cm->cm_max_segs = 2;
2876 
2877 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2878 	cm->cm_complete = mpssas_smpio_complete;
2879 	cm->cm_complete_data = ccb;
2880 
2881 	/*
2882 	 * Tell the mapping code that we're using a uio, and that this is
2883 	 * an SMP passthrough request.  There is a little special-case
2884 	 * logic there (in mps_data_cb()) to handle the bidirectional
2885 	 * transfer.
2886 	 */
2887 	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2888 			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2889 
2890 	/* The chip data format is little endian. */
2891 	req->SASAddress.High = htole32(sasaddr >> 32);
2892 	req->SASAddress.Low = htole32(sasaddr);
2893 
2894 	/*
2895 	 * XXX Note that we don't have a timeout/abort mechanism here.
2896 	 * From the manual, it looks like task management requests only
2897 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2898 	 * have a mechanism to retry requests in the event of a chip reset
2899 	 * at least.  Hopefully the chip will insure that any errors short
2900 	 * of that are relayed back to the driver.
2901 	 */
2902 	error = mps_map_command(sc, cm);
2903 	if ((error != 0) && (error != EINPROGRESS)) {
2904 		mps_dprint(sc, MPS_ERROR,
2905 			   "%s: error %d returned from mps_map_command()\n",
2906 			   __func__, error);
2907 		goto bailout_error;
2908 	}
2909 
2910 	return;
2911 
2912 bailout_error:
2913 	mps_free_command(sc, cm);
2914 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2915 	xpt_done(ccb);
2916 	return;
2917 
2918 }
2919 
/*
 * Handle an XPT_SMP_IO CCB.  SMP requests must be addressed to an SMP
 * target; if the addressed device is not itself an SMP target, route the
 * request to the device's parent (normally the attached expander).  Once
 * a SAS address is resolved, hand the CCB off to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise, we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* The parent must itself be an SMP target to be addressable. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3053 #endif //__FreeBSD_version >= 900026
3054 
3055 static void
3056 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3057 {
3058 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3059 	struct mps_softc *sc;
3060 	struct mps_command *tm;
3061 	struct mpssas_target *targ;
3062 
3063 	MPS_FUNCTRACE(sassc->sc);
3064 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3065 
3066 	sc = sassc->sc;
3067 	tm = mps_alloc_command(sc);
3068 	if (tm == NULL) {
3069 		mps_dprint(sc, MPS_ERROR,
3070 		    "command alloc failure in mpssas_action_resetdev\n");
3071 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3072 		xpt_done(ccb);
3073 		return;
3074 	}
3075 
3076 	targ = &sassc->targets[ccb->ccb_h.target_id];
3077 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3078 	req->DevHandle = htole16(targ->handle);
3079 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3080 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3081 
3082 	/* SAS Hard Link Reset / SATA Link Reset */
3083 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3084 
3085 	tm->cm_data = NULL;
3086 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3087 	tm->cm_complete = mpssas_resetdev_complete;
3088 	tm->cm_complete_data = ccb;
3089 	tm->cm_targ = targ;
3090 	mps_map_command(sc, tm);
3091 }
3092 
3093 static void
3094 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3095 {
3096 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3097 	union ccb *ccb;
3098 
3099 	MPS_FUNCTRACE(sc);
3100 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3101 
3102 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3103 	ccb = tm->cm_complete_data;
3104 
3105 	/*
3106 	 * Currently there should be no way we can hit this case.  It only
3107 	 * happens when we have a failure to allocate chain frames, and
3108 	 * task management commands don't have S/G lists.
3109 	 */
3110 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3111 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3112 
3113 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3114 
3115 		mps_dprint(sc, MPS_ERROR,
3116 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3117 			   "This should not happen!\n", __func__, tm->cm_flags,
3118 			   req->DevHandle);
3119 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3120 		goto bailout;
3121 	}
3122 
3123 	mps_dprint(sc, MPS_XINFO,
3124 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3125 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3126 
3127 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3128 		ccb->ccb_h.status = CAM_REQ_CMP;
3129 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3130 		    CAM_LUN_WILDCARD);
3131 	}
3132 	else
3133 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3134 
3135 bailout:
3136 
3137 	mpssas_free_tm(sc, tm);
3138 	xpt_done(ccb);
3139 }
3140 
3141 static void
3142 mpssas_poll(struct cam_sim *sim)
3143 {
3144 	struct mpssas_softc *sassc;
3145 
3146 	sassc = cam_sim_softc(sim);
3147 
3148 	if (sassc->sc->mps_debug & MPS_TRACE) {
3149 		/* frequent debug messages during a panic just slow
3150 		 * everything down too much.
3151 		 */
3152 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3153 		sassc->sc->mps_debug &= ~MPS_TRACE;
3154 	}
3155 
3156 	mps_intr_locked(sassc->sc);
3157 }
3158 
/*
 * CAM asynchronous event callback.
 *
 * On kernels that support the advanced-info CCB, AC_ADVINFO_CHANGED is
 * used to track READ CAPACITY(16) changes so per-LUN EEDP (protection
 * information) state stays current.  On older kernels, AC_FOUND_DEVICE
 * triggers an explicit EEDP probe via mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN entry for this path's LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* No entry yet; allocate one so EEDP state can be recorded. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data via an advinfo
		 * CCB instead of issuing a new SCSI command.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN is formatted with protection
		 * information, and its logical block size.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3255 
3256 #if (__FreeBSD_version < 901503) || \
3257     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3258 static void
3259 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3260 		  struct ccb_getdev *cgd)
3261 {
3262 	struct mpssas_softc *sassc = sc->sassc;
3263 	struct ccb_scsiio *csio;
3264 	struct scsi_read_capacity_16 *scsi_cmd;
3265 	struct scsi_read_capacity_eedp *rcap_buf;
3266 	path_id_t pathid;
3267 	target_id_t targetid;
3268 	lun_id_t lunid;
3269 	union ccb *ccb;
3270 	struct cam_path *local_path;
3271 	struct mpssas_target *target;
3272 	struct mpssas_lun *lun;
3273 	uint8_t	found_lun;
3274 	char path_str[64];
3275 
3276 	sassc = sc->sassc;
3277 	pathid = cam_sim_path(sassc->sim);
3278 	targetid = xpt_path_target_id(path);
3279 	lunid = xpt_path_lun_id(path);
3280 
3281 	target = &sassc->targets[targetid];
3282 	if (target->handle == 0x0)
3283 		return;
3284 
3285 	/*
3286 	 * Determine if the device is EEDP capable.
3287 	 *
3288 	 * If this flag is set in the inquiry data,
3289 	 * the device supports protection information,
3290 	 * and must support the 16 byte read
3291 	 * capacity command, otherwise continue without
3292 	 * sending read cap 16
3293 	 */
3294 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3295 		return;
3296 
3297 	/*
3298 	 * Issue a READ CAPACITY 16 command.  This info
3299 	 * is used to determine if the LUN is formatted
3300 	 * for EEDP support.
3301 	 */
3302 	ccb = xpt_alloc_ccb_nowait();
3303 	if (ccb == NULL) {
3304 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3305 		    "for EEDP support.\n");
3306 		return;
3307 	}
3308 
3309 	if (xpt_create_path(&local_path, xpt_periph,
3310 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3311 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3312 		    "path for EEDP support\n");
3313 		xpt_free_ccb(ccb);
3314 		return;
3315 	}
3316 
3317 	/*
3318 	 * If LUN is already in list, don't create a new
3319 	 * one.
3320 	 */
3321 	found_lun = FALSE;
3322 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3323 		if (lun->lun_id == lunid) {
3324 			found_lun = TRUE;
3325 			break;
3326 		}
3327 	}
3328 	if (!found_lun) {
3329 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3330 		    M_NOWAIT | M_ZERO);
3331 		if (lun == NULL) {
3332 			mps_dprint(sc, MPS_ERROR,
3333 			    "Unable to alloc LUN for EEDP support.\n");
3334 			xpt_free_path(local_path);
3335 			xpt_free_ccb(ccb);
3336 			return;
3337 		}
3338 		lun->lun_id = lunid;
3339 		SLIST_INSERT_HEAD(&target->luns, lun,
3340 		    lun_link);
3341 	}
3342 
3343 	xpt_path_string(local_path, path_str, sizeof(path_str));
3344 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3345 	    path_str, target->handle);
3346 
3347 	/*
3348 	 * Issue a READ CAPACITY 16 command for the LUN.
3349 	 * The mpssas_read_cap_done function will load
3350 	 * the read cap info into the LUN struct.
3351 	 */
3352 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3353 	    M_MPT2, M_NOWAIT | M_ZERO);
3354 	if (rcap_buf == NULL) {
3355 		mps_dprint(sc, MPS_FAULT,
3356 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3357 		xpt_free_path(ccb->ccb_h.path);
3358 		xpt_free_ccb(ccb);
3359 		return;
3360 	}
3361 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3362 	csio = &ccb->csio;
3363 	csio->ccb_h.func_code = XPT_SCSI_IO;
3364 	csio->ccb_h.flags = CAM_DIR_IN;
3365 	csio->ccb_h.retry_count = 4;
3366 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3367 	csio->ccb_h.timeout = 60000;
3368 	csio->data_ptr = (uint8_t *)rcap_buf;
3369 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3370 	csio->sense_len = MPS_SENSE_LEN;
3371 	csio->cdb_len = sizeof(*scsi_cmd);
3372 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3373 
3374 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3375 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3376 	scsi_cmd->opcode = 0x9E;
3377 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3378 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3379 
3380 	ccb->ccb_h.ppriv_ptr1 = sassc;
3381 	xpt_action(ccb);
3382 }
3383 
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Records whether the LUN is formatted with
 * protection information (EEDP) and, if so, its logical block size.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/* The driver needs to release the devq here because this SCSI
	 * command was generated internally by the driver.  This is
	 * currently the only place the driver issues a SCSI command
	 * internally; if more are added in the future, each must release
	 * the devq itself as well, since such commands never go back
	 * through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit: the medium is formatted with protection info. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3451 #endif /* (__FreeBSD_version < 901503) || \
3452           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3453 
/*
 * Begin SAS topology discovery by sending the IOC a PORT_ENABLE request.
 * Always returns 0.
 */
int
mpssas_startup(struct mps_softc *sc)
{

	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
	return (0);
}
3467 
3468 static int
3469 mpssas_send_portenable(struct mps_softc *sc)
3470 {
3471 	MPI2_PORT_ENABLE_REQUEST *request;
3472 	struct mps_command *cm;
3473 
3474 	MPS_FUNCTRACE(sc);
3475 
3476 	if ((cm = mps_alloc_command(sc)) == NULL)
3477 		return (EBUSY);
3478 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3479 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3480 	request->MsgFlags = 0;
3481 	request->VP_ID = 0;
3482 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3483 	cm->cm_complete = mpssas_portenable_complete;
3484 	cm->cm_data = NULL;
3485 	cm->cm_sge = NULL;
3486 
3487 	mps_map_command(sc, cm);
3488 	mps_dprint(sc, MPS_XINFO,
3489 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3490 	    cm, cm->cm_req, cm->cm_complete);
3491 	return (0);
3492 }
3493 
3494 static void
3495 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3496 {
3497 	MPI2_PORT_ENABLE_REPLY *reply;
3498 	struct mpssas_softc *sassc;
3499 
3500 	MPS_FUNCTRACE(sc);
3501 	sassc = sc->sassc;
3502 
3503 	/*
3504 	 * Currently there should be no way we can hit this case.  It only
3505 	 * happens when we have a failure to allocate chain frames, and
3506 	 * port enable commands don't have S/G lists.
3507 	 */
3508 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3509 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3510 			   "This should not happen!\n", __func__, cm->cm_flags);
3511 	}
3512 
3513 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3514 	if (reply == NULL)
3515 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3516 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3517 	    MPI2_IOCSTATUS_SUCCESS)
3518 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3519 
3520 	mps_free_command(sc, cm);
3521 	if (sc->mps_ich.ich_arg != NULL) {
3522 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3523 		config_intrhook_disestablish(&sc->mps_ich);
3524 		sc->mps_ich.ich_arg = NULL;
3525 	}
3526 
3527 	/*
3528 	 * Get WarpDrive info after discovery is complete but before the scan
3529 	 * starts.  At this point, all devices are ready to be exposed to the
3530 	 * OS.  If devices should be hidden instead, take them out of the
3531 	 * 'targets' array before the scan.  The devinfo for a disk will have
3532 	 * some info and a volume's will be 0.  Use that to remove disks.
3533 	 */
3534 	mps_wd_config_pages(sc);
3535 
3536 	/*
3537 	 * Done waiting for port enable to complete.  Decrement the refcount.
3538 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3539 	 * take place.  Since the simq was explicitly frozen before port
3540 	 * enable, it must be explicitly released here to keep the
3541 	 * freeze/release count in sync.
3542 	 */
3543 	sc->wait_for_port_enable = 0;
3544 	sc->port_enable_complete = 1;
3545 	wakeup(&sc->port_enable_complete);
3546 	mpssas_startup_decrement(sassc);
3547 }
3548 
3549 int
3550 mpssas_check_id(struct mpssas_softc *sassc, int id)
3551 {
3552 	struct mps_softc *sc = sassc->sc;
3553 	char *ids;
3554 	char *name;
3555 
3556 	ids = &sc->exclude_ids[0];
3557 	while((name = strsep(&ids, ",")) != NULL) {
3558 		if (name[0] == '\0')
3559 			continue;
3560 		if (strtol(name, NULL, 0) == (long)id)
3561 			return (1);
3562 	}
3563 
3564 	return (0);
3565 }
3566