xref: /freebsd/sys/dev/mps/mps_sas.c (revision 1f4bcc459a76b7aa664f3fd557684cd0ba6da352)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2015 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  * $FreeBSD$
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT2 */
37 
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
75 #endif
76 
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
88 
89 #define MPSSAS_DISCOVERY_TIMEOUT	20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
95 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Maps a SCSI CDB opcode (table index = cdb[0]) to the EEDP operation the
 * IOC should perform for that command: PRO_R (check/remove) on reads,
 * PRO_W (insert) on writes, PRO_V (insert) on verify-class commands.
 * A zero entry means no EEDP processing for that opcode.
 */
static uint8_t op_code_prot[256] = {
	/* 0x00 - 0x0F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F: READ(10), WRITE(10), WRITE AND VERIFY(10), VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x30 - 0x3F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x4F: WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F: READ(16), WRITE(16), WRITE AND VERIFY(16), VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90 - 0x9F: WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xAF: READ(12), WRITE(12), WRITE AND VERIFY(12), VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xB0 - 0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xC0 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116 
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124     struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128     struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 #if __FreeBSD_version >= 900026
133 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 			       uint64_t sasaddr);
136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137 #endif //FreeBSD_version >= 900026
138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
173 void
174 mpssas_startup_increment(struct mpssas_softc *sassc)
175 {
176 	MPS_FUNCTRACE(sassc->sc);
177 
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INIT,
182 			    "%s freezing simq\n", __func__);
183 #if __FreeBSD_version >= 1000039
184 			xpt_hold_boot();
185 #endif
186 			xpt_freeze_simq(sassc->sim, 1);
187 		}
188 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 		    sassc->startup_refcount);
190 	}
191 }
192 
193 void
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195 {
196 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 		xpt_release_simq(sassc->sim, 1);
199 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200 	}
201 }
202 
203 void
204 mpssas_startup_decrement(struct mpssas_softc *sassc)
205 {
206 	MPS_FUNCTRACE(sassc->sc);
207 
208 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209 		if (--sassc->startup_refcount == 0) {
210 			/* finished all discovery-related actions, release
211 			 * the simq and rescan for the latest topology.
212 			 */
213 			mps_dprint(sassc->sc, MPS_INIT,
214 			    "%s releasing simq\n", __func__);
215 			sassc->flags &= ~MPSSAS_IN_STARTUP;
216 			xpt_release_simq(sassc->sim, 1);
217 #if __FreeBSD_version >= 1000039
218 			xpt_release_boot();
219 #else
220 			mpssas_rescan_target(sassc->sc, NULL);
221 #endif
222 		}
223 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224 		    sassc->startup_refcount);
225 	}
226 }
227 
228 /* The firmware requires us to stop sending commands when we're doing task
229  * management, so refcount the TMs and keep the simq frozen when any are in
230  * use.
231  */
/*
 * Allocate a command for task management use.  TMs must come from the
 * high-priority pool; returns NULL if the pool is exhausted.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	return (mps_alloc_high_priority_command(sc));
}
240 
241 void
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
243 {
244 	if (tm == NULL)
245 		return;
246 
247 	/*
248 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
249 	 * free the resources used for freezing the devq.  Must clear the
250 	 * INRESET flag as well or scsi I/O will not work.
251 	 */
252 	if (tm->cm_targ != NULL) {
253 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
254 	}
255 	if (tm->cm_ccb) {
256 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
257 		    tm->cm_targ->tid);
258 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259 		xpt_free_path(tm->cm_ccb->ccb_h.path);
260 		xpt_free_ccb(tm->cm_ccb);
261 	}
262 
263 	mps_free_high_priority_command(sc, tm);
264 }
265 
266 void
267 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
268 {
269 	struct mpssas_softc *sassc = sc->sassc;
270 	path_id_t pathid;
271 	target_id_t targetid;
272 	union ccb *ccb;
273 
274 	MPS_FUNCTRACE(sc);
275 	pathid = cam_sim_path(sassc->sim);
276 	if (targ == NULL)
277 		targetid = CAM_TARGET_WILDCARD;
278 	else
279 		targetid = targ - sassc->targets;
280 
281 	/*
282 	 * Allocate a CCB and schedule a rescan.
283 	 */
284 	ccb = xpt_alloc_ccb_nowait();
285 	if (ccb == NULL) {
286 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
287 		return;
288 	}
289 
290 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
291 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
293 		xpt_free_ccb(ccb);
294 		return;
295 	}
296 
297 	if (targetid == CAM_TARGET_WILDCARD)
298 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
299 	else
300 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
301 
302 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
303 	xpt_rescan(ccb);
304 }
305 
/*
 * Format and emit a debug message for a command, prefixed with as much
 * addressing context as is available: the CCB's CAM path (plus the CDB
 * and transfer length for SCSI I/O), or a synthesized sim/target/lun
 * tuple when no CCB is attached, followed by the command's SMID.
 * Output is suppressed unless 'level' is enabled in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Build the message in a fixed on-stack buffer; excess is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefer the CCB's own path for the message prefix. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a "noperiph" prefix from the sim. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
351 
352 
353 static void
354 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
355 {
356 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
357 	struct mpssas_target *targ;
358 	uint16_t handle;
359 
360 	MPS_FUNCTRACE(sc);
361 
362 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
363 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
364 	targ = tm->cm_targ;
365 
366 	if (reply == NULL) {
367 		/* XXX retry the remove after the diag reset completes? */
368 		mps_dprint(sc, MPS_FAULT,
369 		    "%s NULL reply resetting device 0x%04x\n", __func__,
370 		    handle);
371 		mpssas_free_tm(sc, tm);
372 		return;
373 	}
374 
375 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
376 		mps_dprint(sc, MPS_FAULT,
377 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
378 		   reply->IOCStatus, handle);
379 		mpssas_free_tm(sc, tm);
380 		return;
381 	}
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "Reset aborted %u commands\n", reply->TerminationCount);
385 	mps_free_reply(sc, tm->cm_reply_data);
386 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
387 
388 	mps_dprint(sc, MPS_XINFO,
389 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
390 
391 	/*
392 	 * Don't clear target if remove fails because things will get confusing.
393 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
394 	 * this target id if possible, and so we can assign the same target id
395 	 * to this device if it comes back in the future.
396 	 */
397 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
398 		targ = tm->cm_targ;
399 		targ->handle = 0x0;
400 		targ->encl_handle = 0x0;
401 		targ->encl_slot = 0x0;
402 		targ->exp_dev_handle = 0x0;
403 		targ->phy_num = 0x0;
404 		targ->linkrate = 0x0;
405 		targ->devinfo = 0x0;
406 		targ->flags = 0x0;
407 	}
408 
409 	mpssas_free_tm(sc, tm);
410 }
411 
412 
413 /*
414  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
415  * Otherwise Volume Delete is same as Bare Drive Removal.
416  */
417 void
418 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
419 {
420 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
421 	struct mps_softc *sc;
422 	struct mps_command *cm;
423 	struct mpssas_target *targ = NULL;
424 
425 	MPS_FUNCTRACE(sassc->sc);
426 	sc = sassc->sc;
427 
428 #ifdef WD_SUPPORT
429 	/*
430 	 * If this is a WD controller, determine if the disk should be exposed
431 	 * to the OS or not.  If disk should be exposed, return from this
432 	 * function without doing anything.
433 	 */
434 	if (sc->WD_available && (sc->WD_hide_expose ==
435 	    MPS_WD_EXPOSE_ALWAYS)) {
436 		return;
437 	}
438 #endif //WD_SUPPORT
439 
440 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mps_dprint(sc, MPS_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
450 
451 	cm = mpssas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mps_dprint(sc, MPS_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mpssas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mpssas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mps_map_command(sc, cm);
480 }
481 
482 /*
483  * The MPT2 firmware performs debounce on the link to avoid transient link
484  * errors and false removals.  When it does decide that link has been lost
485  * and a device need to go away, it expects that the host will perform a
486  * target reset and then an op remove.  The reset has the side-effect of
487  * aborting any outstanding requests for the device, which is required for
488  * the op-remove to succeed.  It's not clear if the host should check for
489  * the device coming back alive after the reset.
490  */
/*
 * Begin removal of a bare device.  The firmware expects the host to do a
 * target reset (which aborts any outstanding I/O for the device) followed
 * by an op-remove; this function sends the reset and arranges for
 * mpssas_remove_device() to run on completion and issue the op-remove.
 *
 * 'handle' is the firmware device handle of the departing device.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TMs come from the high-priority pool; NULL means pool exhausted. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	/* Let CAM notice the device going away. */
	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM; request fields are little-endian. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* On completion, mpssas_remove_device() sends the op-remove. */
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}
544 
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  If the reset succeeded, the same command is
 * reused to send the SAS_IO_UNIT_CONTROL op-remove, and any commands
 * still queued on the target are completed with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before use. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	/* mpssas_remove_complete() clears the target state on success. */
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * The reset aborted everything in flight; finish any commands still
	 * queued on the target so CAM sees the device as gone.  Note 'tm' is
	 * reused here as the loop variable and no longer refers to the TM.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
619 
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL op-remove sent by
 * mpssas_remove_device().  On success, clear the target's discovery
 * state and free its LUN list; the devname/sasaddr are deliberately
 * preserved (see comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the per-LUN records accumulated for this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
686 
687 static int
688 mpssas_register_events(struct mps_softc *sc)
689 {
690 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
691 
692 	bzero(events, 16);
693 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_IR_VOLUME);
702 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
705 
706 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
707 	    &sc->sassc->mpssas_eh);
708 
709 	return (0);
710 }
711 
712 int
713 mps_attach_sas(struct mps_softc *sc)
714 {
715 	struct mpssas_softc *sassc;
716 	cam_status status;
717 	int unit, error = 0;
718 
719 	MPS_FUNCTRACE(sc);
720 
721 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
722 	if(!sassc) {
723 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
724 		__func__, __LINE__);
725 		return (ENOMEM);
726 	}
727 
728 	/*
729 	 * XXX MaxTargets could change during a reinit.  Since we don't
730 	 * resize the targets[] array during such an event, cache the value
731 	 * of MaxTargets here so that we don't get into trouble later.  This
732 	 * should move into the reinit logic.
733 	 */
734 	sassc->maxtargets = sc->facts->MaxTargets;
735 	sassc->targets = malloc(sizeof(struct mpssas_target) *
736 	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
737 	if(!sassc->targets) {
738 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
739 		__func__, __LINE__);
740 		free(sassc, M_MPT2);
741 		return (ENOMEM);
742 	}
743 	sc->sassc = sassc;
744 	sassc->sc = sc;
745 
746 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
747 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
748 		error = ENOMEM;
749 		goto out;
750 	}
751 
752 	unit = device_get_unit(sc->mps_dev);
753 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
754 	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
755 	if (sassc->sim == NULL) {
756 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
757 		error = EINVAL;
758 		goto out;
759 	}
760 
761 	TAILQ_INIT(&sassc->ev_queue);
762 
763 	/* Initialize taskqueue for Event Handling */
764 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
765 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
766 	    taskqueue_thread_enqueue, &sassc->ev_tq);
767 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
768 	    device_get_nameunit(sc->mps_dev));
769 
770 	mps_lock(sc);
771 
772 	/*
773 	 * XXX There should be a bus for every port on the adapter, but since
774 	 * we're just going to fake the topology for now, we'll pretend that
775 	 * everything is just a target on a single bus.
776 	 */
777 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
778 		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
779 		    error);
780 		mps_unlock(sc);
781 		goto out;
782 	}
783 
784 	/*
785 	 * Assume that discovery events will start right away.
786 	 *
787 	 * Hold off boot until discovery is complete.
788 	 */
789 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
790 	sc->sassc->startup_refcount = 0;
791 	mpssas_startup_increment(sassc);
792 
793 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
794 
795 	/*
796 	 * Register for async events so we can determine the EEDP
797 	 * capabilities of devices.
798 	 */
799 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
800 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
801 	    CAM_LUN_WILDCARD);
802 	if (status != CAM_REQ_CMP) {
803 		mps_printf(sc, "Error %#x creating sim path\n", status);
804 		sassc->path = NULL;
805 	} else {
806 		int event;
807 
808 #if (__FreeBSD_version >= 1000006) || \
809     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
810 		event = AC_ADVINFO_CHANGED;
811 #else
812 		event = AC_FOUND_DEVICE;
813 #endif
814 		status = xpt_register_async(event, mpssas_async, sc,
815 					    sassc->path);
816 		if (status != CAM_REQ_CMP) {
817 			mps_dprint(sc, MPS_ERROR,
818 			    "Error %#x registering async handler for "
819 			    "AC_ADVINFO_CHANGED events\n", status);
820 			xpt_free_path(sassc->path);
821 			sassc->path = NULL;
822 		}
823 	}
824 	if (status != CAM_REQ_CMP) {
825 		/*
826 		 * EEDP use is the exception, not the rule.
827 		 * Warn the user, but do not fail to attach.
828 		 */
829 		mps_printf(sc, "EEDP capabilities disabled.\n");
830 	}
831 
832 	mps_unlock(sc);
833 
834 	mpssas_register_events(sc);
835 out:
836 	if (error)
837 		mps_detach_sas(sc);
838 	return (error);
839 }
840 
/*
 * Tear down the SAS/CAM side of the controller in the reverse order of
 * mps_attach_sas(): deregister events, drain the taskqueue (unlocked to
 * avoid deadlock), remove the async handler, release/deregister the SIM,
 * and free the target table and softc.  Safe to call on a partially
 * attached instance; each resource is checked before release.
 * Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If startup never finished, the simq is still frozen; release it. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN records still attached to targets. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
901 
902 void
903 mpssas_discovery_end(struct mpssas_softc *sassc)
904 {
905 	struct mps_softc *sc = sassc->sc;
906 
907 	MPS_FUNCTRACE(sc);
908 
909 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
910 		callout_stop(&sassc->discovery_callout);
911 
912 }
913 
/*
 * Main CAM action entry point for the SIM.  Dispatches each CCB by
 * func_code: path inquiry and transfer-settings queries are answered
 * inline; SCSI I/O, SMP I/O, and device reset are handed to dedicated
 * handlers (which complete the CCB themselves and return early);
 * everything else is completed here via xpt_done().
 * Called with the mps mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Describe the HBA's capabilities and topology to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Claim the highest target id for the initiator itself. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report the negotiated link rate for the addressed target. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device currently at this target id. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link-rate code to kbit/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		/* Handler completes the CCB itself; do not fall through. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Handler completes the CCB itself; do not fall through. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Handler completes the CCB itself; do not fall through. */
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1038 
1039 static void
1040 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1041     target_id_t target_id, lun_id_t lun_id)
1042 {
1043 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1044 	struct cam_path *path;
1045 
1046 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1047 	    ac_code, target_id, (uintmax_t)lun_id);
1048 
1049 	if (xpt_create_path(&path, NULL,
1050 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1051 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1052 			   "notification\n");
1053 		return;
1054 	}
1055 
1056 	xpt_async(ac_code, path, NULL);
1057 	xpt_free_path(path);
1058 }
1059 
/*
 * Force-complete every active command after a diag reset.  The hardware
 * will never answer these requests, so each command slot is run through
 * its normal completion path with a NULL reply: polled commands are
 * flagged complete, completion callbacks are invoked, and sleepers are
 * woken.  Anything that was neither completed nor woken (and isn't
 * already free) is logged as leaked.  Called with the controller mutex
 * held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;	/* nonzero once this cm was handed to someone */

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): loop starts at 1 — slot 0 appears to be reserved
	 * and never handed out; confirm against the command allocator. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled waiters spin on MPS_CM_FLAGS_COMPLETE. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Sleeping submitters are blocked in mps_wait_command(). */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-I/O accounting from going negative. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1113 
1114 void
1115 mpssas_handle_reinit(struct mps_softc *sc)
1116 {
1117 	int i;
1118 
1119 	/* Go back into startup mode and freeze the simq, so that CAM
1120 	 * doesn't send any commands until after we've rediscovered all
1121 	 * targets and found the proper device handles for them.
1122 	 *
1123 	 * After the reset, portenable will trigger discovery, and after all
1124 	 * discovery-related activities have finished, the simq will be
1125 	 * released.
1126 	 */
1127 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1128 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1129 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1130 	mpssas_startup_increment(sc->sassc);
1131 
1132 	/* notify CAM of a bus reset */
1133 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1134 	    CAM_LUN_WILDCARD);
1135 
1136 	/* complete and cleanup after all outstanding commands */
1137 	mpssas_complete_all_commands(sc);
1138 
1139 	mps_dprint(sc, MPS_INIT,
1140 	    "%s startup %u after command completion\n", __func__,
1141 	    sc->sassc->startup_refcount);
1142 
1143 	/* zero all the target handles, since they may change after the
1144 	 * reset, and we have to rediscover all the targets and use the new
1145 	 * handles.
1146 	 */
1147 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1148 		if (sc->sassc->targets[i].outstanding != 0)
1149 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1150 			    i, sc->sassc->targets[i].outstanding);
1151 		sc->sassc->targets[i].handle = 0x0;
1152 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1153 		sc->sassc->targets[i].outstanding = 0;
1154 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1155 	}
1156 }
1157 
1158 static void
1159 mpssas_tm_timeout(void *data)
1160 {
1161 	struct mps_command *tm = data;
1162 	struct mps_softc *sc = tm->cm_sc;
1163 
1164 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1165 
1166 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1167 	    "task mgmt %p timed out\n", tm);
1168 	mps_reinit(sc);
1169 }
1170 
1171 static void
1172 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1173 {
1174 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1175 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1176 	unsigned int cm_count = 0;
1177 	struct mps_command *cm;
1178 	struct mpssas_target *targ;
1179 
1180 	callout_stop(&tm->cm_callout);
1181 
1182 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1183 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1184 	targ = tm->cm_targ;
1185 
1186 	/*
1187 	 * Currently there should be no way we can hit this case.  It only
1188 	 * happens when we have a failure to allocate chain frames, and
1189 	 * task management commands don't have S/G lists.
1190 	 * XXXSL So should it be an assertion?
1191 	 */
1192 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1193 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1194 			   "This should not happen!\n", __func__, tm->cm_flags);
1195 		mpssas_free_tm(sc, tm);
1196 		return;
1197 	}
1198 
1199 	if (reply == NULL) {
1200 		mpssas_log_command(tm, MPS_RECOVERY,
1201 		    "NULL reset reply for tm %p\n", tm);
1202 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1203 			/* this completion was due to a reset, just cleanup */
1204 			targ->tm = NULL;
1205 			mpssas_free_tm(sc, tm);
1206 		}
1207 		else {
1208 			/* we should have gotten a reply. */
1209 			mps_reinit(sc);
1210 		}
1211 		return;
1212 	}
1213 
1214 	mpssas_log_command(tm, MPS_RECOVERY,
1215 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1216 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1217 	    le32toh(reply->TerminationCount));
1218 
1219 	/* See if there are any outstanding commands for this LUN.
1220 	 * This could be made more efficient by using a per-LU data
1221 	 * structure of some sort.
1222 	 */
1223 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1224 		if (cm->cm_lun == tm->cm_lun)
1225 			cm_count++;
1226 	}
1227 
1228 	if (cm_count == 0) {
1229 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1230 		    "logical unit %u finished recovery after reset\n",
1231 		    tm->cm_lun, tm);
1232 
1233 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1234 		    tm->cm_lun);
1235 
1236 		/* we've finished recovery for this logical unit.  check and
1237 		 * see if some other logical unit has a timedout command
1238 		 * that needs to be processed.
1239 		 */
1240 		cm = TAILQ_FIRST(&targ->timedout_commands);
1241 		if (cm) {
1242 			mpssas_send_abort(sc, tm, cm);
1243 		}
1244 		else {
1245 			targ->tm = NULL;
1246 			mpssas_free_tm(sc, tm);
1247 		}
1248 	}
1249 	else {
1250 		/* if we still have commands for this LUN, the reset
1251 		 * effectively failed, regardless of the status reported.
1252 		 * Escalate to a target reset.
1253 		 */
1254 		mpssas_log_command(tm, MPS_RECOVERY,
1255 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1256 		    tm, cm_count);
1257 		mpssas_send_reset(sc, tm,
1258 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1259 	}
1260 }
1261 
1262 static void
1263 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1264 {
1265 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1266 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1267 	struct mpssas_target *targ;
1268 
1269 	callout_stop(&tm->cm_callout);
1270 
1271 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1272 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1273 	targ = tm->cm_targ;
1274 
1275 	/*
1276 	 * Currently there should be no way we can hit this case.  It only
1277 	 * happens when we have a failure to allocate chain frames, and
1278 	 * task management commands don't have S/G lists.
1279 	 */
1280 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1281 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1282 			   "This should not happen!\n", __func__, tm->cm_flags);
1283 		mpssas_free_tm(sc, tm);
1284 		return;
1285 	}
1286 
1287 	if (reply == NULL) {
1288 		mpssas_log_command(tm, MPS_RECOVERY,
1289 		    "NULL reset reply for tm %p\n", tm);
1290 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1291 			/* this completion was due to a reset, just cleanup */
1292 			targ->tm = NULL;
1293 			mpssas_free_tm(sc, tm);
1294 		}
1295 		else {
1296 			/* we should have gotten a reply. */
1297 			mps_reinit(sc);
1298 		}
1299 		return;
1300 	}
1301 
1302 	mpssas_log_command(tm, MPS_RECOVERY,
1303 	    "target reset status 0x%x code 0x%x count %u\n",
1304 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1305 	    le32toh(reply->TerminationCount));
1306 
1307 	if (targ->outstanding == 0) {
1308 		/* we've finished recovery for this target and all
1309 		 * of its logical units.
1310 		 */
1311 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1312 		    "recovery finished after target reset\n");
1313 
1314 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1315 		    CAM_LUN_WILDCARD);
1316 
1317 		targ->tm = NULL;
1318 		mpssas_free_tm(sc, tm);
1319 	}
1320 	else {
1321 		/* after a target reset, if this target still has
1322 		 * outstanding commands, the reset effectively failed,
1323 		 * regardless of the status reported.  escalate.
1324 		 */
1325 		mpssas_log_command(tm, MPS_RECOVERY,
1326 		    "target reset complete for tm %p, but still have %u command(s)\n",
1327 		    tm, targ->outstanding);
1328 		mps_reinit(sc);
1329 	}
1330 }
1331 
1332 #define MPS_RESET_TIMEOUT 30
1333 
1334 int
1335 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1336 {
1337 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1338 	struct mpssas_target *target;
1339 	int err;
1340 
1341 	target = tm->cm_targ;
1342 	if (target->handle == 0) {
1343 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1344 		    __func__, target->tid);
1345 		return -1;
1346 	}
1347 
1348 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1349 	req->DevHandle = htole16(target->handle);
1350 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1351 	req->TaskType = type;
1352 
1353 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1354 		/* XXX Need to handle invalid LUNs */
1355 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1356 		tm->cm_targ->logical_unit_resets++;
1357 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1358 		    "sending logical unit reset\n");
1359 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1360 		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1361 	}
1362 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1363 		/*
1364 		 * Target reset method =
1365 		 * 	SAS Hard Link Reset / SATA Link Reset
1366 		 */
1367 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1368 		tm->cm_targ->target_resets++;
1369 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1370 		    "sending target reset\n");
1371 		tm->cm_complete = mpssas_target_reset_complete;
1372 		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1373 	}
1374 	else {
1375 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1376 		return -1;
1377 	}
1378 
1379 	tm->cm_data = NULL;
1380 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1381 	tm->cm_complete_data = (void *)tm;
1382 
1383 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1384 	    mpssas_tm_timeout, tm);
1385 
1386 	err = mps_map_command(sc, tm);
1387 	if (err)
1388 		mpssas_log_command(tm, MPS_RECOVERY,
1389 		    "error %d sending reset type %u\n",
1390 		    err, type);
1391 
1392 	return err;
1393 }
1394 
1395 
/*
 * Completion handler for an ABORT TASK task management request.  Looks
 * at the head of the target's timed-out command list to decide what to
 * do next: nothing left means recovery is done; a different command at
 * the head means the abort landed and the next one is aborted; the same
 * command still at the head means the abort failed, and recovery
 * escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * An aborted command is removed from the timedout list by its
	 * completion path, so the head of the list tells us whether the
	 * abort we just sent actually took effect.
	 */
	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1477 
1478 #define MPS_ABORT_TIMEOUT 5
1479 
1480 static int
1481 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1482 {
1483 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1484 	struct mpssas_target *targ;
1485 	int err;
1486 
1487 	targ = cm->cm_targ;
1488 	if (targ->handle == 0) {
1489 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1490 		    __func__, cm->cm_ccb->ccb_h.target_id);
1491 		return -1;
1492 	}
1493 
1494 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1495 	    "Aborting command %p\n", cm);
1496 
1497 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1498 	req->DevHandle = htole16(targ->handle);
1499 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1500 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1501 
1502 	/* XXX Need to handle invalid LUNs */
1503 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1504 
1505 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1506 
1507 	tm->cm_data = NULL;
1508 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1509 	tm->cm_complete = mpssas_abort_complete;
1510 	tm->cm_complete_data = (void *)tm;
1511 	tm->cm_targ = cm->cm_targ;
1512 	tm->cm_lun = cm->cm_lun;
1513 
1514 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1515 	    mpssas_tm_timeout, tm);
1516 
1517 	targ->aborts++;
1518 
1519 	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1520 	    __func__, targ->tid);
1521 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1522 
1523 	err = mps_map_command(sc, tm);
1524 	if (err)
1525 		mpssas_log_command(tm, MPS_RECOVERY,
1526 		    "error %d sending abort for cm %p SMID %u\n",
1527 		    err, cm, req->TaskMID);
1528 	return err;
1529 }
1530 
1531 static void
1532 mpssas_scsiio_timeout(void *data)
1533 {
1534 	struct mps_softc *sc;
1535 	struct mps_command *cm;
1536 	struct mpssas_target *targ;
1537 
1538 	cm = (struct mps_command *)data;
1539 	sc = cm->cm_sc;
1540 
1541 	MPS_FUNCTRACE(sc);
1542 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1543 
1544 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1545 
1546 	/*
1547 	 * Run the interrupt handler to make sure it's not pending.  This
1548 	 * isn't perfect because the command could have already completed
1549 	 * and been re-used, though this is unlikely.
1550 	 */
1551 	mps_intr_locked(sc);
1552 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1553 		mpssas_log_command(cm, MPS_XINFO,
1554 		    "SCSI command %p almost timed out\n", cm);
1555 		return;
1556 	}
1557 
1558 	if (cm->cm_ccb == NULL) {
1559 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1560 		return;
1561 	}
1562 
1563 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1564 	    cm, cm->cm_ccb);
1565 
1566 	targ = cm->cm_targ;
1567 	targ->timeouts++;
1568 
1569 	/* XXX first, check the firmware state, to see if it's still
1570 	 * operational.  if not, do a diag reset.
1571 	 */
1572 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1573 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1574 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1575 
1576 	if (targ->tm != NULL) {
1577 		/* target already in recovery, just queue up another
1578 		 * timedout command to be processed later.
1579 		 */
1580 		mps_dprint(sc, MPS_RECOVERY,
1581 		    "queued timedout cm %p for processing by tm %p\n",
1582 		    cm, targ->tm);
1583 	}
1584 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1585 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1586 		    cm, targ->tm);
1587 
1588 		/* start recovery by aborting the first timedout command */
1589 		mpssas_send_abort(sc, targ->tm, cm);
1590 	}
1591 	else {
1592 		/* XXX queue this target up for recovery once a TM becomes
1593 		 * available.  The firmware only has a limited number of
1594 		 * HighPriority credits for the high priority requests used
1595 		 * for task management, and we ran out.
1596 		 *
1597 		 * Isilon: don't worry about this for now, since we have
1598 		 * more credits than disks in an enclosure, and limit
1599 		 * ourselves to one TM per target for recovery.
1600 		 */
1601 		mps_dprint(sc, MPS_RECOVERY,
1602 		    "timedout cm %p failed to allocate a tm\n", cm);
1603 	}
1604 
1605 }
1606 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a command
 * frame, translate the CCB into an MPI2 SCSI_IO request (direction,
 * tagging, LUN, CDB, optional EEDP protection fields), arm the per-I/O
 * timeout, and hand the command to the DMA mapping/submission path.
 * Completes the CCB immediately on any validation or allocation
 * failure.  Called with the controller mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means the target is not (or no longer) present. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not addressable for direct SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Refuse new I/O while the controller is being torn down. */
	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of command frames (or mid-diag-reset): freeze the simq and
	 * ask CAM to requeue the I/O later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to this value above —
	 * this second store looks redundant.  Harmless, but confirm. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* For data transfers, the CCB itself carries the buffer info. */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	/* NOTE(review): the condition below calls mpssas_direct_drive_io()
	 * when the retry marker IS set, which appears to contradict the
	 * comment above — confirm the intended sense of MPS_WD_RETRY. */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* Arm the per-I/O timeout; mpssas_scsiio_timeout() starts recovery. */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1875 
1876 static void
1877 mps_response_code(struct mps_softc *sc, u8 response_code)
1878 {
1879         char *desc;
1880 
1881         switch (response_code) {
1882         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1883                 desc = "task management request completed";
1884                 break;
1885         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1886                 desc = "invalid frame";
1887                 break;
1888         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1889                 desc = "task management request not supported";
1890                 break;
1891         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1892                 desc = "task management request failed";
1893                 break;
1894         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1895                 desc = "task management request succeeded";
1896                 break;
1897         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1898                 desc = "invalid lun";
1899                 break;
1900         case 0xA:
1901                 desc = "overlapped tag attempted";
1902                 break;
1903         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1904                 desc = "task queued, however not sent to target";
1905                 break;
1906         default:
1907                 desc = "unknown";
1908                 break;
1909         }
1910 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1911                 response_code, desc);
1912 }
1913 /**
1914  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1915  */
1916 static void
1917 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1918     Mpi2SCSIIOReply_t *mpi_reply)
1919 {
1920 	u32 response_info;
1921 	u8 *response_bytes;
1922 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1923 	    MPI2_IOCSTATUS_MASK;
1924 	u8 scsi_state = mpi_reply->SCSIState;
1925 	u8 scsi_status = mpi_reply->SCSIStatus;
1926 	char *desc_ioc_state = NULL;
1927 	char *desc_scsi_status = NULL;
1928 	char *desc_scsi_state = sc->tmp_string;
1929 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1930 
1931 	if (log_info == 0x31170000)
1932 		return;
1933 
1934 	switch (ioc_status) {
1935 	case MPI2_IOCSTATUS_SUCCESS:
1936 		desc_ioc_state = "success";
1937 		break;
1938 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1939 		desc_ioc_state = "invalid function";
1940 		break;
1941 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1942 		desc_ioc_state = "scsi recovered error";
1943 		break;
1944 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1945 		desc_ioc_state = "scsi invalid dev handle";
1946 		break;
1947 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1948 		desc_ioc_state = "scsi device not there";
1949 		break;
1950 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1951 		desc_ioc_state = "scsi data overrun";
1952 		break;
1953 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1954 		desc_ioc_state = "scsi data underrun";
1955 		break;
1956 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1957 		desc_ioc_state = "scsi io data error";
1958 		break;
1959 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1960 		desc_ioc_state = "scsi protocol error";
1961 		break;
1962 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1963 		desc_ioc_state = "scsi task terminated";
1964 		break;
1965 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1966 		desc_ioc_state = "scsi residual mismatch";
1967 		break;
1968 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1969 		desc_ioc_state = "scsi task mgmt failed";
1970 		break;
1971 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1972 		desc_ioc_state = "scsi ioc terminated";
1973 		break;
1974 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1975 		desc_ioc_state = "scsi ext terminated";
1976 		break;
1977 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1978 		desc_ioc_state = "eedp guard error";
1979 		break;
1980 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1981 		desc_ioc_state = "eedp ref tag error";
1982 		break;
1983 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1984 		desc_ioc_state = "eedp app tag error";
1985 		break;
1986 	default:
1987 		desc_ioc_state = "unknown";
1988 		break;
1989 	}
1990 
1991 	switch (scsi_status) {
1992 	case MPI2_SCSI_STATUS_GOOD:
1993 		desc_scsi_status = "good";
1994 		break;
1995 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1996 		desc_scsi_status = "check condition";
1997 		break;
1998 	case MPI2_SCSI_STATUS_CONDITION_MET:
1999 		desc_scsi_status = "condition met";
2000 		break;
2001 	case MPI2_SCSI_STATUS_BUSY:
2002 		desc_scsi_status = "busy";
2003 		break;
2004 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2005 		desc_scsi_status = "intermediate";
2006 		break;
2007 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2008 		desc_scsi_status = "intermediate condmet";
2009 		break;
2010 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2011 		desc_scsi_status = "reservation conflict";
2012 		break;
2013 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2014 		desc_scsi_status = "command terminated";
2015 		break;
2016 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2017 		desc_scsi_status = "task set full";
2018 		break;
2019 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2020 		desc_scsi_status = "aca active";
2021 		break;
2022 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2023 		desc_scsi_status = "task aborted";
2024 		break;
2025 	default:
2026 		desc_scsi_status = "unknown";
2027 		break;
2028 	}
2029 
2030 	desc_scsi_state[0] = '\0';
2031 	if (!scsi_state)
2032 		desc_scsi_state = " ";
2033 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2034 		strcat(desc_scsi_state, "response info ");
2035 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2036 		strcat(desc_scsi_state, "state terminated ");
2037 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2038 		strcat(desc_scsi_state, "no status ");
2039 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2040 		strcat(desc_scsi_state, "autosense failed ");
2041 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2042 		strcat(desc_scsi_state, "autosense valid ");
2043 
2044 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2045 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2046 	/* We can add more detail about underflow data here
2047 	 * TO-DO
2048 	 * */
2049 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2050 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2051 	    desc_scsi_state, scsi_state);
2052 
2053 	if (sc->mps_debug & MPS_XINFO &&
2054 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2055 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2056 		scsi_sense_print(csio);
2057 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2058 	}
2059 
2060 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2061 		response_info = le32toh(mpi_reply->ResponseInfo);
2062 		response_bytes = (u8 *)&response_info;
2063 		mps_response_code(sc,response_bytes[0]);
2064 	}
2065 }
2066 
/*
 * Completion handler for XPT_SCSI_IO commands.  Unmaps the data buffer,
 * accounts for the command on its target, translates the MPI2 reply
 * (IOCStatus/SCSIStatus/SCSIState) into CAM ccb status, copies autosense
 * data, handles Direct Drive I/O retries, and finishes the ccb with
 * xpt_done().  Called with the mps mutex held.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active list here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * If this command previously timed out, or completed while error
	 * recovery (a TM) was in flight on its target, log it at recovery
	 * level; a TIMEDOUT command must also leave the recovery queue.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply means the I/O succeeded. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the MPI2 IOCStatus into a CAM ccb status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy autosense data into the ccb, clamped to both the
		 * returned length and the caller's sense buffer size. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Allocation length from INQUIRY CDB bytes 3-4,
			 * reduced by whatever wasn't transferred. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		/* Freeze the dev queue so recovery keeps command order. */
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2458 
2459 /* All Request reached here are Endian safe */
2460 static void
2461 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2462     union ccb *ccb) {
2463 	pMpi2SCSIIORequest_t	pIO_req;
2464 	struct mps_softc	*sc = sassc->sc;
2465 	uint64_t		virtLBA;
2466 	uint32_t		physLBA, stripe_offset, stripe_unit;
2467 	uint32_t		io_size, column;
2468 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2469 
2470 	/*
2471 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2472 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2473 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2474 	 * bit different than the 10/16 CDBs, handle them separately.
2475 	 */
2476 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2477 	CDB = pIO_req->CDB.CDB32;
2478 
2479 	/*
2480 	 * Handle 6 byte CDBs.
2481 	 */
2482 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2483 	    (CDB[0] == WRITE_6))) {
2484 		/*
2485 		 * Get the transfer size in blocks.
2486 		 */
2487 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2488 
2489 		/*
2490 		 * Get virtual LBA given in the CDB.
2491 		 */
2492 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2493 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2494 
2495 		/*
2496 		 * Check that LBA range for I/O does not exceed volume's
2497 		 * MaxLBA.
2498 		 */
2499 		if ((virtLBA + (uint64_t)io_size - 1) <=
2500 		    sc->DD_max_lba) {
2501 			/*
2502 			 * Check if the I/O crosses a stripe boundary.  If not,
2503 			 * translate the virtual LBA to a physical LBA and set
2504 			 * the DevHandle for the PhysDisk to be used.  If it
2505 			 * does cross a boundry, do normal I/O.  To get the
2506 			 * right DevHandle to use, get the map number for the
2507 			 * column, then use that map number to look up the
2508 			 * DevHandle of the PhysDisk.
2509 			 */
2510 			stripe_offset = (uint32_t)virtLBA &
2511 			    (sc->DD_stripe_size - 1);
2512 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2513 				physLBA = (uint32_t)virtLBA >>
2514 				    sc->DD_stripe_exponent;
2515 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2516 				column = physLBA % sc->DD_num_phys_disks;
2517 				pIO_req->DevHandle =
2518 				    htole16(sc->DD_column_map[column].dev_handle);
2519 				/* ???? Is this endian safe*/
2520 				cm->cm_desc.SCSIIO.DevHandle =
2521 				    pIO_req->DevHandle;
2522 
2523 				physLBA = (stripe_unit <<
2524 				    sc->DD_stripe_exponent) + stripe_offset;
2525 				ptrLBA = &pIO_req->CDB.CDB32[1];
2526 				physLBA_byte = (uint8_t)(physLBA >> 16);
2527 				*ptrLBA = physLBA_byte;
2528 				ptrLBA = &pIO_req->CDB.CDB32[2];
2529 				physLBA_byte = (uint8_t)(physLBA >> 8);
2530 				*ptrLBA = physLBA_byte;
2531 				ptrLBA = &pIO_req->CDB.CDB32[3];
2532 				physLBA_byte = (uint8_t)physLBA;
2533 				*ptrLBA = physLBA_byte;
2534 
2535 				/*
2536 				 * Set flag that Direct Drive I/O is
2537 				 * being done.
2538 				 */
2539 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2540 			}
2541 		}
2542 		return;
2543 	}
2544 
2545 	/*
2546 	 * Handle 10, 12 or 16 byte CDBs.
2547 	 */
2548 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2549 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2550 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2551 	    (CDB[0] == WRITE_12))) {
2552 		/*
2553 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2554 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2555 		 * the else section.  10-byte and 12-byte CDB's are OK.
2556 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2557 		 * ready to accept 12byte CDB for Direct IOs.
2558 		 */
2559 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2560 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2561 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2562 			/*
2563 			 * Get the transfer size in blocks.
2564 			 */
2565 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2566 
2567 			/*
2568 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2569 			 * LBA in the CDB depending on command.
2570 			 */
2571 			lba_idx = ((CDB[0] == READ_12) ||
2572 				(CDB[0] == WRITE_12) ||
2573 				(CDB[0] == READ_10) ||
2574 				(CDB[0] == WRITE_10))? 2 : 6;
2575 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2576 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2577 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2578 			    (uint64_t)CDB[lba_idx + 3];
2579 
2580 			/*
2581 			 * Check that LBA range for I/O does not exceed volume's
2582 			 * MaxLBA.
2583 			 */
2584 			if ((virtLBA + (uint64_t)io_size - 1) <=
2585 			    sc->DD_max_lba) {
2586 				/*
2587 				 * Check if the I/O crosses a stripe boundary.
2588 				 * If not, translate the virtual LBA to a
2589 				 * physical LBA and set the DevHandle for the
2590 				 * PhysDisk to be used.  If it does cross a
2591 				 * boundry, do normal I/O.  To get the right
2592 				 * DevHandle to use, get the map number for the
2593 				 * column, then use that map number to look up
2594 				 * the DevHandle of the PhysDisk.
2595 				 */
2596 				stripe_offset = (uint32_t)virtLBA &
2597 				    (sc->DD_stripe_size - 1);
2598 				if ((stripe_offset + io_size) <=
2599 				    sc->DD_stripe_size) {
2600 					physLBA = (uint32_t)virtLBA >>
2601 					    sc->DD_stripe_exponent;
2602 					stripe_unit = physLBA /
2603 					    sc->DD_num_phys_disks;
2604 					column = physLBA %
2605 					    sc->DD_num_phys_disks;
2606 					pIO_req->DevHandle =
2607 					    htole16(sc->DD_column_map[column].
2608 					    dev_handle);
2609 					cm->cm_desc.SCSIIO.DevHandle =
2610 					    pIO_req->DevHandle;
2611 
2612 					physLBA = (stripe_unit <<
2613 					    sc->DD_stripe_exponent) +
2614 					    stripe_offset;
2615 					ptrLBA =
2616 					    &pIO_req->CDB.CDB32[lba_idx];
2617 					physLBA_byte = (uint8_t)(physLBA >> 24);
2618 					*ptrLBA = physLBA_byte;
2619 					ptrLBA =
2620 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2621 					physLBA_byte = (uint8_t)(physLBA >> 16);
2622 					*ptrLBA = physLBA_byte;
2623 					ptrLBA =
2624 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2625 					physLBA_byte = (uint8_t)(physLBA >> 8);
2626 					*ptrLBA = physLBA_byte;
2627 					ptrLBA =
2628 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2629 					physLBA_byte = (uint8_t)physLBA;
2630 					*ptrLBA = physLBA_byte;
2631 
2632 					/*
2633 					 * Set flag that Direct Drive I/O is
2634 					 * being done.
2635 					 */
2636 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2637 				}
2638 			}
2639 		} else {
2640 			/*
2641 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2642 			 * 0.  Get the transfer size in blocks.
2643 			 */
2644 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2645 
2646 			/*
2647 			 * Get virtual LBA.
2648 			 */
2649 			virtLBA = ((uint64_t)CDB[2] << 54) |
2650 			    ((uint64_t)CDB[3] << 48) |
2651 			    ((uint64_t)CDB[4] << 40) |
2652 			    ((uint64_t)CDB[5] << 32) |
2653 			    ((uint64_t)CDB[6] << 24) |
2654 			    ((uint64_t)CDB[7] << 16) |
2655 			    ((uint64_t)CDB[8] << 8) |
2656 			    (uint64_t)CDB[9];
2657 
2658 			/*
2659 			 * Check that LBA range for I/O does not exceed volume's
2660 			 * MaxLBA.
2661 			 */
2662 			if ((virtLBA + (uint64_t)io_size - 1) <=
2663 			    sc->DD_max_lba) {
2664 				/*
2665 				 * Check if the I/O crosses a stripe boundary.
2666 				 * If not, translate the virtual LBA to a
2667 				 * physical LBA and set the DevHandle for the
2668 				 * PhysDisk to be used.  If it does cross a
2669 				 * boundry, do normal I/O.  To get the right
2670 				 * DevHandle to use, get the map number for the
2671 				 * column, then use that map number to look up
2672 				 * the DevHandle of the PhysDisk.
2673 				 */
2674 				stripe_offset = (uint32_t)virtLBA &
2675 				    (sc->DD_stripe_size - 1);
2676 				if ((stripe_offset + io_size) <=
2677 				    sc->DD_stripe_size) {
2678 					physLBA = (uint32_t)(virtLBA >>
2679 					    sc->DD_stripe_exponent);
2680 					stripe_unit = physLBA /
2681 					    sc->DD_num_phys_disks;
2682 					column = physLBA %
2683 					    sc->DD_num_phys_disks;
2684 					pIO_req->DevHandle =
2685 					    htole16(sc->DD_column_map[column].
2686 					    dev_handle);
2687 					cm->cm_desc.SCSIIO.DevHandle =
2688 					    pIO_req->DevHandle;
2689 
2690 					physLBA = (stripe_unit <<
2691 					    sc->DD_stripe_exponent) +
2692 					    stripe_offset;
2693 
2694 					/*
2695 					 * Set upper 4 bytes of LBA to 0.  We
2696 					 * assume that the phys disks are less
2697 					 * than 2 TB's in size.  Then, set the
2698 					 * lower 4 bytes.
2699 					 */
2700 					pIO_req->CDB.CDB32[2] = 0;
2701 					pIO_req->CDB.CDB32[3] = 0;
2702 					pIO_req->CDB.CDB32[4] = 0;
2703 					pIO_req->CDB.CDB32[5] = 0;
2704 					ptrLBA = &pIO_req->CDB.CDB32[6];
2705 					physLBA_byte = (uint8_t)(physLBA >> 24);
2706 					*ptrLBA = physLBA_byte;
2707 					ptrLBA = &pIO_req->CDB.CDB32[7];
2708 					physLBA_byte = (uint8_t)(physLBA >> 16);
2709 					*ptrLBA = physLBA_byte;
2710 					ptrLBA = &pIO_req->CDB.CDB32[8];
2711 					physLBA_byte = (uint8_t)(physLBA >> 8);
2712 					*ptrLBA = physLBA_byte;
2713 					ptrLBA = &pIO_req->CDB.CDB32[9];
2714 					physLBA_byte = (uint8_t)physLBA;
2715 					*ptrLBA = physLBA_byte;
2716 
2717 					/*
2718 					 * Set flag that Direct Drive I/O is
2719 					 * being done.
2720 					 */
2721 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2722 				}
2723 			}
2724 		}
2725 	}
2726 }
2727 
2728 #if __FreeBSD_version >= 900026
2729 static void
2730 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2731 {
2732 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2733 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2734 	uint64_t sasaddr;
2735 	union ccb *ccb;
2736 
2737 	ccb = cm->cm_complete_data;
2738 
2739 	/*
2740 	 * Currently there should be no way we can hit this case.  It only
2741 	 * happens when we have a failure to allocate chain frames, and SMP
2742 	 * commands require two S/G elements only.  That should be handled
2743 	 * in the standard request size.
2744 	 */
2745 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2746 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2747 			   __func__, cm->cm_flags);
2748 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2749 		goto bailout;
2750         }
2751 
2752 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2753 	if (rpl == NULL) {
2754 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2755 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2756 		goto bailout;
2757 	}
2758 
2759 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2760 	sasaddr = le32toh(req->SASAddress.Low);
2761 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2762 
2763 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2764 	    MPI2_IOCSTATUS_SUCCESS ||
2765 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2766 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2767 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2768 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2769 		goto bailout;
2770 	}
2771 
2772 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2773 		   "%#jx completed successfully\n", __func__,
2774 		   (uintmax_t)sasaddr);
2775 
2776 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2777 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2778 	else
2779 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2780 
2781 bailout:
2782 	/*
2783 	 * We sync in both directions because we had DMAs in the S/G list
2784 	 * in both directions.
2785 	 */
2786 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2787 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2788 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2789 	mps_free_command(sc, cm);
2790 	xpt_done(ccb);
2791 }
2792 
/*
 * Build and send a single SMP passthrough request to the device at
 * 'sasaddr', using the request/response buffers supplied in the CCB.
 * Completion is asynchronous, via mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;	/* KVAs of the SMP request/response */
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outbound request, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2963 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (the target itself if it contains an embedded SMP
 * target, otherwise its parent expander) and hand the CCB off to
 * mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* The parent's devinfo/sasaddr are cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	/* Out of options: no address to route the SMP request to. */
	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3099 #endif //__FreeBSD_version >= 900026
3100 
3101 static void
3102 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3103 {
3104 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3105 	struct mps_softc *sc;
3106 	struct mps_command *tm;
3107 	struct mpssas_target *targ;
3108 
3109 	MPS_FUNCTRACE(sassc->sc);
3110 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3111 
3112 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3113 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3114 	     ccb->ccb_h.target_id));
3115 	sc = sassc->sc;
3116 	tm = mps_alloc_command(sc);
3117 	if (tm == NULL) {
3118 		mps_dprint(sc, MPS_ERROR,
3119 		    "command alloc failure in mpssas_action_resetdev\n");
3120 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3121 		xpt_done(ccb);
3122 		return;
3123 	}
3124 
3125 	targ = &sassc->targets[ccb->ccb_h.target_id];
3126 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3127 	req->DevHandle = htole16(targ->handle);
3128 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3129 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3130 
3131 	/* SAS Hard Link Reset / SATA Link Reset */
3132 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3133 
3134 	tm->cm_data = NULL;
3135 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3136 	tm->cm_complete = mpssas_resetdev_complete;
3137 	tm->cm_complete_data = ccb;
3138 	tm->cm_targ = targ;
3139 	targ->flags |= MPSSAS_TARGET_INRESET;
3140 
3141 	mps_map_command(sc, tm);
3142 }
3143 
3144 static void
3145 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3146 {
3147 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3148 	union ccb *ccb;
3149 
3150 	MPS_FUNCTRACE(sc);
3151 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3152 
3153 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3154 	ccb = tm->cm_complete_data;
3155 
3156 	/*
3157 	 * Currently there should be no way we can hit this case.  It only
3158 	 * happens when we have a failure to allocate chain frames, and
3159 	 * task management commands don't have S/G lists.
3160 	 */
3161 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3162 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3163 
3164 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3165 
3166 		mps_dprint(sc, MPS_ERROR,
3167 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3168 			   "This should not happen!\n", __func__, tm->cm_flags,
3169 			   req->DevHandle);
3170 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3171 		goto bailout;
3172 	}
3173 
3174 	mps_dprint(sc, MPS_XINFO,
3175 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3176 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3177 
3178 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3179 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3180 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3181 		    CAM_LUN_WILDCARD);
3182 	}
3183 	else
3184 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3185 
3186 bailout:
3187 
3188 	mpssas_free_tm(sc, tm);
3189 	xpt_done(ccb);
3190 }
3191 
3192 static void
3193 mpssas_poll(struct cam_sim *sim)
3194 {
3195 	struct mpssas_softc *sassc;
3196 
3197 	sassc = cam_sim_softc(sim);
3198 
3199 	if (sassc->sc->mps_debug & MPS_TRACE) {
3200 		/* frequent debug messages during a panic just slow
3201 		 * everything down too much.
3202 		 */
3203 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3204 		sassc->sc->mps_debug &= ~MPS_TRACE;
3205 	}
3206 
3207 	mps_intr_locked(sassc->sc);
3208 }
3209 
/*
 * CAM async event callback.  On newer kernels, watch AC_ADVINFO_CHANGED
 * for READ CAPACITY(16) data changes and cache each LUN's EEDP
 * (protection information) state.  On older kernels, probe for EEDP
 * when a device is found instead (AC_FOUND_DEVICE).
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this event is for; create it if unknown. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data with a
		 * XPT_DEV_ADVINFO CCB instead of sending a new SCSI
		 * command to the device.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether this LUN is formatted for EEDP. */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Probe the new device for EEDP support directly. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3314 
3315 #if (__FreeBSD_version < 901503) || \
3316     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3317 static void
3318 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3319 		  struct ccb_getdev *cgd)
3320 {
3321 	struct mpssas_softc *sassc = sc->sassc;
3322 	struct ccb_scsiio *csio;
3323 	struct scsi_read_capacity_16 *scsi_cmd;
3324 	struct scsi_read_capacity_eedp *rcap_buf;
3325 	path_id_t pathid;
3326 	target_id_t targetid;
3327 	lun_id_t lunid;
3328 	union ccb *ccb;
3329 	struct cam_path *local_path;
3330 	struct mpssas_target *target;
3331 	struct mpssas_lun *lun;
3332 	uint8_t	found_lun;
3333 	char path_str[64];
3334 
3335 	sassc = sc->sassc;
3336 	pathid = cam_sim_path(sassc->sim);
3337 	targetid = xpt_path_target_id(path);
3338 	lunid = xpt_path_lun_id(path);
3339 
3340 	KASSERT(targetid < sassc->maxtargets,
3341 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3342 	     targetid));
3343 	target = &sassc->targets[targetid];
3344 	if (target->handle == 0x0)
3345 		return;
3346 
3347 	/*
3348 	 * Determine if the device is EEDP capable.
3349 	 *
3350 	 * If this flag is set in the inquiry data,
3351 	 * the device supports protection information,
3352 	 * and must support the 16 byte read
3353 	 * capacity command, otherwise continue without
3354 	 * sending read cap 16
3355 	 */
3356 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3357 		return;
3358 
3359 	/*
3360 	 * Issue a READ CAPACITY 16 command.  This info
3361 	 * is used to determine if the LUN is formatted
3362 	 * for EEDP support.
3363 	 */
3364 	ccb = xpt_alloc_ccb_nowait();
3365 	if (ccb == NULL) {
3366 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3367 		    "for EEDP support.\n");
3368 		return;
3369 	}
3370 
3371 	if (xpt_create_path(&local_path, xpt_periph,
3372 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3373 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3374 		    "path for EEDP support\n");
3375 		xpt_free_ccb(ccb);
3376 		return;
3377 	}
3378 
3379 	/*
3380 	 * If LUN is already in list, don't create a new
3381 	 * one.
3382 	 */
3383 	found_lun = FALSE;
3384 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3385 		if (lun->lun_id == lunid) {
3386 			found_lun = TRUE;
3387 			break;
3388 		}
3389 	}
3390 	if (!found_lun) {
3391 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3392 		    M_NOWAIT | M_ZERO);
3393 		if (lun == NULL) {
3394 			mps_dprint(sc, MPS_ERROR,
3395 			    "Unable to alloc LUN for EEDP support.\n");
3396 			xpt_free_path(local_path);
3397 			xpt_free_ccb(ccb);
3398 			return;
3399 		}
3400 		lun->lun_id = lunid;
3401 		SLIST_INSERT_HEAD(&target->luns, lun,
3402 		    lun_link);
3403 	}
3404 
3405 	xpt_path_string(local_path, path_str, sizeof(path_str));
3406 
3407 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3408 	    path_str, target->handle);
3409 
3410 	/*
3411 	 * Issue a READ CAPACITY 16 command for the LUN.
3412 	 * The mpssas_read_cap_done function will load
3413 	 * the read cap info into the LUN struct.
3414 	 */
3415 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3416 	    M_MPT2, M_NOWAIT | M_ZERO);
3417 	if (rcap_buf == NULL) {
3418 		mps_dprint(sc, MPS_FAULT,
3419 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3420 		xpt_free_path(ccb->ccb_h.path);
3421 		xpt_free_ccb(ccb);
3422 		return;
3423 	}
3424 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3425 	csio = &ccb->csio;
3426 	csio->ccb_h.func_code = XPT_SCSI_IO;
3427 	csio->ccb_h.flags = CAM_DIR_IN;
3428 	csio->ccb_h.retry_count = 4;
3429 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3430 	csio->ccb_h.timeout = 60000;
3431 	csio->data_ptr = (uint8_t *)rcap_buf;
3432 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3433 	csio->sense_len = MPS_SENSE_LEN;
3434 	csio->cdb_len = sizeof(*scsi_cmd);
3435 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3436 
3437 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3438 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3439 	scsi_cmd->opcode = 0x9E;
3440 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3441 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3442 
3443 	ccb->ccb_h.ppriv_ptr1 = sassc;
3444 	xpt_action(ccb);
3445 }
3446 
/*
 * Completion handler for the internal READ CAPACITY(16) command sent by
 * mpssas_check_eedp().  Records the LUN's EEDP state and frees the
 * resources (data buffer, path, CCB) allocated by the sender.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq here because this SCSI
	 * command was generated internally by the driver, so its
	 * completion never goes back to a cam_periph.  This is currently
	 * the only place the driver issues a SCSI command internally; if
	 * more are added in the future, each of them will need to release
	 * the devq the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is the P_TYPE/PROT_EN flag. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path; free everything we allocated. */
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3517 #endif /* (__FreeBSD_version < 901503) || \
3518           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3519 
3520 void
3521 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3522     struct mpssas_target *target, lun_id_t lun_id)
3523 {
3524 	union ccb *ccb;
3525 	path_id_t path_id;
3526 
3527 	/*
3528 	 * Set the INRESET flag for this target so that no I/O will be sent to
3529 	 * the target until the reset has completed.  If an I/O request does
3530 	 * happen, the devq will be frozen.  The CCB holds the path which is
3531 	 * used to release the devq.  The devq is released and the CCB is freed
3532 	 * when the TM completes.
3533 	 */
3534 	ccb = xpt_alloc_ccb_nowait();
3535 	if (ccb) {
3536 		path_id = cam_sim_path(sc->sassc->sim);
3537 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3538 		    target->tid, lun_id) != CAM_REQ_CMP) {
3539 			xpt_free_ccb(ccb);
3540 		} else {
3541 			tm->cm_ccb = ccb;
3542 			tm->cm_targ = target;
3543 			target->flags |= MPSSAS_TARGET_INRESET;
3544 		}
3545 	}
3546 }
3547 
3548 int
3549 mpssas_startup(struct mps_softc *sc)
3550 {
3551 
3552 	/*
3553 	 * Send the port enable message and set the wait_for_port_enable flag.
3554 	 * This flag helps to keep the simq frozen until all discovery events
3555 	 * are processed.
3556 	 */
3557 	sc->wait_for_port_enable = 1;
3558 	mpssas_send_portenable(sc);
3559 	return (0);
3560 }
3561 
3562 static int
3563 mpssas_send_portenable(struct mps_softc *sc)
3564 {
3565 	MPI2_PORT_ENABLE_REQUEST *request;
3566 	struct mps_command *cm;
3567 
3568 	MPS_FUNCTRACE(sc);
3569 
3570 	if ((cm = mps_alloc_command(sc)) == NULL)
3571 		return (EBUSY);
3572 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3573 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3574 	request->MsgFlags = 0;
3575 	request->VP_ID = 0;
3576 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3577 	cm->cm_complete = mpssas_portenable_complete;
3578 	cm->cm_data = NULL;
3579 	cm->cm_sge = NULL;
3580 
3581 	mps_map_command(sc, cm);
3582 	mps_dprint(sc, MPS_XINFO,
3583 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3584 	    cm, cm->cm_req, cm->cm_complete);
3585 	return (0);
3586 }
3587 
3588 static void
3589 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3590 {
3591 	MPI2_PORT_ENABLE_REPLY *reply;
3592 	struct mpssas_softc *sassc;
3593 
3594 	MPS_FUNCTRACE(sc);
3595 	sassc = sc->sassc;
3596 
3597 	/*
3598 	 * Currently there should be no way we can hit this case.  It only
3599 	 * happens when we have a failure to allocate chain frames, and
3600 	 * port enable commands don't have S/G lists.
3601 	 */
3602 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3603 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3604 			   "This should not happen!\n", __func__, cm->cm_flags);
3605 	}
3606 
3607 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3608 	if (reply == NULL)
3609 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3610 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3611 	    MPI2_IOCSTATUS_SUCCESS)
3612 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3613 
3614 	mps_free_command(sc, cm);
3615 	if (sc->mps_ich.ich_arg != NULL) {
3616 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3617 		config_intrhook_disestablish(&sc->mps_ich);
3618 		sc->mps_ich.ich_arg = NULL;
3619 	}
3620 
3621 	/*
3622 	 * Get WarpDrive info after discovery is complete but before the scan
3623 	 * starts.  At this point, all devices are ready to be exposed to the
3624 	 * OS.  If devices should be hidden instead, take them out of the
3625 	 * 'targets' array before the scan.  The devinfo for a disk will have
3626 	 * some info and a volume's will be 0.  Use that to remove disks.
3627 	 */
3628 	mps_wd_config_pages(sc);
3629 
3630 	/*
3631 	 * Done waiting for port enable to complete.  Decrement the refcount.
3632 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3633 	 * take place.  Since the simq was explicitly frozen before port
3634 	 * enable, it must be explicitly released here to keep the
3635 	 * freeze/release count in sync.
3636 	 */
3637 	sc->wait_for_port_enable = 0;
3638 	sc->port_enable_complete = 1;
3639 	wakeup(&sc->port_enable_complete);
3640 	mpssas_startup_decrement(sassc);
3641 }
3642 
3643 int
3644 mpssas_check_id(struct mpssas_softc *sassc, int id)
3645 {
3646 	struct mps_softc *sc = sassc->sc;
3647 	char *ids;
3648 	char *name;
3649 
3650 	ids = &sc->exclude_ids[0];
3651 	while((name = strsep(&ids, ",")) != NULL) {
3652 		if (name[0] == '\0')
3653 			continue;
3654 		if (strtol(name, NULL, 0) == (long)id)
3655 			return (1);
3656 	}
3657 
3658 	return (0);
3659 }
3660 
3661 void
3662 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3663 {
3664 	struct mpssas_softc *sassc;
3665 	struct mpssas_lun *lun, *lun_tmp;
3666 	struct mpssas_target *targ;
3667 	int i;
3668 
3669 	sassc = sc->sassc;
3670 	/*
3671 	 * The number of targets is based on IOC Facts, so free all of
3672 	 * the allocated LUNs for each target and then the target buffer
3673 	 * itself.
3674 	 */
3675 	for (i=0; i< maxtargets; i++) {
3676 		targ = &sassc->targets[i];
3677 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3678 			free(lun, M_MPT2);
3679 		}
3680 	}
3681 	free(sassc->targets, M_MPT2);
3682 
3683 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3684 	    M_MPT2, M_WAITOK|M_ZERO);
3685 	if (!sassc->targets) {
3686 		panic("%s failed to alloc targets with error %d\n",
3687 		    __func__, ENOMEM);
3688 	}
3689 }
3690