xref: /freebsd/sys/dev/mps/mps_sas.c (revision 4c1a82cea504df7a79f5bd8f7d0a41cacccff16e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  *
32  * $FreeBSD$
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /* Communications core for Avago Technologies (LSI) MPT2 */
39 
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/bio.h>
50 #include <sys/malloc.h>
51 #include <sys/uio.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 #include <sys/sbuf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/rman.h>
62 
63 #include <machine/stdarg.h>
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #if __FreeBSD_version >= 900026
76 #include <cam/scsi/smp_all.h>
77 #endif
78 
79 #include <dev/mps/mpi/mpi2_type.h>
80 #include <dev/mps/mpi/mpi2.h>
81 #include <dev/mps/mpi/mpi2_ioc.h>
82 #include <dev/mps/mpi/mpi2_sas.h>
83 #include <dev/mps/mpi/mpi2_cnfg.h>
84 #include <dev/mps/mpi/mpi2_init.h>
85 #include <dev/mps/mpi/mpi2_tool.h>
86 #include <dev/mps/mps_ioctl.h>
87 #include <dev/mps/mpsvar.h>
88 #include <dev/mps/mps_table.h>
89 #include <dev/mps/mps_sas.h>
90 
91 #define MPSSAS_DISCOVERY_TIMEOUT	20
92 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93 
94 /*
95  * static array to check SCSI OpCode for EEDP protection bits
96  */
97 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
117 };
118 
119 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
120 
121 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mpssas_poll(struct cam_sim *sim);
125 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126     struct mps_command *cm);
127 static void mpssas_scsiio_timeout(void *data);
128 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130     struct mps_command *cm, union ccb *ccb);
131 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134 #if __FreeBSD_version >= 900026
135 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
137 			       uint64_t sasaddr);
138 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139 #endif //FreeBSD_version >= 900026
140 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141 static void mpssas_async(void *callback_arg, uint32_t code,
142 			 struct cam_path *path, void *arg);
143 #if (__FreeBSD_version < 901503) || \
144     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146 			      struct ccb_getdev *cgd);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
148 #endif
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151     struct mps_command *cm);
152 
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
155 {
156 	struct mpssas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/* The refcount is only meaningful while attach/discovery runs. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			/* On newer CAM, also delay the boot process until
			 * discovery completes; released in
			 * mpssas_startup_decrement(). */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
194 
195 void
196 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
197 {
198 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200 		xpt_release_simq(sassc->sim, 1);
201 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
202 	}
203 }
204 
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/* The refcount is only meaningful while attach/discovery runs. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			/* Pairs with the xpt_hold_boot() done when the
			 * refcount first went non-zero. */
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
229 
230 /*
231  * The firmware requires us to stop sending commands when we're doing task
232  * management.
233  * XXX The logic for serializing the device has been made lazy and moved to
234  * mpssas_prepare_for_tm().
235  */
236 struct mps_command *
237 mpssas_alloc_tm(struct mps_softc *sc)
238 {
239 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
240 	struct mps_command *tm;
241 
242 	tm = mps_alloc_high_priority_command(sc);
243 	if (tm == NULL)
244 		return (NULL);
245 
246 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
247 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
248 	return tm;
249 }
250 
251 void
252 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
253 {
254 	int target_id = 0xFFFFFFFF;
255 
256 	if (tm == NULL)
257 		return;
258 
259 	/*
260 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
261 	 * free the resources used for freezing the devq.  Must clear the
262 	 * INRESET flag as well or scsi I/O will not work.
263 	 */
264 	if (tm->cm_targ != NULL) {
265 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
266 		target_id = tm->cm_targ->tid;
267 	}
268 	if (tm->cm_ccb) {
269 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
270 		    target_id);
271 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
272 		xpt_free_path(tm->cm_ccb->ccb_h.path);
273 		xpt_free_ccb(tm->cm_ccb);
274 	}
275 
276 	mps_free_high_priority_command(sc, tm);
277 }
278 
279 void
280 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
281 {
282 	struct mpssas_softc *sassc = sc->sassc;
283 	path_id_t pathid;
284 	target_id_t targetid;
285 	union ccb *ccb;
286 
287 	MPS_FUNCTRACE(sc);
288 	pathid = cam_sim_path(sassc->sim);
289 	if (targ == NULL)
290 		targetid = CAM_TARGET_WILDCARD;
291 	else
292 		targetid = targ - sassc->targets;
293 
294 	/*
295 	 * Allocate a CCB and schedule a rescan.
296 	 */
297 	ccb = xpt_alloc_ccb_nowait();
298 	if (ccb == NULL) {
299 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
300 		return;
301 	}
302 
303 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
304 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
305 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
306 		xpt_free_ccb(ccb);
307 		return;
308 	}
309 
310 	if (targetid == CAM_TARGET_WILDCARD)
311 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
312 	else
313 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
314 
315 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
316 	xpt_rescan(ccb);
317 }
318 
/*
 * Log a printf-style message about a command, prefixed by the command's
 * CAM path (or SIM/target/lun identity when no CCB is attached) and its
 * SMID.  Does nothing unless 'level' is enabled in the adapter's debug
 * mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Build the message in the on-stack buffer 'str'. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CCB's CAM path; for SCSI I/O also log
		 * the CDB and the transfer length. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by SIM, target and lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
364 
365 
366 static void
367 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
368 {
369 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
370 	struct mpssas_target *targ;
371 	uint16_t handle;
372 
373 	MPS_FUNCTRACE(sc);
374 
375 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
376 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
377 	targ = tm->cm_targ;
378 
379 	if (reply == NULL) {
380 		/* XXX retry the remove after the diag reset completes? */
381 		mps_dprint(sc, MPS_FAULT,
382 		    "%s NULL reply resetting device 0x%04x\n", __func__,
383 		    handle);
384 		mpssas_free_tm(sc, tm);
385 		return;
386 	}
387 
388 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
389 	    MPI2_IOCSTATUS_SUCCESS) {
390 		mps_dprint(sc, MPS_ERROR,
391 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
392 		   le16toh(reply->IOCStatus), handle);
393 	}
394 
395 	mps_dprint(sc, MPS_XINFO,
396 	    "Reset aborted %u commands\n", reply->TerminationCount);
397 	mps_free_reply(sc, tm->cm_reply_data);
398 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
399 
400 	mps_dprint(sc, MPS_XINFO,
401 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
402 
403 	/*
404 	 * Don't clear target if remove fails because things will get confusing.
405 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
406 	 * this target id if possible, and so we can assign the same target id
407 	 * to this device if it comes back in the future.
408 	 */
409 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
410 	    MPI2_IOCSTATUS_SUCCESS) {
411 		targ = tm->cm_targ;
412 		targ->handle = 0x0;
413 		targ->encl_handle = 0x0;
414 		targ->encl_slot = 0x0;
415 		targ->exp_dev_handle = 0x0;
416 		targ->phy_num = 0x0;
417 		targ->linkrate = 0x0;
418 		targ->devinfo = 0x0;
419 		targ->flags = 0x0;
420 	}
421 
422 	mpssas_free_tm(sc, tm);
423 }
424 
425 
426 /*
427  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
428  * Otherwise Volume Delete is same as Bare Drive Removal.
429  */
430 void
431 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
432 {
433 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
434 	struct mps_softc *sc;
435 	struct mps_command *tm;
436 	struct mpssas_target *targ = NULL;
437 
438 	MPS_FUNCTRACE(sassc->sc);
439 	sc = sassc->sc;
440 
441 #ifdef WD_SUPPORT
442 	/*
443 	 * If this is a WD controller, determine if the disk should be exposed
444 	 * to the OS or not.  If disk should be exposed, return from this
445 	 * function without doing anything.
446 	 */
447 	if (sc->WD_available && (sc->WD_hide_expose ==
448 	    MPS_WD_EXPOSE_ALWAYS)) {
449 		return;
450 	}
451 #endif //WD_SUPPORT
452 
453 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
454 	if (targ == NULL) {
455 		/* FIXME: what is the action? */
456 		/* We don't know about this device? */
457 		mps_dprint(sc, MPS_ERROR,
458 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459 		return;
460 	}
461 
462 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
463 
464 	tm = mpssas_alloc_tm(sc);
465 	if (tm == NULL) {
466 		mps_dprint(sc, MPS_ERROR,
467 		    "%s: command alloc failure\n", __func__);
468 		return;
469 	}
470 
471 	mpssas_rescan_target(sc, targ);
472 
473 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
474 	req->DevHandle = targ->handle;
475 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
476 
477 	/* SAS Hard Link Reset / SATA Link Reset */
478 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
479 
480 	tm->cm_targ = targ;
481 	tm->cm_data = NULL;
482 	tm->cm_complete = mpssas_remove_volume;
483 	tm->cm_complete_data = (void *)(uintptr_t)handle;
484 
485 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
486 	    __func__, targ->tid);
487 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
488 
489 	mps_map_command(sc, tm);
490 }
491 
492 /*
493  * The MPT2 firmware performs debounce on the link to avoid transient link
494  * errors and false removals.  When it does decide that link has been lost
495  * and a device need to go away, it expects that the host will perform a
496  * target reset and then an op remove.  The reset has the side-effect of
497  * aborting any outstanding requests for the device, which is required for
498  * the op-remove to succeed.  It's not clear if the host should check for
499  * the device coming back alive after the reset.
500  */
501 void
502 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
503 {
504 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
505 	struct mps_softc *sc;
506 	struct mps_command *cm;
507 	struct mpssas_target *targ = NULL;
508 
509 	MPS_FUNCTRACE(sassc->sc);
510 
511 	sc = sassc->sc;
512 
513 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
514 	if (targ == NULL) {
515 		/* FIXME: what is the action? */
516 		/* We don't know about this device? */
517 		mps_dprint(sc, MPS_ERROR,
518 		    "%s : invalid handle 0x%x \n", __func__, handle);
519 		return;
520 	}
521 
522 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
523 
524 	cm = mpssas_alloc_tm(sc);
525 	if (cm == NULL) {
526 		mps_dprint(sc, MPS_ERROR,
527 		    "%s: command alloc failure\n", __func__);
528 		return;
529 	}
530 
531 	mpssas_rescan_target(sc, targ);
532 
533 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
534 	memset(req, 0, sizeof(*req));
535 	req->DevHandle = htole16(targ->handle);
536 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
537 
538 	/* SAS Hard Link Reset / SATA Link Reset */
539 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
540 
541 	cm->cm_targ = targ;
542 	cm->cm_data = NULL;
543 	cm->cm_complete = mpssas_remove_device;
544 	cm->cm_complete_data = (void *)(uintptr_t)handle;
545 
546 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
547 	    __func__, targ->tid);
548 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
549 
550 	mps_map_command(sc, cm);
551 }
552 
/*
 * Completion handler for the target reset sent by mpssas_prepare_remove().
 * Reuses the same command to issue MPI2_SAS_OP_REMOVE_DEVICE to the
 * firmware, then completes any commands still queued on the target with
 * CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* The reply frame is done with; release it before reusing 'tm'. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * NOTE: 'tm' is reused here as the iteration variable; each entry
	 * on the target's command list is a SCSI I/O that missed the
	 * reset, and is completed as CAM_DEV_NOT_THERE.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
625 
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE issued by
 * mpssas_remove_device().  On success, clears the target entry and
 * frees its LUN list so the slot can be reused.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the per-LUN records hung off this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
693 
694 static int
695 mpssas_register_events(struct mps_softc *sc)
696 {
697 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
698 
699 	bzero(events, 16);
700 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
701 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
702 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
704 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
705 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
706 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
707 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
708 	setbit(events, MPI2_EVENT_IR_VOLUME);
709 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
710 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
711 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
712 
713 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
714 	    &sc->sassc->mpssas_eh);
715 
716 	return (0);
717 }
718 
/*
 * Attach the SAS/CAM layer to the controller: allocate the softc and
 * target array, create the SIM and its queue, start the event
 * taskqueue, register the CAM bus, and arm the async handler used for
 * EEDP detection.  On any failure, mps_detach_sas() unwinds whatever
 * was set up.  Returns 0 on success or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	/* NOTE(review): M_WAITOK allocations do not return NULL, so this
	 * check (and the one below) is defensive only. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Cannot allocate SAS controller memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the SIM queue to the commands not reserved for
	 * high-priority or internal use. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Older CAM lacks AC_ADVINFO_CHANGED; fall back to
		 * AC_FOUND_DEVICE there. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	/* mps_detach_sas() tolerates a partially-initialized softc. */
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
852 
/*
 * Tear down the SAS/CAM layer: deregister events, drain the event
 * taskqueue, unwind any startup freeze, remove the async handler,
 * deregister the bus, and free the SIM, queue, LUN lists and softc.
 * Safe to call on a partially-attached softc (used by the attach error
 * path).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Drop any outstanding discovery refs so the simq is released. */
	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Startup never finished: release the freeze taken at attach. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the per-target LUN lists, then the target array itself. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
915 
916 void
917 mpssas_discovery_end(struct mpssas_softc *sassc)
918 {
919 	struct mps_softc *sc = sassc->sc;
920 
921 	MPS_FUNCTRACE(sc);
922 
923 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
924 		callout_stop(&sassc->discovery_callout);
925 
926 	/*
927 	 * After discovery has completed, check the mapping table for any
928 	 * missing devices and update their missing counts. Only do this once
929 	 * whenever the driver is initialized so that missing counts aren't
930 	 * updated unnecessarily. Note that just because discovery has
931 	 * completed doesn't mean that events have been processed yet. The
932 	 * check_devices function is a callout timer that checks if ALL devices
933 	 * are missing. If so, it will wait a little longer for events to
934 	 * complete and keep resetting itself until some device in the mapping
935 	 * table is not missing, meaning that event processing has started.
936 	 */
937 	if (sc->track_mapping_events) {
938 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
939 		    "completed. Check for missing devices in the mapping "
940 		    "table.\n");
941 		callout_reset(&sc->device_check_callout,
942 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
943 		    sc);
944 	}
945 }
946 
/*
 * CAM action entry point for this SIM.  Dispatches on the CCB function
 * code.  Most requests are completed synchronously and finished with
 * xpt_done() at the bottom; XPT_RESET_DEV, XPT_SCSI_IO and XPT_SMP_IO
 * return early because their handlers complete the CCB themselves.
 * Called with the softc mutex held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the capabilities and identity of this SIM to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Base speed is the SAS 1.0 minimum, 1.5 Gbps (in KB/s). */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device is present at this target. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware link-rate code into a bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not implemented; report success to CAM anyway. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1072 
1073 static void
1074 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1075     target_id_t target_id, lun_id_t lun_id)
1076 {
1077 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1078 	struct cam_path *path;
1079 
1080 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1081 	    ac_code, target_id, (uintmax_t)lun_id);
1082 
1083 	if (xpt_create_path(&path, NULL,
1084 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1085 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1086 			   "notification\n");
1087 		return;
1088 	}
1089 
1090 	xpt_async(ac_code, path, NULL);
1091 	xpt_free_path(path);
1092 }
1093 
/*
 * Force-complete every non-free command with a NULL reply.  Used during
 * a diag reset, when outstanding firmware requests will never complete
 * normally.  Each command is either finished through its cm_complete
 * callback or, for synchronous waiters, woken up via wakeup().  Called
 * with the softc mutex held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Buffers allocated for a SATA ID command that timed out are
		 * owned by the driver; release them here.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Polled commands are detected by their COMPLETE flag. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1149 
/*
 * Reinitialize the SAS layer after a controller diag reset: freeze new
 * I/O, announce a bus reset to CAM, flush all outstanding commands, and
 * invalidate every cached target handle so rediscovery repopulates them.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1193 
1194 static void
1195 mpssas_tm_timeout(void *data)
1196 {
1197 	struct mps_command *tm = data;
1198 	struct mps_softc *sc = tm->cm_sc;
1199 
1200 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1201 
1202 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1203 	    "task mgmt %p timed out\n", tm);
1204 
1205 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1206 	    ("command not inqueue\n"));
1207 
1208 	tm->cm_state = MPS_CM_STATE_BUSY;
1209 	mps_reinit(sc);
1210 }
1211 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management command.
 * If the LUN has no remaining outstanding commands, recovery for it is
 * finished (possibly chaining to abort processing for another LUN on the
 * same target); otherwise the reset is considered failed and recovery
 * escalates to a target reset.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed; cancel its mpssas_tm_timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a device reset was delivered to this LUN. */
		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			/* Reuse this TM frame for the next abort. */
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1311 
1312 static void
1313 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1314 {
1315 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1316 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1317 	struct mpssas_target *targ;
1318 
1319 	callout_stop(&tm->cm_callout);
1320 
1321 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1322 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1323 	targ = tm->cm_targ;
1324 
1325 	/*
1326 	 * Currently there should be no way we can hit this case.  It only
1327 	 * happens when we have a failure to allocate chain frames, and
1328 	 * task management commands don't have S/G lists.
1329 	 */
1330 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1331 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1332 			   "This should not happen!\n", __func__, tm->cm_flags);
1333 		mpssas_free_tm(sc, tm);
1334 		return;
1335 	}
1336 
1337 	if (reply == NULL) {
1338 		mps_dprint(sc, MPS_RECOVERY,
1339 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1340 		    tm, le16toh(req->TaskMID));
1341 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1342 			/* this completion was due to a reset, just cleanup */
1343 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1344 			    "reset, ignoring NULL target reset reply\n");
1345 			targ->tm = NULL;
1346 			mpssas_free_tm(sc, tm);
1347 		} else {
1348 			/* we should have gotten a reply. */
1349 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1350 			    "target reset attempt, resetting controller\n");
1351 			mps_reinit(sc);
1352 		}
1353 		return;
1354 	}
1355 
1356 	mps_dprint(sc, MPS_RECOVERY,
1357 	    "target reset status 0x%x code 0x%x count %u\n",
1358 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1359 	    le32toh(reply->TerminationCount));
1360 
1361 	if (targ->outstanding == 0) {
1362 		/* we've finished recovery for this target and all
1363 		 * of its logical units.
1364 		 */
1365 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1366 		    "Finished reset recovery for target %u\n", targ->tid);
1367 
1368 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1369 		    CAM_LUN_WILDCARD);
1370 
1371 		targ->tm = NULL;
1372 		mpssas_free_tm(sc, tm);
1373 	} else {
1374 		/*
1375 		 * After a target reset, if this target still has
1376 		 * outstanding commands, the reset effectively failed,
1377 		 * regardless of the status reported.  escalate.
1378 		 */
1379 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1380 		    "Target reset complete for target %u, but still have %u "
1381 		    "command(s), resetting controller\n", targ->tid,
1382 		    targ->outstanding);
1383 		mps_reinit(sc);
1384 	}
1385 }
1386 
1387 #define MPS_RESET_TIMEOUT 30
1388 
/*
 * Build and send a reset task management command of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated TM frame
 * 'tm'.  Arms a timeout callout that triggers a controller reinit if the
 * TM itself never completes.
 *
 * Returns 0 on successful submission, -1 on a bad handle or unknown
 * reset type, or the error from mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the device has gone away; nothing to reset. */
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM commands carry no data; completion routes back via 'tm'. */
	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* If the TM itself stalls, escalate to a controller reinit. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1446 
1447 
/*
 * Completion handler for an ABORT_TASK task management command.  Checks
 * the target's timedout-command list: if empty, recovery is done; if the
 * head has moved past the aborted SMID, continue aborting; otherwise the
 * abort failed and recovery escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel its mpssas_tm_timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1529 
1530 #define MPS_ABORT_TIMEOUT 5
1531 
1532 static int
1533 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1534 {
1535 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1536 	struct mpssas_target *targ;
1537 	int err;
1538 
1539 	targ = cm->cm_targ;
1540 	if (targ->handle == 0) {
1541 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1542 		    "%s null devhandle for target_id %d\n",
1543 		    __func__, cm->cm_ccb->ccb_h.target_id);
1544 		return -1;
1545 	}
1546 
1547 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1548 	    "Aborting command %p\n", cm);
1549 
1550 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1551 	req->DevHandle = htole16(targ->handle);
1552 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1553 
1554 	/* XXX Need to handle invalid LUNs */
1555 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1556 
1557 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1558 
1559 	tm->cm_data = NULL;
1560 	tm->cm_complete = mpssas_abort_complete;
1561 	tm->cm_complete_data = (void *)tm;
1562 	tm->cm_targ = cm->cm_targ;
1563 	tm->cm_lun = cm->cm_lun;
1564 
1565 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1566 	    mpssas_tm_timeout, tm);
1567 
1568 	targ->aborts++;
1569 
1570 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1571 
1572 	err = mps_map_command(sc, tm);
1573 	if (err)
1574 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1575 		    "error %d sending abort for cm %p SMID %u\n",
1576 		    err, cm, req->TaskMID);
1577 	return err;
1578 }
1579 
1580 static void
1581 mpssas_scsiio_timeout(void *data)
1582 {
1583 	sbintime_t elapsed, now;
1584 	union ccb *ccb;
1585 	struct mps_softc *sc;
1586 	struct mps_command *cm;
1587 	struct mpssas_target *targ;
1588 
1589 	cm = (struct mps_command *)data;
1590 	sc = cm->cm_sc;
1591 	ccb = cm->cm_ccb;
1592 	now = sbinuptime();
1593 
1594 	MPS_FUNCTRACE(sc);
1595 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1596 
1597 	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1598 
1599 	/*
1600 	 * Run the interrupt handler to make sure it's not pending.  This
1601 	 * isn't perfect because the command could have already completed
1602 	 * and been re-used, though this is unlikely.
1603 	 */
1604 	mps_intr_locked(sc);
1605 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1606 		mpssas_log_command(cm, MPS_XINFO,
1607 		    "SCSI command %p almost timed out\n", cm);
1608 		return;
1609 	}
1610 
1611 	if (cm->cm_ccb == NULL) {
1612 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1613 		return;
1614 	}
1615 
1616 	targ = cm->cm_targ;
1617 	targ->timeouts++;
1618 
1619 	elapsed = now - ccb->ccb_h.qos.sim_data;
1620 	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1621 	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1622 	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1623 	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1624 
1625 	/* XXX first, check the firmware state, to see if it's still
1626 	 * operational.  if not, do a diag reset.
1627 	 */
1628 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1629 	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1630 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1631 
1632 	if (targ->tm != NULL) {
1633 		/* target already in recovery, just queue up another
1634 		 * timedout command to be processed later.
1635 		 */
1636 		mps_dprint(sc, MPS_RECOVERY,
1637 		    "queued timedout cm %p for processing by tm %p\n",
1638 		    cm, targ->tm);
1639 	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1640 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1641 		    "Sending abort to target %u for SMID %d\n", targ->tid,
1642 		    cm->cm_desc.Default.SMID);
1643 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1644 		    cm, targ->tm);
1645 
1646 		/* start recovery by aborting the first timedout command */
1647 		mpssas_send_abort(sc, targ->tm, cm);
1648 	} else {
1649 		/* XXX queue this target up for recovery once a TM becomes
1650 		 * available.  The firmware only has a limited number of
1651 		 * HighPriority credits for the high priority requests used
1652 		 * for task management, and we ran out.
1653 		 *
1654 		 * Isilon: don't worry about this for now, since we have
1655 		 * more credits than disks in an enclosure, and limit
1656 		 * ourselves to one TM per target for recovery.
1657 		 */
1658 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1659 		    "timedout cm %p failed to allocate a tm\n", cm);
1660 	}
1661 
1662 }
1663 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a command
 * frame, translate the CCB into an MPI2 SCSI_IO request (including
 * optional EEDP protection-information setup and WD direct-drive I/O
 * translation), arm the per-command timeout, and submit to firmware.
 * The CCB is completed here on any early-out error path; otherwise
 * completion happens later in mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means no device is present at this target. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members must not receive direct SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* No new I/O is accepted once the driver is shutting down. */
	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of command frames, or a diag reset in progress: freeze the
	 * simq and ask CAM to requeue this CCB later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI_IO request frame from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target TLR (Transport Layer Retries) setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from the CCB (pointer or embedded form). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs, byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set the RDPROTECT/WRPROTECT field to 001b. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submission time for elapsed-time reporting on timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1937 
1938 /**
1939  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1940  */
1941 static void
1942 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1943     Mpi2SCSIIOReply_t *mpi_reply)
1944 {
1945 	u32 response_info;
1946 	u8 *response_bytes;
1947 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1948 	    MPI2_IOCSTATUS_MASK;
1949 	u8 scsi_state = mpi_reply->SCSIState;
1950 	u8 scsi_status = mpi_reply->SCSIStatus;
1951 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1952 	const char *desc_ioc_state, *desc_scsi_status;
1953 
1954 	if (log_info == 0x31170000)
1955 		return;
1956 
1957 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1958 	    ioc_status);
1959 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1960 	    scsi_status);
1961 
1962 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1963 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1964 
1965 	/*
1966 	 *We can add more detail about underflow data here
1967 	 * TO-DO
1968 	 */
1969 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1970 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1971 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1972 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1973 
1974 	if (sc->mps_debug & MPS_XINFO &&
1975 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1976 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1977 		scsi_sense_print(csio);
1978 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1979 	}
1980 
1981 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1982 		response_info = le32toh(mpi_reply->ResponseInfo);
1983 		response_bytes = (u8 *)&response_info;
1984 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1985 		    response_bytes[0],
1986 		    mps_describe_table(mps_scsi_taskmgmt_string,
1987 		    response_bytes[0]));
1988 	}
1989 }
1990 
/*
 * Completion handler for XPT_SCSI_IO commands issued by
 * mpssas_action_scsiio().  Called with the MPS lock held.  Cancels the
 * I/O timeout, tears down the data DMA mapping, removes the command from
 * the target's queue, translates the firmware reply (if any) into a CAM
 * status, and completes the CCB with xpt_done().
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command is done; cancel its timeout before anything else. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Update per-target accounting and take the command off its queue. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Log completions that race with timeout/task-management recovery
	 * or with a diagnostic reset; the recovery code owns the policy.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply means unqualified success. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the firmware's IOC status into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/*
			 * Copy at most the sense the firmware returned,
			 * bounded by the CCB's sense buffer size.
			 */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo));
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Emit additional (debug-level) detail for failed I/Os. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2406 
2407 /* All Request reached here are Endian safe */
2408 static void
2409 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2410     union ccb *ccb) {
2411 	pMpi2SCSIIORequest_t	pIO_req;
2412 	struct mps_softc	*sc = sassc->sc;
2413 	uint64_t		virtLBA;
2414 	uint32_t		physLBA, stripe_offset, stripe_unit;
2415 	uint32_t		io_size, column;
2416 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2417 
2418 	/*
2419 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2420 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2421 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2422 	 * bit different than the 10/16 CDBs, handle them separately.
2423 	 */
2424 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2425 	CDB = pIO_req->CDB.CDB32;
2426 
2427 	/*
2428 	 * Handle 6 byte CDBs.
2429 	 */
2430 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2431 	    (CDB[0] == WRITE_6))) {
2432 		/*
2433 		 * Get the transfer size in blocks.
2434 		 */
2435 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2436 
2437 		/*
2438 		 * Get virtual LBA given in the CDB.
2439 		 */
2440 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2441 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2442 
2443 		/*
2444 		 * Check that LBA range for I/O does not exceed volume's
2445 		 * MaxLBA.
2446 		 */
2447 		if ((virtLBA + (uint64_t)io_size - 1) <=
2448 		    sc->DD_max_lba) {
2449 			/*
2450 			 * Check if the I/O crosses a stripe boundary.  If not,
2451 			 * translate the virtual LBA to a physical LBA and set
2452 			 * the DevHandle for the PhysDisk to be used.  If it
2453 			 * does cross a boundary, do normal I/O.  To get the
2454 			 * right DevHandle to use, get the map number for the
2455 			 * column, then use that map number to look up the
2456 			 * DevHandle of the PhysDisk.
2457 			 */
2458 			stripe_offset = (uint32_t)virtLBA &
2459 			    (sc->DD_stripe_size - 1);
2460 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2461 				physLBA = (uint32_t)virtLBA >>
2462 				    sc->DD_stripe_exponent;
2463 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2464 				column = physLBA % sc->DD_num_phys_disks;
2465 				pIO_req->DevHandle =
2466 				    htole16(sc->DD_column_map[column].dev_handle);
2467 				/* ???? Is this endian safe*/
2468 				cm->cm_desc.SCSIIO.DevHandle =
2469 				    pIO_req->DevHandle;
2470 
2471 				physLBA = (stripe_unit <<
2472 				    sc->DD_stripe_exponent) + stripe_offset;
2473 				ptrLBA = &pIO_req->CDB.CDB32[1];
2474 				physLBA_byte = (uint8_t)(physLBA >> 16);
2475 				*ptrLBA = physLBA_byte;
2476 				ptrLBA = &pIO_req->CDB.CDB32[2];
2477 				physLBA_byte = (uint8_t)(physLBA >> 8);
2478 				*ptrLBA = physLBA_byte;
2479 				ptrLBA = &pIO_req->CDB.CDB32[3];
2480 				physLBA_byte = (uint8_t)physLBA;
2481 				*ptrLBA = physLBA_byte;
2482 
2483 				/*
2484 				 * Set flag that Direct Drive I/O is
2485 				 * being done.
2486 				 */
2487 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2488 			}
2489 		}
2490 		return;
2491 	}
2492 
2493 	/*
2494 	 * Handle 10, 12 or 16 byte CDBs.
2495 	 */
2496 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2497 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2498 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2499 	    (CDB[0] == WRITE_12))) {
2500 		/*
2501 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2502 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2503 		 * the else section.  10-byte and 12-byte CDB's are OK.
2504 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2505 		 * ready to accept 12byte CDB for Direct IOs.
2506 		 */
2507 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2508 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2509 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2510 			/*
2511 			 * Get the transfer size in blocks.
2512 			 */
2513 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2514 
2515 			/*
2516 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2517 			 * LBA in the CDB depending on command.
2518 			 */
2519 			lba_idx = ((CDB[0] == READ_12) ||
2520 				(CDB[0] == WRITE_12) ||
2521 				(CDB[0] == READ_10) ||
2522 				(CDB[0] == WRITE_10))? 2 : 6;
2523 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2524 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2525 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2526 			    (uint64_t)CDB[lba_idx + 3];
2527 
2528 			/*
2529 			 * Check that LBA range for I/O does not exceed volume's
2530 			 * MaxLBA.
2531 			 */
2532 			if ((virtLBA + (uint64_t)io_size - 1) <=
2533 			    sc->DD_max_lba) {
2534 				/*
2535 				 * Check if the I/O crosses a stripe boundary.
2536 				 * If not, translate the virtual LBA to a
2537 				 * physical LBA and set the DevHandle for the
2538 				 * PhysDisk to be used.  If it does cross a
2539 				 * boundary, do normal I/O.  To get the right
2540 				 * DevHandle to use, get the map number for the
2541 				 * column, then use that map number to look up
2542 				 * the DevHandle of the PhysDisk.
2543 				 */
2544 				stripe_offset = (uint32_t)virtLBA &
2545 				    (sc->DD_stripe_size - 1);
2546 				if ((stripe_offset + io_size) <=
2547 				    sc->DD_stripe_size) {
2548 					physLBA = (uint32_t)virtLBA >>
2549 					    sc->DD_stripe_exponent;
2550 					stripe_unit = physLBA /
2551 					    sc->DD_num_phys_disks;
2552 					column = physLBA %
2553 					    sc->DD_num_phys_disks;
2554 					pIO_req->DevHandle =
2555 					    htole16(sc->DD_column_map[column].
2556 					    dev_handle);
2557 					cm->cm_desc.SCSIIO.DevHandle =
2558 					    pIO_req->DevHandle;
2559 
2560 					physLBA = (stripe_unit <<
2561 					    sc->DD_stripe_exponent) +
2562 					    stripe_offset;
2563 					ptrLBA =
2564 					    &pIO_req->CDB.CDB32[lba_idx];
2565 					physLBA_byte = (uint8_t)(physLBA >> 24);
2566 					*ptrLBA = physLBA_byte;
2567 					ptrLBA =
2568 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2569 					physLBA_byte = (uint8_t)(physLBA >> 16);
2570 					*ptrLBA = physLBA_byte;
2571 					ptrLBA =
2572 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2573 					physLBA_byte = (uint8_t)(physLBA >> 8);
2574 					*ptrLBA = physLBA_byte;
2575 					ptrLBA =
2576 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2577 					physLBA_byte = (uint8_t)physLBA;
2578 					*ptrLBA = physLBA_byte;
2579 
2580 					/*
2581 					 * Set flag that Direct Drive I/O is
2582 					 * being done.
2583 					 */
2584 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2585 				}
2586 			}
2587 		} else {
2588 			/*
2589 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2590 			 * 0.  Get the transfer size in blocks.
2591 			 */
2592 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2593 
2594 			/*
2595 			 * Get virtual LBA.
2596 			 */
2597 			virtLBA = ((uint64_t)CDB[2] << 54) |
2598 			    ((uint64_t)CDB[3] << 48) |
2599 			    ((uint64_t)CDB[4] << 40) |
2600 			    ((uint64_t)CDB[5] << 32) |
2601 			    ((uint64_t)CDB[6] << 24) |
2602 			    ((uint64_t)CDB[7] << 16) |
2603 			    ((uint64_t)CDB[8] << 8) |
2604 			    (uint64_t)CDB[9];
2605 
2606 			/*
2607 			 * Check that LBA range for I/O does not exceed volume's
2608 			 * MaxLBA.
2609 			 */
2610 			if ((virtLBA + (uint64_t)io_size - 1) <=
2611 			    sc->DD_max_lba) {
2612 				/*
2613 				 * Check if the I/O crosses a stripe boundary.
2614 				 * If not, translate the virtual LBA to a
2615 				 * physical LBA and set the DevHandle for the
2616 				 * PhysDisk to be used.  If it does cross a
2617 				 * boundary, do normal I/O.  To get the right
2618 				 * DevHandle to use, get the map number for the
2619 				 * column, then use that map number to look up
2620 				 * the DevHandle of the PhysDisk.
2621 				 */
2622 				stripe_offset = (uint32_t)virtLBA &
2623 				    (sc->DD_stripe_size - 1);
2624 				if ((stripe_offset + io_size) <=
2625 				    sc->DD_stripe_size) {
2626 					physLBA = (uint32_t)(virtLBA >>
2627 					    sc->DD_stripe_exponent);
2628 					stripe_unit = physLBA /
2629 					    sc->DD_num_phys_disks;
2630 					column = physLBA %
2631 					    sc->DD_num_phys_disks;
2632 					pIO_req->DevHandle =
2633 					    htole16(sc->DD_column_map[column].
2634 					    dev_handle);
2635 					cm->cm_desc.SCSIIO.DevHandle =
2636 					    pIO_req->DevHandle;
2637 
2638 					physLBA = (stripe_unit <<
2639 					    sc->DD_stripe_exponent) +
2640 					    stripe_offset;
2641 
2642 					/*
2643 					 * Set upper 4 bytes of LBA to 0.  We
2644 					 * assume that the phys disks are less
2645 					 * than 2 TB's in size.  Then, set the
2646 					 * lower 4 bytes.
2647 					 */
2648 					pIO_req->CDB.CDB32[2] = 0;
2649 					pIO_req->CDB.CDB32[3] = 0;
2650 					pIO_req->CDB.CDB32[4] = 0;
2651 					pIO_req->CDB.CDB32[5] = 0;
2652 					ptrLBA = &pIO_req->CDB.CDB32[6];
2653 					physLBA_byte = (uint8_t)(physLBA >> 24);
2654 					*ptrLBA = physLBA_byte;
2655 					ptrLBA = &pIO_req->CDB.CDB32[7];
2656 					physLBA_byte = (uint8_t)(physLBA >> 16);
2657 					*ptrLBA = physLBA_byte;
2658 					ptrLBA = &pIO_req->CDB.CDB32[8];
2659 					physLBA_byte = (uint8_t)(physLBA >> 8);
2660 					*ptrLBA = physLBA_byte;
2661 					ptrLBA = &pIO_req->CDB.CDB32[9];
2662 					physLBA_byte = (uint8_t)physLBA;
2663 					*ptrLBA = physLBA_byte;
2664 
2665 					/*
2666 					 * Set flag that Direct Drive I/O is
2667 					 * being done.
2668 					 */
2669 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2670 				}
2671 			}
2672 		}
2673 	}
2674 }
2675 
2676 #if __FreeBSD_version >= 900026
2677 static void
2678 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2679 {
2680 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2681 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2682 	uint64_t sasaddr;
2683 	union ccb *ccb;
2684 
2685 	ccb = cm->cm_complete_data;
2686 
2687 	/*
2688 	 * Currently there should be no way we can hit this case.  It only
2689 	 * happens when we have a failure to allocate chain frames, and SMP
2690 	 * commands require two S/G elements only.  That should be handled
2691 	 * in the standard request size.
2692 	 */
2693 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2694 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2695 			   __func__, cm->cm_flags);
2696 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2697 		goto bailout;
2698         }
2699 
2700 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2701 	if (rpl == NULL) {
2702 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2703 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2704 		goto bailout;
2705 	}
2706 
2707 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2708 	sasaddr = le32toh(req->SASAddress.Low);
2709 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2710 
2711 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2712 	    MPI2_IOCSTATUS_SUCCESS ||
2713 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2714 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2715 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2716 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2717 		goto bailout;
2718 	}
2719 
2720 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2721 		   "%#jx completed successfully\n", __func__,
2722 		   (uintmax_t)sasaddr);
2723 
2724 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2725 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2726 	else
2727 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2728 
2729 bailout:
2730 	/*
2731 	 * We sync in both directions because we had DMAs in the S/G list
2732 	 * in both directions.
2733 	 */
2734 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2735 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2736 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2737 	mps_free_command(sc, cm);
2738 	xpt_done(ccb);
2739 }
2740 
/*
 * Build and dispatch an SMP passthrough request for the given CCB to the
 * SMP target at 'sasaddr'.  The request and response buffers are described
 * by a two-element uio so that one mps_map_command() call (and one busdma
 * load) covers both directions.  Completion is handled by
 * mpssas_smpio_complete(); on failure the CCB is completed here.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry S/G list carrying a virtual address. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outbound request, iovec 1 the inbound response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2911 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address to which the SMP
 * request should be routed and hand off to mpssas_send_smpcmd().  If the
 * target itself contains an SMP target its own address is used; otherwise
 * the parent device (normally the attached expander) supplies the address.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* New probe code: parent info is cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3047 #endif //__FreeBSD_version >= 900026
3048 
3049 static void
3050 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3051 {
3052 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3053 	struct mps_softc *sc;
3054 	struct mps_command *tm;
3055 	struct mpssas_target *targ;
3056 
3057 	MPS_FUNCTRACE(sassc->sc);
3058 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3059 
3060 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3061 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3062 	     ccb->ccb_h.target_id));
3063 	sc = sassc->sc;
3064 	tm = mpssas_alloc_tm(sc);
3065 	if (tm == NULL) {
3066 		mps_dprint(sc, MPS_ERROR,
3067 		    "command alloc failure in mpssas_action_resetdev\n");
3068 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3069 		xpt_done(ccb);
3070 		return;
3071 	}
3072 
3073 	targ = &sassc->targets[ccb->ccb_h.target_id];
3074 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3075 	req->DevHandle = htole16(targ->handle);
3076 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3077 
3078 	/* SAS Hard Link Reset / SATA Link Reset */
3079 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3080 
3081 	tm->cm_data = NULL;
3082 	tm->cm_complete = mpssas_resetdev_complete;
3083 	tm->cm_complete_data = ccb;
3084 	tm->cm_targ = targ;
3085 
3086 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3087 	mps_map_command(sc, tm);
3088 }
3089 
3090 static void
3091 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3092 {
3093 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3094 	union ccb *ccb;
3095 
3096 	MPS_FUNCTRACE(sc);
3097 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3098 
3099 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3100 	ccb = tm->cm_complete_data;
3101 
3102 	/*
3103 	 * Currently there should be no way we can hit this case.  It only
3104 	 * happens when we have a failure to allocate chain frames, and
3105 	 * task management commands don't have S/G lists.
3106 	 */
3107 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3108 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3109 
3110 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3111 
3112 		mps_dprint(sc, MPS_ERROR,
3113 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3114 			   "This should not happen!\n", __func__, tm->cm_flags,
3115 			   req->DevHandle);
3116 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3117 		goto bailout;
3118 	}
3119 
3120 	mps_dprint(sc, MPS_XINFO,
3121 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3122 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3123 
3124 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3125 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3126 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3127 		    CAM_LUN_WILDCARD);
3128 	}
3129 	else
3130 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3131 
3132 bailout:
3133 
3134 	mpssas_free_tm(sc, tm);
3135 	xpt_done(ccb);
3136 }
3137 
3138 static void
3139 mpssas_poll(struct cam_sim *sim)
3140 {
3141 	struct mpssas_softc *sassc;
3142 
3143 	sassc = cam_sim_softc(sim);
3144 
3145 	if (sassc->sc->mps_debug & MPS_TRACE) {
3146 		/* frequent debug messages during a panic just slow
3147 		 * everything down too much.
3148 		 */
3149 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3150 		sassc->sc->mps_debug &= ~MPS_TRACE;
3151 	}
3152 
3153 	mps_intr_locked(sassc->sc);
3154 }
3155 
/*
 * CAM async event callback.  On newer kernels this handles
 * AC_ADVINFO_CHANGED to pick up cached READ CAPACITY(16) changes, which
 * are used to track per-LUN EEDP formatting; on older kernels it handles
 * AC_FOUND_DEVICE and probes for EEDP support via mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this event refers to, creating it if new. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data via an advinfo
		 * CCB so we can see whether the LUN is formatted with
		 * protection information.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			/*
			 * EEDP is supported for protection types 1 and 3;
			 * type 2 is treated the same as unformatted.
			 */
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3271 
3272 #if (__FreeBSD_version < 901503) || \
3273     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3274 static void
3275 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3276 		  struct ccb_getdev *cgd)
3277 {
3278 	struct mpssas_softc *sassc = sc->sassc;
3279 	struct ccb_scsiio *csio;
3280 	struct scsi_read_capacity_16 *scsi_cmd;
3281 	struct scsi_read_capacity_eedp *rcap_buf;
3282 	path_id_t pathid;
3283 	target_id_t targetid;
3284 	lun_id_t lunid;
3285 	union ccb *ccb;
3286 	struct cam_path *local_path;
3287 	struct mpssas_target *target;
3288 	struct mpssas_lun *lun;
3289 	uint8_t	found_lun;
3290 	char path_str[64];
3291 
3292 	sassc = sc->sassc;
3293 	pathid = cam_sim_path(sassc->sim);
3294 	targetid = xpt_path_target_id(path);
3295 	lunid = xpt_path_lun_id(path);
3296 
3297 	KASSERT(targetid < sassc->maxtargets,
3298 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3299 	     targetid));
3300 	target = &sassc->targets[targetid];
3301 	if (target->handle == 0x0)
3302 		return;
3303 
3304 	/*
3305 	 * Determine if the device is EEDP capable.
3306 	 *
3307 	 * If this flag is set in the inquiry data,
3308 	 * the device supports protection information,
3309 	 * and must support the 16 byte read
3310 	 * capacity command, otherwise continue without
3311 	 * sending read cap 16
3312 	 */
3313 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3314 		return;
3315 
3316 	/*
3317 	 * Issue a READ CAPACITY 16 command.  This info
3318 	 * is used to determine if the LUN is formatted
3319 	 * for EEDP support.
3320 	 */
3321 	ccb = xpt_alloc_ccb_nowait();
3322 	if (ccb == NULL) {
3323 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3324 		    "for EEDP support.\n");
3325 		return;
3326 	}
3327 
3328 	if (xpt_create_path(&local_path, xpt_periph,
3329 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3330 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3331 		    "path for EEDP support\n");
3332 		xpt_free_ccb(ccb);
3333 		return;
3334 	}
3335 
3336 	/*
3337 	 * If LUN is already in list, don't create a new
3338 	 * one.
3339 	 */
3340 	found_lun = FALSE;
3341 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3342 		if (lun->lun_id == lunid) {
3343 			found_lun = TRUE;
3344 			break;
3345 		}
3346 	}
3347 	if (!found_lun) {
3348 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3349 		    M_NOWAIT | M_ZERO);
3350 		if (lun == NULL) {
3351 			mps_dprint(sc, MPS_ERROR,
3352 			    "Unable to alloc LUN for EEDP support.\n");
3353 			xpt_free_path(local_path);
3354 			xpt_free_ccb(ccb);
3355 			return;
3356 		}
3357 		lun->lun_id = lunid;
3358 		SLIST_INSERT_HEAD(&target->luns, lun,
3359 		    lun_link);
3360 	}
3361 
3362 	xpt_path_string(local_path, path_str, sizeof(path_str));
3363 
3364 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3365 	    path_str, target->handle);
3366 
3367 	/*
3368 	 * Issue a READ CAPACITY 16 command for the LUN.
3369 	 * The mpssas_read_cap_done function will load
3370 	 * the read cap info into the LUN struct.
3371 	 */
3372 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3373 	    M_MPT2, M_NOWAIT | M_ZERO);
3374 	if (rcap_buf == NULL) {
3375 		mps_dprint(sc, MPS_FAULT,
3376 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3377 		xpt_free_path(ccb->ccb_h.path);
3378 		xpt_free_ccb(ccb);
3379 		return;
3380 	}
3381 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3382 	csio = &ccb->csio;
3383 	csio->ccb_h.func_code = XPT_SCSI_IO;
3384 	csio->ccb_h.flags = CAM_DIR_IN;
3385 	csio->ccb_h.retry_count = 4;
3386 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3387 	csio->ccb_h.timeout = 60000;
3388 	csio->data_ptr = (uint8_t *)rcap_buf;
3389 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3390 	csio->sense_len = MPS_SENSE_LEN;
3391 	csio->cdb_len = sizeof(*scsi_cmd);
3392 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3393 
3394 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3395 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3396 	scsi_cmd->opcode = 0x9E;
3397 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3398 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3399 
3400 	ccb->ccb_h.ppriv_ptr1 = sassc;
3401 	xpt_action(ccb);
3402 }
3403 
/*
 * Completion handler for the READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Records whether the LUN is formatted for EEDP
 * (and its block size) in the matching entry of the target's LUN list,
 * then frees the data buffer, the path and the CCB.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq here because this SCSI command
	 * was generated internally by the driver, not by a CAM peripheral,
	 * so the completion will not pass back through cam_periph.  Any
	 * future internally generated commands need to do the same.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit set: the LUN carries protection information. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3474 #endif /* (__FreeBSD_version < 901503) || \
3475           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3476 
3477 /*
3478  * Set the INRESET flag for this target so that no I/O will be sent to
3479  * the target until the reset has completed.  If an I/O request does
3480  * happen, the devq will be frozen.  The CCB holds the path which is
3481  * used to release the devq.  The devq is released and the CCB is freed
3482  * when the TM completes.
3483  */
3484 void
3485 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3486     struct mpssas_target *target, lun_id_t lun_id)
3487 {
3488 	union ccb *ccb;
3489 	path_id_t path_id;
3490 
3491 	ccb = xpt_alloc_ccb_nowait();
3492 	if (ccb) {
3493 		path_id = cam_sim_path(sc->sassc->sim);
3494 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3495 		    target->tid, lun_id) != CAM_REQ_CMP) {
3496 			xpt_free_ccb(ccb);
3497 		} else {
3498 			tm->cm_ccb = ccb;
3499 			tm->cm_targ = target;
3500 			target->flags |= MPSSAS_TARGET_INRESET;
3501 		}
3502 	}
3503 }
3504 
3505 int
3506 mpssas_startup(struct mps_softc *sc)
3507 {
3508 
3509 	/*
3510 	 * Send the port enable message and set the wait_for_port_enable flag.
3511 	 * This flag helps to keep the simq frozen until all discovery events
3512 	 * are processed.
3513 	 */
3514 	sc->wait_for_port_enable = 1;
3515 	mpssas_send_portenable(sc);
3516 	return (0);
3517 }
3518 
3519 static int
3520 mpssas_send_portenable(struct mps_softc *sc)
3521 {
3522 	MPI2_PORT_ENABLE_REQUEST *request;
3523 	struct mps_command *cm;
3524 
3525 	MPS_FUNCTRACE(sc);
3526 
3527 	if ((cm = mps_alloc_command(sc)) == NULL)
3528 		return (EBUSY);
3529 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3530 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3531 	request->MsgFlags = 0;
3532 	request->VP_ID = 0;
3533 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3534 	cm->cm_complete = mpssas_portenable_complete;
3535 	cm->cm_data = NULL;
3536 	cm->cm_sge = NULL;
3537 
3538 	mps_map_command(sc, cm);
3539 	mps_dprint(sc, MPS_XINFO,
3540 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3541 	    cm, cm->cm_req, cm->cm_complete);
3542 	return (0);
3543 }
3544 
3545 static void
3546 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3547 {
3548 	MPI2_PORT_ENABLE_REPLY *reply;
3549 	struct mpssas_softc *sassc;
3550 
3551 	MPS_FUNCTRACE(sc);
3552 	sassc = sc->sassc;
3553 
3554 	/*
3555 	 * Currently there should be no way we can hit this case.  It only
3556 	 * happens when we have a failure to allocate chain frames, and
3557 	 * port enable commands don't have S/G lists.
3558 	 */
3559 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3560 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3561 			   "This should not happen!\n", __func__, cm->cm_flags);
3562 	}
3563 
3564 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3565 	if (reply == NULL)
3566 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3567 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3568 	    MPI2_IOCSTATUS_SUCCESS)
3569 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3570 
3571 	mps_free_command(sc, cm);
3572 
3573 	/*
3574 	 * Get WarpDrive info after discovery is complete but before the scan
3575 	 * starts.  At this point, all devices are ready to be exposed to the
3576 	 * OS.  If devices should be hidden instead, take them out of the
3577 	 * 'targets' array before the scan.  The devinfo for a disk will have
3578 	 * some info and a volume's will be 0.  Use that to remove disks.
3579 	 */
3580 	mps_wd_config_pages(sc);
3581 
3582 	/*
3583 	 * Done waiting for port enable to complete.  Decrement the refcount.
3584 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3585 	 * take place.  Since the simq was explicitly frozen before port
3586 	 * enable, it must be explicitly released here to keep the
3587 	 * freeze/release count in sync.
3588 	 */
3589 	sc->wait_for_port_enable = 0;
3590 	sc->port_enable_complete = 1;
3591 	wakeup(&sc->port_enable_complete);
3592 	mpssas_startup_decrement(sassc);
3593 }
3594 
3595 int
3596 mpssas_check_id(struct mpssas_softc *sassc, int id)
3597 {
3598 	struct mps_softc *sc = sassc->sc;
3599 	char *ids;
3600 	char *name;
3601 
3602 	ids = &sc->exclude_ids[0];
3603 	while((name = strsep(&ids, ",")) != NULL) {
3604 		if (name[0] == '\0')
3605 			continue;
3606 		if (strtol(name, NULL, 0) == (long)id)
3607 			return (1);
3608 	}
3609 
3610 	return (0);
3611 }
3612 
3613 void
3614 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3615 {
3616 	struct mpssas_softc *sassc;
3617 	struct mpssas_lun *lun, *lun_tmp;
3618 	struct mpssas_target *targ;
3619 	int i;
3620 
3621 	sassc = sc->sassc;
3622 	/*
3623 	 * The number of targets is based on IOC Facts, so free all of
3624 	 * the allocated LUNs for each target and then the target buffer
3625 	 * itself.
3626 	 */
3627 	for (i=0; i< maxtargets; i++) {
3628 		targ = &sassc->targets[i];
3629 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3630 			free(lun, M_MPT2);
3631 		}
3632 	}
3633 	free(sassc->targets, M_MPT2);
3634 
3635 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3636 	    M_MPT2, M_WAITOK|M_ZERO);
3637 	if (!sassc->targets) {
3638 		panic("%s failed to alloc targets with error %d\n",
3639 		    __func__, ENOMEM);
3640 	}
3641 }
3642