xref: /freebsd/sys/dev/mps/mps_sas.c (revision 7029da5c36f2d3cf6bb6c81bf551229f416399e8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  *
32  * $FreeBSD$
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /* Communications core for Avago Technologies (LSI) MPT2 */
39 
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/bio.h>
50 #include <sys/malloc.h>
51 #include <sys/uio.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 #include <sys/sbuf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/rman.h>
62 
63 #include <machine/stdarg.h>
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #if __FreeBSD_version >= 900026
76 #include <cam/scsi/smp_all.h>
77 #endif
78 
79 #include <dev/mps/mpi/mpi2_type.h>
80 #include <dev/mps/mpi/mpi2.h>
81 #include <dev/mps/mpi/mpi2_ioc.h>
82 #include <dev/mps/mpi/mpi2_sas.h>
83 #include <dev/mps/mpi/mpi2_cnfg.h>
84 #include <dev/mps/mpi/mpi2_init.h>
85 #include <dev/mps/mpi/mpi2_tool.h>
86 #include <dev/mps/mps_ioctl.h>
87 #include <dev/mps/mpsvar.h>
88 #include <dev/mps/mps_table.h>
89 #include <dev/mps/mps_sas.h>
90 
91 #define MPSSAS_DISCOVERY_TIMEOUT	20
92 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the SCSI CDB opcode byte; a non-zero entry holds the EEDP
 * flag value to OR into the SCSI IO request for that command.  The
 * populated slots correspond to the READ/WRITE/VERIFY command families
 * (e.g. 0x28 READ(10), 0x2A WRITE(10), 0x8A WRITE(16)) -- see the SBC
 * opcode assignments; everything else gets no EEDP treatment.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
118 
119 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
120 
121 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mpssas_poll(struct cam_sim *sim);
125 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126     struct mps_command *cm);
127 static void mpssas_scsiio_timeout(void *data);
128 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130     struct mps_command *cm, union ccb *ccb);
131 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134 #if __FreeBSD_version >= 900026
135 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
137 			       uint64_t sasaddr);
138 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139 #endif //FreeBSD_version >= 900026
140 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141 static void mpssas_async(void *callback_arg, uint32_t code,
142 			 struct cam_path *path, void *arg);
143 #if (__FreeBSD_version < 901503) || \
144     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146 			      struct ccb_getdev *cgd);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
148 #endif
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151     struct mps_command *cm);
152 
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
155 {
156 	struct mpssas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
175 void
176 mpssas_startup_increment(struct mpssas_softc *sassc)
177 {
178 	MPS_FUNCTRACE(sassc->sc);
179 
180 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
181 		if (sassc->startup_refcount++ == 0) {
182 			/* just starting, freeze the simq */
183 			mps_dprint(sassc->sc, MPS_INIT,
184 			    "%s freezing simq\n", __func__);
185 #if __FreeBSD_version >= 1000039
186 			xpt_hold_boot();
187 #endif
188 			xpt_freeze_simq(sassc->sim, 1);
189 		}
190 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
191 		    sassc->startup_refcount);
192 	}
193 }
194 
195 void
196 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
197 {
198 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200 		xpt_release_simq(sassc->sim, 1);
201 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
202 	}
203 }
204 
205 void
206 mpssas_startup_decrement(struct mpssas_softc *sassc)
207 {
208 	MPS_FUNCTRACE(sassc->sc);
209 
210 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
211 		if (--sassc->startup_refcount == 0) {
212 			/* finished all discovery-related actions, release
213 			 * the simq and rescan for the latest topology.
214 			 */
215 			mps_dprint(sassc->sc, MPS_INIT,
216 			    "%s releasing simq\n", __func__);
217 			sassc->flags &= ~MPSSAS_IN_STARTUP;
218 			xpt_release_simq(sassc->sim, 1);
219 #if __FreeBSD_version >= 1000039
220 			xpt_release_boot();
221 #else
222 			mpssas_rescan_target(sassc->sc, NULL);
223 #endif
224 		}
225 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
226 		    sassc->startup_refcount);
227 	}
228 }
229 
230 /*
231  * The firmware requires us to stop sending commands when we're doing task
232  * management.
233  * XXX The logic for serializing the device has been made lazy and moved to
234  * mpssas_prepare_for_tm().
235  */
236 struct mps_command *
237 mpssas_alloc_tm(struct mps_softc *sc)
238 {
239 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
240 	struct mps_command *tm;
241 
242 	tm = mps_alloc_high_priority_command(sc);
243 	if (tm == NULL)
244 		return (NULL);
245 
246 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
247 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
248 	return tm;
249 }
250 
251 void
252 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
253 {
254 	int target_id = 0xFFFFFFFF;
255 
256 	if (tm == NULL)
257 		return;
258 
259 	/*
260 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
261 	 * free the resources used for freezing the devq.  Must clear the
262 	 * INRESET flag as well or scsi I/O will not work.
263 	 */
264 	if (tm->cm_targ != NULL) {
265 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
266 		target_id = tm->cm_targ->tid;
267 	}
268 	if (tm->cm_ccb) {
269 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
270 		    target_id);
271 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
272 		xpt_free_path(tm->cm_ccb->ccb_h.path);
273 		xpt_free_ccb(tm->cm_ccb);
274 	}
275 
276 	mps_free_high_priority_command(sc, tm);
277 }
278 
279 void
280 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
281 {
282 	struct mpssas_softc *sassc = sc->sassc;
283 	path_id_t pathid;
284 	target_id_t targetid;
285 	union ccb *ccb;
286 
287 	MPS_FUNCTRACE(sc);
288 	pathid = cam_sim_path(sassc->sim);
289 	if (targ == NULL)
290 		targetid = CAM_TARGET_WILDCARD;
291 	else
292 		targetid = targ - sassc->targets;
293 
294 	/*
295 	 * Allocate a CCB and schedule a rescan.
296 	 */
297 	ccb = xpt_alloc_ccb_nowait();
298 	if (ccb == NULL) {
299 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
300 		return;
301 	}
302 
303 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
304 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
305 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
306 		xpt_free_ccb(ccb);
307 		return;
308 	}
309 
310 	if (targetid == CAM_TARGET_WILDCARD)
311 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
312 	else
313 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
314 
315 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
316 	xpt_rescan(ccb);
317 }
318 
/*
 * printf-style logging helper for a command.  Builds one line in a
 * stack sbuf: a CAM path prefix (plus decoded CDB and transfer length
 * for SCSI I/O), or a sim/bus/target/lun tuple when no CCB is attached,
 * followed by the SMID and the caller's formatted message.  Emits
 * nothing unless 'level' is enabled in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	/* Nothing to describe without a command. */
	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size backing store; output is truncated, never allocated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CAM path; add CDB details for SCSI I/O. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
364 
365 
366 static void
367 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
368 {
369 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
370 	struct mpssas_target *targ;
371 	uint16_t handle;
372 
373 	MPS_FUNCTRACE(sc);
374 
375 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
376 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
377 	targ = tm->cm_targ;
378 
379 	if (reply == NULL) {
380 		/* XXX retry the remove after the diag reset completes? */
381 		mps_dprint(sc, MPS_FAULT,
382 		    "%s NULL reply resetting device 0x%04x\n", __func__,
383 		    handle);
384 		mpssas_free_tm(sc, tm);
385 		return;
386 	}
387 
388 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
389 	    MPI2_IOCSTATUS_SUCCESS) {
390 		mps_dprint(sc, MPS_ERROR,
391 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
392 		   le16toh(reply->IOCStatus), handle);
393 	}
394 
395 	mps_dprint(sc, MPS_XINFO,
396 	    "Reset aborted %u commands\n", reply->TerminationCount);
397 	mps_free_reply(sc, tm->cm_reply_data);
398 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
399 
400 	mps_dprint(sc, MPS_XINFO,
401 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
402 
403 	/*
404 	 * Don't clear target if remove fails because things will get confusing.
405 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
406 	 * this target id if possible, and so we can assign the same target id
407 	 * to this device if it comes back in the future.
408 	 */
409 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
410 	    MPI2_IOCSTATUS_SUCCESS) {
411 		targ = tm->cm_targ;
412 		targ->handle = 0x0;
413 		targ->encl_handle = 0x0;
414 		targ->encl_slot = 0x0;
415 		targ->exp_dev_handle = 0x0;
416 		targ->phy_num = 0x0;
417 		targ->linkrate = 0x0;
418 		targ->devinfo = 0x0;
419 		targ->flags = 0x0;
420 	}
421 
422 	mpssas_free_tm(sc, tm);
423 }
424 
425 
426 /*
427  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
428  * Otherwise Volume Delete is same as Bare Drive Removal.
429  */
430 void
431 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
432 {
433 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
434 	struct mps_softc *sc;
435 	struct mps_command *tm;
436 	struct mpssas_target *targ = NULL;
437 
438 	MPS_FUNCTRACE(sassc->sc);
439 	sc = sassc->sc;
440 
441 #ifdef WD_SUPPORT
442 	/*
443 	 * If this is a WD controller, determine if the disk should be exposed
444 	 * to the OS or not.  If disk should be exposed, return from this
445 	 * function without doing anything.
446 	 */
447 	if (sc->WD_available && (sc->WD_hide_expose ==
448 	    MPS_WD_EXPOSE_ALWAYS)) {
449 		return;
450 	}
451 #endif //WD_SUPPORT
452 
453 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
454 	if (targ == NULL) {
455 		/* FIXME: what is the action? */
456 		/* We don't know about this device? */
457 		mps_dprint(sc, MPS_ERROR,
458 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459 		return;
460 	}
461 
462 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
463 
464 	tm = mpssas_alloc_tm(sc);
465 	if (tm == NULL) {
466 		mps_dprint(sc, MPS_ERROR,
467 		    "%s: command alloc failure\n", __func__);
468 		return;
469 	}
470 
471 	mpssas_rescan_target(sc, targ);
472 
473 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
474 	req->DevHandle = targ->handle;
475 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
476 
477 	/* SAS Hard Link Reset / SATA Link Reset */
478 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
479 
480 	tm->cm_targ = targ;
481 	tm->cm_data = NULL;
482 	tm->cm_complete = mpssas_remove_volume;
483 	tm->cm_complete_data = (void *)(uintptr_t)handle;
484 
485 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
486 	    __func__, targ->tid);
487 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
488 
489 	mps_map_command(sc, tm);
490 }
491 
492 /*
493  * The MPT2 firmware performs debounce on the link to avoid transient link
494  * errors and false removals.  When it does decide that link has been lost
495  * and a device need to go away, it expects that the host will perform a
496  * target reset and then an op remove.  The reset has the side-effect of
497  * aborting any outstanding requests for the device, which is required for
498  * the op-remove to succeed.  It's not clear if the host should check for
499  * the device coming back alive after the reset.
500  */
501 void
502 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
503 {
504 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
505 	struct mps_softc *sc;
506 	struct mps_command *cm;
507 	struct mpssas_target *targ = NULL;
508 
509 	MPS_FUNCTRACE(sassc->sc);
510 
511 	sc = sassc->sc;
512 
513 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
514 	if (targ == NULL) {
515 		/* FIXME: what is the action? */
516 		/* We don't know about this device? */
517 		mps_dprint(sc, MPS_ERROR,
518 		    "%s : invalid handle 0x%x \n", __func__, handle);
519 		return;
520 	}
521 
522 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
523 
524 	cm = mpssas_alloc_tm(sc);
525 	if (cm == NULL) {
526 		mps_dprint(sc, MPS_ERROR,
527 		    "%s: command alloc failure\n", __func__);
528 		return;
529 	}
530 
531 	mpssas_rescan_target(sc, targ);
532 
533 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
534 	memset(req, 0, sizeof(*req));
535 	req->DevHandle = htole16(targ->handle);
536 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
537 
538 	/* SAS Hard Link Reset / SATA Link Reset */
539 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
540 
541 	cm->cm_targ = targ;
542 	cm->cm_data = NULL;
543 	cm->cm_complete = mpssas_remove_device;
544 	cm->cm_complete_data = (void *)(uintptr_t)handle;
545 
546 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
547 	    __func__, targ->tid);
548 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
549 
550 	mps_map_command(sc, cm);
551 }
552 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command to send the
 * SAS_OP_REMOVE_DEVICE IO-unit control request, either immediately (no
 * commands pending on the target) or deferred via pending_remove_tm
 * until the target's command list drains.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Log but otherwise ignore a failed reset; the remove proceeds. */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command for the REMOVE_DEVICE request. */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Deferred: sent later once targ->commands drains. */
		targ->pending_remove_tm = tm;
	}


	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
628 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, wipes the target's identity
 * fields and frees its LUN list; on failure the target is left intact
 * so the id isn't inadvertently reused.  Always frees the TM command.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));


	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release the per-LUN bookkeeping for the departed device. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
704 
705 static int
706 mpssas_register_events(struct mps_softc *sc)
707 {
708 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
709 
710 	bzero(events, 16);
711 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
712 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
713 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
714 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
715 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
716 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
717 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
718 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
719 	setbit(events, MPI2_EVENT_IR_VOLUME);
720 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
721 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
722 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
723 
724 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
725 	    &sc->sassc->mpssas_eh);
726 
727 	return (0);
728 }
729 
730 int
731 mps_attach_sas(struct mps_softc *sc)
732 {
733 	struct mpssas_softc *sassc;
734 	cam_status status;
735 	int unit, error = 0, reqs;
736 
737 	MPS_FUNCTRACE(sc);
738 	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
739 
740 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
741 	if(!sassc) {
742 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
743 		    "Cannot allocate SAS controller memory\n");
744 		return (ENOMEM);
745 	}
746 
747 	/*
748 	 * XXX MaxTargets could change during a reinit.  Since we don't
749 	 * resize the targets[] array during such an event, cache the value
750 	 * of MaxTargets here so that we don't get into trouble later.  This
751 	 * should move into the reinit logic.
752 	 */
753 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
754 	sassc->targets = malloc(sizeof(struct mpssas_target) *
755 	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
756 	if(!sassc->targets) {
757 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
758 		    "Cannot allocate SAS target memory\n");
759 		free(sassc, M_MPT2);
760 		return (ENOMEM);
761 	}
762 	sc->sassc = sassc;
763 	sassc->sc = sc;
764 
765 	reqs = sc->num_reqs - sc->num_prireqs - 1;
766 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
767 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
768 		error = ENOMEM;
769 		goto out;
770 	}
771 
772 	unit = device_get_unit(sc->mps_dev);
773 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
774 	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
775 	if (sassc->sim == NULL) {
776 		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
777 		error = EINVAL;
778 		goto out;
779 	}
780 
781 	TAILQ_INIT(&sassc->ev_queue);
782 
783 	/* Initialize taskqueue for Event Handling */
784 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
785 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
786 	    taskqueue_thread_enqueue, &sassc->ev_tq);
787 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
788 	    device_get_nameunit(sc->mps_dev));
789 
790 	mps_lock(sc);
791 
792 	/*
793 	 * XXX There should be a bus for every port on the adapter, but since
794 	 * we're just going to fake the topology for now, we'll pretend that
795 	 * everything is just a target on a single bus.
796 	 */
797 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
798 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
799 		    "Error %d registering SCSI bus\n", error);
800 		mps_unlock(sc);
801 		goto out;
802 	}
803 
804 	/*
805 	 * Assume that discovery events will start right away.
806 	 *
807 	 * Hold off boot until discovery is complete.
808 	 */
809 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
810 	sc->sassc->startup_refcount = 0;
811 	mpssas_startup_increment(sassc);
812 
813 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
814 
815 	/*
816 	 * Register for async events so we can determine the EEDP
817 	 * capabilities of devices.
818 	 */
819 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
820 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
821 	    CAM_LUN_WILDCARD);
822 	if (status != CAM_REQ_CMP) {
823 		mps_dprint(sc, MPS_ERROR|MPS_INIT,
824 		    "Error %#x creating sim path\n", status);
825 		sassc->path = NULL;
826 	} else {
827 		int event;
828 
829 #if (__FreeBSD_version >= 1000006) || \
830     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
831 		event = AC_ADVINFO_CHANGED;
832 #else
833 		event = AC_FOUND_DEVICE;
834 #endif
835 		status = xpt_register_async(event, mpssas_async, sc,
836 					    sassc->path);
837 		if (status != CAM_REQ_CMP) {
838 			mps_dprint(sc, MPS_ERROR,
839 			    "Error %#x registering async handler for "
840 			    "AC_ADVINFO_CHANGED events\n", status);
841 			xpt_free_path(sassc->path);
842 			sassc->path = NULL;
843 		}
844 	}
845 	if (status != CAM_REQ_CMP) {
846 		/*
847 		 * EEDP use is the exception, not the rule.
848 		 * Warn the user, but do not fail to attach.
849 		 */
850 		mps_printf(sc, "EEDP capabilities disabled.\n");
851 	}
852 
853 	mps_unlock(sc);
854 
855 	mpssas_register_events(sc);
856 out:
857 	if (error)
858 		mps_detach_sas(sc);
859 
860 	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
861 	return (error);
862 }
863 
/*
 * Tear down the SAS transport layer: deregister events, drain the event
 * taskqueue, unwind any startup references, detach from CAM, and free
 * the target table and softc.  Safe to call when attach only partially
 * completed (every resource is NULL-checked).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	/* Nothing to do if attach never got as far as allocating the softc. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Drop the freeze taken when attach started discovery. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records hanging off each target before the table. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
926 
927 void
928 mpssas_discovery_end(struct mpssas_softc *sassc)
929 {
930 	struct mps_softc *sc = sassc->sc;
931 
932 	MPS_FUNCTRACE(sc);
933 
934 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
935 		callout_stop(&sassc->discovery_callout);
936 
937 	/*
938 	 * After discovery has completed, check the mapping table for any
939 	 * missing devices and update their missing counts. Only do this once
940 	 * whenever the driver is initialized so that missing counts aren't
941 	 * updated unnecessarily. Note that just because discovery has
942 	 * completed doesn't mean that events have been processed yet. The
943 	 * check_devices function is a callout timer that checks if ALL devices
944 	 * are missing. If so, it will wait a little longer for events to
945 	 * complete and keep resetting itself until some device in the mapping
946 	 * table is not missing, meaning that event processing has started.
947 	 */
948 	if (sc->track_mapping_events) {
949 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
950 		    "completed. Check for missing devices in the mapping "
951 		    "table.\n");
952 		callout_reset(&sc->device_check_callout,
953 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
954 		    sc);
955 	}
956 }
957 
/*
 * CAM action entry point for this SIM.  Dispatches incoming CCBs by
 * function code.  CAM invokes this with the SIM lock (sc->mps_mtx) held.
 * Most requests are completed inline and fall through to xpt_done() at
 * the bottom; SCSI I/O, SMP I/O, and device resets hand the CCB to
 * helpers that complete it asynchronously, so those cases return early.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the SIM/HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No device handle means the target is gone. */
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware's negotiated link rate code. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1083 
1084 static void
1085 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1086     target_id_t target_id, lun_id_t lun_id)
1087 {
1088 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1089 	struct cam_path *path;
1090 
1091 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1092 	    ac_code, target_id, (uintmax_t)lun_id);
1093 
1094 	if (xpt_create_path(&path, NULL,
1095 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1096 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1097 			   "notification\n");
1098 		return;
1099 	}
1100 
1101 	xpt_async(ac_code, path, NULL);
1102 	xpt_free_path(path);
1103 }
1104 
/*
 * Forcibly complete every non-free command in the controller's command
 * array with a NULL reply.  Used during a diag reset, when no replies
 * will ever arrive from the hardware: each command is either run through
 * its cm_complete handler or its sleeping waiter is woken.  A command
 * that is neither completable nor waitable is logged as an anomaly.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Release the buffer left behind by a timed-out SATA ID
		 * request; it is owned by the driver at this point. */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Polled commands spin on MPS_CM_FLAGS_COMPLETE; set it so
		 * the poller exits. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1160 
/*
 * Restore the SAS layer to its startup state after a controller
 * reinitialization (diag reset): re-enter discovery mode, tell CAM the
 * bus was reset, complete all outstanding commands, and invalidate
 * every cached device handle so targets are rediscovered with their new
 * handles.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1204 
/*
 * Callout handler for a task management command that itself timed out.
 * A TM timing out means abort/reset recovery has failed, so the only
 * remaining option is a full controller reinitialization.
 */
static void
mpssas_tm_timeout(void *data)
{
	struct mps_command *tm = data;
	struct mps_softc *sc = tm->cm_sc;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
	    "task mgmt %p timed out\n", tm);

	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
	    ("command not inqueue\n"));

	/* Mark the TM busy; the diag reset path completes all non-free
	 * commands (see mpssas_complete_all_commands). */
	tm->cm_state = MPS_CM_STATE_BUSY;
	mps_reinit(sc);
}
1222 
/*
 * Completion handler for a LOGICAL UNIT RESET task management command.
 * If no commands remain outstanding on the LUN, recovery for it is
 * finished and any timed-out command on another LUN of the same target
 * is aborted next; otherwise the reset is considered to have failed
 * (regardless of the reported status) and recovery escalates to a
 * target reset.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed; disarm the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			mpssas_send_abort(sc, tm, cm);
		} else {
			/* Recovery fully done; release the TM command. */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1322 
1323 static void
1324 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1325 {
1326 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1327 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1328 	struct mpssas_target *targ;
1329 
1330 	callout_stop(&tm->cm_callout);
1331 
1332 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1333 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1334 	targ = tm->cm_targ;
1335 
1336 	/*
1337 	 * Currently there should be no way we can hit this case.  It only
1338 	 * happens when we have a failure to allocate chain frames, and
1339 	 * task management commands don't have S/G lists.
1340 	 */
1341 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1342 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1343 			   "This should not happen!\n", __func__, tm->cm_flags);
1344 		mpssas_free_tm(sc, tm);
1345 		return;
1346 	}
1347 
1348 	if (reply == NULL) {
1349 		mps_dprint(sc, MPS_RECOVERY,
1350 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1351 		    tm, le16toh(req->TaskMID));
1352 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1353 			/* this completion was due to a reset, just cleanup */
1354 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1355 			    "reset, ignoring NULL target reset reply\n");
1356 			targ->tm = NULL;
1357 			mpssas_free_tm(sc, tm);
1358 		} else {
1359 			/* we should have gotten a reply. */
1360 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1361 			    "target reset attempt, resetting controller\n");
1362 			mps_reinit(sc);
1363 		}
1364 		return;
1365 	}
1366 
1367 	mps_dprint(sc, MPS_RECOVERY,
1368 	    "target reset status 0x%x code 0x%x count %u\n",
1369 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1370 	    le32toh(reply->TerminationCount));
1371 
1372 	if (targ->outstanding == 0) {
1373 		/* we've finished recovery for this target and all
1374 		 * of its logical units.
1375 		 */
1376 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1377 		    "Finished reset recovery for target %u\n", targ->tid);
1378 
1379 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1380 		    CAM_LUN_WILDCARD);
1381 
1382 		targ->tm = NULL;
1383 		mpssas_free_tm(sc, tm);
1384 	} else {
1385 		/*
1386 		 * After a target reset, if this target still has
1387 		 * outstanding commands, the reset effectively failed,
1388 		 * regardless of the status reported.  escalate.
1389 		 */
1390 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1391 		    "Target reset complete for target %u, but still have %u "
1392 		    "command(s), resetting controller\n", targ->tid,
1393 		    targ->outstanding);
1394 		mps_reinit(sc);
1395 	}
1396 }
1397 
/* Seconds to wait for a TM reset reply before escalating to a diag reset. */
#define MPS_RESET_TIMEOUT 30

/*
 * Issue a task management reset of the given type (logical unit reset or
 * target reset) for the target associated with 'tm'.  Installs the
 * matching completion handler and arms a timeout callout that escalates
 * to a controller reset via mpssas_tm_timeout() if no reply arrives.
 * Returns -1 for a missing device handle or unknown reset type,
 * otherwise the result of mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Arm the escalation timeout before handing the TM to hardware. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1457 
1458 
/*
 * Completion handler for an ABORT TASK task management command.
 * Determines the next recovery step for the target: finished (no more
 * timed-out commands), abort the next timed-out command, or — if the
 * command we just tried to abort is still at the head of the timed-out
 * list, meaning the abort did not take effect — escalate to a logical
 * unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1540 
1541 #define MPS_ABORT_TIMEOUT 5
1542 
1543 static int
1544 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1545 {
1546 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1547 	struct mpssas_target *targ;
1548 	int err;
1549 
1550 	targ = cm->cm_targ;
1551 	if (targ->handle == 0) {
1552 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1553 		    "%s null devhandle for target_id %d\n",
1554 		    __func__, cm->cm_ccb->ccb_h.target_id);
1555 		return -1;
1556 	}
1557 
1558 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1559 	    "Aborting command %p\n", cm);
1560 
1561 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1562 	req->DevHandle = htole16(targ->handle);
1563 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1564 
1565 	/* XXX Need to handle invalid LUNs */
1566 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1567 
1568 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1569 
1570 	tm->cm_data = NULL;
1571 	tm->cm_complete = mpssas_abort_complete;
1572 	tm->cm_complete_data = (void *)tm;
1573 	tm->cm_targ = cm->cm_targ;
1574 	tm->cm_lun = cm->cm_lun;
1575 
1576 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1577 	    mpssas_tm_timeout, tm);
1578 
1579 	targ->aborts++;
1580 
1581 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1582 
1583 	err = mps_map_command(sc, tm);
1584 	if (err)
1585 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1586 		    "error %d sending abort for cm %p SMID %u\n",
1587 		    err, cm, req->TaskMID);
1588 	return err;
1589 }
1590 
1591 static void
1592 mpssas_scsiio_timeout(void *data)
1593 {
1594 	sbintime_t elapsed, now;
1595 	union ccb *ccb;
1596 	struct mps_softc *sc;
1597 	struct mps_command *cm;
1598 	struct mpssas_target *targ;
1599 
1600 	cm = (struct mps_command *)data;
1601 	sc = cm->cm_sc;
1602 	ccb = cm->cm_ccb;
1603 	now = sbinuptime();
1604 
1605 	MPS_FUNCTRACE(sc);
1606 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1607 
1608 	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1609 
1610 	/*
1611 	 * Run the interrupt handler to make sure it's not pending.  This
1612 	 * isn't perfect because the command could have already completed
1613 	 * and been re-used, though this is unlikely.
1614 	 */
1615 	mps_intr_locked(sc);
1616 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1617 		mpssas_log_command(cm, MPS_XINFO,
1618 		    "SCSI command %p almost timed out\n", cm);
1619 		return;
1620 	}
1621 
1622 	if (cm->cm_ccb == NULL) {
1623 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1624 		return;
1625 	}
1626 
1627 	targ = cm->cm_targ;
1628 	targ->timeouts++;
1629 
1630 	elapsed = now - ccb->ccb_h.qos.sim_data;
1631 	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1632 	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1633 	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1634 	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1635 
1636 	/* XXX first, check the firmware state, to see if it's still
1637 	 * operational.  if not, do a diag reset.
1638 	 */
1639 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1640 	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1641 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1642 
1643 	if (targ->tm != NULL) {
1644 		/* target already in recovery, just queue up another
1645 		 * timedout command to be processed later.
1646 		 */
1647 		mps_dprint(sc, MPS_RECOVERY,
1648 		    "queued timedout cm %p for processing by tm %p\n",
1649 		    cm, targ->tm);
1650 	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1651 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1652 		    "Sending abort to target %u for SMID %d\n", targ->tid,
1653 		    cm->cm_desc.Default.SMID);
1654 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1655 		    cm, targ->tm);
1656 
1657 		/* start recovery by aborting the first timedout command */
1658 		mpssas_send_abort(sc, targ->tm, cm);
1659 	} else {
1660 		/* XXX queue this target up for recovery once a TM becomes
1661 		 * available.  The firmware only has a limited number of
1662 		 * HighPriority credits for the high priority requests used
1663 		 * for task management, and we ran out.
1664 		 *
1665 		 * Isilon: don't worry about this for now, since we have
1666 		 * more credits than disks in an enclosure, and limit
1667 		 * ourselves to one TM per target for recovery.
1668 		 */
1669 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1670 		    "timedout cm %p failed to allocate a tm\n", cm);
1671 	}
1672 
1673 }
1674 
/*
 * CAM XPT_SCSI_IO handler.  Validates the target and CCB state, builds
 * an MPI2 SCSI IO request from the CCB (data direction, tag action, CDB,
 * and optional EEDP protection fields), arms the per-command timeout,
 * and queues the request to the hardware.  The CCB is completed
 * immediately here on any validation failure, or later by
 * mpssas_scsiio_complete() once the firmware replies.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means the target is not (or no longer) present. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command, or a diag reset is in flight: freeze the simq
	 * (once) and ask CAM to requeue the CCB later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request frame from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submission time and arm the per-command timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1952 
1953 /**
1954  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1955  */
1956 static void
1957 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1958     Mpi2SCSIIOReply_t *mpi_reply)
1959 {
1960 	u32 response_info;
1961 	u8 *response_bytes;
1962 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1963 	    MPI2_IOCSTATUS_MASK;
1964 	u8 scsi_state = mpi_reply->SCSIState;
1965 	u8 scsi_status = mpi_reply->SCSIStatus;
1966 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1967 	const char *desc_ioc_state, *desc_scsi_status;
1968 
1969 	if (log_info == 0x31170000)
1970 		return;
1971 
1972 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1973 	    ioc_status);
1974 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1975 	    scsi_status);
1976 
1977 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1978 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1979 
1980 	/*
1981 	 *We can add more detail about underflow data here
1982 	 * TO-DO
1983 	 */
1984 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1985 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1986 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1987 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1988 
1989 	if (sc->mps_debug & MPS_XINFO &&
1990 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1991 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1992 		scsi_sense_print(csio);
1993 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1994 	}
1995 
1996 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1997 		response_info = le32toh(mpi_reply->ResponseInfo);
1998 		response_bytes = (u8 *)&response_info;
1999 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
2000 		    response_bytes[0],
2001 		    mps_describe_table(mps_scsi_taskmgmt_string,
2002 		    response_bytes[0]));
2003 	}
2004 }
2005 
2006 static void
2007 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2008 {
2009 	MPI2_SCSI_IO_REPLY *rep;
2010 	union ccb *ccb;
2011 	struct ccb_scsiio *csio;
2012 	struct mpssas_softc *sassc;
2013 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2014 	u8 *TLR_bits, TLR_on;
2015 	int dir = 0, i;
2016 	u16 alloc_len;
2017 	struct mpssas_target *target;
2018 	target_id_t target_id;
2019 
2020 	MPS_FUNCTRACE(sc);
2021 	mps_dprint(sc, MPS_TRACE,
2022 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2023 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2024 	    cm->cm_targ->outstanding);
2025 
2026 	callout_stop(&cm->cm_callout);
2027 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2028 
2029 	sassc = sc->sassc;
2030 	ccb = cm->cm_complete_data;
2031 	csio = &ccb->csio;
2032 	target_id = csio->ccb_h.target_id;
2033 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2034 	/*
2035 	 * XXX KDM if the chain allocation fails, does it matter if we do
2036 	 * the sync and unload here?  It is simpler to do it in every case,
2037 	 * assuming it doesn't cause problems.
2038 	 */
2039 	if (cm->cm_data != NULL) {
2040 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2041 			dir = BUS_DMASYNC_POSTREAD;
2042 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2043 			dir = BUS_DMASYNC_POSTWRITE;
2044 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2045 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2046 	}
2047 
2048 	cm->cm_targ->completed++;
2049 	cm->cm_targ->outstanding--;
2050 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2051 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2052 
2053 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2054 	if (ccb->csio.bio != NULL)
2055 		biotrack(ccb->csio.bio, __func__);
2056 #endif
2057 
2058 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
2059 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2060 		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
2061 		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2062 		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
2063 		if (cm->cm_reply != NULL)
2064 			mpssas_log_command(cm, MPS_RECOVERY,
2065 			    "completed timedout cm %p ccb %p during recovery "
2066 			    "ioc %x scsi %x state %x xfer %u\n",
2067 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2068 			    rep->SCSIStatus, rep->SCSIState,
2069 			    le32toh(rep->TransferCount));
2070 		else
2071 			mpssas_log_command(cm, MPS_RECOVERY,
2072 			    "completed timedout cm %p ccb %p during recovery\n",
2073 			    cm, cm->cm_ccb);
2074 	} else if (cm->cm_targ->tm != NULL) {
2075 		if (cm->cm_reply != NULL)
2076 			mpssas_log_command(cm, MPS_RECOVERY,
2077 			    "completed cm %p ccb %p during recovery "
2078 			    "ioc %x scsi %x state %x xfer %u\n",
2079 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2080 			    rep->SCSIStatus, rep->SCSIState,
2081 			    le32toh(rep->TransferCount));
2082 		else
2083 			mpssas_log_command(cm, MPS_RECOVERY,
2084 			    "completed cm %p ccb %p during recovery\n",
2085 			    cm, cm->cm_ccb);
2086 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2087 		mpssas_log_command(cm, MPS_RECOVERY,
2088 		    "reset completed cm %p ccb %p\n",
2089 		    cm, cm->cm_ccb);
2090 	}
2091 
2092 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2093 		/*
2094 		 * We ran into an error after we tried to map the command,
2095 		 * so we're getting a callback without queueing the command
2096 		 * to the hardware.  So we set the status here, and it will
2097 		 * be retained below.  We'll go through the "fast path",
2098 		 * because there can be no reply when we haven't actually
2099 		 * gone out to the hardware.
2100 		 */
2101 		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2102 
2103 		/*
2104 		 * Currently the only error included in the mask is
2105 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2106 		 * chain frames.  We need to freeze the queue until we get
2107 		 * a command that completed without this error, which will
2108 		 * hopefully have some chain frames attached that we can
2109 		 * use.  If we wanted to get smarter about it, we would
2110 		 * only unfreeze the queue in this condition when we're
2111 		 * sure that we're getting some chain frames back.  That's
2112 		 * probably unnecessary.
2113 		 */
2114 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2115 			xpt_freeze_simq(sassc->sim, 1);
2116 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2117 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2118 				   "freezing SIM queue\n");
2119 		}
2120 	}
2121 
2122 	/*
2123 	 * If this is a Start Stop Unit command and it was issued by the driver
2124 	 * during shutdown, decrement the refcount to account for all of the
2125 	 * commands that were sent.  All SSU commands should be completed before
2126 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2127 	 * is TRUE.
2128 	 */
2129 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2130 		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2131 		sc->SSU_refcount--;
2132 	}
2133 
2134 	/* Take the fast path to completion */
2135 	if (cm->cm_reply == NULL) {
2136 		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2137 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2138 				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2139 			else {
2140 				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2141 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2142 			}
2143 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2144 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2145 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2146 				mps_dprint(sc, MPS_XINFO,
2147 				    "Unfreezing SIM queue\n");
2148 			}
2149 		}
2150 
2151 		/*
2152 		 * There are two scenarios where the status won't be
2153 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2154 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2155 		 */
2156 		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2157 			/*
2158 			 * Freeze the dev queue so that commands are
2159 			 * executed in the correct order after error
2160 			 * recovery.
2161 			 */
2162 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2163 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2164 		}
2165 		mps_free_command(sc, cm);
2166 		xpt_done(ccb);
2167 		return;
2168 	}
2169 
2170 	mpssas_log_command(cm, MPS_XINFO,
2171 	    "ioc %x scsi %x state %x xfer %u\n",
2172 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2173 	    le32toh(rep->TransferCount));
2174 
2175 	/*
2176 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2177 	 * Volume if an error occurred (normal I/O retry).  Use the original
2178 	 * CCB, but set a flag that this will be a retry so that it's sent to
2179 	 * the original volume.  Free the command but reuse the CCB.
2180 	 */
2181 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2182 		mps_free_command(sc, cm);
2183 		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2184 		mpssas_action_scsiio(sassc, ccb);
2185 		return;
2186 	} else
2187 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2188 
2189 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2190 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2191 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2192 		/* FALLTHROUGH */
2193 	case MPI2_IOCSTATUS_SUCCESS:
2194 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2195 
2196 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2197 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2198 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2199 
2200 		/* Completion failed at the transport level. */
2201 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2202 		    MPI2_SCSI_STATE_TERMINATED)) {
2203 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2204 			break;
2205 		}
2206 
2207 		/* In a modern packetized environment, an autosense failure
2208 		 * implies that there's not much else that can be done to
2209 		 * recover the command.
2210 		 */
2211 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2212 			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2213 			break;
2214 		}
2215 
2216 		/*
2217 		 * CAM doesn't care about SAS Response Info data, but if this is
2218 		 * the state check if TLR should be done.  If not, clear the
2219 		 * TLR_bits for the target.
2220 		 */
2221 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2222 		    ((le32toh(rep->ResponseInfo) &
2223 		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2224 		    MPS_SCSI_RI_INVALID_FRAME)) {
2225 			sc->mapping_table[target_id].TLR_bits =
2226 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2227 		}
2228 
2229 		/*
2230 		 * Intentionally override the normal SCSI status reporting
2231 		 * for these two cases.  These are likely to happen in a
2232 		 * multi-initiator environment, and we want to make sure that
2233 		 * CAM retries these commands rather than fail them.
2234 		 */
2235 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2236 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2237 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2238 			break;
2239 		}
2240 
2241 		/* Handle normal status and sense */
2242 		csio->scsi_status = rep->SCSIStatus;
2243 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2244 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2245 		else
2246 			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2247 
2248 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2249 			int sense_len, returned_sense_len;
2250 
2251 			returned_sense_len = min(le32toh(rep->SenseCount),
2252 			    sizeof(struct scsi_sense_data));
2253 			if (returned_sense_len < ccb->csio.sense_len)
2254 				ccb->csio.sense_resid = ccb->csio.sense_len -
2255 					returned_sense_len;
2256 			else
2257 				ccb->csio.sense_resid = 0;
2258 
2259 			sense_len = min(returned_sense_len,
2260 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2261 			bzero(&ccb->csio.sense_data,
2262 			      sizeof(ccb->csio.sense_data));
2263 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2264 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2265 		}
2266 
2267 		/*
2268 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2269 		 * and it's page code 0 (Supported Page List), and there is
2270 		 * inquiry data, and this is for a sequential access device, and
2271 		 * the device is an SSP target, and TLR is supported by the
2272 		 * controller, turn the TLR_bits value ON if page 0x90 is
2273 		 * supported.
2274 		 */
2275 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2276 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2277 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2278 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2279 		    (csio->data_ptr != NULL) &&
2280 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2281 		    (sc->control_TLR) &&
2282 		    (sc->mapping_table[target_id].device_info &
2283 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2284 			vpd_list = (struct scsi_vpd_supported_page_list *)
2285 			    csio->data_ptr;
2286 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2287 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2288 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2289 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2290 			    csio->cdb_io.cdb_bytes[4];
2291 			alloc_len -= csio->resid;
2292 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2293 				if (vpd_list->list[i] == 0x90) {
2294 					*TLR_bits = TLR_on;
2295 					break;
2296 				}
2297 			}
2298 		}
2299 
2300 		/*
2301 		 * If this is a SATA direct-access end device, mark it so that
2302 		 * a SCSI StartStopUnit command will be sent to it when the
2303 		 * driver is being shutdown.
2304 		 */
2305 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2306 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2307 		    (sc->mapping_table[target_id].device_info &
2308 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2309 		    ((sc->mapping_table[target_id].device_info &
2310 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2311 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2312 			target = &sassc->targets[target_id];
2313 			target->supports_SSU = TRUE;
2314 			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2315 			    target_id);
2316 		}
2317 		break;
2318 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2319 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2320 		/*
2321 		 * If devinfo is 0 this will be a volume.  In that case don't
2322 		 * tell CAM that the volume is not there.  We want volumes to
2323 		 * be enumerated until they are deleted/removed, not just
2324 		 * failed.
2325 		 */
2326 		if (cm->cm_targ->devinfo == 0)
2327 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2328 		else
2329 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2330 		break;
2331 	case MPI2_IOCSTATUS_INVALID_SGL:
2332 		mps_print_scsiio_cmd(sc, cm);
2333 		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2334 		break;
2335 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2336 		/*
2337 		 * This is one of the responses that comes back when an I/O
2338 		 * has been aborted.  If it is because of a timeout that we
2339 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2340 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2341 		 * command is the same (it gets retried, subject to the
2342 		 * retry counter), the only difference is what gets printed
2343 		 * on the console.
2344 		 */
2345 		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
2346 			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2347 		else
2348 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2349 		break;
2350 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2351 		/* resid is ignored for this condition */
2352 		csio->resid = 0;
2353 		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2354 		break;
2355 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2356 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2357 		/*
2358 		 * These can sometimes be transient transport-related
2359 		 * errors, and sometimes persistent drive-related errors.
2360 		 * We used to retry these without decrementing the retry
2361 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2362 		 * we hit a persistent drive problem that returns one of
2363 		 * these error codes, we would retry indefinitely.  So,
2364 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2365 		 * count and avoid infinite retries.  We're taking the
2366 		 * potential risk of flagging false failures in the event
2367 		 * of a topology-related error (e.g. a SAS expander problem
2368 		 * causes a command addressed to a drive to fail), but
2369 		 * avoiding getting into an infinite retry loop. However,
2370 		 * if we get them while were moving a device, we should
2371 		 * fail the request as 'not there' because the device
2372 		 * is effectively gone.
2373 		 */
2374 		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
2375 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2376 		else
2377 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2378 		mps_dprint(sc, MPS_INFO,
2379 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2380 		    mps_describe_table(mps_iocstatus_string,
2381 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2382 		    target_id, cm->cm_desc.Default.SMID,
2383 		    le32toh(rep->IOCLogInfo),
2384 		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
2385 		mps_dprint(sc, MPS_XINFO,
2386 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2387 		    rep->SCSIStatus, rep->SCSIState,
2388 		    le32toh(rep->TransferCount));
2389 		break;
2390 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2391 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2392 	case MPI2_IOCSTATUS_INVALID_VPID:
2393 	case MPI2_IOCSTATUS_INVALID_FIELD:
2394 	case MPI2_IOCSTATUS_INVALID_STATE:
2395 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2396 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2397 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2398 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2399 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2400 	default:
2401 		mpssas_log_command(cm, MPS_XINFO,
2402 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2403 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2404 		    rep->SCSIStatus, rep->SCSIState,
2405 		    le32toh(rep->TransferCount));
2406 		csio->resid = cm->cm_length;
2407 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2408 		break;
2409 	}
2410 
2411 	mps_sc_failed_io_info(sc,csio,rep);
2412 
2413 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2414 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2415 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2416 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2417 		    "unfreezing SIM queue\n");
2418 	}
2419 
2420 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2421 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2422 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2423 	}
2424 
2425 	/*
2426 	 * Check to see if we're removing the device. If so, and this is the
2427 	 * last command on the queue, proceed with the deferred removal of the
2428 	 * device.  Note, for removing a volume, this won't trigger because
2429 	 * pending_remove_tm will be NULL.
2430 	 */
2431 	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
2432 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2433 		    cm->cm_targ->pending_remove_tm != NULL) {
2434 			mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
2435 			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
2436 			cm->cm_targ->pending_remove_tm = NULL;
2437 		}
2438 	}
2439 
2440 	mps_free_command(sc, cm);
2441 	xpt_done(ccb);
2442 }
2443 
2444 /* All Request reached here are Endian safe */
2445 static void
2446 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2447     union ccb *ccb) {
2448 	pMpi2SCSIIORequest_t	pIO_req;
2449 	struct mps_softc	*sc = sassc->sc;
2450 	uint64_t		virtLBA;
2451 	uint32_t		physLBA, stripe_offset, stripe_unit;
2452 	uint32_t		io_size, column;
2453 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2454 
2455 	/*
2456 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2457 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2458 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2459 	 * bit different than the 10/16 CDBs, handle them separately.
2460 	 */
2461 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2462 	CDB = pIO_req->CDB.CDB32;
2463 
2464 	/*
2465 	 * Handle 6 byte CDBs.
2466 	 */
2467 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2468 	    (CDB[0] == WRITE_6))) {
2469 		/*
2470 		 * Get the transfer size in blocks.
2471 		 */
2472 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2473 
2474 		/*
2475 		 * Get virtual LBA given in the CDB.
2476 		 */
2477 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2478 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2479 
2480 		/*
2481 		 * Check that LBA range for I/O does not exceed volume's
2482 		 * MaxLBA.
2483 		 */
2484 		if ((virtLBA + (uint64_t)io_size - 1) <=
2485 		    sc->DD_max_lba) {
2486 			/*
2487 			 * Check if the I/O crosses a stripe boundary.  If not,
2488 			 * translate the virtual LBA to a physical LBA and set
2489 			 * the DevHandle for the PhysDisk to be used.  If it
2490 			 * does cross a boundary, do normal I/O.  To get the
2491 			 * right DevHandle to use, get the map number for the
2492 			 * column, then use that map number to look up the
2493 			 * DevHandle of the PhysDisk.
2494 			 */
2495 			stripe_offset = (uint32_t)virtLBA &
2496 			    (sc->DD_stripe_size - 1);
2497 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2498 				physLBA = (uint32_t)virtLBA >>
2499 				    sc->DD_stripe_exponent;
2500 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2501 				column = physLBA % sc->DD_num_phys_disks;
2502 				pIO_req->DevHandle =
2503 				    htole16(sc->DD_column_map[column].dev_handle);
2504 				/* ???? Is this endian safe*/
2505 				cm->cm_desc.SCSIIO.DevHandle =
2506 				    pIO_req->DevHandle;
2507 
2508 				physLBA = (stripe_unit <<
2509 				    sc->DD_stripe_exponent) + stripe_offset;
2510 				ptrLBA = &pIO_req->CDB.CDB32[1];
2511 				physLBA_byte = (uint8_t)(physLBA >> 16);
2512 				*ptrLBA = physLBA_byte;
2513 				ptrLBA = &pIO_req->CDB.CDB32[2];
2514 				physLBA_byte = (uint8_t)(physLBA >> 8);
2515 				*ptrLBA = physLBA_byte;
2516 				ptrLBA = &pIO_req->CDB.CDB32[3];
2517 				physLBA_byte = (uint8_t)physLBA;
2518 				*ptrLBA = physLBA_byte;
2519 
2520 				/*
2521 				 * Set flag that Direct Drive I/O is
2522 				 * being done.
2523 				 */
2524 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2525 			}
2526 		}
2527 		return;
2528 	}
2529 
2530 	/*
2531 	 * Handle 10, 12 or 16 byte CDBs.
2532 	 */
2533 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2534 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2535 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2536 	    (CDB[0] == WRITE_12))) {
2537 		/*
2538 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2539 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2540 		 * the else section.  10-byte and 12-byte CDB's are OK.
2541 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2542 		 * ready to accept 12byte CDB for Direct IOs.
2543 		 */
2544 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2545 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2546 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2547 			/*
2548 			 * Get the transfer size in blocks.
2549 			 */
2550 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2551 
2552 			/*
2553 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2554 			 * LBA in the CDB depending on command.
2555 			 */
2556 			lba_idx = ((CDB[0] == READ_12) ||
2557 				(CDB[0] == WRITE_12) ||
2558 				(CDB[0] == READ_10) ||
2559 				(CDB[0] == WRITE_10))? 2 : 6;
2560 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2561 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2562 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2563 			    (uint64_t)CDB[lba_idx + 3];
2564 
2565 			/*
2566 			 * Check that LBA range for I/O does not exceed volume's
2567 			 * MaxLBA.
2568 			 */
2569 			if ((virtLBA + (uint64_t)io_size - 1) <=
2570 			    sc->DD_max_lba) {
2571 				/*
2572 				 * Check if the I/O crosses a stripe boundary.
2573 				 * If not, translate the virtual LBA to a
2574 				 * physical LBA and set the DevHandle for the
2575 				 * PhysDisk to be used.  If it does cross a
2576 				 * boundary, do normal I/O.  To get the right
2577 				 * DevHandle to use, get the map number for the
2578 				 * column, then use that map number to look up
2579 				 * the DevHandle of the PhysDisk.
2580 				 */
2581 				stripe_offset = (uint32_t)virtLBA &
2582 				    (sc->DD_stripe_size - 1);
2583 				if ((stripe_offset + io_size) <=
2584 				    sc->DD_stripe_size) {
2585 					physLBA = (uint32_t)virtLBA >>
2586 					    sc->DD_stripe_exponent;
2587 					stripe_unit = physLBA /
2588 					    sc->DD_num_phys_disks;
2589 					column = physLBA %
2590 					    sc->DD_num_phys_disks;
2591 					pIO_req->DevHandle =
2592 					    htole16(sc->DD_column_map[column].
2593 					    dev_handle);
2594 					cm->cm_desc.SCSIIO.DevHandle =
2595 					    pIO_req->DevHandle;
2596 
2597 					physLBA = (stripe_unit <<
2598 					    sc->DD_stripe_exponent) +
2599 					    stripe_offset;
2600 					ptrLBA =
2601 					    &pIO_req->CDB.CDB32[lba_idx];
2602 					physLBA_byte = (uint8_t)(physLBA >> 24);
2603 					*ptrLBA = physLBA_byte;
2604 					ptrLBA =
2605 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2606 					physLBA_byte = (uint8_t)(physLBA >> 16);
2607 					*ptrLBA = physLBA_byte;
2608 					ptrLBA =
2609 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2610 					physLBA_byte = (uint8_t)(physLBA >> 8);
2611 					*ptrLBA = physLBA_byte;
2612 					ptrLBA =
2613 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2614 					physLBA_byte = (uint8_t)physLBA;
2615 					*ptrLBA = physLBA_byte;
2616 
2617 					/*
2618 					 * Set flag that Direct Drive I/O is
2619 					 * being done.
2620 					 */
2621 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2622 				}
2623 			}
2624 		} else {
2625 			/*
2626 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2627 			 * 0.  Get the transfer size in blocks.
2628 			 */
2629 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2630 
2631 			/*
2632 			 * Get virtual LBA.
2633 			 */
2634 			virtLBA = ((uint64_t)CDB[2] << 54) |
2635 			    ((uint64_t)CDB[3] << 48) |
2636 			    ((uint64_t)CDB[4] << 40) |
2637 			    ((uint64_t)CDB[5] << 32) |
2638 			    ((uint64_t)CDB[6] << 24) |
2639 			    ((uint64_t)CDB[7] << 16) |
2640 			    ((uint64_t)CDB[8] << 8) |
2641 			    (uint64_t)CDB[9];
2642 
2643 			/*
2644 			 * Check that LBA range for I/O does not exceed volume's
2645 			 * MaxLBA.
2646 			 */
2647 			if ((virtLBA + (uint64_t)io_size - 1) <=
2648 			    sc->DD_max_lba) {
2649 				/*
2650 				 * Check if the I/O crosses a stripe boundary.
2651 				 * If not, translate the virtual LBA to a
2652 				 * physical LBA and set the DevHandle for the
2653 				 * PhysDisk to be used.  If it does cross a
2654 				 * boundary, do normal I/O.  To get the right
2655 				 * DevHandle to use, get the map number for the
2656 				 * column, then use that map number to look up
2657 				 * the DevHandle of the PhysDisk.
2658 				 */
2659 				stripe_offset = (uint32_t)virtLBA &
2660 				    (sc->DD_stripe_size - 1);
2661 				if ((stripe_offset + io_size) <=
2662 				    sc->DD_stripe_size) {
2663 					physLBA = (uint32_t)(virtLBA >>
2664 					    sc->DD_stripe_exponent);
2665 					stripe_unit = physLBA /
2666 					    sc->DD_num_phys_disks;
2667 					column = physLBA %
2668 					    sc->DD_num_phys_disks;
2669 					pIO_req->DevHandle =
2670 					    htole16(sc->DD_column_map[column].
2671 					    dev_handle);
2672 					cm->cm_desc.SCSIIO.DevHandle =
2673 					    pIO_req->DevHandle;
2674 
2675 					physLBA = (stripe_unit <<
2676 					    sc->DD_stripe_exponent) +
2677 					    stripe_offset;
2678 
2679 					/*
2680 					 * Set upper 4 bytes of LBA to 0.  We
2681 					 * assume that the phys disks are less
2682 					 * than 2 TB's in size.  Then, set the
2683 					 * lower 4 bytes.
2684 					 */
2685 					pIO_req->CDB.CDB32[2] = 0;
2686 					pIO_req->CDB.CDB32[3] = 0;
2687 					pIO_req->CDB.CDB32[4] = 0;
2688 					pIO_req->CDB.CDB32[5] = 0;
2689 					ptrLBA = &pIO_req->CDB.CDB32[6];
2690 					physLBA_byte = (uint8_t)(physLBA >> 24);
2691 					*ptrLBA = physLBA_byte;
2692 					ptrLBA = &pIO_req->CDB.CDB32[7];
2693 					physLBA_byte = (uint8_t)(physLBA >> 16);
2694 					*ptrLBA = physLBA_byte;
2695 					ptrLBA = &pIO_req->CDB.CDB32[8];
2696 					physLBA_byte = (uint8_t)(physLBA >> 8);
2697 					*ptrLBA = physLBA_byte;
2698 					ptrLBA = &pIO_req->CDB.CDB32[9];
2699 					physLBA_byte = (uint8_t)physLBA;
2700 					*ptrLBA = physLBA_byte;
2701 
2702 					/*
2703 					 * Set flag that Direct Drive I/O is
2704 					 * being done.
2705 					 */
2706 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2707 				}
2708 			}
2709 		}
2710 	}
2711 }
2712 
2713 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests issued by
 * mpssas_send_smpcmd().  Translates the MPI2 reply into a CAM status on
 * the CCB, then unconditionally tears down the DMA mapping, frees the
 * command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* cm_reply may be NULL if the command was aborted, e.g. by a
	 * controller reset. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address (for logging) from the two
	 * little-endian 32-bit halves in the request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame holds the function result
	 * (SMP_FR_ACCEPTED on success). */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2777 
/*
 * Build and queue an MPI2 SMP passthrough request for an XPT_SMP_IO CCB.
 * The request and response buffers are described by a two-element uio so
 * that a single mps_map_command()/busdma load covers the bidirectional
 * transfer.  Completion is handled by mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outbound SMP request; iovec 1 receives the
	 * SMP response from the target. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2948 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be sent to -- the target itself if it contains an embedded SMP
 * target, otherwise its parent device (normally the expander it hangs
 * off of) -- and hand the CCB to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up by handle and take its
		 * SAS address, verifying it actually has an SMP target. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Current probe code caches the parent's devinfo and SAS
		 * address on the target itself, so no lookup is needed. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	/* Give up if neither the target nor its parent yielded an address. */
	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3084 #endif //__FreeBSD_version >= 900026
3085 
3086 static void
3087 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3088 {
3089 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3090 	struct mps_softc *sc;
3091 	struct mps_command *tm;
3092 	struct mpssas_target *targ;
3093 
3094 	MPS_FUNCTRACE(sassc->sc);
3095 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3096 
3097 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3098 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3099 	     ccb->ccb_h.target_id));
3100 	sc = sassc->sc;
3101 	tm = mpssas_alloc_tm(sc);
3102 	if (tm == NULL) {
3103 		mps_dprint(sc, MPS_ERROR,
3104 		    "command alloc failure in mpssas_action_resetdev\n");
3105 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3106 		xpt_done(ccb);
3107 		return;
3108 	}
3109 
3110 	targ = &sassc->targets[ccb->ccb_h.target_id];
3111 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3112 	req->DevHandle = htole16(targ->handle);
3113 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3114 
3115 	/* SAS Hard Link Reset / SATA Link Reset */
3116 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3117 
3118 	tm->cm_data = NULL;
3119 	tm->cm_complete = mpssas_resetdev_complete;
3120 	tm->cm_complete_data = ccb;
3121 	tm->cm_targ = targ;
3122 
3123 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3124 	mps_map_command(sc, tm);
3125 }
3126 
3127 static void
3128 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3129 {
3130 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3131 	union ccb *ccb;
3132 
3133 	MPS_FUNCTRACE(sc);
3134 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3135 
3136 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3137 	ccb = tm->cm_complete_data;
3138 
3139 	/*
3140 	 * Currently there should be no way we can hit this case.  It only
3141 	 * happens when we have a failure to allocate chain frames, and
3142 	 * task management commands don't have S/G lists.
3143 	 */
3144 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3145 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3146 
3147 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3148 
3149 		mps_dprint(sc, MPS_ERROR,
3150 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3151 			   "This should not happen!\n", __func__, tm->cm_flags,
3152 			   req->DevHandle);
3153 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3154 		goto bailout;
3155 	}
3156 
3157 	mps_dprint(sc, MPS_XINFO,
3158 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3159 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3160 
3161 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3162 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3163 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3164 		    CAM_LUN_WILDCARD);
3165 	}
3166 	else
3167 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3168 
3169 bailout:
3170 
3171 	mpssas_free_tm(sc, tm);
3172 	xpt_done(ccb);
3173 }
3174 
3175 static void
3176 mpssas_poll(struct cam_sim *sim)
3177 {
3178 	struct mpssas_softc *sassc;
3179 
3180 	sassc = cam_sim_softc(sim);
3181 
3182 	if (sassc->sc->mps_debug & MPS_TRACE) {
3183 		/* frequent debug messages during a panic just slow
3184 		 * everything down too much.
3185 		 */
3186 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3187 		sassc->sc->mps_debug &= ~MPS_TRACE;
3188 	}
3189 
3190 	mps_intr_locked(sassc->sc);
3191 }
3192 
/*
 * CAM asynchronous event callback.  Used to learn whether a LUN is
 * formatted for EEDP (protection information) so the EEDP state can be
 * recorded in the target's LUN list.  Depending on the CAM version,
 * this is driven either by AC_ADVINFO_CHANGED (read capacity advinfo
 * updates) or by AC_FOUND_DEVICE (via mpssas_check_eedp()).
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find an existing record for this LUN, if any. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we see this LUN: allocate a record for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data for this LUN
		 * via an XPT_DEV_ADVINFO CCB issued on our stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Protection type 1 and 3 LUNs are considered EEDP
		 * formatted; type 2 (and anything unknown) is not.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3308 
3309 #if (__FreeBSD_version < 901503) || \
3310     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3311 static void
3312 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3313 		  struct ccb_getdev *cgd)
3314 {
3315 	struct mpssas_softc *sassc = sc->sassc;
3316 	struct ccb_scsiio *csio;
3317 	struct scsi_read_capacity_16 *scsi_cmd;
3318 	struct scsi_read_capacity_eedp *rcap_buf;
3319 	path_id_t pathid;
3320 	target_id_t targetid;
3321 	lun_id_t lunid;
3322 	union ccb *ccb;
3323 	struct cam_path *local_path;
3324 	struct mpssas_target *target;
3325 	struct mpssas_lun *lun;
3326 	uint8_t	found_lun;
3327 	char path_str[64];
3328 
3329 	sassc = sc->sassc;
3330 	pathid = cam_sim_path(sassc->sim);
3331 	targetid = xpt_path_target_id(path);
3332 	lunid = xpt_path_lun_id(path);
3333 
3334 	KASSERT(targetid < sassc->maxtargets,
3335 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3336 	     targetid));
3337 	target = &sassc->targets[targetid];
3338 	if (target->handle == 0x0)
3339 		return;
3340 
3341 	/*
3342 	 * Determine if the device is EEDP capable.
3343 	 *
3344 	 * If this flag is set in the inquiry data,
3345 	 * the device supports protection information,
3346 	 * and must support the 16 byte read
3347 	 * capacity command, otherwise continue without
3348 	 * sending read cap 16
3349 	 */
3350 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3351 		return;
3352 
3353 	/*
3354 	 * Issue a READ CAPACITY 16 command.  This info
3355 	 * is used to determine if the LUN is formatted
3356 	 * for EEDP support.
3357 	 */
3358 	ccb = xpt_alloc_ccb_nowait();
3359 	if (ccb == NULL) {
3360 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3361 		    "for EEDP support.\n");
3362 		return;
3363 	}
3364 
3365 	if (xpt_create_path(&local_path, xpt_periph,
3366 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3367 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3368 		    "path for EEDP support\n");
3369 		xpt_free_ccb(ccb);
3370 		return;
3371 	}
3372 
3373 	/*
3374 	 * If LUN is already in list, don't create a new
3375 	 * one.
3376 	 */
3377 	found_lun = FALSE;
3378 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3379 		if (lun->lun_id == lunid) {
3380 			found_lun = TRUE;
3381 			break;
3382 		}
3383 	}
3384 	if (!found_lun) {
3385 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3386 		    M_NOWAIT | M_ZERO);
3387 		if (lun == NULL) {
3388 			mps_dprint(sc, MPS_ERROR,
3389 			    "Unable to alloc LUN for EEDP support.\n");
3390 			xpt_free_path(local_path);
3391 			xpt_free_ccb(ccb);
3392 			return;
3393 		}
3394 		lun->lun_id = lunid;
3395 		SLIST_INSERT_HEAD(&target->luns, lun,
3396 		    lun_link);
3397 	}
3398 
3399 	xpt_path_string(local_path, path_str, sizeof(path_str));
3400 
3401 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3402 	    path_str, target->handle);
3403 
3404 	/*
3405 	 * Issue a READ CAPACITY 16 command for the LUN.
3406 	 * The mpssas_read_cap_done function will load
3407 	 * the read cap info into the LUN struct.
3408 	 */
3409 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3410 	    M_MPT2, M_NOWAIT | M_ZERO);
3411 	if (rcap_buf == NULL) {
3412 		mps_dprint(sc, MPS_FAULT,
3413 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3414 		xpt_free_path(ccb->ccb_h.path);
3415 		xpt_free_ccb(ccb);
3416 		return;
3417 	}
3418 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3419 	csio = &ccb->csio;
3420 	csio->ccb_h.func_code = XPT_SCSI_IO;
3421 	csio->ccb_h.flags = CAM_DIR_IN;
3422 	csio->ccb_h.retry_count = 4;
3423 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3424 	csio->ccb_h.timeout = 60000;
3425 	csio->data_ptr = (uint8_t *)rcap_buf;
3426 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3427 	csio->sense_len = MPS_SENSE_LEN;
3428 	csio->cdb_len = sizeof(*scsi_cmd);
3429 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3430 
3431 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3432 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3433 	scsi_cmd->opcode = 0x9E;
3434 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3435 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3436 
3437 	ccb->ccb_h.ppriv_ptr1 = sassc;
3438 	xpt_action(ccb);
3439 }
3440 
/*
 * Completion handler for the READ CAPACITY 16 command issued by
 * mpssas_check_eedp().  Records the LUN's EEDP formatting state and
 * logical block size in the target's LUN list, then frees the data
 * buffer, path, and CCB.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself because this SCSI
	 * command was generated internally by the driver rather than by a
	 * peripheral.  This is currently the only place the driver issues
	 * an internal SCSI command; any future internal commands will
	 * likewise need to release the devq themselves, since their
	 * completions never go back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is the PROT_EN flag. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3511 #endif /* (__FreeBSD_version < 901503) || \
3512           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3513 
3514 /*
3515  * Set the INRESET flag for this target so that no I/O will be sent to
3516  * the target until the reset has completed.  If an I/O request does
3517  * happen, the devq will be frozen.  The CCB holds the path which is
3518  * used to release the devq.  The devq is released and the CCB is freed
3519  * when the TM completes.
3520  */
3521 void
3522 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3523     struct mpssas_target *target, lun_id_t lun_id)
3524 {
3525 	union ccb *ccb;
3526 	path_id_t path_id;
3527 
3528 	ccb = xpt_alloc_ccb_nowait();
3529 	if (ccb) {
3530 		path_id = cam_sim_path(sc->sassc->sim);
3531 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3532 		    target->tid, lun_id) != CAM_REQ_CMP) {
3533 			xpt_free_ccb(ccb);
3534 		} else {
3535 			tm->cm_ccb = ccb;
3536 			tm->cm_targ = target;
3537 			target->flags |= MPSSAS_TARGET_INRESET;
3538 		}
3539 	}
3540 }
3541 
3542 int
3543 mpssas_startup(struct mps_softc *sc)
3544 {
3545 
3546 	/*
3547 	 * Send the port enable message and set the wait_for_port_enable flag.
3548 	 * This flag helps to keep the simq frozen until all discovery events
3549 	 * are processed.
3550 	 */
3551 	sc->wait_for_port_enable = 1;
3552 	mpssas_send_portenable(sc);
3553 	return (0);
3554 }
3555 
3556 static int
3557 mpssas_send_portenable(struct mps_softc *sc)
3558 {
3559 	MPI2_PORT_ENABLE_REQUEST *request;
3560 	struct mps_command *cm;
3561 
3562 	MPS_FUNCTRACE(sc);
3563 
3564 	if ((cm = mps_alloc_command(sc)) == NULL)
3565 		return (EBUSY);
3566 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3567 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3568 	request->MsgFlags = 0;
3569 	request->VP_ID = 0;
3570 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3571 	cm->cm_complete = mpssas_portenable_complete;
3572 	cm->cm_data = NULL;
3573 	cm->cm_sge = NULL;
3574 
3575 	mps_map_command(sc, cm);
3576 	mps_dprint(sc, MPS_XINFO,
3577 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3578 	    cm, cm->cm_req, cm->cm_complete);
3579 	return (0);
3580 }
3581 
3582 static void
3583 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3584 {
3585 	MPI2_PORT_ENABLE_REPLY *reply;
3586 	struct mpssas_softc *sassc;
3587 
3588 	MPS_FUNCTRACE(sc);
3589 	sassc = sc->sassc;
3590 
3591 	/*
3592 	 * Currently there should be no way we can hit this case.  It only
3593 	 * happens when we have a failure to allocate chain frames, and
3594 	 * port enable commands don't have S/G lists.
3595 	 */
3596 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3597 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3598 			   "This should not happen!\n", __func__, cm->cm_flags);
3599 	}
3600 
3601 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3602 	if (reply == NULL)
3603 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3604 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3605 	    MPI2_IOCSTATUS_SUCCESS)
3606 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3607 
3608 	mps_free_command(sc, cm);
3609 
3610 	/*
3611 	 * Get WarpDrive info after discovery is complete but before the scan
3612 	 * starts.  At this point, all devices are ready to be exposed to the
3613 	 * OS.  If devices should be hidden instead, take them out of the
3614 	 * 'targets' array before the scan.  The devinfo for a disk will have
3615 	 * some info and a volume's will be 0.  Use that to remove disks.
3616 	 */
3617 	mps_wd_config_pages(sc);
3618 
3619 	/*
3620 	 * Done waiting for port enable to complete.  Decrement the refcount.
3621 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3622 	 * take place.  Since the simq was explicitly frozen before port
3623 	 * enable, it must be explicitly released here to keep the
3624 	 * freeze/release count in sync.
3625 	 */
3626 	sc->wait_for_port_enable = 0;
3627 	sc->port_enable_complete = 1;
3628 	wakeup(&sc->port_enable_complete);
3629 	mpssas_startup_decrement(sassc);
3630 }
3631 
3632 int
3633 mpssas_check_id(struct mpssas_softc *sassc, int id)
3634 {
3635 	struct mps_softc *sc = sassc->sc;
3636 	char *ids;
3637 	char *name;
3638 
3639 	ids = &sc->exclude_ids[0];
3640 	while((name = strsep(&ids, ",")) != NULL) {
3641 		if (name[0] == '\0')
3642 			continue;
3643 		if (strtol(name, NULL, 0) == (long)id)
3644 			return (1);
3645 	}
3646 
3647 	return (0);
3648 }
3649 
3650 void
3651 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3652 {
3653 	struct mpssas_softc *sassc;
3654 	struct mpssas_lun *lun, *lun_tmp;
3655 	struct mpssas_target *targ;
3656 	int i;
3657 
3658 	sassc = sc->sassc;
3659 	/*
3660 	 * The number of targets is based on IOC Facts, so free all of
3661 	 * the allocated LUNs for each target and then the target buffer
3662 	 * itself.
3663 	 */
3664 	for (i=0; i< maxtargets; i++) {
3665 		targ = &sassc->targets[i];
3666 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3667 			free(lun, M_MPT2);
3668 		}
3669 	}
3670 	free(sassc->targets, M_MPT2);
3671 
3672 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3673 	    M_MPT2, M_WAITOK|M_ZERO);
3674 	if (!sassc->targets) {
3675 		panic("%s failed to alloc targets with error %d\n",
3676 		    __func__, ENOMEM);
3677 	}
3678 }
3679