xref: /freebsd/sys/dev/mps/mps_sas.c (revision c2a55efd74cccb3d4e7b9037b240ad062c203bb8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  */
32 
33 /* Communications core for Avago Technologies (LSI) MPT2 */
34 
35 /* TODO Move headers to mpsvar */
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/selinfo.h>
41 #include <sys/module.h>
42 #include <sys/bus.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/malloc.h>
46 #include <sys/uio.h>
47 #include <sys/sysctl.h>
48 #include <sys/endian.h>
49 #include <sys/queue.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/sbuf.h>
53 #include <sys/stdarg.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <cam/cam.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_xpt.h>
62 #include <cam/cam_debug.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_xpt_sim.h>
65 #include <cam/cam_xpt_periph.h>
66 #include <cam/cam_periph.h>
67 #include <cam/scsi/scsi_all.h>
68 #include <cam/scsi/scsi_message.h>
69 #include <cam/scsi/smp_all.h>
70 
71 #include <dev/mps/mpi/mpi2_type.h>
72 #include <dev/mps/mpi/mpi2.h>
73 #include <dev/mps/mpi/mpi2_ioc.h>
74 #include <dev/mps/mpi/mpi2_sas.h>
75 #include <dev/mps/mpi/mpi2_cnfg.h>
76 #include <dev/mps/mpi/mpi2_init.h>
77 #include <dev/mps/mpi/mpi2_tool.h>
78 #include <dev/mps/mps_ioctl.h>
79 #include <dev/mps/mpsvar.h>
80 #include <dev/mps/mps_table.h>
81 #include <dev/mps/mps_sas.h>
82 
83 #include <sys/sdt.h>
84 
85 /* SDT Probes */
86 SDT_PROBE_DEFINE4(cam, , mps, complete, "union ccb *",
87     "struct mps_command *", "u_int", "u32");
88 
89 /*
90  * static array to check SCSI OpCode for EEDP protection bits
91  */
92 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
93 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
94 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI opcode; rows are 16 opcodes wide.  Only the READ,
 * WRITE, VERIFY, WRITE AND VERIFY and WRITE SAME families carry EEDP
 * protection flags; every other opcode maps to 0 (no protection ops).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE&VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE&VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE&VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
113 
114 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
115 
116 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
117 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
118 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
119 static void mpssas_poll(struct cam_sim *sim);
120 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
121     struct mps_command *cm);
122 static void mpssas_scsiio_timeout(void *data);
123 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
124 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
125     struct mps_command *cm, union ccb *ccb);
126 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
127 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
128 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
129 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
130 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
131 			       uint64_t sasaddr);
132 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
133 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
134 static void mpssas_async(void *callback_arg, uint32_t code,
135 			 struct cam_path *path, void *arg);
136 static int mpssas_send_portenable(struct mps_softc *sc);
137 static void mpssas_portenable_complete(struct mps_softc *sc,
138     struct mps_command *cm);
139 
140 struct mpssas_target *
141 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
142 {
143 	struct mpssas_target *target;
144 	int i;
145 
146 	for (i = start; i < sassc->maxtargets; i++) {
147 		target = &sassc->targets[i];
148 		if (target->handle == handle)
149 			return (target);
150 	}
151 
152 	return (NULL);
153 }
154 
155 /* we need to freeze the simq during attach and diag reset, to avoid failing
156  * commands before device handles have been found by discovery.  Since
157  * discovery involves reading config pages and possibly sending commands,
158  * discovery actions may continue even after we receive the end of discovery
159  * event, so refcount discovery actions instead of assuming we can unfreeze
160  * the simq when we get the event.
161  */
162 void
163 mpssas_startup_increment(struct mpssas_softc *sassc)
164 {
165 	MPS_FUNCTRACE(sassc->sc);
166 
167 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
168 		if (sassc->startup_refcount++ == 0) {
169 			/* just starting, freeze the simq */
170 			mps_dprint(sassc->sc, MPS_INIT,
171 			    "%s freezing simq\n", __func__);
172 			xpt_hold_boot();
173 			xpt_freeze_simq(sassc->sim, 1);
174 		}
175 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
176 		    sassc->startup_refcount);
177 	}
178 }
179 
180 void
181 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
182 {
183 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
184 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
185 		xpt_release_simq(sassc->sim, 1);
186 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
187 	}
188 }
189 
190 void
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
192 {
193 	MPS_FUNCTRACE(sassc->sc);
194 
195 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
196 		if (--sassc->startup_refcount == 0) {
197 			/* finished all discovery-related actions, release
198 			 * the simq and rescan for the latest topology.
199 			 */
200 			mps_dprint(sassc->sc, MPS_INIT,
201 			    "%s releasing simq\n", __func__);
202 			sassc->flags &= ~MPSSAS_IN_STARTUP;
203 			xpt_release_simq(sassc->sim, 1);
204 			xpt_release_boot();
205 		}
206 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
207 		    sassc->startup_refcount);
208 	}
209 }
210 
211 /*
212  * The firmware requires us to stop sending commands when we're doing task
213  * management.
214  * XXX The logic for serializing the device has been made lazy and moved to
215  * mpssas_prepare_for_tm().
216  */
217 struct mps_command *
218 mpssas_alloc_tm(struct mps_softc *sc)
219 {
220 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
221 	struct mps_command *tm;
222 
223 	tm = mps_alloc_high_priority_command(sc);
224 	if (tm == NULL)
225 		return (NULL);
226 
227 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
228 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
229 	return tm;
230 }
231 
/*
 * Release a task-management command allocated by mpssas_alloc_tm().
 * Tolerates a NULL argument so callers can free unconditionally.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
		    "Unfreezing devq for target ID %d\n",
		    tm->cm_targ->tid);
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	/* Return the command to the high-priority pool. */
	mps_free_high_priority_command(sc, tm);
}
255 
256 void
257 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
258 {
259 	struct mpssas_softc *sassc = sc->sassc;
260 	path_id_t pathid;
261 	target_id_t targetid;
262 	union ccb *ccb;
263 
264 	MPS_FUNCTRACE(sc);
265 	pathid = cam_sim_path(sassc->sim);
266 	if (targ == NULL)
267 		targetid = CAM_TARGET_WILDCARD;
268 	else
269 		targetid = targ - sassc->targets;
270 
271 	/*
272 	 * Allocate a CCB and schedule a rescan.
273 	 */
274 	ccb = xpt_alloc_ccb_nowait();
275 	if (ccb == NULL) {
276 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
277 		return;
278 	}
279 
280 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
281 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
282 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
283 		xpt_free_ccb(ccb);
284 		return;
285 	}
286 
287 	if (targetid == CAM_TARGET_WILDCARD)
288 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
289 	else
290 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
291 
292 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
293 	xpt_rescan(ccb);
294 }
295 
/*
 * Emit a formatted debug message describing a command.  The message is
 * prefixed with the command's CAM path (plus CDB and transfer length for
 * SCSI I/O) or, when no CCB is attached, a "noperiph" sim:bus:target:lun
 * tuple, followed by the SMID and the caller's printf-style message.
 * Returns early if the requested debug level is not enabled.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];	/* fixed on-stack buffer backing the sbuf */

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Describe the command via its CAM path; add CDB and
		 * length for SCSI I/O requests. */
		xpt_path_sbuf(cm->cm_ccb->csio.ccb_h.path, &sb);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB attached: fall back to sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
338 
339 static void
340 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
341 {
342 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
343 	struct mpssas_target *targ;
344 	uint16_t handle;
345 
346 	MPS_FUNCTRACE(sc);
347 
348 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
349 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
350 	targ = tm->cm_targ;
351 
352 	if (reply == NULL) {
353 		/* XXX retry the remove after the diag reset completes? */
354 		mps_dprint(sc, MPS_FAULT,
355 		    "%s NULL reply resetting device 0x%04x\n", __func__,
356 		    handle);
357 		mpssas_free_tm(sc, tm);
358 		return;
359 	}
360 
361 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
362 	    MPI2_IOCSTATUS_SUCCESS) {
363 		mps_dprint(sc, MPS_ERROR,
364 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
365 		   le16toh(reply->IOCStatus), handle);
366 	}
367 
368 	mps_dprint(sc, MPS_XINFO,
369 	    "Reset aborted %u commands\n", reply->TerminationCount);
370 	mps_free_reply(sc, tm->cm_reply_data);
371 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
372 
373 	mps_dprint(sc, MPS_XINFO,
374 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
375 
376 	/*
377 	 * Don't clear target if remove fails because things will get confusing.
378 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
379 	 * this target id if possible, and so we can assign the same target id
380 	 * to this device if it comes back in the future.
381 	 */
382 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
383 	    MPI2_IOCSTATUS_SUCCESS) {
384 		targ = tm->cm_targ;
385 		targ->handle = 0x0;
386 		targ->encl_handle = 0x0;
387 		targ->encl_slot = 0x0;
388 		targ->exp_dev_handle = 0x0;
389 		targ->phy_num = 0x0;
390 		targ->linkrate = 0x0;
391 		targ->devinfo = 0x0;
392 		targ->flags = 0x0;
393 	}
394 
395 	mpssas_free_tm(sc, tm);
396 }
397 
398 /*
399  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
400  * Otherwise Volume Delete is same as Bare Drive Removal.
401  */
402 void
403 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
404 {
405 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
406 	struct mps_softc *sc;
407 	struct mps_command *tm;
408 	struct mpssas_target *targ = NULL;
409 
410 	MPS_FUNCTRACE(sassc->sc);
411 	sc = sassc->sc;
412 
413 #ifdef WD_SUPPORT
414 	/*
415 	 * If this is a WD controller, determine if the disk should be exposed
416 	 * to the OS or not.  If disk should be exposed, return from this
417 	 * function without doing anything.
418 	 */
419 	if (sc->WD_available && (sc->WD_hide_expose ==
420 	    MPS_WD_EXPOSE_ALWAYS)) {
421 		return;
422 	}
423 #endif //WD_SUPPORT
424 
425 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
426 	if (targ == NULL) {
427 		/* FIXME: what is the action? */
428 		/* We don't know about this device? */
429 		mps_dprint(sc, MPS_ERROR,
430 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
431 		return;
432 	}
433 
434 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
435 
436 	tm = mpssas_alloc_tm(sc);
437 	if (tm == NULL) {
438 		mps_dprint(sc, MPS_ERROR,
439 		    "%s: command alloc failure\n", __func__);
440 		return;
441 	}
442 
443 	mpssas_rescan_target(sc, targ);
444 
445 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
446 	req->DevHandle = targ->handle;
447 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
448 
449 	/* SAS Hard Link Reset / SATA Link Reset */
450 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
451 
452 	tm->cm_targ = targ;
453 	tm->cm_data = NULL;
454 	tm->cm_complete = mpssas_remove_volume;
455 	tm->cm_complete_data = (void *)(uintptr_t)handle;
456 
457 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
458 	    __func__, targ->tid);
459 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
460 
461 	mps_map_command(sc, tm);
462 }
463 
/*
 * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that link has been lost
 * and a device need to go away, it expects that the host will perform a
 * target reset and then an op remove.  The reset has the side-effect of
 * aborting any outstanding requests for the device, which is required for
 * the op-remove to succeed.  It's not clear if the host should check for
 * the device coming back alive after the reset.
 *
 * This function issues the target reset TM; mpssas_remove_device() runs
 * on completion and sends the MPI2_SAS_OP_REMOVE_DEVICE follow-up.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TM commands come from the high-priority pool. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	/* Kick off a rescan so CAM notices the device going away. */
	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_complete = mpssas_remove_device;
	/* Stash the handle for the completion handler. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}
523 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command to send the
 * MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control request — immediately when
 * the target has no outstanding commands, otherwise deferred via
 * targ->pending_remove_tm until they drain.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* A non-success IOCStatus is logged but the remove continues. */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO,
		    "No pending commands: starting remove_device target %u handle 0x%04x\n",
		    targ->tid, handle);
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
600 
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, clears the target table entry and
 * frees its LUN list; on failure the entry is deliberately left intact.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free any per-LUN state hanging off the target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}

	mpssas_free_tm(sc, tm);
}
674 
675 static int
676 mpssas_register_events(struct mps_softc *sc)
677 {
678 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
679 
680 	bzero(events, 16);
681 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
682 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
683 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
684 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
685 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
686 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
687 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
688 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
689 	setbit(events, MPI2_EVENT_IR_VOLUME);
690 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
691 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
692 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
693 
694 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
695 	    &sc->sassc->mpssas_eh);
696 
697 	return (0);
698 }
699 
/*
 * Attach the SAS/CAM layer for the controller: allocate the target table,
 * SIM queue and SIM, register the CAM bus, hold the simq (and boot) until
 * discovery completes, register for AC_ADVINFO_CHANGED async events (used
 * to determine device EEDP capabilities), and hook up firmware event
 * handling.  Returns 0 on success or an errno; on error, everything
 * allocated here is torn down via mps_detach_sas().
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the simq below the request pool: high-priority requests
	 * (and one spare) are reserved for driver-internal use. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	mps_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
815 
/*
 * Tear down the SAS/CAM layer: deregister events and async handlers,
 * drain the event taskqueue, unwind any startup freeze, deregister the
 * CAM bus, and free the SIM, simq, target table and softc.  Safe to call
 * on a partially-attached instance (used from the mps_attach_sas() error
 * path); a NULL sc->sassc is a no-op.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Registering with code 0 removes the handler. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Drop any startup references still held from discovery. */
	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the per-LUN state attached to each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
878 
879 void
880 mpssas_discovery_end(struct mpssas_softc *sassc)
881 {
882 	struct mps_softc *sc = sassc->sc;
883 
884 	MPS_FUNCTRACE(sc);
885 
886 	/*
887 	 * After discovery has completed, check the mapping table for any
888 	 * missing devices and update their missing counts. Only do this once
889 	 * whenever the driver is initialized so that missing counts aren't
890 	 * updated unnecessarily. Note that just because discovery has
891 	 * completed doesn't mean that events have been processed yet. The
892 	 * check_devices function is a callout timer that checks if ALL devices
893 	 * are missing. If so, it will wait a little longer for events to
894 	 * complete and keep resetting itself until some device in the mapping
895 	 * table is not missing, meaning that event processing has started.
896 	 */
897 	if (sc->track_mapping_events) {
898 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
899 		    "completed. Check for missing devices in the mapping "
900 		    "table.\n");
901 		callout_reset(&sc->device_check_callout,
902 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
903 		    sc);
904 	}
905 }
906 
/*
 * CAM action entry point for the SIM: dispatch an incoming CCB by its
 * function code.  Called with the driver mutex held.  Synchronously
 * handled CCBs are completed here via xpt_done(); SCSI I/O, SMP I/O and
 * device reset are handed off to helpers that complete asynchronously
 * (note the early returns for those cases).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and identity to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device is present at this target. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the stored link rate code into a CAM bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* Completed asynchronously; skip the xpt_done() below. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1026 
1027 static void
1028 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1029     target_id_t target_id, lun_id_t lun_id)
1030 {
1031 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1032 	struct cam_path *path;
1033 
1034 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1035 	    ac_code, target_id, (uintmax_t)lun_id);
1036 
1037 	if (xpt_create_path(&path, NULL,
1038 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1039 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1040 			   "notification\n");
1041 		return;
1042 	}
1043 
1044 	xpt_async(ac_code, path, NULL);
1045 	xpt_free_path(path);
1046 }
1047 
/*
 * Forcibly complete every outstanding command after a diag reset.  The
 * hardware will never reply to these, so each non-free command is
 * completed with a NULL cm_reply: its completion callback is invoked if
 * one is set, otherwise any thread sleeping on the command is woken.
 * Commands with neither are only logged, as nothing can reclaim them
 * here.  Resets io_cmds_active when done.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		/* Take the command out of any queue state before completing. */
		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Reclaim the identify buffer left over from a timed-out
		 * SATA ID request; no completion path will free it now. */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Let pollers spinning on COMPLETE observe termination. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1103 
/*
 * Re-synchronize the SAS layer with the controller after a diag reset:
 * re-enter startup mode, announce a bus reset to CAM, flush every
 * outstanding command, and invalidate all cached device handles so they
 * are re-learned during rediscovery.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* Flag the target so new I/O is requeued (not failed) until
		 * rediscovery assigns a fresh handle. */
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1147 
1148 static void
1149 mpssas_tm_timeout(void *data)
1150 {
1151 	struct mps_command *tm = data;
1152 	struct mps_softc *sc = tm->cm_sc;
1153 
1154 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1155 
1156 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1157 	    "task mgmt %p timed out\n", tm);
1158 
1159 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1160 	    ("command not inqueue, state = %u\n", tm->cm_state));
1161 
1162 	tm->cm_state = MPS_CM_STATE_BUSY;
1163 	mps_reinit(sc);
1164 }
1165 
/*
 * Completion handler for a logical unit reset TM.  If no commands remain
 * outstanding for the LUN, recovery for it is done: announce AC_SENT_BDR
 * and either continue aborting the target's next timed-out command or
 * free the TM.  If commands remain, the LUN reset effectively failed and
 * we escalate to a target reset.  A NULL reply is either cleaned up (if
 * caused by a diag reset) or escalates to a controller reinit.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed; disarm its escalation timeout. */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse the same TM frame for the next abort. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1263 
1264 static void
1265 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1266 {
1267 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1268 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1269 	struct mpssas_target *targ;
1270 
1271 	callout_stop(&tm->cm_callout);
1272 
1273 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1274 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1275 	targ = tm->cm_targ;
1276 
1277 	/*
1278 	 * Currently there should be no way we can hit this case.  It only
1279 	 * happens when we have a failure to allocate chain frames, and
1280 	 * task management commands don't have S/G lists.
1281 	 */
1282 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1283 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1284 			   "This should not happen!\n", __func__, tm->cm_flags);
1285 		mpssas_free_tm(sc, tm);
1286 		return;
1287 	}
1288 
1289 	if (reply == NULL) {
1290 		mps_dprint(sc, MPS_RECOVERY,
1291 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1292 		    tm, le16toh(req->TaskMID));
1293 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1294 			/* this completion was due to a reset, just cleanup */
1295 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1296 			    "reset, ignoring NULL target reset reply\n");
1297 			targ->tm = NULL;
1298 			mpssas_free_tm(sc, tm);
1299 		} else {
1300 			/* we should have gotten a reply. */
1301 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1302 			    "target reset attempt, resetting controller\n");
1303 			mps_reinit(sc);
1304 		}
1305 		return;
1306 	}
1307 
1308 	mps_dprint(sc, MPS_RECOVERY,
1309 	    "target reset status 0x%x code 0x%x count %u\n",
1310 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1311 	    le32toh(reply->TerminationCount));
1312 
1313 	if (targ->outstanding == 0) {
1314 		/* we've finished recovery for this target and all
1315 		 * of its logical units.
1316 		 */
1317 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1318 		    "Finished reset recovery for target %u\n", targ->tid);
1319 
1320 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1321 		    CAM_LUN_WILDCARD);
1322 
1323 		targ->tm = NULL;
1324 		mpssas_free_tm(sc, tm);
1325 	} else {
1326 		/*
1327 		 * After a target reset, if this target still has
1328 		 * outstanding commands, the reset effectively failed,
1329 		 * regardless of the status reported.  escalate.
1330 		 */
1331 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1332 		    "Target reset complete for target %u, but still have %u "
1333 		    "command(s), resetting controller\n", targ->tid,
1334 		    targ->outstanding);
1335 		mps_reinit(sc);
1336 	}
1337 }
1338 
1339 #define MPS_RESET_TIMEOUT 30
1340 
/*
 * Build and send a reset task management request of the given type
 * (logical unit reset or target reset) using the caller-supplied TM
 * frame.  Sets the matching completion handler, freezes/prepares the
 * affected LUN(s) via mpssas_prepare_for_tm(), arms an escalation
 * timeout, and maps the command to the hardware.
 *
 * Returns 0 on success, -1 for a missing handle or unknown reset type,
 * or the error from mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		/* Freeze only the LUN being reset. */
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		/* A target reset affects every LUN on the target. */
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* If the TM itself never completes, escalate via mpssas_tm_timeout. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1398 
/*
 * Completion handler for an abort-task TM.  If the target's timed-out
 * command list is empty, recovery is finished and the TM is freed.  If
 * the head of the list is a different command than the one we just tried
 * to abort, the abort worked and we move on to the next one.  If the
 * same command is still at the head, the abort failed and we escalate to
 * a logical unit reset.  A NULL reply is either cleaned up (if caused by
 * a diag reset) or escalates to a controller reinit.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		/* Reuse the same TM frame for the next abort. */
		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1480 
1481 #define MPS_ABORT_TIMEOUT 5
1482 
1483 static int
1484 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1485 {
1486 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1487 	struct mpssas_target *targ;
1488 	int err;
1489 
1490 	targ = cm->cm_targ;
1491 	if (targ->handle == 0) {
1492 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1493 		    "%s null devhandle for target_id %d\n",
1494 		    __func__, cm->cm_ccb->ccb_h.target_id);
1495 		return -1;
1496 	}
1497 
1498 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1499 	    "Aborting command %p\n", cm);
1500 
1501 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1502 	req->DevHandle = htole16(targ->handle);
1503 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1504 
1505 	/* XXX Need to handle invalid LUNs */
1506 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1507 
1508 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1509 
1510 	tm->cm_data = NULL;
1511 	tm->cm_complete = mpssas_abort_complete;
1512 	tm->cm_complete_data = (void *)tm;
1513 	tm->cm_targ = cm->cm_targ;
1514 	tm->cm_lun = cm->cm_lun;
1515 
1516 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1517 	    mpssas_tm_timeout, tm);
1518 
1519 	targ->aborts++;
1520 
1521 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1522 
1523 	err = mps_map_command(sc, tm);
1524 	if (err)
1525 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1526 		    "error %d sending abort for cm %p SMID %u\n",
1527 		    err, cm, req->TaskMID);
1528 	return err;
1529 }
1530 
/*
 * Per-command timeout handler for SCSI I/O.  First polls the interrupt
 * handler in case the completion is merely pending; if the command
 * really timed out, it is marked CAM_CMD_TIMEOUT, queued on the
 * target's timed-out list, and recovery is started (or joined, if a TM
 * is already active for the target) by sending an abort.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		/* The poll above completed the command; no recovery needed. */
		mpssas_log_command(cm, MPS_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	elapsed = now - ccb->ccb_h.qos.sim_data;
	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}

}
1614 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, build an MPI2 SCSI IO
 * request frame (transfer direction, task tagging, LUN, CDB, and
 * optional EEDP/protection setup), arm the per-command timeout, and
 * hand the command to the hardware.  On resource shortage the SIM queue
 * is frozen and the CCB is requeued; the command completes later via
 * mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		/* During a diag reset handles are cleared; requeue rather
		 * than fail so the device survives rediscovery. */
		if (targ->flags & MPSSAS_TARGET_INDIAGRESET) {
			mps_dprint(sc, MPS_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		/* RAID component members are not addressable as SCSI devices. */
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Geting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		/* Out of command frames (or resetting): freeze the simq once
		 * and ask CAM to retry this CCB later. */
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* CDB may be passed by pointer or inline in the CCB. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		/* NOTE(review): the comment above says direct I/O is tried
		 * when the command is NOT a retry, yet the test dispatches to
		 * mpssas_direct_drive_io() when the field equals MPS_WD_RETRY
		 * — presumably the field semantics are set up elsewhere;
		 * confirm against mpssas_scsiio_complete(). */
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submission time for timeout diagnostics, then arm the
	 * per-command timeout callout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1903 
/**
 * mps_sc_failed_io_info - log details of a non-successful SCSI_IO request
 *
 * Decodes the IOC status, SCSI status/state, and loginfo from the reply
 * frame and prints them at MPS_XINFO debug level.  Also dumps the
 * autosense data and the SAS response code when the reply marks them as
 * valid.  Purely diagnostic; has no effect on command disposition.
 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
	const char *desc_ioc_state, *desc_scsi_status;

	/*
	 * Suppress this loginfo code entirely; it is skipped
	 * unconditionally.  NOTE(review): the meaning of 0x31170000 is not
	 * visible here -- presumably a frequent, uninteresting event.
	 */
	if (log_info == 0x31170000)
		return;

	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
	    ioc_status);
	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
	    scsi_status);

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);

	/*
	 * We can add more detail about underflow data here
	 * TO-DO
	 */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state %b\n", desc_scsi_status, scsi_status,
	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");

	/* Dump the autosense data when present and XINFO debugging is on. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* The SAS response code lives in the low byte of ResponseInfo. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
		    response_bytes[0],
		    mps_describe_table(mps_scsi_taskmgmt_string,
		    response_bytes[0]));
	}
}
1956 
/*
 * Completion handler for XPT_SCSI_IO commands.
 *
 * Tears down the data DMA mapping, removes the command from the target's
 * outstanding queue, and translates the MPI reply (or its absence) into a
 * CAM status on the CCB.  Also handles recovery/timeout bookkeeping,
 * SIM-queue freeze/unfreeze on chain-frame exhaustion, WarpDrive Direct
 * Drive I/O retries (re-issued to the IR volume), TLR negotiation based
 * on INQUIRY VPD page 0 responses, and shutdown SSU refcounting.  Always
 * frees the command and completes the CCB (except on the DD I/O retry
 * path, which re-enters mpssas_action_scsiio with the same CCB).
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* This command is no longer outstanding on the target. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Recovery bookkeeping: if this command had timed out, pull it off
	 * the target's timedout queue; otherwise just log completions that
	 * race with an active task management or a diag reset.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	SDT_PROBE4(cam, , mps, complete, ccb, cm, sassc->flags,
	    sc->mapping_table[target_id].device_info);

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply frame means full success. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the reply's IOCStatus into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy no more sense than the CCB has room for. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Only prints when MPS_XINFO debugging is enabled. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mps_dprint(sc, MPS_INFO,
			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
			    cm->cm_targ->tid, cm->cm_targ->handle);
			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2399 
2400 /* All Request reached here are Endian safe */
2401 static void
2402 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2403     union ccb *ccb) {
2404 	pMpi2SCSIIORequest_t	pIO_req;
2405 	struct mps_softc	*sc = sassc->sc;
2406 	uint64_t		virtLBA;
2407 	uint32_t		physLBA, stripe_offset, stripe_unit;
2408 	uint32_t		io_size, column;
2409 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2410 
2411 	/*
2412 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2413 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2414 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2415 	 * bit different than the 10/16 CDBs, handle them separately.
2416 	 */
2417 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2418 	CDB = pIO_req->CDB.CDB32;
2419 
2420 	/*
2421 	 * Handle 6 byte CDBs.
2422 	 */
2423 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2424 	    (CDB[0] == WRITE_6))) {
2425 		/*
2426 		 * Get the transfer size in blocks.
2427 		 */
2428 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2429 
2430 		/*
2431 		 * Get virtual LBA given in the CDB.
2432 		 */
2433 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2434 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2435 
2436 		/*
2437 		 * Check that LBA range for I/O does not exceed volume's
2438 		 * MaxLBA.
2439 		 */
2440 		if ((virtLBA + (uint64_t)io_size - 1) <=
2441 		    sc->DD_max_lba) {
2442 			/*
2443 			 * Check if the I/O crosses a stripe boundary.  If not,
2444 			 * translate the virtual LBA to a physical LBA and set
2445 			 * the DevHandle for the PhysDisk to be used.  If it
2446 			 * does cross a boundary, do normal I/O.  To get the
2447 			 * right DevHandle to use, get the map number for the
2448 			 * column, then use that map number to look up the
2449 			 * DevHandle of the PhysDisk.
2450 			 */
2451 			stripe_offset = (uint32_t)virtLBA &
2452 			    (sc->DD_stripe_size - 1);
2453 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2454 				physLBA = (uint32_t)virtLBA >>
2455 				    sc->DD_stripe_exponent;
2456 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2457 				column = physLBA % sc->DD_num_phys_disks;
2458 				pIO_req->DevHandle =
2459 				    htole16(sc->DD_column_map[column].dev_handle);
2460 				/* ???? Is this endian safe*/
2461 				cm->cm_desc.SCSIIO.DevHandle =
2462 				    pIO_req->DevHandle;
2463 
2464 				physLBA = (stripe_unit <<
2465 				    sc->DD_stripe_exponent) + stripe_offset;
2466 				ptrLBA = &pIO_req->CDB.CDB32[1];
2467 				physLBA_byte = (uint8_t)(physLBA >> 16);
2468 				*ptrLBA = physLBA_byte;
2469 				ptrLBA = &pIO_req->CDB.CDB32[2];
2470 				physLBA_byte = (uint8_t)(physLBA >> 8);
2471 				*ptrLBA = physLBA_byte;
2472 				ptrLBA = &pIO_req->CDB.CDB32[3];
2473 				physLBA_byte = (uint8_t)physLBA;
2474 				*ptrLBA = physLBA_byte;
2475 
2476 				/*
2477 				 * Set flag that Direct Drive I/O is
2478 				 * being done.
2479 				 */
2480 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2481 			}
2482 		}
2483 		return;
2484 	}
2485 
2486 	/*
2487 	 * Handle 10, 12 or 16 byte CDBs.
2488 	 */
2489 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2490 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2491 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2492 	    (CDB[0] == WRITE_12))) {
2493 		/*
2494 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2495 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2496 		 * the else section.  10-byte and 12-byte CDB's are OK.
2497 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2498 		 * ready to accept 12byte CDB for Direct IOs.
2499 		 */
2500 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2501 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2502 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2503 			/*
2504 			 * Get the transfer size in blocks.
2505 			 */
2506 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2507 
2508 			/*
2509 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2510 			 * LBA in the CDB depending on command.
2511 			 */
2512 			lba_idx = ((CDB[0] == READ_12) ||
2513 				(CDB[0] == WRITE_12) ||
2514 				(CDB[0] == READ_10) ||
2515 				(CDB[0] == WRITE_10))? 2 : 6;
2516 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2517 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2518 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2519 			    (uint64_t)CDB[lba_idx + 3];
2520 
2521 			/*
2522 			 * Check that LBA range for I/O does not exceed volume's
2523 			 * MaxLBA.
2524 			 */
2525 			if ((virtLBA + (uint64_t)io_size - 1) <=
2526 			    sc->DD_max_lba) {
2527 				/*
2528 				 * Check if the I/O crosses a stripe boundary.
2529 				 * If not, translate the virtual LBA to a
2530 				 * physical LBA and set the DevHandle for the
2531 				 * PhysDisk to be used.  If it does cross a
2532 				 * boundary, do normal I/O.  To get the right
2533 				 * DevHandle to use, get the map number for the
2534 				 * column, then use that map number to look up
2535 				 * the DevHandle of the PhysDisk.
2536 				 */
2537 				stripe_offset = (uint32_t)virtLBA &
2538 				    (sc->DD_stripe_size - 1);
2539 				if ((stripe_offset + io_size) <=
2540 				    sc->DD_stripe_size) {
2541 					physLBA = (uint32_t)virtLBA >>
2542 					    sc->DD_stripe_exponent;
2543 					stripe_unit = physLBA /
2544 					    sc->DD_num_phys_disks;
2545 					column = physLBA %
2546 					    sc->DD_num_phys_disks;
2547 					pIO_req->DevHandle =
2548 					    htole16(sc->DD_column_map[column].
2549 					    dev_handle);
2550 					cm->cm_desc.SCSIIO.DevHandle =
2551 					    pIO_req->DevHandle;
2552 
2553 					physLBA = (stripe_unit <<
2554 					    sc->DD_stripe_exponent) +
2555 					    stripe_offset;
2556 					ptrLBA =
2557 					    &pIO_req->CDB.CDB32[lba_idx];
2558 					physLBA_byte = (uint8_t)(physLBA >> 24);
2559 					*ptrLBA = physLBA_byte;
2560 					ptrLBA =
2561 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2562 					physLBA_byte = (uint8_t)(physLBA >> 16);
2563 					*ptrLBA = physLBA_byte;
2564 					ptrLBA =
2565 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2566 					physLBA_byte = (uint8_t)(physLBA >> 8);
2567 					*ptrLBA = physLBA_byte;
2568 					ptrLBA =
2569 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2570 					physLBA_byte = (uint8_t)physLBA;
2571 					*ptrLBA = physLBA_byte;
2572 
2573 					/*
2574 					 * Set flag that Direct Drive I/O is
2575 					 * being done.
2576 					 */
2577 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2578 				}
2579 			}
2580 		} else {
2581 			/*
2582 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2583 			 * 0.  Get the transfer size in blocks.
2584 			 */
2585 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2586 
2587 			/*
2588 			 * Get virtual LBA.
2589 			 */
2590 			virtLBA = ((uint64_t)CDB[2] << 54) |
2591 			    ((uint64_t)CDB[3] << 48) |
2592 			    ((uint64_t)CDB[4] << 40) |
2593 			    ((uint64_t)CDB[5] << 32) |
2594 			    ((uint64_t)CDB[6] << 24) |
2595 			    ((uint64_t)CDB[7] << 16) |
2596 			    ((uint64_t)CDB[8] << 8) |
2597 			    (uint64_t)CDB[9];
2598 
2599 			/*
2600 			 * Check that LBA range for I/O does not exceed volume's
2601 			 * MaxLBA.
2602 			 */
2603 			if ((virtLBA + (uint64_t)io_size - 1) <=
2604 			    sc->DD_max_lba) {
2605 				/*
2606 				 * Check if the I/O crosses a stripe boundary.
2607 				 * If not, translate the virtual LBA to a
2608 				 * physical LBA and set the DevHandle for the
2609 				 * PhysDisk to be used.  If it does cross a
2610 				 * boundary, do normal I/O.  To get the right
2611 				 * DevHandle to use, get the map number for the
2612 				 * column, then use that map number to look up
2613 				 * the DevHandle of the PhysDisk.
2614 				 */
2615 				stripe_offset = (uint32_t)virtLBA &
2616 				    (sc->DD_stripe_size - 1);
2617 				if ((stripe_offset + io_size) <=
2618 				    sc->DD_stripe_size) {
2619 					physLBA = (uint32_t)(virtLBA >>
2620 					    sc->DD_stripe_exponent);
2621 					stripe_unit = physLBA /
2622 					    sc->DD_num_phys_disks;
2623 					column = physLBA %
2624 					    sc->DD_num_phys_disks;
2625 					pIO_req->DevHandle =
2626 					    htole16(sc->DD_column_map[column].
2627 					    dev_handle);
2628 					cm->cm_desc.SCSIIO.DevHandle =
2629 					    pIO_req->DevHandle;
2630 
2631 					physLBA = (stripe_unit <<
2632 					    sc->DD_stripe_exponent) +
2633 					    stripe_offset;
2634 
2635 					/*
2636 					 * Set upper 4 bytes of LBA to 0.  We
2637 					 * assume that the phys disks are less
2638 					 * than 2 TB's in size.  Then, set the
2639 					 * lower 4 bytes.
2640 					 */
2641 					pIO_req->CDB.CDB32[2] = 0;
2642 					pIO_req->CDB.CDB32[3] = 0;
2643 					pIO_req->CDB.CDB32[4] = 0;
2644 					pIO_req->CDB.CDB32[5] = 0;
2645 					ptrLBA = &pIO_req->CDB.CDB32[6];
2646 					physLBA_byte = (uint8_t)(physLBA >> 24);
2647 					*ptrLBA = physLBA_byte;
2648 					ptrLBA = &pIO_req->CDB.CDB32[7];
2649 					physLBA_byte = (uint8_t)(physLBA >> 16);
2650 					*ptrLBA = physLBA_byte;
2651 					ptrLBA = &pIO_req->CDB.CDB32[8];
2652 					physLBA_byte = (uint8_t)(physLBA >> 8);
2653 					*ptrLBA = physLBA_byte;
2654 					ptrLBA = &pIO_req->CDB.CDB32[9];
2655 					physLBA_byte = (uint8_t)physLBA;
2656 					*ptrLBA = physLBA_byte;
2657 
2658 					/*
2659 					 * Set flag that Direct Drive I/O is
2660 					 * being done.
2661 					 */
2662 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2663 				}
2664 			}
2665 		}
2666 	}
2667 }
2668 
2669 static void
2670 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2671 {
2672 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2673 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2674 	uint64_t sasaddr;
2675 	union ccb *ccb;
2676 
2677 	ccb = cm->cm_complete_data;
2678 
2679 	/*
2680 	 * Currently there should be no way we can hit this case.  It only
2681 	 * happens when we have a failure to allocate chain frames, and SMP
2682 	 * commands require two S/G elements only.  That should be handled
2683 	 * in the standard request size.
2684 	 */
2685 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2686 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2687 			   __func__, cm->cm_flags);
2688 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2689 		goto bailout;
2690         }
2691 
2692 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2693 	if (rpl == NULL) {
2694 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2695 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2696 		goto bailout;
2697 	}
2698 
2699 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2700 	sasaddr = le32toh(req->SASAddress.Low);
2701 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2702 
2703 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2704 	    MPI2_IOCSTATUS_SUCCESS ||
2705 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2706 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2707 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2708 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2709 		goto bailout;
2710 	}
2711 
2712 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2713 		   "%#jx completed successfully\n", __func__,
2714 		   (uintmax_t)sasaddr);
2715 
2716 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2717 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2718 	else
2719 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2720 
2721 bailout:
2722 	/*
2723 	 * We sync in both directions because we had DMAs in the S/G list
2724 	 * in both directions.
2725 	 */
2726 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2727 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2728 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2729 	mps_free_command(sc, cm);
2730 	xpt_done(ccb);
2731 }
2732 
/*
 * Build and dispatch an MPI2 SMP passthrough request to the expander at
 * 'sasaddr' on behalf of the XPT_SMP_IO CCB 'ccb'.  The request and
 * response buffers are mapped together through a two-element uio so a
 * single mps_map_command() call handles the bidirectional DMA.
 * Completion is handled asynchronously by mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry S/G list; ds_addr holds a KVA here. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	/* Mapping failed; release the command and fail the CCB. */
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2903 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (either the target itself, if it contains an SMP
 * target, or its parent expander) and hand off to mpssas_send_smpcmd().
 * On any failure the CCB is completed here with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * Current probe code caches the parent's devinfo and SAS
		 * address in the target itself; no extra lookup needed.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */
	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3036 
3037 static void
3038 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3039 {
3040 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3041 	struct mps_softc *sc;
3042 	struct mps_command *tm;
3043 	struct mpssas_target *targ;
3044 
3045 	MPS_FUNCTRACE(sassc->sc);
3046 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3047 
3048 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3049 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3050 	     ccb->ccb_h.target_id));
3051 	sc = sassc->sc;
3052 	tm = mpssas_alloc_tm(sc);
3053 	if (tm == NULL) {
3054 		mps_dprint(sc, MPS_ERROR,
3055 		    "command alloc failure in mpssas_action_resetdev\n");
3056 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3057 		xpt_done(ccb);
3058 		return;
3059 	}
3060 
3061 	targ = &sassc->targets[ccb->ccb_h.target_id];
3062 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3063 	req->DevHandle = htole16(targ->handle);
3064 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3065 
3066 	/* SAS Hard Link Reset / SATA Link Reset */
3067 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3068 
3069 	tm->cm_data = NULL;
3070 	tm->cm_complete = mpssas_resetdev_complete;
3071 	tm->cm_complete_data = ccb;
3072 	tm->cm_targ = targ;
3073 
3074 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3075 	mps_map_command(sc, tm);
3076 }
3077 
3078 static void
3079 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3080 {
3081 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3082 	union ccb *ccb;
3083 
3084 	MPS_FUNCTRACE(sc);
3085 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3086 
3087 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3088 	ccb = tm->cm_complete_data;
3089 
3090 	/*
3091 	 * Currently there should be no way we can hit this case.  It only
3092 	 * happens when we have a failure to allocate chain frames, and
3093 	 * task management commands don't have S/G lists.
3094 	 */
3095 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3096 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3097 
3098 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3099 
3100 		mps_dprint(sc, MPS_ERROR,
3101 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3102 			   "This should not happen!\n", __func__, tm->cm_flags,
3103 			   req->DevHandle);
3104 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3105 		goto bailout;
3106 	}
3107 
3108 	mps_dprint(sc, MPS_XINFO,
3109 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3110 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3111 
3112 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3113 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3114 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3115 		    CAM_LUN_WILDCARD);
3116 	}
3117 	else
3118 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3119 
3120 bailout:
3121 
3122 	mpssas_free_tm(sc, tm);
3123 	xpt_done(ccb);
3124 }
3125 
3126 static void
3127 mpssas_poll(struct cam_sim *sim)
3128 {
3129 	struct mpssas_softc *sassc;
3130 
3131 	sassc = cam_sim_softc(sim);
3132 
3133 	if (sassc->sc->mps_debug & MPS_TRACE) {
3134 		/* frequent debug messages during a panic just slow
3135 		 * everything down too much.
3136 		 */
3137 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3138 		sassc->sc->mps_debug &= ~MPS_TRACE;
3139 	}
3140 
3141 	mps_intr_locked(sassc->sc);
3142 }
3143 
/*
 * CAM async event callback.  The driver registers for AC_ADVINFO_CHANGED
 * so it can learn, via the cached READ CAPACITY(16) data, whether a LUN
 * is formatted with protection information (EEDP) and record the result
 * in the per-target LUN list for later I/O setup.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	mps_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* For AC_ADVINFO_CHANGED, 'arg' encodes the advinfo type. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Reuse an existing LUN record for this LUN id if we have one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data via an
		 * XPT_DEV_ADVINFO CCB issued on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Release the devq if the CCB came back with it frozen. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			/*
			 * Protection is enabled; only DIF types 1 and 3 are
			 * treated as EEDP-formatted here.
			 */
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mps_unlock(sc);
}
3246 
3247 /*
3248  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3249  * the target until the reset has completed.  The CCB holds the path which
3250  * is used to release the devq.  The devq is released and the CCB is freed
3251  * when the TM completes.
3252  * We only need to do this when we're entering reset, not at each time we
3253  * need to send an abort (which will happen if multiple commands timeout
3254  * while we're sending the abort). We do not release the queue for each
3255  * command we complete (just at the end when we free the tm), so freezing
3256  * it each time doesn't make sense.
3257  */
3258 void
3259 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3260     struct mpssas_target *target, lun_id_t lun_id)
3261 {
3262 	union ccb *ccb;
3263 	path_id_t path_id;
3264 
3265 	ccb = xpt_alloc_ccb_nowait();
3266 	if (ccb) {
3267 		path_id = cam_sim_path(sc->sassc->sim);
3268 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3269 		    target->tid, lun_id) != CAM_REQ_CMP) {
3270 			xpt_free_ccb(ccb);
3271 		} else {
3272 			tm->cm_ccb = ccb;
3273 			tm->cm_targ = target;
3274 			if ((target->flags & MPSSAS_TARGET_INRESET) == 0) {
3275 				mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
3276 				    "%s: Freezing devq for target ID %d\n",
3277 				    __func__, target->tid);
3278 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3279 				target->flags |= MPSSAS_TARGET_INRESET;
3280 			}
3281 		}
3282 	}
3283 }
3284 
3285 int
3286 mpssas_startup(struct mps_softc *sc)
3287 {
3288 
3289 	/*
3290 	 * Send the port enable message and set the wait_for_port_enable flag.
3291 	 * This flag helps to keep the simq frozen until all discovery events
3292 	 * are processed.
3293 	 */
3294 	sc->wait_for_port_enable = 1;
3295 	mpssas_send_portenable(sc);
3296 	return (0);
3297 }
3298 
3299 static int
3300 mpssas_send_portenable(struct mps_softc *sc)
3301 {
3302 	MPI2_PORT_ENABLE_REQUEST *request;
3303 	struct mps_command *cm;
3304 
3305 	MPS_FUNCTRACE(sc);
3306 
3307 	if ((cm = mps_alloc_command(sc)) == NULL)
3308 		return (EBUSY);
3309 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3310 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3311 	request->MsgFlags = 0;
3312 	request->VP_ID = 0;
3313 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3314 	cm->cm_complete = mpssas_portenable_complete;
3315 	cm->cm_data = NULL;
3316 	cm->cm_sge = NULL;
3317 
3318 	mps_map_command(sc, cm);
3319 	mps_dprint(sc, MPS_XINFO,
3320 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3321 	    cm, cm->cm_req, cm->cm_complete);
3322 	return (0);
3323 }
3324 
3325 static void
3326 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3327 {
3328 	MPI2_PORT_ENABLE_REPLY *reply;
3329 	struct mpssas_softc *sassc;
3330 
3331 	MPS_FUNCTRACE(sc);
3332 	sassc = sc->sassc;
3333 
3334 	/*
3335 	 * Currently there should be no way we can hit this case.  It only
3336 	 * happens when we have a failure to allocate chain frames, and
3337 	 * port enable commands don't have S/G lists.
3338 	 */
3339 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3340 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3341 			   "This should not happen!\n", __func__, cm->cm_flags);
3342 	}
3343 
3344 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3345 	if (reply == NULL)
3346 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3347 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3348 	    MPI2_IOCSTATUS_SUCCESS)
3349 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3350 
3351 	mps_free_command(sc, cm);
3352 
3353 	/*
3354 	 * Get WarpDrive info after discovery is complete but before the scan
3355 	 * starts.  At this point, all devices are ready to be exposed to the
3356 	 * OS.  If devices should be hidden instead, take them out of the
3357 	 * 'targets' array before the scan.  The devinfo for a disk will have
3358 	 * some info and a volume's will be 0.  Use that to remove disks.
3359 	 */
3360 	mps_wd_config_pages(sc);
3361 
3362 	/*
3363 	 * Done waiting for port enable to complete.  Decrement the refcount.
3364 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3365 	 * take place.  Since the simq was explicitly frozen before port
3366 	 * enable, it must be explicitly released here to keep the
3367 	 * freeze/release count in sync.
3368 	 */
3369 	sc->wait_for_port_enable = 0;
3370 	sc->port_enable_complete = 1;
3371 	wakeup(&sc->port_enable_complete);
3372 	mpssas_startup_decrement(sassc);
3373 }
3374 
3375 int
3376 mpssas_check_id(struct mpssas_softc *sassc, int id)
3377 {
3378 	struct mps_softc *sc = sassc->sc;
3379 	char *ids;
3380 	char *name;
3381 
3382 	ids = &sc->exclude_ids[0];
3383 	while((name = strsep(&ids, ",")) != NULL) {
3384 		if (name[0] == '\0')
3385 			continue;
3386 		if (strtol(name, NULL, 0) == (long)id)
3387 			return (1);
3388 	}
3389 
3390 	return (0);
3391 }
3392 
3393 void
3394 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3395 {
3396 	struct mpssas_softc *sassc;
3397 	struct mpssas_lun *lun, *lun_tmp;
3398 	struct mpssas_target *targ;
3399 	int i;
3400 
3401 	sassc = sc->sassc;
3402 	/*
3403 	 * The number of targets is based on IOC Facts, so free all of
3404 	 * the allocated LUNs for each target and then the target buffer
3405 	 * itself.
3406 	 */
3407 	for (i = 0; i < maxtargets; i++) {
3408 		targ = &sassc->targets[i];
3409 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3410 			free(lun, M_MPT2);
3411 		}
3412 	}
3413 	free(sassc->targets, M_MPT2);
3414 
3415 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3416 	    M_MPT2, M_WAITOK|M_ZERO);
3417 }
3418