xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 8ef24a0d4b28fe230e20637f56869cc4148cd2ca)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/mpr/mpi/mpi2_type.h>
76 #include <dev/mpr/mpi/mpi2.h>
77 #include <dev/mpr/mpi/mpi2_ioc.h>
78 #include <dev/mpr/mpi/mpi2_sas.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
86 
87 #define MPRSAS_DISCOVERY_TIMEOUT	20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
89 
90 /*
91  * static array to check SCSI OpCode for EEDP protection bits
92  */
93 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 static uint8_t op_code_prot[256] = {
97 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
106 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
113 };
114 
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
116 
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
123 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
124 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
125 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
126 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
127 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
128     struct mpr_command *cm);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 #if (__FreeBSD_version < 901503) || \
132     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
133 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
134     struct ccb_getdev *cgd);
135 static void mprsas_read_cap_done(struct cam_periph *periph,
136     union ccb *done_ccb);
137 #endif
138 static int mprsas_send_portenable(struct mpr_softc *sc);
139 static void mprsas_portenable_complete(struct mpr_softc *sc,
140     struct mpr_command *cm);
141 
142 #if __FreeBSD_version >= 900026
143 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
144 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
145     uint64_t sasaddr);
146 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
147 #endif //FreeBSD_version >= 900026
148 
149 struct mprsas_target *
150 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
151     uint16_t handle)
152 {
153 	struct mprsas_target *target;
154 	int i;
155 
156 	for (i = start; i < sassc->maxtargets; i++) {
157 		target = &sassc->targets[i];
158 		if (target->handle == handle)
159 			return (target);
160 	}
161 
162 	return (NULL);
163 }
164 
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166  * commands before device handles have been found by discovery.  Since
167  * discovery involves reading config pages and possibly sending commands,
168  * discovery actions may continue even after we receive the end of discovery
169  * event, so refcount discovery actions instead of assuming we can unfreeze
170  * the simq when we get the event.
171  */
172 void
173 mprsas_startup_increment(struct mprsas_softc *sassc)
174 {
175 	MPR_FUNCTRACE(sassc->sc);
176 
177 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
178 		if (sassc->startup_refcount++ == 0) {
179 			/* just starting, freeze the simq */
180 			mpr_dprint(sassc->sc, MPR_INIT,
181 			    "%s freezing simq\n", __func__);
182 #if (__FreeBSD_version >= 1000039) || \
183     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
184 			xpt_hold_boot();
185 #endif
186 			xpt_freeze_simq(sassc->sim, 1);
187 		}
188 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
189 		    sassc->startup_refcount);
190 	}
191 }
192 
193 void
194 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
195 {
196 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
197 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
198 		xpt_release_simq(sassc->sim, 1);
199 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
200 	}
201 }
202 
203 void
204 mprsas_startup_decrement(struct mprsas_softc *sassc)
205 {
206 	MPR_FUNCTRACE(sassc->sc);
207 
208 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
209 		if (--sassc->startup_refcount == 0) {
210 			/* finished all discovery-related actions, release
211 			 * the simq and rescan for the latest topology.
212 			 */
213 			mpr_dprint(sassc->sc, MPR_INIT,
214 			    "%s releasing simq\n", __func__);
215 			sassc->flags &= ~MPRSAS_IN_STARTUP;
216 			xpt_release_simq(sassc->sim, 1);
217 #if (__FreeBSD_version >= 1000039) || \
218     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
219 			xpt_release_boot();
220 #else
221 			mprsas_rescan_target(sassc->sc, NULL);
222 #endif
223 		}
224 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
225 		    sassc->startup_refcount);
226 	}
227 }
228 
229 /* The firmware requires us to stop sending commands when we're doing task
230  * management, so refcount the TMs and keep the simq frozen when any are in
231  * use.
232  */
233 struct mpr_command *
234 mprsas_alloc_tm(struct mpr_softc *sc)
235 {
236 	struct mpr_command *tm;
237 
238 	MPR_FUNCTRACE(sc);
239 	tm = mpr_alloc_high_priority_command(sc);
240 	return tm;
241 }
242 
243 void
244 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
245 {
246 	int target_id = 0xFFFFFFFF;
247 
248 	MPR_FUNCTRACE(sc);
249 	if (tm == NULL)
250 		return;
251 
252 	/*
253 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
254 	 * free the resources used for freezing the devq.  Must clear the
255 	 * INRESET flag as well or scsi I/O will not work.
256 	 */
257 	if (tm->cm_targ != NULL) {
258 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
259 		target_id = tm->cm_targ->tid;
260 	}
261 	if (tm->cm_ccb) {
262 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
263 		    target_id);
264 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
265 		xpt_free_path(tm->cm_ccb->ccb_h.path);
266 		xpt_free_ccb(tm->cm_ccb);
267 	}
268 
269 	mpr_free_high_priority_command(sc, tm);
270 }
271 
272 void
273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
274 {
275 	struct mprsas_softc *sassc = sc->sassc;
276 	path_id_t pathid;
277 	target_id_t targetid;
278 	union ccb *ccb;
279 
280 	MPR_FUNCTRACE(sc);
281 	pathid = cam_sim_path(sassc->sim);
282 	if (targ == NULL)
283 		targetid = CAM_TARGET_WILDCARD;
284 	else
285 		targetid = targ - sassc->targets;
286 
287 	/*
288 	 * Allocate a CCB and schedule a rescan.
289 	 */
290 	ccb = xpt_alloc_ccb_nowait();
291 	if (ccb == NULL) {
292 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
293 		return;
294 	}
295 
296 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
297 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
299 		xpt_free_ccb(ccb);
300 		return;
301 	}
302 
303 	if (targetid == CAM_TARGET_WILDCARD)
304 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
305 	else
306 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
307 
308 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
309 	xpt_rescan(ccb);
310 }
311 
312 static void
313 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
314 {
315 	struct sbuf sb;
316 	va_list ap;
317 	char str[192];
318 	char path_str[64];
319 
320 	if (cm == NULL)
321 		return;
322 
323 	/* No need to be in here if debugging isn't enabled */
324 	if ((cm->cm_sc->mpr_debug & level) == 0)
325 		return;
326 
327 	sbuf_new(&sb, str, sizeof(str), 0);
328 
329 	va_start(ap, fmt);
330 
331 	if (cm->cm_ccb != NULL) {
332 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
333 		    sizeof(path_str));
334 		sbuf_cat(&sb, path_str);
335 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
336 			scsi_command_string(&cm->cm_ccb->csio, &sb);
337 			sbuf_printf(&sb, "length %d ",
338 			    cm->cm_ccb->csio.dxfer_len);
339 		}
340 	} else {
341 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 		    cam_sim_name(cm->cm_sc->sassc->sim),
343 		    cam_sim_unit(cm->cm_sc->sassc->sim),
344 		    cam_sim_bus(cm->cm_sc->sassc->sim),
345 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
346 		    cm->cm_lun);
347 	}
348 
349 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 	sbuf_vprintf(&sb, fmt, ap);
351 	sbuf_finish(&sb);
352 	mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
353 
354 	va_end(ap);
355 }
356 
357 static void
358 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
359 {
360 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
361 	struct mprsas_target *targ;
362 	uint16_t handle;
363 
364 	MPR_FUNCTRACE(sc);
365 
366 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
367 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
368 	targ = tm->cm_targ;
369 
370 	if (reply == NULL) {
371 		/* XXX retry the remove after the diag reset completes? */
372 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
373 		    "0x%04x\n", __func__, handle);
374 		mprsas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 	    MPI2_IOCSTATUS_SUCCESS) {
380 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
381 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
382 	}
383 
384 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
385 	    le32toh(reply->TerminationCount));
386 	mpr_free_reply(sc, tm->cm_reply_data);
387 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
388 
389 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
390 	    targ->tid, handle);
391 
392 	/*
393 	 * Don't clear target if remove fails because things will get confusing.
394 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
395 	 * this target id if possible, and so we can assign the same target id
396 	 * to this device if it comes back in the future.
397 	 */
398 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
399 	    MPI2_IOCSTATUS_SUCCESS) {
400 		targ = tm->cm_targ;
401 		targ->handle = 0x0;
402 		targ->encl_handle = 0x0;
403 		targ->encl_level_valid = 0x0;
404 		targ->encl_level = 0x0;
405 		targ->connector_name[0] = ' ';
406 		targ->connector_name[1] = ' ';
407 		targ->connector_name[2] = ' ';
408 		targ->connector_name[3] = ' ';
409 		targ->encl_slot = 0x0;
410 		targ->exp_dev_handle = 0x0;
411 		targ->phy_num = 0x0;
412 		targ->linkrate = 0x0;
413 		targ->devinfo = 0x0;
414 		targ->flags = 0x0;
415 		targ->scsi_req_desc_type = 0;
416 	}
417 
418 	mprsas_free_tm(sc, tm);
419 }
420 
421 
422 /*
423  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
424  * Otherwise Volume Delete is same as Bare Drive Removal.
425  */
426 void
427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
428 {
429 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
430 	struct mpr_softc *sc;
431 	struct mpr_command *cm;
432 	struct mprsas_target *targ = NULL;
433 
434 	MPR_FUNCTRACE(sassc->sc);
435 	sc = sassc->sc;
436 
437 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
438 	if (targ == NULL) {
439 		/* FIXME: what is the action? */
440 		/* We don't know about this device? */
441 		mpr_dprint(sc, MPR_ERROR,
442 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
443 		return;
444 	}
445 
446 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
447 
448 	cm = mprsas_alloc_tm(sc);
449 	if (cm == NULL) {
450 		mpr_dprint(sc, MPR_ERROR,
451 		    "%s: command alloc failure\n", __func__);
452 		return;
453 	}
454 
455 	mprsas_rescan_target(sc, targ);
456 
457 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 	req->DevHandle = targ->handle;
459 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
461 
462 	/* SAS Hard Link Reset / SATA Link Reset */
463 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
464 
465 	cm->cm_targ = targ;
466 	cm->cm_data = NULL;
467 	cm->cm_desc.HighPriority.RequestFlags =
468 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
469 	cm->cm_complete = mprsas_remove_volume;
470 	cm->cm_complete_data = (void *)(uintptr_t)handle;
471 
472 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
473 	    __func__, targ->tid);
474 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
475 
476 	mpr_map_command(sc, cm);
477 }
478 
479 /*
480  * The MPT3 firmware performs debounce on the link to avoid transient link
481  * errors and false removals.  When it does decide that link has been lost
482  * and a device needs to go away, it expects that the host will perform a
483  * target reset and then an op remove.  The reset has the side-effect of
484  * aborting any outstanding requests for the device, which is required for
485  * the op-remove to succeed.  It's not clear if the host should check for
486  * the device coming back alive after the reset.
487  */
488 void
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
490 {
491 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 	struct mpr_softc *sc;
493 	struct mpr_command *cm;
494 	struct mprsas_target *targ = NULL;
495 
496 	MPR_FUNCTRACE(sassc->sc);
497 
498 	sc = sassc->sc;
499 
500 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
501 	if (targ == NULL) {
502 		/* FIXME: what is the action? */
503 		/* We don't know about this device? */
504 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
505 		    __func__, handle);
506 		return;
507 	}
508 
509 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
510 
511 	cm = mprsas_alloc_tm(sc);
512 	if (cm == NULL) {
513 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
514 		    __func__);
515 		return;
516 	}
517 
518 	mprsas_rescan_target(sc, targ);
519 
520 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 	memset(req, 0, sizeof(*req));
522 	req->DevHandle = htole16(targ->handle);
523 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
525 
526 	/* SAS Hard Link Reset / SATA Link Reset */
527 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
528 
529 	cm->cm_targ = targ;
530 	cm->cm_data = NULL;
531 	cm->cm_desc.HighPriority.RequestFlags =
532 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533 	cm->cm_complete = mprsas_remove_device;
534 	cm->cm_complete_data = (void *)(uintptr_t)handle;
535 
536 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
537 	    __func__, targ->tid);
538 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
539 
540 	mpr_map_command(sc, cm);
541 }
542 
543 static void
544 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
545 {
546 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
547 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
548 	struct mprsas_target *targ;
549 	struct mpr_command *next_cm;
550 	uint16_t handle;
551 
552 	MPR_FUNCTRACE(sc);
553 
554 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
555 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
556 	targ = tm->cm_targ;
557 
558 	/*
559 	 * Currently there should be no way we can hit this case.  It only
560 	 * happens when we have a failure to allocate chain frames, and
561 	 * task management commands don't have S/G lists.
562 	 */
563 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
564 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
565 		    "handle %#04x! This should not happen!\n", __func__,
566 		    tm->cm_flags, handle);
567 	}
568 
569 	if (reply == NULL) {
570 		/* XXX retry the remove after the diag reset completes? */
571 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
572 		    "0x%04x\n", __func__, handle);
573 		mprsas_free_tm(sc, tm);
574 		return;
575 	}
576 
577 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
578 	    MPI2_IOCSTATUS_SUCCESS) {
579 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
580 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
581 	}
582 
583 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
584 	    le32toh(reply->TerminationCount));
585 	mpr_free_reply(sc, tm->cm_reply_data);
586 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
587 
588 	/* Reuse the existing command */
589 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
590 	memset(req, 0, sizeof(*req));
591 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
592 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
593 	req->DevHandle = htole16(handle);
594 	tm->cm_data = NULL;
595 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
596 	tm->cm_complete = mprsas_remove_complete;
597 	tm->cm_complete_data = (void *)(uintptr_t)handle;
598 
599 	mpr_map_command(sc, tm);
600 
601 	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
602 	    targ->tid, handle);
603 	if (targ->encl_level_valid) {
604 		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
605 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
606 		    targ->connector_name);
607 	}
608 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
609 		union ccb *ccb;
610 
611 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
612 		ccb = tm->cm_complete_data;
613 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
614 		mprsas_scsiio_complete(sc, tm);
615 	}
616 }
617 
618 static void
619 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
620 {
621 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
622 	uint16_t handle;
623 	struct mprsas_target *targ;
624 	struct mprsas_lun *lun;
625 
626 	MPR_FUNCTRACE(sc);
627 
628 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
629 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
630 
631 	/*
632 	 * Currently there should be no way we can hit this case.  It only
633 	 * happens when we have a failure to allocate chain frames, and
634 	 * task management commands don't have S/G lists.
635 	 */
636 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
637 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
638 		    "handle %#04x! This should not happen!\n", __func__,
639 		    tm->cm_flags, handle);
640 		mprsas_free_tm(sc, tm);
641 		return;
642 	}
643 
644 	if (reply == NULL) {
645 		/* most likely a chip reset */
646 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
647 		    "0x%04x\n", __func__, handle);
648 		mprsas_free_tm(sc, tm);
649 		return;
650 	}
651 
652 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
653 	    __func__, handle, le16toh(reply->IOCStatus));
654 
655 	/*
656 	 * Don't clear target if remove fails because things will get confusing.
657 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
658 	 * this target id if possible, and so we can assign the same target id
659 	 * to this device if it comes back in the future.
660 	 */
661 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
662 	    MPI2_IOCSTATUS_SUCCESS) {
663 		targ = tm->cm_targ;
664 		targ->handle = 0x0;
665 		targ->encl_handle = 0x0;
666 		targ->encl_level_valid = 0x0;
667 		targ->encl_level = 0x0;
668 		targ->connector_name[0] = ' ';
669 		targ->connector_name[1] = ' ';
670 		targ->connector_name[2] = ' ';
671 		targ->connector_name[3] = ' ';
672 		targ->encl_slot = 0x0;
673 		targ->exp_dev_handle = 0x0;
674 		targ->phy_num = 0x0;
675 		targ->linkrate = 0x0;
676 		targ->devinfo = 0x0;
677 		targ->flags = 0x0;
678 		targ->scsi_req_desc_type = 0;
679 
680 		while (!SLIST_EMPTY(&targ->luns)) {
681 			lun = SLIST_FIRST(&targ->luns);
682 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
683 			free(lun, M_MPR);
684 		}
685 	}
686 
687 	mprsas_free_tm(sc, tm);
688 }
689 
690 static int
691 mprsas_register_events(struct mpr_softc *sc)
692 {
693 	uint8_t events[16];
694 
695 	bzero(events, 16);
696 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
697 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
698 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
699 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
701 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
702 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
704 	setbit(events, MPI2_EVENT_IR_VOLUME);
705 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
706 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
707 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
708 	setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
709 
710 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
711 	    &sc->sassc->mprsas_eh);
712 
713 	return (0);
714 }
715 
716 int
717 mpr_attach_sas(struct mpr_softc *sc)
718 {
719 	struct mprsas_softc *sassc;
720 	cam_status status;
721 	int unit, error = 0;
722 
723 	MPR_FUNCTRACE(sc);
724 
725 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
726 	if (!sassc) {
727 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
728 		    __func__, __LINE__);
729 		return (ENOMEM);
730 	}
731 
732 	/*
733 	 * XXX MaxTargets could change during a reinit.  Since we don't
734 	 * resize the targets[] array during such an event, cache the value
735 	 * of MaxTargets here so that we don't get into trouble later.  This
736 	 * should move into the reinit logic.
737 	 */
738 	sassc->maxtargets = sc->facts->MaxTargets;
739 	sassc->targets = malloc(sizeof(struct mprsas_target) *
740 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
741 	if (!sassc->targets) {
742 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
743 		    __func__, __LINE__);
744 		free(sassc, M_MPR);
745 		return (ENOMEM);
746 	}
747 	sc->sassc = sassc;
748 	sassc->sc = sc;
749 
750 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
751 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
752 		error = ENOMEM;
753 		goto out;
754 	}
755 
756 	unit = device_get_unit(sc->mpr_dev);
757 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
758 	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
759 	if (sassc->sim == NULL) {
760 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
761 		error = EINVAL;
762 		goto out;
763 	}
764 
765 	TAILQ_INIT(&sassc->ev_queue);
766 
767 	/* Initialize taskqueue for Event Handling */
768 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
769 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
770 	    taskqueue_thread_enqueue, &sassc->ev_tq);
771 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
772 	    device_get_nameunit(sc->mpr_dev));
773 
774 	mpr_lock(sc);
775 
776 	/*
777 	 * XXX There should be a bus for every port on the adapter, but since
778 	 * we're just going to fake the topology for now, we'll pretend that
779 	 * everything is just a target on a single bus.
780 	 */
781 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
782 		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
783 		    error);
784 		mpr_unlock(sc);
785 		goto out;
786 	}
787 
788 	/*
789 	 * Assume that discovery events will start right away.
790 	 *
791 	 * Hold off boot until discovery is complete.
792 	 */
793 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
794 	sc->sassc->startup_refcount = 0;
795 	mprsas_startup_increment(sassc);
796 
797 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
798 
799 	/*
800 	 * Register for async events so we can determine the EEDP
801 	 * capabilities of devices.
802 	 */
803 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
804 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
805 	    CAM_LUN_WILDCARD);
806 	if (status != CAM_REQ_CMP) {
807 		mpr_printf(sc, "Error %#x creating sim path\n", status);
808 		sassc->path = NULL;
809 	} else {
810 		int event;
811 
812 #if (__FreeBSD_version >= 1000006) || \
813     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
814 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
815 #else
816 		event = AC_FOUND_DEVICE;
817 #endif
818 
819 		/*
820 		 * Prior to the CAM locking improvements, we can't call
821 		 * xpt_register_async() with a particular path specified.
822 		 *
823 		 * If a path isn't specified, xpt_register_async() will
824 		 * generate a wildcard path and acquire the XPT lock while
825 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
826 		 * It will then drop the XPT lock once that is done.
827 		 *
828 		 * If a path is specified for xpt_register_async(), it will
829 		 * not acquire and drop the XPT lock around the call to
830 		 * xpt_action().  xpt_action() asserts that the caller
831 		 * holds the SIM lock, so the SIM lock has to be held when
832 		 * calling xpt_register_async() when the path is specified.
833 		 *
834 		 * But xpt_register_async calls xpt_for_all_devices(),
835 		 * which calls xptbustraverse(), which will acquire each
836 		 * SIM lock.  When it traverses our particular bus, it will
837 		 * necessarily acquire the SIM lock, which will lead to a
838 		 * recursive lock acquisition.
839 		 *
840 		 * The CAM locking changes fix this problem by acquiring
841 		 * the XPT topology lock around bus traversal in
842 		 * xptbustraverse(), so the caller can hold the SIM lock
843 		 * and it does not cause a recursive lock acquisition.
844 		 *
845 		 * These __FreeBSD_version values are approximate, especially
846 		 * for stable/10, which is two months later than the actual
847 		 * change.
848 		 */
849 
850 #if (__FreeBSD_version < 1000703) || \
851     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
852 		mpr_unlock(sc);
853 		status = xpt_register_async(event, mprsas_async, sc,
854 					    NULL);
855 		mpr_lock(sc);
856 #else
857 		status = xpt_register_async(event, mprsas_async, sc,
858 					    sassc->path);
859 #endif
860 
861 		if (status != CAM_REQ_CMP) {
862 			mpr_dprint(sc, MPR_ERROR,
863 			    "Error %#x registering async handler for "
864 			    "AC_ADVINFO_CHANGED events\n", status);
865 			xpt_free_path(sassc->path);
866 			sassc->path = NULL;
867 		}
868 	}
869 	if (status != CAM_REQ_CMP) {
870 		/*
871 		 * EEDP use is the exception, not the rule.
872 		 * Warn the user, but do not fail to attach.
873 		 */
874 		mpr_printf(sc, "EEDP capabilities disabled.\n");
875 	}
876 
877 	mpr_unlock(sc);
878 
879 	mprsas_register_events(sc);
880 out:
881 	if (error)
882 		mpr_detach_sas(sc);
883 	return (error);
884 }
885 
886 int
887 mpr_detach_sas(struct mpr_softc *sc)
888 {
889 	struct mprsas_softc *sassc;
890 	struct mprsas_lun *lun, *lun_tmp;
891 	struct mprsas_target *targ;
892 	int i;
893 
894 	MPR_FUNCTRACE(sc);
895 
896 	if (sc->sassc == NULL)
897 		return (0);
898 
899 	sassc = sc->sassc;
900 	mpr_deregister_events(sc, sassc->mprsas_eh);
901 
902 	/*
903 	 * Drain and free the event handling taskqueue with the lock
904 	 * unheld so that any parallel processing tasks drain properly
905 	 * without deadlocking.
906 	 */
907 	if (sassc->ev_tq != NULL)
908 		taskqueue_free(sassc->ev_tq);
909 
910 	/* Make sure CAM doesn't wedge if we had to bail out early. */
911 	mpr_lock(sc);
912 
913 	/* Deregister our async handler */
914 	if (sassc->path != NULL) {
915 		xpt_register_async(0, mprsas_async, sc, sassc->path);
916 		xpt_free_path(sassc->path);
917 		sassc->path = NULL;
918 	}
919 
920 	if (sassc->flags & MPRSAS_IN_STARTUP)
921 		xpt_release_simq(sassc->sim, 1);
922 
923 	if (sassc->sim != NULL) {
924 		xpt_bus_deregister(cam_sim_path(sassc->sim));
925 		cam_sim_free(sassc->sim, FALSE);
926 	}
927 
928 	mpr_unlock(sc);
929 
930 	if (sassc->devq != NULL)
931 		cam_simq_free(sassc->devq);
932 
933 	for (i = 0; i < sassc->maxtargets; i++) {
934 		targ = &sassc->targets[i];
935 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
936 			free(lun, M_MPR);
937 		}
938 	}
939 	free(sassc->targets, M_MPR);
940 	free(sassc, M_MPR);
941 	sc->sassc = NULL;
942 
943 	return (0);
944 }
945 
946 void
947 mprsas_discovery_end(struct mprsas_softc *sassc)
948 {
949 	struct mpr_softc *sc = sassc->sc;
950 
951 	MPR_FUNCTRACE(sc);
952 
953 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
954 		callout_stop(&sassc->discovery_callout);
955 
956 }
957 
958 static void
959 mprsas_action(struct cam_sim *sim, union ccb *ccb)
960 {
961 	struct mprsas_softc *sassc;
962 
963 	sassc = cam_sim_softc(sim);
964 
965 	MPR_FUNCTRACE(sassc->sc);
966 	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
967 	    ccb->ccb_h.func_code);
968 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
969 
970 	switch (ccb->ccb_h.func_code) {
971 	case XPT_PATH_INQ:
972 	{
973 		struct ccb_pathinq *cpi = &ccb->cpi;
974 
975 		cpi->version_num = 1;
976 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
977 		cpi->target_sprt = 0;
978 #if (__FreeBSD_version >= 1000039) || \
979     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
980 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
981 #else
982 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
983 #endif
984 		cpi->hba_eng_cnt = 0;
985 		cpi->max_target = sassc->maxtargets - 1;
986 		cpi->max_lun = 255;
987 		cpi->initiator_id = sassc->maxtargets - 1;
988 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
989 		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
990 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
991 		cpi->unit_number = cam_sim_unit(sim);
992 		cpi->bus_id = cam_sim_bus(sim);
993 		/*
994 		 * XXXSLM-I think this needs to change based on config page or
995 		 * something instead of hardcoded to 150000.
996 		 */
997 		cpi->base_transfer_speed = 150000;
998 		cpi->transport = XPORT_SAS;
999 		cpi->transport_version = 0;
1000 		cpi->protocol = PROTO_SCSI;
1001 		cpi->protocol_version = SCSI_REV_SPC;
1002 #if __FreeBSD_version >= 800001
1003 		/*
1004 		 * XXXSLM-probably need to base this number on max SGL's and
1005 		 * page size.
1006 		 */
1007 		cpi->maxio = 256 * 1024;
1008 #endif
1009 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1010 		break;
1011 	}
1012 	case XPT_GET_TRAN_SETTINGS:
1013 	{
1014 		struct ccb_trans_settings	*cts;
1015 		struct ccb_trans_settings_sas	*sas;
1016 		struct ccb_trans_settings_scsi	*scsi;
1017 		struct mprsas_target *targ;
1018 
1019 		cts = &ccb->cts;
1020 		sas = &cts->xport_specific.sas;
1021 		scsi = &cts->proto_specific.scsi;
1022 
1023 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1024 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1025 		    cts->ccb_h.target_id));
1026 		targ = &sassc->targets[cts->ccb_h.target_id];
1027 		if (targ->handle == 0x0) {
1028 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1029 			break;
1030 		}
1031 
1032 		cts->protocol_version = SCSI_REV_SPC2;
1033 		cts->transport = XPORT_SAS;
1034 		cts->transport_version = 0;
1035 
1036 		sas->valid = CTS_SAS_VALID_SPEED;
1037 		switch (targ->linkrate) {
1038 		case 0x08:
1039 			sas->bitrate = 150000;
1040 			break;
1041 		case 0x09:
1042 			sas->bitrate = 300000;
1043 			break;
1044 		case 0x0a:
1045 			sas->bitrate = 600000;
1046 			break;
1047 		case 0x0b:
1048 			sas->bitrate = 1200000;
1049 			break;
1050 		default:
1051 			sas->valid = 0;
1052 		}
1053 
1054 		cts->protocol = PROTO_SCSI;
1055 		scsi->valid = CTS_SCSI_VALID_TQ;
1056 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1057 
1058 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1059 		break;
1060 	}
1061 	case XPT_CALC_GEOMETRY:
1062 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1063 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1064 		break;
1065 	case XPT_RESET_DEV:
1066 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1067 		    "XPT_RESET_DEV\n");
1068 		mprsas_action_resetdev(sassc, ccb);
1069 		return;
1070 	case XPT_RESET_BUS:
1071 	case XPT_ABORT:
1072 	case XPT_TERM_IO:
1073 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1074 		    "for abort or reset\n");
1075 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1076 		break;
1077 	case XPT_SCSI_IO:
1078 		mprsas_action_scsiio(sassc, ccb);
1079 		return;
1080 #if __FreeBSD_version >= 900026
1081 	case XPT_SMP_IO:
1082 		mprsas_action_smpio(sassc, ccb);
1083 		return;
1084 #endif
1085 	default:
1086 		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1087 		break;
1088 	}
1089 	xpt_done(ccb);
1090 
1091 }
1092 
1093 static void
1094 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1095     target_id_t target_id, lun_id_t lun_id)
1096 {
1097 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1098 	struct cam_path *path;
1099 
1100 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1101 	    ac_code, target_id, (uintmax_t)lun_id);
1102 
1103 	if (xpt_create_path(&path, NULL,
1104 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1105 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1106 		    "notification\n");
1107 		return;
1108 	}
1109 
1110 	xpt_async(ac_code, path, NULL);
1111 	xpt_free_path(path);
1112 }
1113 
/*
 * Force completion of every outstanding command after a diag reset.
 * The hardware will not deliver replies for these, so each command is
 * completed with a NULL reply: its completion callback is invoked
 * and/or any sleeper is woken, and the active-I/O count is rewound.
 * Called with the softc mutex held (asserted below).
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on COMPLETE; set it so they exit. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Rewind the active count; warn if it underflows. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1166 
/*
 * Reinitialize the SAS layer's view of the world after a controller
 * diag reset: freeze CAM activity, announce a bus reset, flush all
 * outstanding commands, and invalidate every cached device handle so
 * rediscovery assigns fresh ones.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1209 static void
1210 mprsas_tm_timeout(void *data)
1211 {
1212 	struct mpr_command *tm = data;
1213 	struct mpr_softc *sc = tm->cm_sc;
1214 
1215 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1216 
1217 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1218 	    "out\n", tm);
1219 	mpr_reinit(sc);
1220 }
1221 
1222 static void
1223 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1224 {
1225 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1226 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1227 	unsigned int cm_count = 0;
1228 	struct mpr_command *cm;
1229 	struct mprsas_target *targ;
1230 
1231 	callout_stop(&tm->cm_callout);
1232 
1233 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1234 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1235 	targ = tm->cm_targ;
1236 
1237 	/*
1238 	 * Currently there should be no way we can hit this case.  It only
1239 	 * happens when we have a failure to allocate chain frames, and
1240 	 * task management commands don't have S/G lists.
1241 	 */
1242 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1243 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1244 		    "This should not happen!\n", __func__, tm->cm_flags);
1245 		mprsas_free_tm(sc, tm);
1246 		return;
1247 	}
1248 
1249 	if (reply == NULL) {
1250 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1251 		    "%p\n", tm);
1252 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1253 			/* this completion was due to a reset, just cleanup */
1254 			targ->tm = NULL;
1255 			mprsas_free_tm(sc, tm);
1256 		}
1257 		else {
1258 			/* we should have gotten a reply. */
1259 			mpr_reinit(sc);
1260 		}
1261 		return;
1262 	}
1263 
1264 	mprsas_log_command(tm, MPR_RECOVERY,
1265 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1266 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1267 	    le32toh(reply->TerminationCount));
1268 
1269 	/* See if there are any outstanding commands for this LUN.
1270 	 * This could be made more efficient by using a per-LU data
1271 	 * structure of some sort.
1272 	 */
1273 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1274 		if (cm->cm_lun == tm->cm_lun)
1275 			cm_count++;
1276 	}
1277 
1278 	if (cm_count == 0) {
1279 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1280 		    "logical unit %u finished recovery after reset\n",
1281 		    tm->cm_lun, tm);
1282 
1283 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1284 		    tm->cm_lun);
1285 
1286 		/* we've finished recovery for this logical unit.  check and
1287 		 * see if some other logical unit has a timedout command
1288 		 * that needs to be processed.
1289 		 */
1290 		cm = TAILQ_FIRST(&targ->timedout_commands);
1291 		if (cm) {
1292 			mprsas_send_abort(sc, tm, cm);
1293 		}
1294 		else {
1295 			targ->tm = NULL;
1296 			mprsas_free_tm(sc, tm);
1297 		}
1298 	}
1299 	else {
1300 		/* if we still have commands for this LUN, the reset
1301 		 * effectively failed, regardless of the status reported.
1302 		 * Escalate to a target reset.
1303 		 */
1304 		mprsas_log_command(tm, MPR_RECOVERY,
1305 		    "logical unit reset complete for tm %p, but still have %u "
1306 		    "command(s)\n", tm, cm_count);
1307 		mprsas_send_reset(sc, tm,
1308 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1309 	}
1310 }
1311 
/*
 * Completion handler for a TARGET_RESET task management command.  If
 * the target has no outstanding commands left, recovery is complete:
 * announce the reset to CAM and free the TM.  Otherwise the reset
 * effectively failed and we escalate to a full controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
		    "%p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1381 
1382 #define MPR_RESET_TIMEOUT 30
1383 
/*
 * Build and issue a SCSI task management reset of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) for the target associated with
 * 'tm'.  Arms a timeout callout that escalates to a diag reset if the
 * TM never completes.  Returns the mpr_map_command() result, or -1 for
 * a missing device handle or an unknown reset type.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	/* TMs carry no data and go out on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1452 
1453 
/*
 * Completion handler for an ABORT_TASK task management command.  Looks
 * at the target's remaining timed-out commands to decide what to do
 * next: finish recovery, abort the next timed-out command, or — if the
 * aborted command is still at the head of the timedout list, meaning
 * the abort did not take effect — escalate to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1535 
1536 #define MPR_ABORT_TIMEOUT 5
1537 
1538 static int
1539 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1540     struct mpr_command *cm)
1541 {
1542 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1543 	struct mprsas_target *targ;
1544 	int err;
1545 
1546 	targ = cm->cm_targ;
1547 	if (targ->handle == 0) {
1548 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1549 		    __func__, cm->cm_ccb->ccb_h.target_id);
1550 		return -1;
1551 	}
1552 
1553 	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1554 	    "Aborting command %p\n", cm);
1555 
1556 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1557 	req->DevHandle = htole16(targ->handle);
1558 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1559 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1560 
1561 	/* XXX Need to handle invalid LUNs */
1562 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1563 
1564 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1565 
1566 	tm->cm_data = NULL;
1567 	tm->cm_desc.HighPriority.RequestFlags =
1568 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1569 	tm->cm_complete = mprsas_abort_complete;
1570 	tm->cm_complete_data = (void *)tm;
1571 	tm->cm_targ = cm->cm_targ;
1572 	tm->cm_lun = cm->cm_lun;
1573 
1574 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1575 	    mprsas_tm_timeout, tm);
1576 
1577 	targ->aborts++;
1578 
1579 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1580 	    __func__, targ->tid);
1581 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1582 
1583 	err = mpr_map_command(sc, tm);
1584 	if (err)
1585 		mprsas_log_command(tm, MPR_RECOVERY,
1586 		    "error %d sending abort for cm %p SMID %u\n",
1587 		    err, cm, req->TaskMID);
1588 	return err;
1589 }
1590 
/*
 * Callout handler for a SCSI I/O command that has exceeded its CCB
 * timeout.  First polls the interrupt handler in case the completion
 * is merely pending; otherwise marks the command timed out, queues it
 * for recovery, and starts (or joins) target-level recovery by sending
 * an abort.  Runs with the softc mutex held (asserted below).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p target "
	    "%u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid, targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
		    "allocate a tm\n", cm);
	}
}
1669 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate an
 * mpr_command, translate the CCB into an MPI2 SCSI IO request (data
 * direction, task attributes, LUN, CDB, optional EEDP/DIF setup),
 * arm the per-command timeout, and hand the request to the hardware.
 * Completion is asynchronous via mprsas_scsiio_complete().  Runs with
 * the softc mutex held (asserted below).
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means no device is mapped at this target. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not addressable with plain SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of commands (or a diag reset started after allocation):
	 * freeze the simq and ask CAM to requeue the CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* CDB may be passed by pointer or inline in the CCB. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set above; this repeats it. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* 'lun' is NULL after the loop if no matching LUN exists. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 (16-byte) or 2. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
1945 
1946 static void
1947 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1948 {
1949         char *desc;
1950 
1951         switch (response_code) {
1952         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1953                 desc = "task management request completed";
1954                 break;
1955         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1956                 desc = "invalid frame";
1957                 break;
1958         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1959                 desc = "task management request not supported";
1960                 break;
1961         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1962                 desc = "task management request failed";
1963                 break;
1964         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1965                 desc = "task management request succeeded";
1966                 break;
1967         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1968                 desc = "invalid lun";
1969                 break;
1970         case 0xA:
1971                 desc = "overlapped tag attempted";
1972                 break;
1973         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1974                 desc = "task queued, however not sent to target";
1975                 break;
1976         default:
1977                 desc = "unknown";
1978                 break;
1979         }
1980 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1981 	    desc);
1982 }
1983 
1984 /**
1985  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
1986  */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* Scratch buffer in the softc; filled in with strcat() below. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * NOTE(review): log info 0x31170000 is deliberately suppressed
	 * as noise; confirm its meaning against the firmware IOCLogInfo
	 * code definitions.
	 */
	if (log_info == 0x31170000)
		return;

	/* Translate the masked IOCStatus into a human-readable string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Translate the SCSI status byte into a human-readable string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of the SCSIState flag names in the
	 * scratch buffer.  If no flags are set, point at a literal single
	 * space instead (the literal is never written to).
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* Include enclosure location details when the target has them. */
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the sense buffer only when XINFO debugging is enabled. */
	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Log the response code byte (byte 0 of the response info word). */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}
2145 
2146 static void
2147 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2148 {
2149 	MPI2_SCSI_IO_REPLY *rep;
2150 	union ccb *ccb;
2151 	struct ccb_scsiio *csio;
2152 	struct mprsas_softc *sassc;
2153 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2154 	u8 *TLR_bits, TLR_on;
2155 	int dir = 0, i;
2156 	u16 alloc_len;
2157 	struct mprsas_target *target;
2158 	target_id_t target_id;
2159 
2160 	MPR_FUNCTRACE(sc);
2161 	mpr_dprint(sc, MPR_TRACE,
2162 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2163 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2164 	    cm->cm_targ->outstanding);
2165 
2166 	callout_stop(&cm->cm_callout);
2167 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2168 
2169 	sassc = sc->sassc;
2170 	ccb = cm->cm_complete_data;
2171 	csio = &ccb->csio;
2172 	target_id = csio->ccb_h.target_id;
2173 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2174 	/*
2175 	 * XXX KDM if the chain allocation fails, does it matter if we do
2176 	 * the sync and unload here?  It is simpler to do it in every case,
2177 	 * assuming it doesn't cause problems.
2178 	 */
2179 	if (cm->cm_data != NULL) {
2180 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2181 			dir = BUS_DMASYNC_POSTREAD;
2182 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2183 			dir = BUS_DMASYNC_POSTWRITE;
2184 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2185 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2186 	}
2187 
2188 	cm->cm_targ->completed++;
2189 	cm->cm_targ->outstanding--;
2190 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2191 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2192 
2193 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2194 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2195 		if (cm->cm_reply != NULL)
2196 			mprsas_log_command(cm, MPR_RECOVERY,
2197 			    "completed timedout cm %p ccb %p during recovery "
2198 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2199 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2200 			    rep->SCSIState, le32toh(rep->TransferCount));
2201 		else
2202 			mprsas_log_command(cm, MPR_RECOVERY,
2203 			    "completed timedout cm %p ccb %p during recovery\n",
2204 			    cm, cm->cm_ccb);
2205 	} else if (cm->cm_targ->tm != NULL) {
2206 		if (cm->cm_reply != NULL)
2207 			mprsas_log_command(cm, MPR_RECOVERY,
2208 			    "completed cm %p ccb %p during recovery "
2209 			    "ioc %x scsi %x state %x xfer %u\n",
2210 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2211 			    rep->SCSIStatus, rep->SCSIState,
2212 			    le32toh(rep->TransferCount));
2213 		else
2214 			mprsas_log_command(cm, MPR_RECOVERY,
2215 			    "completed cm %p ccb %p during recovery\n",
2216 			    cm, cm->cm_ccb);
2217 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2218 		mprsas_log_command(cm, MPR_RECOVERY,
2219 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2220 	}
2221 
2222 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2223 		/*
2224 		 * We ran into an error after we tried to map the command,
2225 		 * so we're getting a callback without queueing the command
2226 		 * to the hardware.  So we set the status here, and it will
2227 		 * be retained below.  We'll go through the "fast path",
2228 		 * because there can be no reply when we haven't actually
2229 		 * gone out to the hardware.
2230 		 */
2231 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2232 
2233 		/*
2234 		 * Currently the only error included in the mask is
2235 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2236 		 * chain frames.  We need to freeze the queue until we get
2237 		 * a command that completed without this error, which will
2238 		 * hopefully have some chain frames attached that we can
2239 		 * use.  If we wanted to get smarter about it, we would
2240 		 * only unfreeze the queue in this condition when we're
2241 		 * sure that we're getting some chain frames back.  That's
2242 		 * probably unnecessary.
2243 		 */
2244 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2245 			xpt_freeze_simq(sassc->sim, 1);
2246 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2247 			mpr_dprint(sc, MPR_INFO, "Error sending command, "
2248 			    "freezing SIM queue\n");
2249 		}
2250 	}
2251 
2252 	/*
2253 	 * If this is a Start Stop Unit command and it was issued by the driver
2254 	 * during shutdown, decrement the refcount to account for all of the
2255 	 * commands that were sent.  All SSU commands should be completed before
2256 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2257 	 * is TRUE.
2258 	 */
2259 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2260 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2261 		sc->SSU_refcount--;
2262 	}
2263 
2264 	/* Take the fast path to completion */
2265 	if (cm->cm_reply == NULL) {
2266 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2267 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2268 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2269 			else {
2270 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2271 				csio->scsi_status = SCSI_STATUS_OK;
2272 			}
2273 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2274 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2275 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2276 				mpr_dprint(sc, MPR_XINFO,
2277 				    "Unfreezing SIM queue\n");
2278 			}
2279 		}
2280 
2281 		/*
2282 		 * There are two scenarios where the status won't be
2283 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2284 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2285 		 */
2286 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2287 			/*
2288 			 * Freeze the dev queue so that commands are
2289 			 * executed in the correct order after error
2290 			 * recovery.
2291 			 */
2292 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2293 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2294 		}
2295 		mpr_free_command(sc, cm);
2296 		xpt_done(ccb);
2297 		return;
2298 	}
2299 
2300 	mprsas_log_command(cm, MPR_XINFO,
2301 	    "ioc %x scsi %x state %x xfer %u\n",
2302 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2303 	    le32toh(rep->TransferCount));
2304 
2305 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2306 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2307 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2308 		/* FALLTHROUGH */
2309 	case MPI2_IOCSTATUS_SUCCESS:
2310 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2311 
2312 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2313 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2314 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2315 
2316 		/* Completion failed at the transport level. */
2317 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2318 		    MPI2_SCSI_STATE_TERMINATED)) {
2319 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2320 			break;
2321 		}
2322 
2323 		/* In a modern packetized environment, an autosense failure
2324 		 * implies that there's not much else that can be done to
2325 		 * recover the command.
2326 		 */
2327 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2328 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2329 			break;
2330 		}
2331 
2332 		/*
2333 		 * CAM doesn't care about SAS Response Info data, but if this is
2334 		 * the state check if TLR should be done.  If not, clear the
2335 		 * TLR_bits for the target.
2336 		 */
2337 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2338 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2339 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2340 			sc->mapping_table[target_id].TLR_bits =
2341 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2342 		}
2343 
2344 		/*
2345 		 * Intentionally override the normal SCSI status reporting
2346 		 * for these two cases.  These are likely to happen in a
2347 		 * multi-initiator environment, and we want to make sure that
2348 		 * CAM retries these commands rather than fail them.
2349 		 */
2350 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2351 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2352 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2353 			break;
2354 		}
2355 
2356 		/* Handle normal status and sense */
2357 		csio->scsi_status = rep->SCSIStatus;
2358 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2359 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2360 		else
2361 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2362 
2363 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2364 			int sense_len, returned_sense_len;
2365 
2366 			returned_sense_len = min(le32toh(rep->SenseCount),
2367 			    sizeof(struct scsi_sense_data));
2368 			if (returned_sense_len < csio->sense_len)
2369 				csio->sense_resid = csio->sense_len -
2370 				    returned_sense_len;
2371 			else
2372 				csio->sense_resid = 0;
2373 
2374 			sense_len = min(returned_sense_len,
2375 			    csio->sense_len - csio->sense_resid);
2376 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2377 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2378 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2379 		}
2380 
2381 		/*
2382 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2383 		 * and it's page code 0 (Supported Page List), and there is
2384 		 * inquiry data, and this is for a sequential access device, and
2385 		 * the device is an SSP target, and TLR is supported by the
2386 		 * controller, turn the TLR_bits value ON if page 0x90 is
2387 		 * supported.
2388 		 */
2389 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2390 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2391 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2392 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2393 		    (csio->data_ptr != NULL) &&
2394 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2395 		    (sc->control_TLR) &&
2396 		    (sc->mapping_table[target_id].device_info &
2397 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2398 			vpd_list = (struct scsi_vpd_supported_page_list *)
2399 			    csio->data_ptr;
2400 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2401 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2402 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2403 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2404 			    csio->cdb_io.cdb_bytes[4];
2405 			alloc_len -= csio->resid;
2406 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2407 				if (vpd_list->list[i] == 0x90) {
2408 					*TLR_bits = TLR_on;
2409 					break;
2410 				}
2411 			}
2412 		}
2413 
2414 		/*
2415 		 * If this is a SATA direct-access end device, mark it so that
2416 		 * a SCSI StartStopUnit command will be sent to it when the
2417 		 * driver is being shutdown.
2418 		 */
2419 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2420 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2421 		    (sc->mapping_table[target_id].device_info &
2422 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2423 		    ((sc->mapping_table[target_id].device_info &
2424 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2425 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2426 			target = &sassc->targets[target_id];
2427 			target->supports_SSU = TRUE;
2428 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2429 			    target_id);
2430 		}
2431 		break;
2432 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2433 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2434 		/*
2435 		 * If devinfo is 0 this will be a volume.  In that case don't
2436 		 * tell CAM that the volume is not there.  We want volumes to
2437 		 * be enumerated until they are deleted/removed, not just
2438 		 * failed.
2439 		 */
2440 		if (cm->cm_targ->devinfo == 0)
2441 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2442 		else
2443 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2444 		break;
2445 	case MPI2_IOCSTATUS_INVALID_SGL:
2446 		mpr_print_scsiio_cmd(sc, cm);
2447 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2448 		break;
2449 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2450 		/*
2451 		 * This is one of the responses that comes back when an I/O
2452 		 * has been aborted.  If it is because of a timeout that we
2453 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2454 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2455 		 * command is the same (it gets retried, subject to the
2456 		 * retry counter), the only difference is what gets printed
2457 		 * on the console.
2458 		 */
2459 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2460 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2461 		else
2462 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2463 		break;
2464 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2465 		/* resid is ignored for this condition */
2466 		csio->resid = 0;
2467 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2468 		break;
2469 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2470 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2471 		/*
2472 		 * These can sometimes be transient transport-related
2473 		 * errors, and sometimes persistent drive-related errors.
2474 		 * We used to retry these without decrementing the retry
2475 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2476 		 * we hit a persistent drive problem that returns one of
2477 		 * these error codes, we would retry indefinitely.  So,
2478 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2479 		 * count and avoid infinite retries.  We're taking the
2480 		 * potential risk of flagging false failures in the event
2481 		 * of a topology-related error (e.g. a SAS expander problem
2482 		 * causes a command addressed to a drive to fail), but
2483 		 * avoiding getting into an infinite retry loop.
2484 		 */
2485 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2486 		mprsas_log_command(cm, MPR_INFO,
2487 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2488 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2489 		    le32toh(rep->TransferCount));
2490 		break;
2491 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2492 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2493 	case MPI2_IOCSTATUS_INVALID_VPID:
2494 	case MPI2_IOCSTATUS_INVALID_FIELD:
2495 	case MPI2_IOCSTATUS_INVALID_STATE:
2496 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2497 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2498 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2499 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2500 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2501 	default:
2502 		mprsas_log_command(cm, MPR_XINFO,
2503 		    "completed ioc %x scsi %x state %x xfer %u\n",
2504 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2505 		    le32toh(rep->TransferCount));
2506 		csio->resid = cm->cm_length;
2507 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2508 		break;
2509 	}
2510 
2511 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2512 
2513 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2514 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2515 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2516 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2517 		    "queue\n");
2518 	}
2519 
2520 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2521 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2522 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2523 	}
2524 
2525 	mpr_free_command(sc, cm);
2526 	xpt_done(ccb);
2527 }
2528 
2529 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands.  Translates the MPI
 * reply (IOCStatus/SASStatus) and the SMP frame result into CCB status,
 * syncs and unloads the DMA map used for the bidirectional transfer,
 * then frees the command and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame holds the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2592 
2593 static void
2594 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2595 {
2596 	struct mpr_command *cm;
2597 	uint8_t *request, *response;
2598 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2599 	struct mpr_softc *sc;
2600 	struct sglist *sg;
2601 	int error;
2602 
2603 	sc = sassc->sc;
2604 	sg = NULL;
2605 	error = 0;
2606 
2607 #if (__FreeBSD_version >= 1000028) || \
2608     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2609 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2610 	case CAM_DATA_PADDR:
2611 	case CAM_DATA_SG_PADDR:
2612 		/*
2613 		 * XXX We don't yet support physical addresses here.
2614 		 */
2615 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2616 		    "supported\n", __func__);
2617 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2618 		xpt_done(ccb);
2619 		return;
2620 	case CAM_DATA_SG:
2621 		/*
2622 		 * The chip does not support more than one buffer for the
2623 		 * request or response.
2624 		 */
2625 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2626 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2627 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2628 			    "response buffer segments not supported for SMP\n",
2629 			    __func__);
2630 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2631 			xpt_done(ccb);
2632 			return;
2633 		}
2634 
2635 		/*
2636 		 * The CAM_SCATTER_VALID flag was originally implemented
2637 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2638 		 * We have two.  So, just take that flag to mean that we
2639 		 * might have S/G lists, and look at the S/G segment count
2640 		 * to figure out whether that is the case for each individual
2641 		 * buffer.
2642 		 */
2643 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2644 			bus_dma_segment_t *req_sg;
2645 
2646 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2647 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2648 		} else
2649 			request = ccb->smpio.smp_request;
2650 
2651 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2652 			bus_dma_segment_t *rsp_sg;
2653 
2654 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2655 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2656 		} else
2657 			response = ccb->smpio.smp_response;
2658 		break;
2659 	case CAM_DATA_VADDR:
2660 		request = ccb->smpio.smp_request;
2661 		response = ccb->smpio.smp_response;
2662 		break;
2663 	default:
2664 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2665 		xpt_done(ccb);
2666 		return;
2667 	}
2668 #else /* __FreeBSD_version < 1000028 */
2669 	/*
2670 	 * XXX We don't yet support physical addresses here.
2671 	 */
2672 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2673 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2674 		    "supported\n", __func__);
2675 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2676 		xpt_done(ccb);
2677 		return;
2678 	}
2679 
2680 	/*
2681 	 * If the user wants to send an S/G list, check to make sure they
2682 	 * have single buffers.
2683 	 */
2684 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2685 		/*
2686 		 * The chip does not support more than one buffer for the
2687 		 * request or response.
2688 		 */
2689 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2690 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2691 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2692 			    "response buffer segments not supported for SMP\n",
2693 			    __func__);
2694 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2695 			xpt_done(ccb);
2696 			return;
2697 		}
2698 
2699 		/*
2700 		 * The CAM_SCATTER_VALID flag was originally implemented
2701 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2702 		 * We have two.  So, just take that flag to mean that we
2703 		 * might have S/G lists, and look at the S/G segment count
2704 		 * to figure out whether that is the case for each individual
2705 		 * buffer.
2706 		 */
2707 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2708 			bus_dma_segment_t *req_sg;
2709 
2710 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2711 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2712 		} else
2713 			request = ccb->smpio.smp_request;
2714 
2715 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2716 			bus_dma_segment_t *rsp_sg;
2717 
2718 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2719 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2720 		} else
2721 			response = ccb->smpio.smp_response;
2722 	} else {
2723 		request = ccb->smpio.smp_request;
2724 		response = ccb->smpio.smp_response;
2725 	}
2726 #endif /* __FreeBSD_version < 1000028 */
2727 
2728 	cm = mpr_alloc_command(sc);
2729 	if (cm == NULL) {
2730 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
2731 		    __func__);
2732 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2733 		xpt_done(ccb);
2734 		return;
2735 	}
2736 
2737 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2738 	bzero(req, sizeof(*req));
2739 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2740 
2741 	/* Allow the chip to use any route to this SAS address. */
2742 	req->PhysicalPort = 0xff;
2743 
2744 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2745 	req->SGLFlags =
2746 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2747 
2748 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2749 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2750 
2751 	mpr_init_sge(cm, req, &req->SGL);
2752 
2753 	/*
2754 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2755 	 * do one map command, and one busdma call in there.
2756 	 */
2757 	cm->cm_uio.uio_iov = cm->cm_iovec;
2758 	cm->cm_uio.uio_iovcnt = 2;
2759 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2760 
2761 	/*
2762 	 * The read/write flag isn't used by busdma, but set it just in
2763 	 * case.  This isn't exactly accurate, either, since we're going in
2764 	 * both directions.
2765 	 */
2766 	cm->cm_uio.uio_rw = UIO_WRITE;
2767 
2768 	cm->cm_iovec[0].iov_base = request;
2769 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2770 	cm->cm_iovec[1].iov_base = response;
2771 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2772 
2773 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2774 			       cm->cm_iovec[1].iov_len;
2775 
2776 	/*
2777 	 * Trigger a warning message in mpr_data_cb() for the user if we
2778 	 * wind up exceeding two S/G segments.  The chip expects one
2779 	 * segment for the request and another for the response.
2780 	 */
2781 	cm->cm_max_segs = 2;
2782 
2783 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2784 	cm->cm_complete = mprsas_smpio_complete;
2785 	cm->cm_complete_data = ccb;
2786 
2787 	/*
2788 	 * Tell the mapping code that we're using a uio, and that this is
2789 	 * an SMP passthrough request.  There is a little special-case
2790 	 * logic there (in mpr_data_cb()) to handle the bidirectional
2791 	 * transfer.
2792 	 */
2793 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2794 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2795 
2796 	/* The chip data format is little endian. */
2797 	req->SASAddress.High = htole32(sasaddr >> 32);
2798 	req->SASAddress.Low = htole32(sasaddr);
2799 
2800 	/*
2801 	 * XXX Note that we don't have a timeout/abort mechanism here.
2802 	 * From the manual, it looks like task management requests only
2803 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2804 	 * have a mechanism to retry requests in the event of a chip reset
2805 	 * at least.  Hopefully the chip will insure that any errors short
2806 	 * of that are relayed back to the driver.
2807 	 */
2808 	error = mpr_map_command(sc, cm);
2809 	if ((error != 0) && (error != EINPROGRESS)) {
2810 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2811 		    "mpr_map_command()\n", __func__, error);
2812 		goto bailout_error;
2813 	}
2814 
2815 	return;
2816 
2817 bailout_error:
2818 	mpr_free_command(sc, cm);
2819 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2820 	xpt_done(ccb);
2821 	return;
2822 }
2823 
/*
 * Handle an XPT_SMP_IO CCB: work out which SAS address the SMP request
 * should be routed to, then hand the CCB to mprsas_send_smpcmd().
 *
 * If the addressed device is itself an SMP target the request is sent
 * directly to it; otherwise it is routed to the device's parent, which
 * is expected to be the expander the device is attached to.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe path: look the parent up by device handle and
		 * verify that it really is an SMP target before using its
		 * SAS address.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * Current probe path: the parent's devinfo and SAS address
		 * were cached on the target itself during discovery.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2951 #endif //__FreeBSD_version >= 900026
2952 
2953 static void
2954 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2955 {
2956 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2957 	struct mpr_softc *sc;
2958 	struct mpr_command *tm;
2959 	struct mprsas_target *targ;
2960 
2961 	MPR_FUNCTRACE(sassc->sc);
2962 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2963 
2964 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
2965 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
2966 	sc = sassc->sc;
2967 	tm = mpr_alloc_command(sc);
2968 	if (tm == NULL) {
2969 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
2970 		    "mprsas_action_resetdev\n");
2971 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2972 		xpt_done(ccb);
2973 		return;
2974 	}
2975 
2976 	targ = &sassc->targets[ccb->ccb_h.target_id];
2977 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2978 	req->DevHandle = htole16(targ->handle);
2979 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2980 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2981 
2982 	/* SAS Hard Link Reset / SATA Link Reset */
2983 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2984 
2985 	tm->cm_data = NULL;
2986 	tm->cm_desc.HighPriority.RequestFlags =
2987 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2988 	tm->cm_complete = mprsas_resetdev_complete;
2989 	tm->cm_complete_data = ccb;
2990 
2991 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
2992 	    __func__, targ->tid);
2993 	tm->cm_targ = targ;
2994 	targ->flags |= MPRSAS_TARGET_INRESET;
2995 
2996 	mpr_map_command(sc, tm);
2997 }
2998 
2999 static void
3000 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3001 {
3002 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3003 	union ccb *ccb;
3004 
3005 	MPR_FUNCTRACE(sc);
3006 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3007 
3008 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3009 	ccb = tm->cm_complete_data;
3010 
3011 	/*
3012 	 * Currently there should be no way we can hit this case.  It only
3013 	 * happens when we have a failure to allocate chain frames, and
3014 	 * task management commands don't have S/G lists.
3015 	 */
3016 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3017 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3018 
3019 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3020 
3021 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3022 		    "handle %#04x! This should not happen!\n", __func__,
3023 		    tm->cm_flags, req->DevHandle);
3024 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3025 		goto bailout;
3026 	}
3027 
3028 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3029 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3030 
3031 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3032 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3033 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3034 		    CAM_LUN_WILDCARD);
3035 	}
3036 	else
3037 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3038 
3039 bailout:
3040 
3041 	mprsas_free_tm(sc, tm);
3042 	xpt_done(ccb);
3043 }
3044 
3045 static void
3046 mprsas_poll(struct cam_sim *sim)
3047 {
3048 	struct mprsas_softc *sassc;
3049 
3050 	sassc = cam_sim_softc(sim);
3051 
3052 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3053 		/* frequent debug messages during a panic just slow
3054 		 * everything down too much.
3055 		 */
3056 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3057 		    __func__);
3058 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3059 	}
3060 
3061 	mpr_intr_locked(sassc->sc);
3062 }
3063 
/*
 * CAM async event callback.  Two events are of interest:
 *
 *  AC_ADVINFO_CHANGED (on FreeBSD versions that deliver it): re-read the
 *  long read-capacity data for the LUN and update its cached EEDP
 *  (protection information) state.
 *
 *  AC_FOUND_DEVICE (on older versions): probe the new device for EEDP
 *  support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing record of this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN; create a record for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an advinfo
		 * CCB; this does not issue I/O to the device itself.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN is formatted with protection
		 * information, along with its logical block size.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3193 
3194 #if (__FreeBSD_version < 901503) || \
3195     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3196 static void
3197 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3198     struct ccb_getdev *cgd)
3199 {
3200 	struct mprsas_softc *sassc = sc->sassc;
3201 	struct ccb_scsiio *csio;
3202 	struct scsi_read_capacity_16 *scsi_cmd;
3203 	struct scsi_read_capacity_eedp *rcap_buf;
3204 	path_id_t pathid;
3205 	target_id_t targetid;
3206 	lun_id_t lunid;
3207 	union ccb *ccb;
3208 	struct cam_path *local_path;
3209 	struct mprsas_target *target;
3210 	struct mprsas_lun *lun;
3211 	uint8_t	found_lun;
3212 	char path_str[64];
3213 
3214 	pathid = cam_sim_path(sassc->sim);
3215 	targetid = xpt_path_target_id(path);
3216 	lunid = xpt_path_lun_id(path);
3217 
3218 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3219 	    "mprsas_check_eedp\n", targetid));
3220 	target = &sassc->targets[targetid];
3221 	if (target->handle == 0x0)
3222 		return;
3223 
3224 	/*
3225 	 * Determine if the device is EEDP capable.
3226 	 *
3227 	 * If this flag is set in the inquiry data, the device supports
3228 	 * protection information, and must support the 16 byte read capacity
3229 	 * command, otherwise continue without sending read cap 16.
3230 	 */
3231 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3232 		return;
3233 
3234 	/*
3235 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3236 	 * the LUN is formatted for EEDP support.
3237 	 */
3238 	ccb = xpt_alloc_ccb_nowait();
3239 	if (ccb == NULL) {
3240 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3241 		    "support.\n");
3242 		return;
3243 	}
3244 
3245 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3246 	    CAM_REQ_CMP) {
3247 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3248 		    "support.\n");
3249 		xpt_free_ccb(ccb);
3250 		return;
3251 	}
3252 
3253 	/*
3254 	 * If LUN is already in list, don't create a new one.
3255 	 */
3256 	found_lun = FALSE;
3257 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3258 		if (lun->lun_id == lunid) {
3259 			found_lun = TRUE;
3260 			break;
3261 		}
3262 	}
3263 	if (!found_lun) {
3264 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3265 		    M_NOWAIT | M_ZERO);
3266 		if (lun == NULL) {
3267 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3268 			    "EEDP support.\n");
3269 			xpt_free_path(local_path);
3270 			xpt_free_ccb(ccb);
3271 			return;
3272 		}
3273 		lun->lun_id = lunid;
3274 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3275 	}
3276 
3277 	xpt_path_string(local_path, path_str, sizeof(path_str));
3278 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3279 	    path_str, target->handle);
3280 
3281 	/*
3282 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3283 	 * mprsas_read_cap_done function will load the read cap info into the
3284 	 * LUN struct.
3285 	 */
3286 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3287 	    M_NOWAIT | M_ZERO);
3288 	if (rcap_buf == NULL) {
3289 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3290 		    "buffer for EEDP support.\n");
3291 		xpt_free_path(ccb->ccb_h.path);
3292 		xpt_free_ccb(ccb);
3293 		return;
3294 	}
3295 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3296 	csio = &ccb->csio;
3297 	csio->ccb_h.func_code = XPT_SCSI_IO;
3298 	csio->ccb_h.flags = CAM_DIR_IN;
3299 	csio->ccb_h.retry_count = 4;
3300 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3301 	csio->ccb_h.timeout = 60000;
3302 	csio->data_ptr = (uint8_t *)rcap_buf;
3303 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3304 	csio->sense_len = MPR_SENSE_LEN;
3305 	csio->cdb_len = sizeof(*scsi_cmd);
3306 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3307 
3308 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3309 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3310 	scsi_cmd->opcode = 0x9E;
3311 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3312 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3313 
3314 	ccb->ccb_h.ppriv_ptr1 = sassc;
3315 	xpt_action(ccb);
3316 }
3317 
/*
 * Completion callback for the internally generated READ CAPACITY(16)
 * command issued by mprsas_check_eedp().  Records the LUN's EEDP
 * formatting state and block size, then releases the buffer, path, and
 * CCB allocated by the caller.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself because this SCSI
	 * command was generated internally by the driver rather than by a
	 * peripheral.  This is currently the only place the driver issues
	 * a SCSI command internally; if more are added in the future, they
	 * will also need to release the devq themselves, since such
	 * commands never go back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit set: the LUN is formatted with protection. */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3386 #endif /* (__FreeBSD_version < 901503) || \
3387           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3388 
3389 void
3390 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3391     struct mprsas_target *target, lun_id_t lun_id)
3392 {
3393 	union ccb *ccb;
3394 	path_id_t path_id;
3395 
3396 	/*
3397 	 * Set the INRESET flag for this target so that no I/O will be sent to
3398 	 * the target until the reset has completed.  If an I/O request does
3399 	 * happen, the devq will be frozen.  The CCB holds the path which is
3400 	 * used to release the devq.  The devq is released and the CCB is freed
3401 	 * when the TM completes.
3402 	 */
3403 	ccb = xpt_alloc_ccb_nowait();
3404 	if (ccb) {
3405 		path_id = cam_sim_path(sc->sassc->sim);
3406 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3407 		    target->tid, lun_id) != CAM_REQ_CMP) {
3408 			xpt_free_ccb(ccb);
3409 		} else {
3410 			tm->cm_ccb = ccb;
3411 			tm->cm_targ = target;
3412 			target->flags |= MPRSAS_TARGET_INRESET;
3413 		}
3414 	}
3415 }
3416 
3417 int
3418 mprsas_startup(struct mpr_softc *sc)
3419 {
3420 	/*
3421 	 * Send the port enable message and set the wait_for_port_enable flag.
3422 	 * This flag helps to keep the simq frozen until all discovery events
3423 	 * are processed.
3424 	 */
3425 	sc->wait_for_port_enable = 1;
3426 	mprsas_send_portenable(sc);
3427 	return (0);
3428 }
3429 
3430 static int
3431 mprsas_send_portenable(struct mpr_softc *sc)
3432 {
3433 	MPI2_PORT_ENABLE_REQUEST *request;
3434 	struct mpr_command *cm;
3435 
3436 	MPR_FUNCTRACE(sc);
3437 
3438 	if ((cm = mpr_alloc_command(sc)) == NULL)
3439 		return (EBUSY);
3440 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3441 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3442 	request->MsgFlags = 0;
3443 	request->VP_ID = 0;
3444 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3445 	cm->cm_complete = mprsas_portenable_complete;
3446 	cm->cm_data = NULL;
3447 	cm->cm_sge = NULL;
3448 
3449 	mpr_map_command(sc, cm);
3450 	mpr_dprint(sc, MPR_XINFO,
3451 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3452 	    cm, cm->cm_req, cm->cm_complete);
3453 	return (0);
3454 }
3455 
3456 static void
3457 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3458 {
3459 	MPI2_PORT_ENABLE_REPLY *reply;
3460 	struct mprsas_softc *sassc;
3461 
3462 	MPR_FUNCTRACE(sc);
3463 	sassc = sc->sassc;
3464 
3465 	/*
3466 	 * Currently there should be no way we can hit this case.  It only
3467 	 * happens when we have a failure to allocate chain frames, and
3468 	 * port enable commands don't have S/G lists.
3469 	 */
3470 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3471 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3472 		    "This should not happen!\n", __func__, cm->cm_flags);
3473 	}
3474 
3475 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3476 	if (reply == NULL)
3477 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3478 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3479 	    MPI2_IOCSTATUS_SUCCESS)
3480 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3481 
3482 	mpr_free_command(sc, cm);
3483 	if (sc->mpr_ich.ich_arg != NULL) {
3484 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3485 		config_intrhook_disestablish(&sc->mpr_ich);
3486 		sc->mpr_ich.ich_arg = NULL;
3487 	}
3488 
3489 	/*
3490 	 * Done waiting for port enable to complete.  Decrement the refcount.
3491 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3492 	 * take place.
3493 	 */
3494 	sc->wait_for_port_enable = 0;
3495 	sc->port_enable_complete = 1;
3496 	wakeup(&sc->port_enable_complete);
3497 	mprsas_startup_decrement(sassc);
3498 }
3499 
3500 int
3501 mprsas_check_id(struct mprsas_softc *sassc, int id)
3502 {
3503 	struct mpr_softc *sc = sassc->sc;
3504 	char *ids;
3505 	char *name;
3506 
3507 	ids = &sc->exclude_ids[0];
3508 	while((name = strsep(&ids, ",")) != NULL) {
3509 		if (name[0] == '\0')
3510 			continue;
3511 		if (strtol(name, NULL, 0) == (long)id)
3512 			return (1);
3513 	}
3514 
3515 	return (0);
3516 }
3517 
3518 void
3519 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3520 {
3521 	struct mprsas_softc *sassc;
3522 	struct mprsas_lun *lun, *lun_tmp;
3523 	struct mprsas_target *targ;
3524 	int i;
3525 
3526 	sassc = sc->sassc;
3527 	/*
3528 	 * The number of targets is based on IOC Facts, so free all of
3529 	 * the allocated LUNs for each target and then the target buffer
3530 	 * itself.
3531 	 */
3532 	for (i=0; i< maxtargets; i++) {
3533 		targ = &sassc->targets[i];
3534 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3535 			free(lun, M_MPR);
3536 		}
3537 	}
3538 	free(sassc->targets, M_MPR);
3539 
3540 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3541 	    M_MPR, M_WAITOK|M_ZERO);
3542 	if (!sassc->targets) {
3543 		panic("%s failed to alloc targets with error %d\n",
3544 		    __func__, ENOMEM);
3545 	}
3546 }
3547