xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision c7d813a93eeb447470734c9bc0c140d90a54c271)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/mpr/mpi/mpi2_type.h>
76 #include <dev/mpr/mpi/mpi2.h>
77 #include <dev/mpr/mpi/mpi2_ioc.h>
78 #include <dev/mpr/mpi/mpi2_sas.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
86 
87 #define MPRSAS_DISCOVERY_TIMEOUT	20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
89 
90 /*
91  * static array to check SCSI OpCode for EEDP protection bits
92  */
93 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
114 
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
116 
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
123 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
124 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
125 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
126 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
127 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
128     struct mpr_command *cm);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 #if (__FreeBSD_version < 901503) || \
132     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
133 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
134     struct ccb_getdev *cgd);
135 static void mprsas_read_cap_done(struct cam_periph *periph,
136     union ccb *done_ccb);
137 #endif
138 static int mprsas_send_portenable(struct mpr_softc *sc);
139 static void mprsas_portenable_complete(struct mpr_softc *sc,
140     struct mpr_command *cm);
141 
142 #if __FreeBSD_version >= 900026
143 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
144 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
145     uint64_t sasaddr);
146 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
147 #endif //FreeBSD_version >= 900026
148 
149 struct mprsas_target *
150 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
151     uint16_t handle)
152 {
153 	struct mprsas_target *target;
154 	int i;
155 
156 	for (i = start; i < sassc->maxtargets; i++) {
157 		target = &sassc->targets[i];
158 		if (target->handle == handle)
159 			return (target);
160 	}
161 
162 	return (NULL);
163 }
164 
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166  * commands before device handles have been found by discovery.  Since
167  * discovery involves reading config pages and possibly sending commands,
168  * discovery actions may continue even after we receive the end of discovery
169  * event, so refcount discovery actions instead of assuming we can unfreeze
170  * the simq when we get the event.
171  */
/*
 * Bump the discovery refcount.  On the 0 -> 1 transition (only while
 * MPRSAS_IN_STARTUP is set) freeze the SIM queue, and on supported
 * kernel versions also hold off boot, until mprsas_startup_decrement()
 * drops the count back to zero.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192 
193 void
194 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
195 {
196 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
197 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
198 		xpt_release_simq(sassc->sim, 1);
199 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
200 	}
201 }
202 
/*
 * Drop the discovery refcount taken by mprsas_startup_increment().
 * When the count reaches zero, clear MPRSAS_IN_STARTUP and release the
 * SIM queue; depending on kernel version either release the boot hold
 * or trigger a full wildcard rescan so CAM picks up the final topology.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
228 
229 /* The firmware requires us to stop sending commands when we're doing task
230  * management, so refcount the TMs and keep the simq frozen when any are in
231  * use.
232  */
/*
 * Allocate a high-priority command frame for a task management request.
 * Returns NULL when no high-priority command is available.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);

	return (mpr_alloc_high_priority_command(sc));
}
242 
/*
 * Release a task management command allocated by mprsas_alloc_tm().
 * Undoes the per-device freeze set up for the TM: clears the target's
 * INRESET flag, releases the device queue, and frees the path/CCB that
 * were used for the freeze before returning the command to the
 * high-priority free list.  Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/*
	 * NOTE(review): 0xFFFFFFFF stored in a signed int prints as -1
	 * via the %d below when the TM has no target attached.
	 */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
271 
272 void
273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
274 {
275 	struct mprsas_softc *sassc = sc->sassc;
276 	path_id_t pathid;
277 	target_id_t targetid;
278 	union ccb *ccb;
279 
280 	MPR_FUNCTRACE(sc);
281 	pathid = cam_sim_path(sassc->sim);
282 	if (targ == NULL)
283 		targetid = CAM_TARGET_WILDCARD;
284 	else
285 		targetid = targ - sassc->targets;
286 
287 	/*
288 	 * Allocate a CCB and schedule a rescan.
289 	 */
290 	ccb = xpt_alloc_ccb_nowait();
291 	if (ccb == NULL) {
292 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
293 		return;
294 	}
295 
296 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
297 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
299 		xpt_free_ccb(ccb);
300 		return;
301 	}
302 
303 	if (targetid == CAM_TARGET_WILDCARD)
304 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
305 	else
306 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
307 
308 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
309 	xpt_rescan(ccb);
310 }
311 
/*
 * Log a printf-style message about a command, prefixed with as much
 * context as is available: the CAM path and CDB (when the command has a
 * CCB attached) or a "noperiph" sim/bus/target/lun tuple otherwise,
 * followed by the SMID.  Does nothing unless 'level' is enabled in the
 * adapter's debug mask.  Output is built in a fixed on-stack sbuf.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Command has a CCB: log its CAM path, plus the CDB and
		 * transfer length for SCSI I/O requests. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: synthesize a periph-less path description. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
356 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  On success, clears the target's
 * identity fields so the slot can be reused; on failure the devname and
 * sasaddr are deliberately left intact (see comment below).  The TM is
 * always freed before returning.
 *
 * NOTE(review): unlike mprsas_remove_complete(), this does not free the
 * per-target LUN list — confirm volumes never accumulate LUN entries.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/* A NULL reply most likely means a diag reset occurred. */
	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
420 
421 
422 /*
423  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
424  * Otherwise Volume Delete is same as Bare Drive Removal.
425  */
426 void
427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
428 {
429 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
430 	struct mpr_softc *sc;
431 	struct mpr_command *cm;
432 	struct mprsas_target *targ = NULL;
433 
434 	MPR_FUNCTRACE(sassc->sc);
435 	sc = sassc->sc;
436 
437 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
438 	if (targ == NULL) {
439 		/* FIXME: what is the action? */
440 		/* We don't know about this device? */
441 		mpr_dprint(sc, MPR_ERROR,
442 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
443 		return;
444 	}
445 
446 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
447 
448 	cm = mprsas_alloc_tm(sc);
449 	if (cm == NULL) {
450 		mpr_dprint(sc, MPR_ERROR,
451 		    "%s: command alloc failure\n", __func__);
452 		return;
453 	}
454 
455 	mprsas_rescan_target(sc, targ);
456 
457 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 	req->DevHandle = targ->handle;
459 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
461 
462 	/* SAS Hard Link Reset / SATA Link Reset */
463 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
464 
465 	cm->cm_targ = targ;
466 	cm->cm_data = NULL;
467 	cm->cm_desc.HighPriority.RequestFlags =
468 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
469 	cm->cm_complete = mprsas_remove_volume;
470 	cm->cm_complete_data = (void *)(uintptr_t)handle;
471 
472 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
473 	    __func__, targ->tid);
474 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
475 
476 	mpr_map_command(sc, cm);
477 }
478 
479 /*
480  * The MPT3 firmware performs debounce on the link to avoid transient link
481  * errors and false removals.  When it does decide that link has been lost
482  * and a device needs to go away, it expects that the host will perform a
483  * target reset and then an op remove.  The reset has the side-effect of
484  * aborting any outstanding requests for the device, which is required for
485  * the op-remove to succeed.  It's not clear if the host should check for
486  * the device coming back alive after the reset.
487  */
/*
 * Begin removal of a bare (non-volume) device identified by its
 * firmware handle: mark the target INREMOVAL, schedule a CAM rescan,
 * and send a high-priority target-reset TM.  The reset aborts any
 * outstanding I/O; mprsas_remove_device() then issues the op-remove
 * when the TM completes.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	/* Build the target-reset TM in the command's request frame. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mprsas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
542 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  If the reset completed (reply != NULL),
 * the same command frame is reused to issue the SAS IO Unit Control
 * OP_REMOVE_DEVICE request, whose completion is handled by
 * mprsas_remove_complete().  Any commands still queued on the target
 * are completed back to CAM with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* Flush any commands the reset stranded on the target's queue
	 * back to CAM; the device is gone. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
617 
/*
 * Completion handler for the SAS IO Unit Control OP_REMOVE_DEVICE
 * request issued by mprsas_remove_device().  On success, clears the
 * target's identity fields and frees its LUN list so the slot can be
 * reused; on failure the devname/sasaddr are deliberately preserved
 * (see comment below).  Always frees the TM before returning.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Discard the cached per-LUN state for the departed device. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
689 
690 static int
691 mprsas_register_events(struct mpr_softc *sc)
692 {
693 	uint8_t events[16];
694 
695 	bzero(events, 16);
696 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
697 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
698 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
699 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
701 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
702 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
704 	setbit(events, MPI2_EVENT_IR_VOLUME);
705 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
706 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
707 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
708 	setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
709 
710 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
711 	    &sc->sassc->mprsas_eh);
712 
713 	return (0);
714 }
715 
/*
 * Attach the CAM/SAS layer for an adapter: allocate the per-SAS softc
 * and target table, create the SIM queue and SIM, start the firmware
 * event taskqueue, register the SCSI bus, freeze the SIM queue until
 * discovery finishes, and register for CAM async events (used for EEDP
 * detection).  On any error the partial attach is torn down via
 * mpr_detach_sas().  Returns 0 on success or an errno value.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);

	/* NOTE(review): M_WAITOK malloc cannot return NULL on FreeBSD,
	 * so this and the following NULL check are dead code. */
	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	if (!sassc) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One SIMQ slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);
	return (error);
}
885 
/*
 * Tear down the CAM/SAS layer, tolerating a partially-completed attach:
 * deregister firmware events, drain/free the event taskqueue (without
 * the lock held, to let queued tasks finish), deregister the async
 * handler and SCSI bus, then free the SIM queue, per-target LUN lists,
 * target table, and softc.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	/* Nothing to do if attach never got far enough to set sc->sassc. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Drop the startup freeze if discovery never completed. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any cached per-LUN state for every target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
945 
946 void
947 mprsas_discovery_end(struct mprsas_softc *sassc)
948 {
949 	struct mpr_softc *sc = sassc->sc;
950 
951 	MPR_FUNCTRACE(sc);
952 
953 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
954 		callout_stop(&sassc->discovery_callout);
955 
956 }
957 
/*
 * CAM action entry point for this SIM.  Dispatches on the CCB function
 * code with the driver lock held (asserted below).  Requests handled
 * synchronously fall through to xpt_done() at the bottom; requests that
 * complete asynchronously (SCSI I/O, SMP I/O, device reset) return
 * early and are completed from their own completion paths.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The initiator occupies the last target ID slot. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = (sc->chain_frame_size /
		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the negotiated link rate code into a CAM
		 * bitrate value. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate code: mark the speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completed asynchronously; skip the xpt_done() below. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1104 
1105 static void
1106 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1107     target_id_t target_id, lun_id_t lun_id)
1108 {
1109 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1110 	struct cam_path *path;
1111 
1112 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1113 	    ac_code, target_id, (uintmax_t)lun_id);
1114 
1115 	if (xpt_create_path(&path, NULL,
1116 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1117 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1118 		    "notification\n");
1119 		return;
1120 	}
1121 
1122 	xpt_async(ac_code, path, NULL);
1123 	xpt_free_path(path);
1124 }
1125 
/*
 * Complete or wake every outstanding command after a diag reset, since
 * the hardware will never return replies for them.  Each command is
 * completed with cm_reply == NULL so completion handlers can tell this
 * apart from a normal completion.  Called with the driver lock held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): index 0 is skipped — presumably SMID 0 is
	 * reserved; confirm against command allocation. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Mark polled commands complete so any poller sees them
		 * as done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Walk the active-command counter back toward zero; it is
		 * clamped (with a warning) if it underflows. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1178 
/*
 * Restore SAS-layer state after a controller reinit (diag reset): put
 * the driver back into startup/discovery mode, tell CAM the bus was
 * reset, complete every in-flight command, and invalidate all device
 * handles so rediscovery can assign fresh ones.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* NOTE(review): plain assignment (not |=) clears any other
		 * target flags while marking the diag reset — appears
		 * intentional. */
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1221 static void
1222 mprsas_tm_timeout(void *data)
1223 {
1224 	struct mpr_command *tm = data;
1225 	struct mpr_softc *sc = tm->cm_sc;
1226 
1227 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1228 
1229 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1230 	    "out\n", tm);
1231 	mpr_reinit(sc);
1232 }
1233 
1234 static void
1235 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1236 {
1237 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1238 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1239 	unsigned int cm_count = 0;
1240 	struct mpr_command *cm;
1241 	struct mprsas_target *targ;
1242 
1243 	callout_stop(&tm->cm_callout);
1244 
1245 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1246 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1247 	targ = tm->cm_targ;
1248 
1249 	/*
1250 	 * Currently there should be no way we can hit this case.  It only
1251 	 * happens when we have a failure to allocate chain frames, and
1252 	 * task management commands don't have S/G lists.
1253 	 */
1254 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1255 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1256 		    "This should not happen!\n", __func__, tm->cm_flags);
1257 		mprsas_free_tm(sc, tm);
1258 		return;
1259 	}
1260 
1261 	if (reply == NULL) {
1262 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1263 		    "%p\n", tm);
1264 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1265 			/* this completion was due to a reset, just cleanup */
1266 			targ->tm = NULL;
1267 			mprsas_free_tm(sc, tm);
1268 		}
1269 		else {
1270 			/* we should have gotten a reply. */
1271 			mpr_reinit(sc);
1272 		}
1273 		return;
1274 	}
1275 
1276 	mprsas_log_command(tm, MPR_RECOVERY,
1277 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1278 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1279 	    le32toh(reply->TerminationCount));
1280 
1281 	/* See if there are any outstanding commands for this LUN.
1282 	 * This could be made more efficient by using a per-LU data
1283 	 * structure of some sort.
1284 	 */
1285 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1286 		if (cm->cm_lun == tm->cm_lun)
1287 			cm_count++;
1288 	}
1289 
1290 	if (cm_count == 0) {
1291 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1292 		    "logical unit %u finished recovery after reset\n",
1293 		    tm->cm_lun, tm);
1294 
1295 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1296 		    tm->cm_lun);
1297 
1298 		/* we've finished recovery for this logical unit.  check and
1299 		 * see if some other logical unit has a timedout command
1300 		 * that needs to be processed.
1301 		 */
1302 		cm = TAILQ_FIRST(&targ->timedout_commands);
1303 		if (cm) {
1304 			mprsas_send_abort(sc, tm, cm);
1305 		}
1306 		else {
1307 			targ->tm = NULL;
1308 			mprsas_free_tm(sc, tm);
1309 		}
1310 	}
1311 	else {
1312 		/* if we still have commands for this LUN, the reset
1313 		 * effectively failed, regardless of the status reported.
1314 		 * Escalate to a target reset.
1315 		 */
1316 		mprsas_log_command(tm, MPR_RECOVERY,
1317 		    "logical unit reset complete for tm %p, but still have %u "
1318 		    "command(s)\n", tm, cm_count);
1319 		mprsas_send_reset(sc, tm,
1320 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1321 	}
1322 }
1323 
/*
 * Completion handler for a TARGET_RESET task management command.  If
 * the target has no more outstanding commands, recovery is complete:
 * announce the reset to CAM and release the TM.  Otherwise the reset
 * effectively failed, and the only remaining escalation is a full
 * controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
		    "%p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1393 
/* Seconds to wait for a TM reset before escalating to a controller
 * reinit via mprsas_tm_timeout(). */
#define MPR_RESET_TIMEOUT 30

/*
 * Issue a SCSI task management reset (logical unit reset or target
 * reset) for the target associated with 'tm'.  Installs the matching
 * completion handler, arms the TM timeout callout, and maps/sends the
 * command.  Returns 0 on success, -1 on an invalid handle or reset
 * type, or the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		/* A target reset covers all LUNs on the target. */
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Continuation of the "sending ... reset" log line above. */
	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1464 
1465 
/*
 * Completion handler for an ABORT_TASK task management command.  On a
 * good reply: if the target has no more timed-out commands, recovery is
 * done; if the next timed-out command differs from the one just
 * aborted, continue by aborting it; otherwise the abort did not take
 * effect and recovery escalates to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1547 
1548 #define MPR_ABORT_TIMEOUT 5
1549 
1550 static int
1551 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1552     struct mpr_command *cm)
1553 {
1554 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1555 	struct mprsas_target *targ;
1556 	int err;
1557 
1558 	targ = cm->cm_targ;
1559 	if (targ->handle == 0) {
1560 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1561 		    __func__, cm->cm_ccb->ccb_h.target_id);
1562 		return -1;
1563 	}
1564 
1565 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1566 	    "Aborting command %p\n", cm);
1567 
1568 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1569 	req->DevHandle = htole16(targ->handle);
1570 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1571 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1572 
1573 	/* XXX Need to handle invalid LUNs */
1574 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1575 
1576 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1577 
1578 	tm->cm_data = NULL;
1579 	tm->cm_desc.HighPriority.RequestFlags =
1580 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1581 	tm->cm_complete = mprsas_abort_complete;
1582 	tm->cm_complete_data = (void *)tm;
1583 	tm->cm_targ = cm->cm_targ;
1584 	tm->cm_lun = cm->cm_lun;
1585 
1586 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1587 	    mprsas_tm_timeout, tm);
1588 
1589 	targ->aborts++;
1590 
1591 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1592 	    __func__, targ->tid);
1593 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1594 
1595 	err = mpr_map_command(sc, tm);
1596 	if (err)
1597 		mpr_dprint(sc, MPR_RECOVERY,
1598 		    "error %d sending abort for cm %p SMID %u\n",
1599 		    err, cm, req->TaskMID);
1600 	return err;
1601 }
1602 
/*
 * Callout handler for a SCSI I/O command timeout.  First polls the
 * interrupt handler in case the completion is merely pending; if the
 * command truly timed out it is marked TIMEDOUT, queued on the target's
 * timedout list, and recovery is started by sending an abort (or joined,
 * if a TM is already active for this target).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout %d cm %p target "
	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm,  targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
		    "allocate a tm\n", cm);
	}
}
1682 
1683 static void
1684 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1685 {
1686 	MPI2_SCSI_IO_REQUEST *req;
1687 	struct ccb_scsiio *csio;
1688 	struct mpr_softc *sc;
1689 	struct mprsas_target *targ;
1690 	struct mprsas_lun *lun;
1691 	struct mpr_command *cm;
1692 	uint8_t i, lba_byte, *ref_tag_addr;
1693 	uint16_t eedp_flags;
1694 	uint32_t mpi_control;
1695 
1696 	sc = sassc->sc;
1697 	MPR_FUNCTRACE(sc);
1698 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1699 
1700 	csio = &ccb->csio;
1701 	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1702 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1703 	     csio->ccb_h.target_id));
1704 	targ = &sassc->targets[csio->ccb_h.target_id];
1705 	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1706 	if (targ->handle == 0x0) {
1707 		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1708 		    __func__, csio->ccb_h.target_id);
1709 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1710 		xpt_done(ccb);
1711 		return;
1712 	}
1713 	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1714 		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1715 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1716 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1717 		xpt_done(ccb);
1718 		return;
1719 	}
1720 	/*
1721 	 * Sometimes, it is possible to get a command that is not "In
1722 	 * Progress" and was actually aborted by the upper layer.  Check for
1723 	 * this here and complete the command without error.
1724 	 */
1725 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1726 		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1727 		    "target %u\n", __func__, csio->ccb_h.target_id);
1728 		xpt_done(ccb);
1729 		return;
1730 	}
1731 	/*
1732 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1733 	 * that the volume has timed out.  We want volumes to be enumerated
1734 	 * until they are deleted/removed, not just failed.
1735 	 */
1736 	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1737 		if (targ->devinfo == 0)
1738 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1739 		else
1740 			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1741 		xpt_done(ccb);
1742 		return;
1743 	}
1744 
1745 	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1746 		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1747 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1748 		xpt_done(ccb);
1749 		return;
1750 	}
1751 
1752 	/*
1753 	 * If target has a reset in progress, freeze the devq and return.  The
1754 	 * devq will be released when the TM reset is finished.
1755 	 */
1756 	if (targ->flags & MPRSAS_TARGET_INRESET) {
1757 		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1758 		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1759 		    __func__, targ->tid);
1760 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1761 		xpt_done(ccb);
1762 		return;
1763 	}
1764 
1765 	cm = mpr_alloc_command(sc);
1766 	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1767 		if (cm != NULL) {
1768 			mpr_free_command(sc, cm);
1769 		}
1770 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1771 			xpt_freeze_simq(sassc->sim, 1);
1772 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1773 		}
1774 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1775 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1776 		xpt_done(ccb);
1777 		return;
1778 	}
1779 
1780 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1781 	bzero(req, sizeof(*req));
1782 	req->DevHandle = htole16(targ->handle);
1783 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1784 	req->MsgFlags = 0;
1785 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1786 	req->SenseBufferLength = MPR_SENSE_LEN;
1787 	req->SGLFlags = 0;
1788 	req->ChainOffset = 0;
1789 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1790 	req->SGLOffset1= 0;
1791 	req->SGLOffset2= 0;
1792 	req->SGLOffset3= 0;
1793 	req->SkipCount = 0;
1794 	req->DataLength = htole32(csio->dxfer_len);
1795 	req->BidirectionalDataLength = 0;
1796 	req->IoFlags = htole16(csio->cdb_len);
1797 	req->EEDPFlags = 0;
1798 
1799 	/* Note: BiDirectional transfers are not supported */
1800 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1801 	case CAM_DIR_IN:
1802 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1803 		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1804 		break;
1805 	case CAM_DIR_OUT:
1806 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1807 		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1808 		break;
1809 	case CAM_DIR_NONE:
1810 	default:
1811 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1812 		break;
1813 	}
1814 
1815 	if (csio->cdb_len == 32)
1816 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1817 	/*
1818 	 * It looks like the hardware doesn't require an explicit tag
1819 	 * number for each transaction.  SAM Task Management not supported
1820 	 * at the moment.
1821 	 */
1822 	switch (csio->tag_action) {
1823 	case MSG_HEAD_OF_Q_TAG:
1824 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1825 		break;
1826 	case MSG_ORDERED_Q_TAG:
1827 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1828 		break;
1829 	case MSG_ACA_TASK:
1830 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1831 		break;
1832 	case CAM_TAG_ACTION_NONE:
1833 	case MSG_SIMPLE_Q_TAG:
1834 	default:
1835 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1836 		break;
1837 	}
1838 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1839 	req->Control = htole32(mpi_control);
1840 
1841 	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1842 		mpr_free_command(sc, cm);
1843 		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1844 		xpt_done(ccb);
1845 		return;
1846 	}
1847 
1848 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1849 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1850 	else {
1851 		KASSERT(csio->cdb_len <= IOCDBLEN,
1852 		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER is not set",
1853 		     csio->cdb_len));
1854 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1855 	}
1856 	req->IoFlags = htole16(csio->cdb_len);
1857 
1858 	/*
1859 	 * Check if EEDP is supported and enabled.  If it is then check if the
1860 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1861 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1862 	 * for EEDP transfer.
1863 	 */
1864 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1865 	if (sc->eedp_enabled && eedp_flags) {
1866 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1867 			if (lun->lun_id == csio->ccb_h.target_lun) {
1868 				break;
1869 			}
1870 		}
1871 
1872 		if ((lun != NULL) && (lun->eedp_formatted)) {
1873 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1874 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1875 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1876 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1877 			req->EEDPFlags = htole16(eedp_flags);
1878 
1879 			/*
1880 			 * If CDB less than 32, fill in Primary Ref Tag with
1881 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1882 			 * already there.  Also, set protection bit.  FreeBSD
1883 			 * currently does not support CDBs bigger than 16, but
1884 			 * the code doesn't hurt, and will be here for the
1885 			 * future.
1886 			 */
1887 			if (csio->cdb_len != 32) {
1888 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1889 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1890 				    PrimaryReferenceTag;
1891 				for (i = 0; i < 4; i++) {
1892 					*ref_tag_addr =
1893 					    req->CDB.CDB32[lba_byte + i];
1894 					ref_tag_addr++;
1895 				}
1896 				req->CDB.EEDP32.PrimaryReferenceTag =
1897 				    htole32(req->
1898 				    CDB.EEDP32.PrimaryReferenceTag);
1899 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1900 				    0xFFFF;
1901 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1902 				    0x20;
1903 			} else {
1904 				eedp_flags |=
1905 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1906 				req->EEDPFlags = htole16(eedp_flags);
1907 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1908 				    0x1F) | 0x20;
1909 			}
1910 		}
1911 	}
1912 
1913 	cm->cm_length = csio->dxfer_len;
1914 	if (cm->cm_length != 0) {
1915 		cm->cm_data = ccb;
1916 		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1917 	} else {
1918 		cm->cm_data = NULL;
1919 	}
1920 	cm->cm_sge = &req->SGL;
1921 	cm->cm_sglsize = (32 - 24) * 4;
1922 	cm->cm_complete = mprsas_scsiio_complete;
1923 	cm->cm_complete_data = ccb;
1924 	cm->cm_targ = targ;
1925 	cm->cm_lun = csio->ccb_h.target_lun;
1926 	cm->cm_ccb = ccb;
1927 	/*
1928 	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1929 	 * and set descriptor type.
1930 	 */
1931 	if (targ->scsi_req_desc_type ==
1932 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1933 		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1934 		cm->cm_desc.FastPathSCSIIO.RequestFlags =
1935 		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1936 		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1937 	} else {
1938 		cm->cm_desc.SCSIIO.RequestFlags =
1939 		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1940 		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1941 	}
1942 
1943 #if __FreeBSD_version >= 1000029
1944 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1945 	    mprsas_scsiio_timeout, cm, 0);
1946 #else //__FreeBSD_version < 1000029
1947 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1948 	    mprsas_scsiio_timeout, cm);
1949 #endif //__FreeBSD_version >= 1000029
1950 
1951 	targ->issued++;
1952 	targ->outstanding++;
1953 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1954 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1955 
1956 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1957 	    __func__, cm, ccb, targ->outstanding);
1958 
1959 	mpr_map_command(sc, cm);
1960 	return;
1961 }
1962 
1963 static void
1964 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1965 {
1966         char *desc;
1967 
1968         switch (response_code) {
1969         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1970                 desc = "task management request completed";
1971                 break;
1972         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1973                 desc = "invalid frame";
1974                 break;
1975         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1976                 desc = "task management request not supported";
1977                 break;
1978         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1979                 desc = "task management request failed";
1980                 break;
1981         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1982                 desc = "task management request succeeded";
1983                 break;
1984         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1985                 desc = "invalid lun";
1986                 break;
1987         case 0xA:
1988                 desc = "overlapped tag attempted";
1989                 break;
1990         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1991                 desc = "task queued, however not sent to target";
1992                 break;
1993         default:
1994                 desc = "unknown";
1995                 break;
1996         }
1997 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1998 	    desc);
1999 }
2000 
/**
 * mpr_sc_failed_io_info - log details of a non-successful SCSI_IO request
 *
 * Translates the IOCStatus, SCSIStatus, and SCSIState fields of a SCSI IO
 * reply into human-readable strings and prints them at the MPR_XINFO debug
 * level, along with enclosure information for the target when available.
 * If the reply carries valid autosense data the sense buffer is printed,
 * and any SAS response-info code is decoded via mpr_response_code().
 * Purely diagnostic; no driver or CAM state is modified.
 */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* SCSIState flag names are accumulated into this shared scratch
	 * buffer; see the strcat() calls below. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* NOTE(review): log info 0x31170000 is silently suppressed --
	 * presumably a known/noisy firmware code; confirm against the IOC
	 * log-info documentation. */
	if (log_info == 0x31170000)
		return;

	/* Map the masked IOCStatus value to a descriptive string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Map the SCSI status byte to a descriptive string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of the SCSIState flag names in the
	 * scratch buffer.  NOTE(review): this assumes sc->tmp_string can
	 * hold all of the flag strings (~80 bytes worst case) -- confirm
	 * its size where it is declared.  When no state bits are set,
	 * desc_scsi_state is simply repointed at a literal " ".
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* Include enclosure/slot/connector info when the target has it. */
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the sense buffer when autosense data is valid and XINFO
	 * debugging is enabled. */
	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode the SAS response code (low byte of ResponseInfo). */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}
2162 
/*
 * Completion handler for SCSI IO commands.  Called with the mpr mutex held.
 *
 * Stops the command's timeout callout, syncs and unloads the data DMA map,
 * updates per-target accounting, translates the firmware reply (if any)
 * into a CAM CCB status, handles several special cases (recovery logging,
 * TLR probing via INQUIRY VPD page 0, SATA SSU-support detection), logs
 * failure details, and completes the CCB back to CAM.
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mprsas_target *target;
	target_id_t target_id;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed, so cancel its timeout callout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	/* rep is NULL when the command completed without a reply frame. */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: the command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Log completions that race with error recovery: a command we
	 * timed out, a completion while a task-management request for the
	 * target is in flight, or a completion during diag reset.
	 */
	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_INFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply frame means full success. */
				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
				csio->scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Translate the firmware's IOCStatus into a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy firmware-supplied autosense data into the CCB. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Allocation length from INQUIRY CDB bytes 3-4;
			 * subtract the residual so only bytes actually
			 * returned by the device are scanned. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mprsas_log_command(cm, MPR_INFO,
		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Emit detailed failure diagnostics (no-op unless debug enabled
	 * inside, and it returns early for a suppressed log-info code). */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	/* Freeze the dev queue on any non-success status so retries are
	 * ordered correctly after recovery. */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2548 
2549 #if __FreeBSD_version >= 900026
2550 static void
2551 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2552 {
2553 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2554 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2555 	uint64_t sasaddr;
2556 	union ccb *ccb;
2557 
2558 	ccb = cm->cm_complete_data;
2559 
2560 	/*
2561 	 * Currently there should be no way we can hit this case.  It only
2562 	 * happens when we have a failure to allocate chain frames, and SMP
2563 	 * commands require two S/G elements only.  That should be handled
2564 	 * in the standard request size.
2565 	 */
2566 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2567 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2568 		    "request!\n", __func__, cm->cm_flags);
2569 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2570 		goto bailout;
2571         }
2572 
2573 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2574 	if (rpl == NULL) {
2575 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2576 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2577 		goto bailout;
2578 	}
2579 
2580 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2581 	sasaddr = le32toh(req->SASAddress.Low);
2582 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2583 
2584 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2585 	    MPI2_IOCSTATUS_SUCCESS ||
2586 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2587 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2588 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2589 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2590 		goto bailout;
2591 	}
2592 
2593 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2594 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
2595 
2596 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2597 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2598 	else
2599 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2600 
2601 bailout:
2602 	/*
2603 	 * We sync in both directions because we had DMAs in the S/G list
2604 	 * in both directions.
2605 	 */
2606 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2607 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2608 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2609 	mpr_free_command(sc, cm);
2610 	xpt_done(ccb);
2611 }
2612 
2613 static void
2614 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2615 {
2616 	struct mpr_command *cm;
2617 	uint8_t *request, *response;
2618 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2619 	struct mpr_softc *sc;
2620 	struct sglist *sg;
2621 	int error;
2622 
2623 	sc = sassc->sc;
2624 	sg = NULL;
2625 	error = 0;
2626 
2627 #if (__FreeBSD_version >= 1000028) || \
2628     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2629 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2630 	case CAM_DATA_PADDR:
2631 	case CAM_DATA_SG_PADDR:
2632 		/*
2633 		 * XXX We don't yet support physical addresses here.
2634 		 */
2635 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2636 		    "supported\n", __func__);
2637 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2638 		xpt_done(ccb);
2639 		return;
2640 	case CAM_DATA_SG:
2641 		/*
2642 		 * The chip does not support more than one buffer for the
2643 		 * request or response.
2644 		 */
2645 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2646 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2647 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2648 			    "response buffer segments not supported for SMP\n",
2649 			    __func__);
2650 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2651 			xpt_done(ccb);
2652 			return;
2653 		}
2654 
2655 		/*
2656 		 * The CAM_SCATTER_VALID flag was originally implemented
2657 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2658 		 * We have two.  So, just take that flag to mean that we
2659 		 * might have S/G lists, and look at the S/G segment count
2660 		 * to figure out whether that is the case for each individual
2661 		 * buffer.
2662 		 */
2663 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2664 			bus_dma_segment_t *req_sg;
2665 
2666 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2667 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2668 		} else
2669 			request = ccb->smpio.smp_request;
2670 
2671 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2672 			bus_dma_segment_t *rsp_sg;
2673 
2674 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2675 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2676 		} else
2677 			response = ccb->smpio.smp_response;
2678 		break;
2679 	case CAM_DATA_VADDR:
2680 		request = ccb->smpio.smp_request;
2681 		response = ccb->smpio.smp_response;
2682 		break;
2683 	default:
2684 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2685 		xpt_done(ccb);
2686 		return;
2687 	}
2688 #else /* __FreeBSD_version < 1000028 */
2689 	/*
2690 	 * XXX We don't yet support physical addresses here.
2691 	 */
2692 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2693 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2694 		    "supported\n", __func__);
2695 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2696 		xpt_done(ccb);
2697 		return;
2698 	}
2699 
2700 	/*
2701 	 * If the user wants to send an S/G list, check to make sure they
2702 	 * have single buffers.
2703 	 */
2704 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2705 		/*
2706 		 * The chip does not support more than one buffer for the
2707 		 * request or response.
2708 		 */
2709 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2710 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2711 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2712 			    "response buffer segments not supported for SMP\n",
2713 			    __func__);
2714 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2715 			xpt_done(ccb);
2716 			return;
2717 		}
2718 
2719 		/*
2720 		 * The CAM_SCATTER_VALID flag was originally implemented
2721 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2722 		 * We have two.  So, just take that flag to mean that we
2723 		 * might have S/G lists, and look at the S/G segment count
2724 		 * to figure out whether that is the case for each individual
2725 		 * buffer.
2726 		 */
2727 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2728 			bus_dma_segment_t *req_sg;
2729 
2730 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2731 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2732 		} else
2733 			request = ccb->smpio.smp_request;
2734 
2735 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2736 			bus_dma_segment_t *rsp_sg;
2737 
2738 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2739 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2740 		} else
2741 			response = ccb->smpio.smp_response;
2742 	} else {
2743 		request = ccb->smpio.smp_request;
2744 		response = ccb->smpio.smp_response;
2745 	}
2746 #endif /* __FreeBSD_version < 1000028 */
2747 
2748 	cm = mpr_alloc_command(sc);
2749 	if (cm == NULL) {
2750 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
2751 		    __func__);
2752 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2753 		xpt_done(ccb);
2754 		return;
2755 	}
2756 
2757 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2758 	bzero(req, sizeof(*req));
2759 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2760 
2761 	/* Allow the chip to use any route to this SAS address. */
2762 	req->PhysicalPort = 0xff;
2763 
2764 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2765 	req->SGLFlags =
2766 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2767 
2768 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2769 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2770 
2771 	mpr_init_sge(cm, req, &req->SGL);
2772 
2773 	/*
2774 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2775 	 * do one map command, and one busdma call in there.
2776 	 */
2777 	cm->cm_uio.uio_iov = cm->cm_iovec;
2778 	cm->cm_uio.uio_iovcnt = 2;
2779 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2780 
2781 	/*
2782 	 * The read/write flag isn't used by busdma, but set it just in
2783 	 * case.  This isn't exactly accurate, either, since we're going in
2784 	 * both directions.
2785 	 */
2786 	cm->cm_uio.uio_rw = UIO_WRITE;
2787 
2788 	cm->cm_iovec[0].iov_base = request;
2789 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2790 	cm->cm_iovec[1].iov_base = response;
2791 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2792 
2793 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2794 			       cm->cm_iovec[1].iov_len;
2795 
2796 	/*
2797 	 * Trigger a warning message in mpr_data_cb() for the user if we
2798 	 * wind up exceeding two S/G segments.  The chip expects one
2799 	 * segment for the request and another for the response.
2800 	 */
2801 	cm->cm_max_segs = 2;
2802 
2803 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2804 	cm->cm_complete = mprsas_smpio_complete;
2805 	cm->cm_complete_data = ccb;
2806 
2807 	/*
2808 	 * Tell the mapping code that we're using a uio, and that this is
2809 	 * an SMP passthrough request.  There is a little special-case
2810 	 * logic there (in mpr_data_cb()) to handle the bidirectional
2811 	 * transfer.
2812 	 */
2813 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2814 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2815 
2816 	/* The chip data format is little endian. */
2817 	req->SASAddress.High = htole32(sasaddr >> 32);
2818 	req->SASAddress.Low = htole32(sasaddr);
2819 
2820 	/*
2821 	 * XXX Note that we don't have a timeout/abort mechanism here.
2822 	 * From the manual, it looks like task management requests only
2823 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2824 	 * have a mechanism to retry requests in the event of a chip reset
2825 	 * at least.  Hopefully the chip will insure that any errors short
2826 	 * of that are relayed back to the driver.
2827 	 */
2828 	error = mpr_map_command(sc, cm);
2829 	if ((error != 0) && (error != EINPROGRESS)) {
2830 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2831 		    "mpr_map_command()\n", __func__, error);
2832 		goto bailout_error;
2833 	}
2834 
2835 	return;
2836 
2837 bailout_error:
2838 	mpr_free_command(sc, cm);
2839 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2840 	xpt_done(ccb);
2841 	return;
2842 }
2843 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address the SMP request
 * should be sent to and hand the CCB off to mprsas_send_smpcmd().  The
 * address is either that of the target itself (when it contains an
 * embedded SMP target) or of its parent expander.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* handle 0 means the firmware no longer knows this device */
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe path: look up the parent target by its handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe path: parent info is cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2971 #endif //__FreeBSD_version >= 900026
2972 
2973 static void
2974 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2975 {
2976 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2977 	struct mpr_softc *sc;
2978 	struct mpr_command *tm;
2979 	struct mprsas_target *targ;
2980 
2981 	MPR_FUNCTRACE(sassc->sc);
2982 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2983 
2984 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
2985 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
2986 	sc = sassc->sc;
2987 	tm = mpr_alloc_command(sc);
2988 	if (tm == NULL) {
2989 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
2990 		    "mprsas_action_resetdev\n");
2991 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2992 		xpt_done(ccb);
2993 		return;
2994 	}
2995 
2996 	targ = &sassc->targets[ccb->ccb_h.target_id];
2997 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2998 	req->DevHandle = htole16(targ->handle);
2999 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3000 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3001 
3002 	/* SAS Hard Link Reset / SATA Link Reset */
3003 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3004 
3005 	tm->cm_data = NULL;
3006 	tm->cm_desc.HighPriority.RequestFlags =
3007 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3008 	tm->cm_complete = mprsas_resetdev_complete;
3009 	tm->cm_complete_data = ccb;
3010 
3011 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3012 	    __func__, targ->tid);
3013 	tm->cm_targ = targ;
3014 	targ->flags |= MPRSAS_TARGET_INRESET;
3015 
3016 	mpr_map_command(sc, tm);
3017 }
3018 
3019 static void
3020 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3021 {
3022 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3023 	union ccb *ccb;
3024 
3025 	MPR_FUNCTRACE(sc);
3026 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3027 
3028 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3029 	ccb = tm->cm_complete_data;
3030 
3031 	/*
3032 	 * Currently there should be no way we can hit this case.  It only
3033 	 * happens when we have a failure to allocate chain frames, and
3034 	 * task management commands don't have S/G lists.
3035 	 */
3036 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3037 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3038 
3039 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3040 
3041 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3042 		    "handle %#04x! This should not happen!\n", __func__,
3043 		    tm->cm_flags, req->DevHandle);
3044 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3045 		goto bailout;
3046 	}
3047 
3048 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3049 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3050 
3051 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3052 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3053 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3054 		    CAM_LUN_WILDCARD);
3055 	}
3056 	else
3057 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3058 
3059 bailout:
3060 
3061 	mprsas_free_tm(sc, tm);
3062 	xpt_done(ccb);
3063 }
3064 
3065 static void
3066 mprsas_poll(struct cam_sim *sim)
3067 {
3068 	struct mprsas_softc *sassc;
3069 
3070 	sassc = cam_sim_softc(sim);
3071 
3072 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3073 		/* frequent debug messages during a panic just slow
3074 		 * everything down too much.
3075 		 */
3076 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3077 		    __func__);
3078 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3079 	}
3080 
3081 	mpr_intr_locked(sassc->sc);
3082 }
3083 
/*
 * CAM asynchronous event callback.  Handles AC_ADVINFO_CHANGED by
 * re-reading the cached long read-capacity data to refresh a LUN's EEDP
 * state, and AC_FOUND_DEVICE by probing new devices for EEDP support
 * (on FreeBSD versions where the advinfo path is unavailable).
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if one exists. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create and link a new LUN record. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/* Fetch CAM's cached long read-capacity data for this LUN. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* SRC16_PROT_EN set means the LUN is formatted with
		 * protection information enabled. */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3213 
3214 #if (__FreeBSD_version < 901503) || \
3215     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3216 static void
3217 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3218     struct ccb_getdev *cgd)
3219 {
3220 	struct mprsas_softc *sassc = sc->sassc;
3221 	struct ccb_scsiio *csio;
3222 	struct scsi_read_capacity_16 *scsi_cmd;
3223 	struct scsi_read_capacity_eedp *rcap_buf;
3224 	path_id_t pathid;
3225 	target_id_t targetid;
3226 	lun_id_t lunid;
3227 	union ccb *ccb;
3228 	struct cam_path *local_path;
3229 	struct mprsas_target *target;
3230 	struct mprsas_lun *lun;
3231 	uint8_t	found_lun;
3232 	char path_str[64];
3233 
3234 	pathid = cam_sim_path(sassc->sim);
3235 	targetid = xpt_path_target_id(path);
3236 	lunid = xpt_path_lun_id(path);
3237 
3238 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3239 	    "mprsas_check_eedp\n", targetid));
3240 	target = &sassc->targets[targetid];
3241 	if (target->handle == 0x0)
3242 		return;
3243 
3244 	/*
3245 	 * Determine if the device is EEDP capable.
3246 	 *
3247 	 * If this flag is set in the inquiry data, the device supports
3248 	 * protection information, and must support the 16 byte read capacity
3249 	 * command, otherwise continue without sending read cap 16.
3250 	 */
3251 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3252 		return;
3253 
3254 	/*
3255 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3256 	 * the LUN is formatted for EEDP support.
3257 	 */
3258 	ccb = xpt_alloc_ccb_nowait();
3259 	if (ccb == NULL) {
3260 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3261 		    "support.\n");
3262 		return;
3263 	}
3264 
3265 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3266 	    CAM_REQ_CMP) {
3267 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3268 		    "support.\n");
3269 		xpt_free_ccb(ccb);
3270 		return;
3271 	}
3272 
3273 	/*
3274 	 * If LUN is already in list, don't create a new one.
3275 	 */
3276 	found_lun = FALSE;
3277 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3278 		if (lun->lun_id == lunid) {
3279 			found_lun = TRUE;
3280 			break;
3281 		}
3282 	}
3283 	if (!found_lun) {
3284 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3285 		    M_NOWAIT | M_ZERO);
3286 		if (lun == NULL) {
3287 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3288 			    "EEDP support.\n");
3289 			xpt_free_path(local_path);
3290 			xpt_free_ccb(ccb);
3291 			return;
3292 		}
3293 		lun->lun_id = lunid;
3294 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3295 	}
3296 
3297 	xpt_path_string(local_path, path_str, sizeof(path_str));
3298 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3299 	    path_str, target->handle);
3300 
3301 	/*
3302 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3303 	 * mprsas_read_cap_done function will load the read cap info into the
3304 	 * LUN struct.
3305 	 */
3306 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3307 	    M_NOWAIT | M_ZERO);
3308 	if (rcap_buf == NULL) {
3309 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3310 		    "buffer for EEDP support.\n");
3311 		xpt_free_path(ccb->ccb_h.path);
3312 		xpt_free_ccb(ccb);
3313 		return;
3314 	}
3315 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3316 	csio = &ccb->csio;
3317 	csio->ccb_h.func_code = XPT_SCSI_IO;
3318 	csio->ccb_h.flags = CAM_DIR_IN;
3319 	csio->ccb_h.retry_count = 4;
3320 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3321 	csio->ccb_h.timeout = 60000;
3322 	csio->data_ptr = (uint8_t *)rcap_buf;
3323 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3324 	csio->sense_len = MPR_SENSE_LEN;
3325 	csio->cdb_len = sizeof(*scsi_cmd);
3326 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3327 
3328 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3329 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3330 	scsi_cmd->opcode = 0x9E;
3331 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3332 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3333 
3334 	ccb->ccb_h.ppriv_ptr1 = sassc;
3335 	xpt_action(ccb);
3336 }
3337 
3338 static void
3339 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3340 {
3341 	struct mprsas_softc *sassc;
3342 	struct mprsas_target *target;
3343 	struct mprsas_lun *lun;
3344 	struct scsi_read_capacity_eedp *rcap_buf;
3345 
3346 	if (done_ccb == NULL)
3347 		return;
3348 
3349 	/* Driver need to release devq, it Scsi command is
3350 	 * generated by driver internally.
3351 	 * Currently there is a single place where driver
3352 	 * calls scsi command internally. In future if driver
3353 	 * calls more scsi command internally, it needs to release
3354 	 * devq internally, since those command will not go back to
3355 	 * cam_periph.
3356 	 */
3357 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3358         	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3359 		xpt_release_devq(done_ccb->ccb_h.path,
3360 			       	/*count*/ 1, /*run_queue*/TRUE);
3361 	}
3362 
3363 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3364 
3365 	/*
3366 	 * Get the LUN ID for the path and look it up in the LUN list for the
3367 	 * target.
3368 	 */
3369 	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3370 	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3371 	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3372 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3373 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3374 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3375 			continue;
3376 
3377 		/*
3378 		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
3379 		 * info. If the READ CAP 16 command had some SCSI error (common
3380 		 * if command is not supported), mark the lun as not supporting
3381 		 * EEDP and set the block size to 0.
3382 		 */
3383 		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3384 		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3385 			lun->eedp_formatted = FALSE;
3386 			lun->eedp_block_size = 0;
3387 			break;
3388 		}
3389 
3390 		if (rcap_buf->protect & 0x01) {
3391 			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3392 			    "%d is formatted for EEDP support.\n",
3393 			    done_ccb->ccb_h.target_lun,
3394 			    done_ccb->ccb_h.target_id);
3395 			lun->eedp_formatted = TRUE;
3396 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3397 		}
3398 		break;
3399 	}
3400 
3401 	// Finished with this CCB and path.
3402 	free(rcap_buf, M_MPR);
3403 	xpt_free_path(done_ccb->ccb_h.path);
3404 	xpt_free_ccb(done_ccb);
3405 }
3406 #endif /* (__FreeBSD_version < 901503) || \
3407           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3408 
3409 void
3410 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3411     struct mprsas_target *target, lun_id_t lun_id)
3412 {
3413 	union ccb *ccb;
3414 	path_id_t path_id;
3415 
3416 	/*
3417 	 * Set the INRESET flag for this target so that no I/O will be sent to
3418 	 * the target until the reset has completed.  If an I/O request does
3419 	 * happen, the devq will be frozen.  The CCB holds the path which is
3420 	 * used to release the devq.  The devq is released and the CCB is freed
3421 	 * when the TM completes.
3422 	 */
3423 	ccb = xpt_alloc_ccb_nowait();
3424 	if (ccb) {
3425 		path_id = cam_sim_path(sc->sassc->sim);
3426 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3427 		    target->tid, lun_id) != CAM_REQ_CMP) {
3428 			xpt_free_ccb(ccb);
3429 		} else {
3430 			tm->cm_ccb = ccb;
3431 			tm->cm_targ = target;
3432 			target->flags |= MPRSAS_TARGET_INRESET;
3433 		}
3434 	}
3435 }
3436 
3437 int
3438 mprsas_startup(struct mpr_softc *sc)
3439 {
3440 	/*
3441 	 * Send the port enable message and set the wait_for_port_enable flag.
3442 	 * This flag helps to keep the simq frozen until all discovery events
3443 	 * are processed.
3444 	 */
3445 	sc->wait_for_port_enable = 1;
3446 	mprsas_send_portenable(sc);
3447 	return (0);
3448 }
3449 
3450 static int
3451 mprsas_send_portenable(struct mpr_softc *sc)
3452 {
3453 	MPI2_PORT_ENABLE_REQUEST *request;
3454 	struct mpr_command *cm;
3455 
3456 	MPR_FUNCTRACE(sc);
3457 
3458 	if ((cm = mpr_alloc_command(sc)) == NULL)
3459 		return (EBUSY);
3460 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3461 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3462 	request->MsgFlags = 0;
3463 	request->VP_ID = 0;
3464 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3465 	cm->cm_complete = mprsas_portenable_complete;
3466 	cm->cm_data = NULL;
3467 	cm->cm_sge = NULL;
3468 
3469 	mpr_map_command(sc, cm);
3470 	mpr_dprint(sc, MPR_XINFO,
3471 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3472 	    cm, cm->cm_req, cm->cm_complete);
3473 	return (0);
3474 }
3475 
3476 static void
3477 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3478 {
3479 	MPI2_PORT_ENABLE_REPLY *reply;
3480 	struct mprsas_softc *sassc;
3481 
3482 	MPR_FUNCTRACE(sc);
3483 	sassc = sc->sassc;
3484 
3485 	/*
3486 	 * Currently there should be no way we can hit this case.  It only
3487 	 * happens when we have a failure to allocate chain frames, and
3488 	 * port enable commands don't have S/G lists.
3489 	 */
3490 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3491 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3492 		    "This should not happen!\n", __func__, cm->cm_flags);
3493 	}
3494 
3495 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3496 	if (reply == NULL)
3497 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3498 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3499 	    MPI2_IOCSTATUS_SUCCESS)
3500 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3501 
3502 	mpr_free_command(sc, cm);
3503 	if (sc->mpr_ich.ich_arg != NULL) {
3504 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3505 		config_intrhook_disestablish(&sc->mpr_ich);
3506 		sc->mpr_ich.ich_arg = NULL;
3507 	}
3508 
3509 	/*
3510 	 * Done waiting for port enable to complete.  Decrement the refcount.
3511 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3512 	 * take place.
3513 	 */
3514 	sc->wait_for_port_enable = 0;
3515 	sc->port_enable_complete = 1;
3516 	wakeup(&sc->port_enable_complete);
3517 	mprsas_startup_decrement(sassc);
3518 }
3519 
3520 int
3521 mprsas_check_id(struct mprsas_softc *sassc, int id)
3522 {
3523 	struct mpr_softc *sc = sassc->sc;
3524 	char *ids;
3525 	char *name;
3526 
3527 	ids = &sc->exclude_ids[0];
3528 	while((name = strsep(&ids, ",")) != NULL) {
3529 		if (name[0] == '\0')
3530 			continue;
3531 		if (strtol(name, NULL, 0) == (long)id)
3532 			return (1);
3533 	}
3534 
3535 	return (0);
3536 }
3537 
3538 void
3539 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3540 {
3541 	struct mprsas_softc *sassc;
3542 	struct mprsas_lun *lun, *lun_tmp;
3543 	struct mprsas_target *targ;
3544 	int i;
3545 
3546 	sassc = sc->sassc;
3547 	/*
3548 	 * The number of targets is based on IOC Facts, so free all of
3549 	 * the allocated LUNs for each target and then the target buffer
3550 	 * itself.
3551 	 */
3552 	for (i=0; i< maxtargets; i++) {
3553 		targ = &sassc->targets[i];
3554 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3555 			free(lun, M_MPR);
3556 		}
3557 	}
3558 	free(sassc->targets, M_MPR);
3559 
3560 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3561 	    M_MPR, M_WAITOK|M_ZERO);
3562 	if (!sassc->targets) {
3563 		panic("%s failed to alloc targets with error %d\n",
3564 		    __func__, ENOMEM);
3565 	}
3566 }
3567