xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 8657387683946d0c03e09fe77029edfe309eeb20)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
93 /*
94  * static array to check SCSI OpCode for EEDP protection bits
95  */
96 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE&VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE&VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE&VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/*
	 * Only track discovery references while the initial startup scan
	 * is still in progress; after MPRSAS_IN_STARTUP is cleared this
	 * function is a no-op.
	 */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* Also hold off boot until discovery completes. */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/*
	 * Drop one discovery reference taken by mprsas_startup_increment().
	 * Only meaningful while the initial startup scan is in progress.
	 */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* Matches the xpt_hold_boot() from startup. */
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a command for a task management request.  TMs must use the
 * high priority command pool so the firmware handles them ahead of
 * normal I/O.  Returns NULL when the pool is exhausted.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
/*
 * Release a task management command and any devq-freeze state attached
 * to it.  Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/* 0xFFFFFFFF is the "no target" sentinel (prints as -1 with %d). */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		/* Release the devq first; the path backing it is freed next. */
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	/* Return the command to the high-priority pool. */
	mpr_free_high_priority_command(sc, tm);
}
274 
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the
 * whole bus when targ is NULL.
 */
void
mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
{
	struct mprsas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* The index into targets[] is the CAM target ID. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target -> full bus scan, otherwise scan just the target. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() consumes the CCB; do not free it here. */
	xpt_rescan(ccb);
}
314 
/*
 * Log a printf-style message for a command, prefixed with as much CAM
 * context (path, CDB, transfer length) as is available, plus the SMID.
 * Does nothing unless the given debug level is enabled in mpr_debug.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];		/* backing storage for the sbuf; no allocation */
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Command has a CCB: use its CAM path for the prefix. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: synthesize a noperiph-style location string. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Unlike bare-drive removal
 * (mprsas_remove_device()), no SAS_OP_REMOVE_DEVICE follow-up is sent
 * for a volume; on success the target slot is simply cleared.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;	/* already set above; kept for clarity */
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	/* Mark the target so the I/O path knows a removal is in flight. */
	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	/* Kick off a rescan so CAM notices the device going away. */
	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* The completion handler issues the follow-up op-remove. */
	cm->cm_complete = mprsas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
545 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  Reuses the same command to issue the
 * SAS_OP_REMOVE_DEVICE IO-unit control request, then fails any commands
 * still queued on the target with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Free the reply now; the command itself is reused below. */
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * NOTE: 'tm' is reused here as the loop cursor over the target's
	 * outstanding commands; the TM itself is already in flight above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		/* For SCSI I/O commands cm_complete_data holds the CCB. */
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mprsas_remove_device().  On success, clears the target slot (keeping
 * devname/sasaddr so the target ID can be re-matched later) and frees
 * any remaining per-LUN state.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free all per-LUN records hanging off this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 		}
718 	}
719 
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
722 
723 	return (0);
724 }
725 
/*
 * Attach the CAM/SAS layer: allocate the SAS softc and target array,
 * create and register the SIM, start the event taskqueue, freeze the
 * SIM queue until discovery completes, and register async handlers for
 * EEDP detection.  Returns 0 on success or an errno; on failure the
 * partially-attached state is torn down via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc(9) cannot return NULL; check is defensive. */
	if (!sassc) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One SIMQ slot per firmware request so we never overcommit. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		/* Old CAM: must drop the SIM lock around a pathless register. */
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);
	return (error);
}
895 
/*
 * Detach the CAM/SAS layer, undoing mpr_attach_sas() in reverse order.
 * Also used by the attach error path, so every teardown step is guarded
 * against partially-initialized state.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Event mask 0 removes the registration. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* The simq is still frozen if startup never finished; release it. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free per-LUN records before releasing the target array itself. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
955 
956 void
957 mprsas_discovery_end(struct mprsas_softc *sassc)
958 {
959 	struct mpr_softc *sc = sassc->sc;
960 
961 	MPR_FUNCTRACE(sc);
962 
963 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
964 		callout_stop(&sassc->discovery_callout);
965 
966 	/*
967 	 * After discovery has completed, check the mapping table for any
968 	 * missing devices and update their missing counts. Only do this once
969 	 * whenever the driver is initialized so that missing counts aren't
970 	 * updated unnecessarily. Note that just because discovery has
971 	 * completed doesn't mean that events have been processed yet. The
972 	 * check_devices function is a callout timer that checks if ALL devices
973 	 * are missing. If so, it will wait a little longer for events to
974 	 * complete and keep resetting itself until some device in the mapping
975 	 * table is not missing, meaning that event processing has started.
976 	 */
977 	if (sc->track_mapping_events) {
978 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
979 		    "completed. Check for missing devices in the mapping "
980 		    "table.\n");
981 		callout_reset(&sc->device_check_callout,
982 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
983 		    sc);
984 	}
985 }
986 
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: answers path inquiries and transfer-setting queries locally,
 * hands SCSI I/O, SMP I/O and device resets to their dedicated handlers
 * (which complete the CCB themselves), and fakes success for bus
 * reset/abort/terminate requests the hardware handles differently.
 * Called by CAM with the SIM lock (sc->mpr_mtx) held.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		/* PIM_NOSCAN: rescans are driven by driver events instead. */
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = (sc->chain_frame_size /
		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		/* Cache the advertised limit for use by the I/O path. */
		sc->maxio = cpi->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link rate code to kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate; report the speed field as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Handler completes the CCB itself; don't fall to xpt_done. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Handler completes the CCB itself; don't fall to xpt_done. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1139 
1140 static void
1141 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1142     target_id_t target_id, lun_id_t lun_id)
1143 {
1144 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1145 	struct cam_path *path;
1146 
1147 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1148 	    ac_code, target_id, (uintmax_t)lun_id);
1149 
1150 	if (xpt_create_path(&path, NULL,
1151 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1152 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1153 		    "notification\n");
1154 		return;
1155 	}
1156 
1157 	xpt_async(ac_code, path, NULL);
1158 	xpt_free_path(path);
1159 }
1160 
/*
 * Force-complete every active command after a diag reset.  The hardware
 * will never answer these requests, so each command is completed (or its
 * waiter woken) with a NULL reply.  Called with the softc lock held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on COMPLETE; set it so they exit. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Sleeping waiters (mpr_wait_command) get a wakeup instead. */
		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1208 
1209 void
1210 mprsas_handle_reinit(struct mpr_softc *sc)
1211 {
1212 	int i;
1213 
1214 	/* Go back into startup mode and freeze the simq, so that CAM
1215 	 * doesn't send any commands until after we've rediscovered all
1216 	 * targets and found the proper device handles for them.
1217 	 *
1218 	 * After the reset, portenable will trigger discovery, and after all
1219 	 * discovery-related activities have finished, the simq will be
1220 	 * released.
1221 	 */
1222 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1223 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1224 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1225 	mprsas_startup_increment(sc->sassc);
1226 
1227 	/* notify CAM of a bus reset */
1228 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1229 	    CAM_LUN_WILDCARD);
1230 
1231 	/* complete and cleanup after all outstanding commands */
1232 	mprsas_complete_all_commands(sc);
1233 
1234 	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1235 	    __func__, sc->sassc->startup_refcount);
1236 
1237 	/* zero all the target handles, since they may change after the
1238 	 * reset, and we have to rediscover all the targets and use the new
1239 	 * handles.
1240 	 */
1241 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1242 		if (sc->sassc->targets[i].outstanding != 0)
1243 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1244 			    i, sc->sassc->targets[i].outstanding);
1245 		sc->sassc->targets[i].handle = 0x0;
1246 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1247 		sc->sassc->targets[i].outstanding = 0;
1248 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1249 	}
1250 }
1251 static void
1252 mprsas_tm_timeout(void *data)
1253 {
1254 	struct mpr_command *tm = data;
1255 	struct mpr_softc *sc = tm->cm_sc;
1256 
1257 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1258 
1259 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1260 	    "out\n", tm);
1261 	mpr_reinit(sc);
1262 }
1263 
1264 static void
1265 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1266 {
1267 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1268 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1269 	unsigned int cm_count = 0;
1270 	struct mpr_command *cm;
1271 	struct mprsas_target *targ;
1272 
1273 	callout_stop(&tm->cm_callout);
1274 
1275 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1276 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1277 	targ = tm->cm_targ;
1278 
1279 	/*
1280 	 * Currently there should be no way we can hit this case.  It only
1281 	 * happens when we have a failure to allocate chain frames, and
1282 	 * task management commands don't have S/G lists.
1283 	 */
1284 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1285 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1286 		    "This should not happen!\n", __func__, tm->cm_flags);
1287 		mprsas_free_tm(sc, tm);
1288 		return;
1289 	}
1290 
1291 	if (reply == NULL) {
1292 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1293 		    "%p\n", tm);
1294 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1295 			/* this completion was due to a reset, just cleanup */
1296 			targ->tm = NULL;
1297 			mprsas_free_tm(sc, tm);
1298 		}
1299 		else {
1300 			/* we should have gotten a reply. */
1301 			mpr_reinit(sc);
1302 		}
1303 		return;
1304 	}
1305 
1306 	mprsas_log_command(tm, MPR_RECOVERY,
1307 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1308 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1309 	    le32toh(reply->TerminationCount));
1310 
1311 	/* See if there are any outstanding commands for this LUN.
1312 	 * This could be made more efficient by using a per-LU data
1313 	 * structure of some sort.
1314 	 */
1315 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1316 		if (cm->cm_lun == tm->cm_lun)
1317 			cm_count++;
1318 	}
1319 
1320 	if (cm_count == 0) {
1321 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1322 		    "logical unit %u finished recovery after reset\n",
1323 		    tm->cm_lun, tm);
1324 
1325 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1326 		    tm->cm_lun);
1327 
1328 		/* we've finished recovery for this logical unit.  check and
1329 		 * see if some other logical unit has a timedout command
1330 		 * that needs to be processed.
1331 		 */
1332 		cm = TAILQ_FIRST(&targ->timedout_commands);
1333 		if (cm) {
1334 			mprsas_send_abort(sc, tm, cm);
1335 		}
1336 		else {
1337 			targ->tm = NULL;
1338 			mprsas_free_tm(sc, tm);
1339 		}
1340 	}
1341 	else {
1342 		/* if we still have commands for this LUN, the reset
1343 		 * effectively failed, regardless of the status reported.
1344 		 * Escalate to a target reset.
1345 		 */
1346 		mprsas_log_command(tm, MPR_RECOVERY,
1347 		    "logical unit reset complete for tm %p, but still have %u "
1348 		    "command(s)\n", tm, cm_count);
1349 		mprsas_send_reset(sc, tm,
1350 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1351 	}
1352 }
1353 
/*
 * Completion handler for a target-reset task-management request.  If the
 * target has no more outstanding commands, recovery is done: announce the
 * reset to CAM and free the TM.  If commands remain, the reset failed and
 * there is nothing left to escalate to short of a full diag reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
		    "%p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1423 
/* Seconds before a pending reset TM is declared hung and we diag-reset. */
#define MPR_RESET_TIMEOUT 30

/*
 * Build and issue a SCSI task-management reset of the given type
 * (logical-unit reset or target reset) using the pre-allocated TM
 * command.  Arms a timeout callout that escalates to a diag reset if
 * the firmware never answers.  Returns 0 on successful submission,
 * -1 on a bad handle or unknown reset type, or the mpr_map_command()
 * error.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		/* Target reset affects every LUN, so use the wildcard. */
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	tm->cm_data = NULL;
	/* TMs go through the firmware's high-priority queue. */
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1494 
1495 
/*
 * Completion handler for an ABORT_TASK task-management request.  Checks
 * the target's timed-out command queue: if it is empty, recovery is done;
 * if the head has moved past the aborted command, the abort worked and
 * the next timed-out command is aborted; if the aborted command is still
 * at the head, the abort failed and recovery escalates to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1577 
1578 #define MPR_ABORT_TIMEOUT 5
1579 
1580 static int
1581 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1582     struct mpr_command *cm)
1583 {
1584 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1585 	struct mprsas_target *targ;
1586 	int err;
1587 
1588 	targ = cm->cm_targ;
1589 	if (targ->handle == 0) {
1590 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1591 		    __func__, cm->cm_ccb->ccb_h.target_id);
1592 		return -1;
1593 	}
1594 
1595 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1596 	    "Aborting command %p\n", cm);
1597 
1598 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1599 	req->DevHandle = htole16(targ->handle);
1600 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1601 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1602 
1603 	/* XXX Need to handle invalid LUNs */
1604 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1605 
1606 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1607 
1608 	tm->cm_data = NULL;
1609 	tm->cm_desc.HighPriority.RequestFlags =
1610 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1611 	tm->cm_complete = mprsas_abort_complete;
1612 	tm->cm_complete_data = (void *)tm;
1613 	tm->cm_targ = cm->cm_targ;
1614 	tm->cm_lun = cm->cm_lun;
1615 
1616 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1617 	    mprsas_tm_timeout, tm);
1618 
1619 	targ->aborts++;
1620 
1621 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1622 	    __func__, targ->tid);
1623 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1624 
1625 	err = mpr_map_command(sc, tm);
1626 	if (err)
1627 		mpr_dprint(sc, MPR_RECOVERY,
1628 		    "error %d sending abort for cm %p SMID %u\n",
1629 		    err, cm, req->TaskMID);
1630 	return err;
1631 }
1632 
/*
 * Callout handler for a SCSI I/O command that has exceeded its CAM
 * timeout.  Marks the command timed out, queues it on the target's
 * timed-out list, and starts (or joins) task-management recovery for
 * the target by sending an ABORT_TASK.  Runs with the softc lock held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* Completion raced the timeout; nothing to recover. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout %d cm %p target "
	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
		    "allocate a tm\n", cm);
	}
}
1712 
1713 /**
1714  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1715  *			     to SCSI Unmap.
1716  * Return 0 - for success,
1717  *	  1 - to immediately return back the command with success status to CAM
1718  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1719  *			   to FW without any translation.
1720  */
1721 static int
1722 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1723     union ccb *ccb, struct mprsas_target *targ)
1724 {
1725 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1726 	struct ccb_scsiio *csio;
1727 	struct unmap_parm_list *plist;
1728 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1729 	struct nvme_command *c;
1730 	int i, res;
1731 	uint16_t ndesc, list_len, data_length;
1732 	struct mpr_prp_page *prp_page_info;
1733 	uint64_t nvme_dsm_ranges_dma_handle;
1734 
1735 	csio = &ccb->csio;
1736 #if __FreeBSD_version >= 1100103
1737 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1738 #else
1739 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1740 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1741 		    ccb->csio.cdb_io.cdb_ptr[8]);
1742 	} else {
1743 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1744 		    ccb->csio.cdb_io.cdb_bytes[8]);
1745 	}
1746 #endif
1747 	if (!list_len) {
1748 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1749 		return -EINVAL;
1750 	}
1751 
1752 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1753 	if (!plist) {
1754 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1755 		    "save UNMAP data\n");
1756 		return -ENOMEM;
1757 	}
1758 
1759 	/* Copy SCSI unmap data to a local buffer */
1760 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1761 
1762 	/* return back the unmap command to CAM with success status,
1763 	 * if number of descripts is zero.
1764 	 */
1765 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1766 	if (!ndesc) {
1767 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1768 		    "UNMAP cmd is Zero\n");
1769 		res = 1;
1770 		goto out;
1771 	}
1772 
1773 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1774 	if (data_length > targ->MDTS) {
1775 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1776 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1777 		res = -EINVAL;
1778 		goto out;
1779 	}
1780 
1781 	prp_page_info = mpr_alloc_prp_page(sc);
1782 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1783 	    "UNMAP command.\n", __func__));
1784 
1785 	/*
1786 	 * Insert the allocated PRP page into the command's PRP page list. This
1787 	 * will be freed when the command is freed.
1788 	 */
1789 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1790 
1791 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1792 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1793 
1794 	bzero(nvme_dsm_ranges, data_length);
1795 
1796 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1797 	 * for each descriptors contained in SCSI UNMAP data.
1798 	 */
1799 	for (i = 0; i < ndesc; i++) {
1800 		nvme_dsm_ranges[i].length =
1801 		    htole32(be32toh(plist->desc[i].nlb));
1802 		nvme_dsm_ranges[i].starting_lba =
1803 		    htole64(be64toh(plist->desc[i].slba));
1804 		nvme_dsm_ranges[i].attributes = 0;
1805 	}
1806 
1807 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1808 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1809 	bzero(req, sizeof(*req));
1810 	req->DevHandle = htole16(targ->handle);
1811 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1812 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1813 	req->ErrorResponseBaseAddress.High =
1814 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1815 	req->ErrorResponseBaseAddress.Low =
1816 	    htole32(cm->cm_sense_busaddr);
1817 	req->ErrorResponseAllocationLength =
1818 	    htole16(sizeof(struct nvme_completion));
1819 	req->EncapsulatedCommandLength =
1820 	    htole16(sizeof(struct nvme_command));
1821 	req->DataLength = htole32(data_length);
1822 
1823 	/* Build NVMe DSM command */
1824 	c = (struct nvme_command *) req->NVMe_Command;
1825 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1826 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1827 	c->cdw10 = htole32(ndesc - 1);
1828 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1829 
1830 	cm->cm_length = data_length;
1831 	cm->cm_data = NULL;
1832 
1833 	cm->cm_complete = mprsas_scsiio_complete;
1834 	cm->cm_complete_data = ccb;
1835 	cm->cm_targ = targ;
1836 	cm->cm_lun = csio->ccb_h.target_lun;
1837 	cm->cm_ccb = ccb;
1838 
1839 	cm->cm_desc.Default.RequestFlags =
1840 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1841 
1842 #if __FreeBSD_version >= 1000029
1843 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1844 	    mprsas_scsiio_timeout, cm, 0);
1845 #else //__FreeBSD_version < 1000029
1846 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1847 	    mprsas_scsiio_timeout, cm);
1848 #endif //__FreeBSD_version >= 1000029
1849 
1850 	targ->issued++;
1851 	targ->outstanding++;
1852 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1853 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1854 
1855 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1856 	    __func__, cm, ccb, targ->outstanding);
1857 
1858 	mpr_build_nvme_prp(sc, cm, req,
1859 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1860 	mpr_map_command(sc, cm);
1861 
1862 out:
1863 	free(plist, M_MPR);
1864 	return 0;
1865 }
1866 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, build the MPI SCSI IO request (or divert an UNMAP for
 * an NVMe drive to a native NVMe DSM command), set up optional EEDP
 * protection info, arm the per-command timeout, and queue the command
 * to the hardware.  Called with the softc mutex held.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is not (or no longer) mapped. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are owned by the IR firmware, not CAM. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Reject new I/O once the controller has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * If no command is available, or a diag reset is pending, freeze
	 * the SIM queue and ask CAM to requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVMe devices, issue UNMAP directly to the drive by
	 * constructing the equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
	}

	/* Build the MPI SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra dwords beyond the base 16-byte CDB. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in any per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB into the request, honoring CAM_CDB_POINTER. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/*
	 * NOTE(review): IoFlags was already set to cdb_len above; this
	 * re-assignment is redundant but harmless.
	 */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN was found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs, byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT to 001b in CDB byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* The data buffer is described by the CCB; DMA mapping happens later. */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout before handing the command off. */
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2179 
2180 static void
2181 mpr_response_code(struct mpr_softc *sc, u8 response_code)
2182 {
2183         char *desc;
2184 
2185         switch (response_code) {
2186         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2187                 desc = "task management request completed";
2188                 break;
2189         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2190                 desc = "invalid frame";
2191                 break;
2192         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2193                 desc = "task management request not supported";
2194                 break;
2195         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2196                 desc = "task management request failed";
2197                 break;
2198         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2199                 desc = "task management request succeeded";
2200                 break;
2201         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2202                 desc = "invalid lun";
2203                 break;
2204         case 0xA:
2205                 desc = "overlapped tag attempted";
2206                 break;
2207         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2208                 desc = "task queued, however not sent to target";
2209                 break;
2210         default:
2211                 desc = "unknown";
2212                 break;
2213         }
2214 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
2215 	    desc);
2216 }
2217 
2218 /**
2219  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2220  */
/*
 * Decode a failed SCSI_IO reply into human-readable strings (IOC status,
 * SCSI status, and SCSI state flags) and log them at MPR_XINFO level,
 * including autosense data and the SAS response code when present.
 * Purely diagnostic; does not change command or CCB state.
 */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* Scratch string lives in the softc; built up via strcat below. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * NOTE(review): 0x31170000 is deliberately not logged; presumably a
	 * known noisy/benign loginfo value -- confirm against the firmware
	 * loginfo documentation.
	 */
	if (log_info == 0x31170000)
		return;

	/* Decode the IOC status field. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Decode the SCSI status byte. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build the SCSI state description in sc->tmp_string by
	 * concatenating one fragment per state bit.  When no bits are set,
	 * the pointer is redirected at a literal " " -- that is safe only
	 * because none of the strcat calls below will then execute.
	 * NOTE(review): assumes sc->tmp_string is large enough to hold all
	 * fragments at once (~80 bytes worst case) -- verify its size.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the sense buffer if autosense data came back with the reply. */
	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* The response code is the first byte of the response info dword. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}
2379 
2380 /** mprsas_nvme_trans_status_code
2381  *
2382  * Convert Native NVMe command error status to
2383  * equivalent SCSI error status.
2384  *
2385  * Returns appropriate scsi_status
2386  */
2387 static u8
2388 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2389     struct mpr_command *cm)
2390 {
2391 	u8 status = MPI2_SCSI_STATUS_GOOD;
2392 	int skey, asc, ascq;
2393 	union ccb *ccb = cm->cm_complete_data;
2394 	int returned_sense_len;
2395 
2396 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2397 	skey = SSD_KEY_ILLEGAL_REQUEST;
2398 	asc = SCSI_ASC_NO_SENSE;
2399 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2400 
2401 	switch (nvme_status.sct) {
2402 	case NVME_SCT_GENERIC:
2403 		switch (nvme_status.sc) {
2404 		case NVME_SC_SUCCESS:
2405 			status = MPI2_SCSI_STATUS_GOOD;
2406 			skey = SSD_KEY_NO_SENSE;
2407 			asc = SCSI_ASC_NO_SENSE;
2408 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2409 			break;
2410 		case NVME_SC_INVALID_OPCODE:
2411 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2412 			skey = SSD_KEY_ILLEGAL_REQUEST;
2413 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2414 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2415 			break;
2416 		case NVME_SC_INVALID_FIELD:
2417 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2418 			skey = SSD_KEY_ILLEGAL_REQUEST;
2419 			asc = SCSI_ASC_INVALID_CDB;
2420 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2421 			break;
2422 		case NVME_SC_DATA_TRANSFER_ERROR:
2423 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2424 			skey = SSD_KEY_MEDIUM_ERROR;
2425 			asc = SCSI_ASC_NO_SENSE;
2426 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2427 			break;
2428 		case NVME_SC_ABORTED_POWER_LOSS:
2429 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2430 			skey = SSD_KEY_ABORTED_COMMAND;
2431 			asc = SCSI_ASC_WARNING;
2432 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2433 			break;
2434 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2435 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2436 			skey = SSD_KEY_HARDWARE_ERROR;
2437 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2438 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2439 			break;
2440 		case NVME_SC_ABORTED_BY_REQUEST:
2441 		case NVME_SC_ABORTED_SQ_DELETION:
2442 		case NVME_SC_ABORTED_FAILED_FUSED:
2443 		case NVME_SC_ABORTED_MISSING_FUSED:
2444 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2445 			skey = SSD_KEY_ABORTED_COMMAND;
2446 			asc = SCSI_ASC_NO_SENSE;
2447 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2448 			break;
2449 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2450 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2451 			skey = SSD_KEY_ILLEGAL_REQUEST;
2452 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2453 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2454 			break;
2455 		case NVME_SC_LBA_OUT_OF_RANGE:
2456 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2457 			skey = SSD_KEY_ILLEGAL_REQUEST;
2458 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2459 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2460 			break;
2461 		case NVME_SC_CAPACITY_EXCEEDED:
2462 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2463 			skey = SSD_KEY_MEDIUM_ERROR;
2464 			asc = SCSI_ASC_NO_SENSE;
2465 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2466 			break;
2467 		case NVME_SC_NAMESPACE_NOT_READY:
2468 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2469 			skey = SSD_KEY_NOT_READY;
2470 			asc = SCSI_ASC_LUN_NOT_READY;
2471 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2472 			break;
2473 		}
2474 		break;
2475 	case NVME_SCT_COMMAND_SPECIFIC:
2476 		switch (nvme_status.sc) {
2477 		case NVME_SC_INVALID_FORMAT:
2478 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2479 			skey = SSD_KEY_ILLEGAL_REQUEST;
2480 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2481 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2482 			break;
2483 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2484 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2485 			skey = SSD_KEY_ILLEGAL_REQUEST;
2486 			asc = SCSI_ASC_INVALID_CDB;
2487 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2488 			break;
2489 		}
2490 		break;
2491 	case NVME_SCT_MEDIA_ERROR:
2492 		switch (nvme_status.sc) {
2493 		case NVME_SC_WRITE_FAULTS:
2494 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2495 			skey = SSD_KEY_MEDIUM_ERROR;
2496 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2497 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2498 			break;
2499 		case NVME_SC_UNRECOVERED_READ_ERROR:
2500 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2501 			skey = SSD_KEY_MEDIUM_ERROR;
2502 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2503 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2504 			break;
2505 		case NVME_SC_GUARD_CHECK_ERROR:
2506 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2507 			skey = SSD_KEY_MEDIUM_ERROR;
2508 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2509 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2510 			break;
2511 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2512 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2513 			skey = SSD_KEY_MEDIUM_ERROR;
2514 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2515 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2516 			break;
2517 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2518 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2519 			skey = SSD_KEY_MEDIUM_ERROR;
2520 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2521 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2522 			break;
2523 		case NVME_SC_COMPARE_FAILURE:
2524 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2525 			skey = SSD_KEY_MISCOMPARE;
2526 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2527 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2528 			break;
2529 		case NVME_SC_ACCESS_DENIED:
2530 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2531 			skey = SSD_KEY_ILLEGAL_REQUEST;
2532 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2533 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2534 			break;
2535 		}
2536 		break;
2537 	}
2538 
2539 	returned_sense_len = sizeof(struct scsi_sense_data);
2540 	if (returned_sense_len < ccb->csio.sense_len)
2541 		ccb->csio.sense_resid = ccb->csio.sense_len -
2542 		    returned_sense_len;
2543 	else
2544 		ccb->csio.sense_resid = 0;
2545 
2546 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2547 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2548 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2549 
2550 	return status;
2551 }
2552 
2553 /** mprsas_complete_nvme_unmap
2554  *
2555  * Complete native NVMe command issued using NVMe Encapsulated
2556  * Request Message.
2557  */
2558 static u8
2559 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2560 {
2561 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2562 	struct nvme_completion *nvme_completion = NULL;
2563 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2564 
2565 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2566 	if (le16toh(mpi_reply->ErrorResponseCount)){
2567 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2568 		scsi_status = mprsas_nvme_trans_status_code(
2569 		    nvme_completion->status, cm);
2570 	}
2571 	return scsi_status;
2572 }
2573 
2574 static void
2575 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2576 {
2577 	MPI2_SCSI_IO_REPLY *rep;
2578 	union ccb *ccb;
2579 	struct ccb_scsiio *csio;
2580 	struct mprsas_softc *sassc;
2581 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2582 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2583 	int dir = 0, i;
2584 	u16 alloc_len;
2585 	struct mprsas_target *target;
2586 	target_id_t target_id;
2587 
2588 	MPR_FUNCTRACE(sc);
2589 	mpr_dprint(sc, MPR_TRACE,
2590 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2591 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2592 	    cm->cm_targ->outstanding);
2593 
2594 	callout_stop(&cm->cm_callout);
2595 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2596 
2597 	sassc = sc->sassc;
2598 	ccb = cm->cm_complete_data;
2599 	csio = &ccb->csio;
2600 	target_id = csio->ccb_h.target_id;
2601 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2602 	/*
2603 	 * XXX KDM if the chain allocation fails, does it matter if we do
2604 	 * the sync and unload here?  It is simpler to do it in every case,
2605 	 * assuming it doesn't cause problems.
2606 	 */
2607 	if (cm->cm_data != NULL) {
2608 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2609 			dir = BUS_DMASYNC_POSTREAD;
2610 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2611 			dir = BUS_DMASYNC_POSTWRITE;
2612 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2613 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2614 	}
2615 
2616 	cm->cm_targ->completed++;
2617 	cm->cm_targ->outstanding--;
2618 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2619 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2620 
2621 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2622 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2623 		if (cm->cm_reply != NULL)
2624 			mprsas_log_command(cm, MPR_RECOVERY,
2625 			    "completed timedout cm %p ccb %p during recovery "
2626 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2627 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2628 			    rep->SCSIState, le32toh(rep->TransferCount));
2629 		else
2630 			mprsas_log_command(cm, MPR_RECOVERY,
2631 			    "completed timedout cm %p ccb %p during recovery\n",
2632 			    cm, cm->cm_ccb);
2633 	} else if (cm->cm_targ->tm != NULL) {
2634 		if (cm->cm_reply != NULL)
2635 			mprsas_log_command(cm, MPR_RECOVERY,
2636 			    "completed cm %p ccb %p during recovery "
2637 			    "ioc %x scsi %x state %x xfer %u\n",
2638 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2639 			    rep->SCSIStatus, rep->SCSIState,
2640 			    le32toh(rep->TransferCount));
2641 		else
2642 			mprsas_log_command(cm, MPR_RECOVERY,
2643 			    "completed cm %p ccb %p during recovery\n",
2644 			    cm, cm->cm_ccb);
2645 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2646 		mprsas_log_command(cm, MPR_RECOVERY,
2647 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2648 	}
2649 
2650 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2651 		/*
2652 		 * We ran into an error after we tried to map the command,
2653 		 * so we're getting a callback without queueing the command
2654 		 * to the hardware.  So we set the status here, and it will
2655 		 * be retained below.  We'll go through the "fast path",
2656 		 * because there can be no reply when we haven't actually
2657 		 * gone out to the hardware.
2658 		 */
2659 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2660 
2661 		/*
2662 		 * Currently the only error included in the mask is
2663 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2664 		 * chain frames.  We need to freeze the queue until we get
2665 		 * a command that completed without this error, which will
2666 		 * hopefully have some chain frames attached that we can
2667 		 * use.  If we wanted to get smarter about it, we would
2668 		 * only unfreeze the queue in this condition when we're
2669 		 * sure that we're getting some chain frames back.  That's
2670 		 * probably unnecessary.
2671 		 */
2672 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2673 			xpt_freeze_simq(sassc->sim, 1);
2674 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2675 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2676 			    "freezing SIM queue\n");
2677 		}
2678 	}
2679 
2680 	/*
2681 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2682 	 * flag, and use it in a few places in the rest of this function for
2683 	 * convenience. Use the macro if available.
2684 	 */
2685 #if __FreeBSD_version >= 1100103
2686 	scsi_cdb = scsiio_cdb_ptr(csio);
2687 #else
2688 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2689 		scsi_cdb = csio->cdb_io.cdb_ptr;
2690 	else
2691 		scsi_cdb = csio->cdb_io.cdb_bytes;
2692 #endif
2693 
2694 	/*
2695 	 * If this is a Start Stop Unit command and it was issued by the driver
2696 	 * during shutdown, decrement the refcount to account for all of the
2697 	 * commands that were sent.  All SSU commands should be completed before
2698 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2699 	 * is TRUE.
2700 	 */
2701 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2702 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2703 		sc->SSU_refcount--;
2704 	}
2705 
2706 	/* Take the fast path to completion */
2707 	if (cm->cm_reply == NULL) {
2708 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2709 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2710 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2711 			else {
2712 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2713 				csio->scsi_status = SCSI_STATUS_OK;
2714 			}
2715 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2716 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2717 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2718 				mpr_dprint(sc, MPR_XINFO,
2719 				    "Unfreezing SIM queue\n");
2720 			}
2721 		}
2722 
2723 		/*
2724 		 * There are two scenarios where the status won't be
2725 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2726 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2727 		 */
2728 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2729 			/*
2730 			 * Freeze the dev queue so that commands are
2731 			 * executed in the correct order after error
2732 			 * recovery.
2733 			 */
2734 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2735 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2736 		}
2737 		mpr_free_command(sc, cm);
2738 		xpt_done(ccb);
2739 		return;
2740 	}
2741 
2742 	target = &sassc->targets[target_id];
2743 	if (scsi_cdb[0] == UNMAP &&
2744 	    target->is_nvme &&
2745 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2746 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2747 		csio->scsi_status = rep->SCSIStatus;
2748 	}
2749 
2750 	mprsas_log_command(cm, MPR_XINFO,
2751 	    "ioc %x scsi %x state %x xfer %u\n",
2752 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2753 	    le32toh(rep->TransferCount));
2754 
2755 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2756 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2757 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2758 		/* FALLTHROUGH */
2759 	case MPI2_IOCSTATUS_SUCCESS:
2760 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2761 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2762 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2763 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2764 
2765 		/* Completion failed at the transport level. */
2766 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2767 		    MPI2_SCSI_STATE_TERMINATED)) {
2768 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2769 			break;
2770 		}
2771 
2772 		/* In a modern packetized environment, an autosense failure
2773 		 * implies that there's not much else that can be done to
2774 		 * recover the command.
2775 		 */
2776 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2777 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2778 			break;
2779 		}
2780 
2781 		/*
2782 		 * CAM doesn't care about SAS Response Info data, but if this is
2783 		 * the state check if TLR should be done.  If not, clear the
2784 		 * TLR_bits for the target.
2785 		 */
2786 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2787 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2788 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2789 			sc->mapping_table[target_id].TLR_bits =
2790 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2791 		}
2792 
2793 		/*
2794 		 * Intentionally override the normal SCSI status reporting
2795 		 * for these two cases.  These are likely to happen in a
2796 		 * multi-initiator environment, and we want to make sure that
2797 		 * CAM retries these commands rather than fail them.
2798 		 */
2799 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2800 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2801 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2802 			break;
2803 		}
2804 
2805 		/* Handle normal status and sense */
2806 		csio->scsi_status = rep->SCSIStatus;
2807 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2808 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2809 		else
2810 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2811 
2812 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2813 			int sense_len, returned_sense_len;
2814 
2815 			returned_sense_len = min(le32toh(rep->SenseCount),
2816 			    sizeof(struct scsi_sense_data));
2817 			if (returned_sense_len < csio->sense_len)
2818 				csio->sense_resid = csio->sense_len -
2819 				    returned_sense_len;
2820 			else
2821 				csio->sense_resid = 0;
2822 
2823 			sense_len = min(returned_sense_len,
2824 			    csio->sense_len - csio->sense_resid);
2825 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2826 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2827 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2828 		}
2829 
2830 		/*
2831 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2832 		 * and it's page code 0 (Supported Page List), and there is
2833 		 * inquiry data, and this is for a sequential access device, and
2834 		 * the device is an SSP target, and TLR is supported by the
2835 		 * controller, turn the TLR_bits value ON if page 0x90 is
2836 		 * supported.
2837 		 */
2838 		if ((scsi_cdb[0] == INQUIRY) &&
2839 		    (scsi_cdb[1] & SI_EVPD) &&
2840 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2841 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2842 		    (csio->data_ptr != NULL) &&
2843 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2844 		    (sc->control_TLR) &&
2845 		    (sc->mapping_table[target_id].device_info &
2846 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2847 			vpd_list = (struct scsi_vpd_supported_page_list *)
2848 			    csio->data_ptr;
2849 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2850 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2851 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2852 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2853 			alloc_len -= csio->resid;
2854 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2855 				if (vpd_list->list[i] == 0x90) {
2856 					*TLR_bits = TLR_on;
2857 					break;
2858 				}
2859 			}
2860 		}
2861 
2862 		/*
2863 		 * If this is a SATA direct-access end device, mark it so that
2864 		 * a SCSI StartStopUnit command will be sent to it when the
2865 		 * driver is being shutdown.
2866 		 */
2867 		if ((scsi_cdb[0] == INQUIRY) &&
2868 		    (csio->data_ptr != NULL) &&
2869 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2870 		    (sc->mapping_table[target_id].device_info &
2871 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2872 		    ((sc->mapping_table[target_id].device_info &
2873 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2874 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2875 			target = &sassc->targets[target_id];
2876 			target->supports_SSU = TRUE;
2877 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2878 			    target_id);
2879 		}
2880 		break;
2881 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2882 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2883 		/*
2884 		 * If devinfo is 0 this will be a volume.  In that case don't
2885 		 * tell CAM that the volume is not there.  We want volumes to
2886 		 * be enumerated until they are deleted/removed, not just
2887 		 * failed.
2888 		 */
2889 		if (cm->cm_targ->devinfo == 0)
2890 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2891 		else
2892 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2893 		break;
2894 	case MPI2_IOCSTATUS_INVALID_SGL:
2895 		mpr_print_scsiio_cmd(sc, cm);
2896 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2897 		break;
2898 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2899 		/*
2900 		 * This is one of the responses that comes back when an I/O
2901 		 * has been aborted.  If it is because of a timeout that we
2902 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2903 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2904 		 * command is the same (it gets retried, subject to the
2905 		 * retry counter), the only difference is what gets printed
2906 		 * on the console.
2907 		 */
2908 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2909 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2910 		else
2911 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2912 		break;
2913 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2914 		/* resid is ignored for this condition */
2915 		csio->resid = 0;
2916 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2917 		break;
2918 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2919 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2920 		/*
2921 		 * These can sometimes be transient transport-related
2922 		 * errors, and sometimes persistent drive-related errors.
2923 		 * We used to retry these without decrementing the retry
2924 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2925 		 * we hit a persistent drive problem that returns one of
2926 		 * these error codes, we would retry indefinitely.  So,
2927 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2928 		 * count and avoid infinite retries.  We're taking the
2929 		 * potential risk of flagging false failures in the event
2930 		 * of a topology-related error (e.g. a SAS expander problem
2931 		 * causes a command addressed to a drive to fail), but
2932 		 * avoiding getting into an infinite retry loop.
2933 		 */
2934 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2935 		mprsas_log_command(cm, MPR_INFO,
2936 		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2937 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2938 		    rep->SCSIStatus, rep->SCSIState,
2939 		    le32toh(rep->TransferCount));
2940 		break;
2941 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2942 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2943 	case MPI2_IOCSTATUS_INVALID_VPID:
2944 	case MPI2_IOCSTATUS_INVALID_FIELD:
2945 	case MPI2_IOCSTATUS_INVALID_STATE:
2946 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2947 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2948 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2949 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2950 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2951 	default:
2952 		mprsas_log_command(cm, MPR_XINFO,
2953 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2954 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2955 		    rep->SCSIStatus, rep->SCSIState,
2956 		    le32toh(rep->TransferCount));
2957 		csio->resid = cm->cm_length;
2958 
2959 		if (scsi_cdb[0] == UNMAP &&
2960 		    target->is_nvme &&
2961 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2962 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2963 		else
2964 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2965 
2966 		break;
2967 	}
2968 
2969 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2970 
2971 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2972 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2973 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2974 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2975 		    "queue\n");
2976 	}
2977 
2978 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2979 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2980 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2981 	}
2982 
2983 	mpr_free_command(sc, cm);
2984 	xpt_done(ccb);
2985 }
2986 
2987 #if __FreeBSD_version >= 900026
2988 static void
2989 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2990 {
2991 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2992 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2993 	uint64_t sasaddr;
2994 	union ccb *ccb;
2995 
2996 	ccb = cm->cm_complete_data;
2997 
2998 	/*
2999 	 * Currently there should be no way we can hit this case.  It only
3000 	 * happens when we have a failure to allocate chain frames, and SMP
3001 	 * commands require two S/G elements only.  That should be handled
3002 	 * in the standard request size.
3003 	 */
3004 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3005 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
3006 		    "request!\n", __func__, cm->cm_flags);
3007 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3008 		goto bailout;
3009         }
3010 
3011 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
3012 	if (rpl == NULL) {
3013 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
3014 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3015 		goto bailout;
3016 	}
3017 
3018 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3019 	sasaddr = le32toh(req->SASAddress.Low);
3020 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
3021 
3022 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3023 	    MPI2_IOCSTATUS_SUCCESS ||
3024 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
3025 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
3026 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
3027 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3028 		goto bailout;
3029 	}
3030 
3031 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
3032 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
3033 
3034 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
3035 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3036 	else
3037 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
3038 
3039 bailout:
3040 	/*
3041 	 * We sync in both directions because we had DMAs in the S/G list
3042 	 * in both directions.
3043 	 */
3044 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3045 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3046 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3047 	mpr_free_command(sc, cm);
3048 	xpt_done(ccb);
3049 }
3050 
/*
 * Issue an SMP passthrough request to the device at 'sasaddr', using the
 * request and response buffers supplied in the XPT_SMP_IO CCB.  Completion
 * is handled asynchronously by mprsas_smpio_complete(); on any setup
 * failure the CCB is completed here with an appropriate error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Resolve the kernel virtual addresses of the request and response
	 * buffers from the CCB.  The CCB data-addressing flags changed
	 * across FreeBSD versions, hence the two compile-time variants.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	/* Buffers resolved; now build the SMP passthrough request frame. */
	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3281 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * to address (the device itself if it contains an embedded SMP target,
 * otherwise its parent, which is presumably the attached expander) and
 * hand the request off to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe: look the parent up in the target table. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe: the parent info is cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3409 #endif //__FreeBSD_version >= 900026
3410 
3411 static void
3412 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3413 {
3414 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3415 	struct mpr_softc *sc;
3416 	struct mpr_command *tm;
3417 	struct mprsas_target *targ;
3418 
3419 	MPR_FUNCTRACE(sassc->sc);
3420 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3421 
3422 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3423 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3424 	sc = sassc->sc;
3425 	tm = mpr_alloc_command(sc);
3426 	if (tm == NULL) {
3427 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3428 		    "mprsas_action_resetdev\n");
3429 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3430 		xpt_done(ccb);
3431 		return;
3432 	}
3433 
3434 	targ = &sassc->targets[ccb->ccb_h.target_id];
3435 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3436 	req->DevHandle = htole16(targ->handle);
3437 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3438 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3439 
3440 	/* SAS Hard Link Reset / SATA Link Reset */
3441 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3442 
3443 	tm->cm_data = NULL;
3444 	tm->cm_desc.HighPriority.RequestFlags =
3445 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3446 	tm->cm_complete = mprsas_resetdev_complete;
3447 	tm->cm_complete_data = ccb;
3448 
3449 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3450 	    __func__, targ->tid);
3451 	tm->cm_targ = targ;
3452 	targ->flags |= MPRSAS_TARGET_INRESET;
3453 
3454 	mpr_map_command(sc, tm);
3455 }
3456 
3457 static void
3458 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3459 {
3460 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3461 	union ccb *ccb;
3462 
3463 	MPR_FUNCTRACE(sc);
3464 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3465 
3466 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3467 	ccb = tm->cm_complete_data;
3468 
3469 	/*
3470 	 * Currently there should be no way we can hit this case.  It only
3471 	 * happens when we have a failure to allocate chain frames, and
3472 	 * task management commands don't have S/G lists.
3473 	 */
3474 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3475 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3476 
3477 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3478 
3479 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3480 		    "handle %#04x! This should not happen!\n", __func__,
3481 		    tm->cm_flags, req->DevHandle);
3482 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3483 		goto bailout;
3484 	}
3485 
3486 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3487 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3488 
3489 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3490 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3491 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3492 		    CAM_LUN_WILDCARD);
3493 	}
3494 	else
3495 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3496 
3497 bailout:
3498 
3499 	mprsas_free_tm(sc, tm);
3500 	xpt_done(ccb);
3501 }
3502 
3503 static void
3504 mprsas_poll(struct cam_sim *sim)
3505 {
3506 	struct mprsas_softc *sassc;
3507 
3508 	sassc = cam_sim_softc(sim);
3509 
3510 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3511 		/* frequent debug messages during a panic just slow
3512 		 * everything down too much.
3513 		 */
3514 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3515 		    __func__);
3516 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3517 	}
3518 
3519 	mpr_intr_locked(sassc->sc);
3520 }
3521 
/*
 * CAM asynchronous event callback.  Two event codes are of interest:
 *
 * AC_ADVINFO_CHANGED: a device's cached advanced-info buffer changed.  We
 * only care about the long read capacity data (CDAI_TYPE_RCAPLONG); it is
 * re-fetched and the per-LUN EEDP (protection information) state updated.
 *
 * AC_FOUND_DEVICE: a new device was found; on older FreeBSD versions the
 * EEDP check is driven from here via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find this LUN in the target's list, or create it below. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the updated long read capacity data from CAM with
		 * an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN is formatted with protection
		 * information and, if so, its logical block length.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3651 
3652 #if (__FreeBSD_version < 901503) || \
3653     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3654 static void
3655 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3656     struct ccb_getdev *cgd)
3657 {
3658 	struct mprsas_softc *sassc = sc->sassc;
3659 	struct ccb_scsiio *csio;
3660 	struct scsi_read_capacity_16 *scsi_cmd;
3661 	struct scsi_read_capacity_eedp *rcap_buf;
3662 	path_id_t pathid;
3663 	target_id_t targetid;
3664 	lun_id_t lunid;
3665 	union ccb *ccb;
3666 	struct cam_path *local_path;
3667 	struct mprsas_target *target;
3668 	struct mprsas_lun *lun;
3669 	uint8_t	found_lun;
3670 	char path_str[64];
3671 
3672 	pathid = cam_sim_path(sassc->sim);
3673 	targetid = xpt_path_target_id(path);
3674 	lunid = xpt_path_lun_id(path);
3675 
3676 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3677 	    "mprsas_check_eedp\n", targetid));
3678 	target = &sassc->targets[targetid];
3679 	if (target->handle == 0x0)
3680 		return;
3681 
3682 	/*
3683 	 * Determine if the device is EEDP capable.
3684 	 *
3685 	 * If this flag is set in the inquiry data, the device supports
3686 	 * protection information, and must support the 16 byte read capacity
3687 	 * command, otherwise continue without sending read cap 16.
3688 	 */
3689 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3690 		return;
3691 
3692 	/*
3693 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3694 	 * the LUN is formatted for EEDP support.
3695 	 */
3696 	ccb = xpt_alloc_ccb_nowait();
3697 	if (ccb == NULL) {
3698 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3699 		    "support.\n");
3700 		return;
3701 	}
3702 
3703 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3704 	    CAM_REQ_CMP) {
3705 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3706 		    "support.\n");
3707 		xpt_free_ccb(ccb);
3708 		return;
3709 	}
3710 
3711 	/*
3712 	 * If LUN is already in list, don't create a new one.
3713 	 */
3714 	found_lun = FALSE;
3715 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3716 		if (lun->lun_id == lunid) {
3717 			found_lun = TRUE;
3718 			break;
3719 		}
3720 	}
3721 	if (!found_lun) {
3722 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3723 		    M_NOWAIT | M_ZERO);
3724 		if (lun == NULL) {
3725 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3726 			    "EEDP support.\n");
3727 			xpt_free_path(local_path);
3728 			xpt_free_ccb(ccb);
3729 			return;
3730 		}
3731 		lun->lun_id = lunid;
3732 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3733 	}
3734 
3735 	xpt_path_string(local_path, path_str, sizeof(path_str));
3736 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3737 	    path_str, target->handle);
3738 
3739 	/*
3740 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3741 	 * mprsas_read_cap_done function will load the read cap info into the
3742 	 * LUN struct.
3743 	 */
3744 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3745 	    M_NOWAIT | M_ZERO);
3746 	if (rcap_buf == NULL) {
3747 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3748 		    "buffer for EEDP support.\n");
3749 		xpt_free_path(ccb->ccb_h.path);
3750 		xpt_free_ccb(ccb);
3751 		return;
3752 	}
3753 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3754 	csio = &ccb->csio;
3755 	csio->ccb_h.func_code = XPT_SCSI_IO;
3756 	csio->ccb_h.flags = CAM_DIR_IN;
3757 	csio->ccb_h.retry_count = 4;
3758 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3759 	csio->ccb_h.timeout = 60000;
3760 	csio->data_ptr = (uint8_t *)rcap_buf;
3761 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3762 	csio->sense_len = MPR_SENSE_LEN;
3763 	csio->cdb_len = sizeof(*scsi_cmd);
3764 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3765 
3766 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3767 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3768 	scsi_cmd->opcode = 0x9E;
3769 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3770 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3771 
3772 	ccb->ccb_h.ppriv_ptr1 = sassc;
3773 	xpt_action(ccb);
3774 }
3775 
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Looks up the LUN on the target addressed by the
 * CCB and records whether it is formatted with EEDP protection
 * information, then frees the data buffer, path, and CCB.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/* The driver needs to release the devq itself if the SCSI
	 * command was generated internally by the driver.
	 * Currently there is a single place where the driver
	 * issues a SCSI command internally.  In the future, if the driver
	 * issues more internal SCSI commands, they will also need to
	 * release the devq here, since those commands will not travel
	 * back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	/* Data buffer allocated (and owned) by mprsas_check_eedp(). */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 (PROT_EN) of the protect byte indicates the LUN is
		 * formatted with protection information. */
		if (rcap_buf->protect & 0x01) {
			/* NOTE(review): target_lun is printed with %d; on
			 * versions where lun_id_t is 64-bit this specifier
			 * may be too narrow — confirm against cam_ccb.h. */
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3844 #endif /* (__FreeBSD_version < 901503) || \
3845           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3846 
3847 void
3848 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3849     struct mprsas_target *target, lun_id_t lun_id)
3850 {
3851 	union ccb *ccb;
3852 	path_id_t path_id;
3853 
3854 	/*
3855 	 * Set the INRESET flag for this target so that no I/O will be sent to
3856 	 * the target until the reset has completed.  If an I/O request does
3857 	 * happen, the devq will be frozen.  The CCB holds the path which is
3858 	 * used to release the devq.  The devq is released and the CCB is freed
3859 	 * when the TM completes.
3860 	 */
3861 	ccb = xpt_alloc_ccb_nowait();
3862 	if (ccb) {
3863 		path_id = cam_sim_path(sc->sassc->sim);
3864 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3865 		    target->tid, lun_id) != CAM_REQ_CMP) {
3866 			xpt_free_ccb(ccb);
3867 		} else {
3868 			tm->cm_ccb = ccb;
3869 			tm->cm_targ = target;
3870 			target->flags |= MPRSAS_TARGET_INRESET;
3871 		}
3872 	}
3873 }
3874 
3875 int
3876 mprsas_startup(struct mpr_softc *sc)
3877 {
3878 	/*
3879 	 * Send the port enable message and set the wait_for_port_enable flag.
3880 	 * This flag helps to keep the simq frozen until all discovery events
3881 	 * are processed.
3882 	 */
3883 	sc->wait_for_port_enable = 1;
3884 	mprsas_send_portenable(sc);
3885 	return (0);
3886 }
3887 
3888 static int
3889 mprsas_send_portenable(struct mpr_softc *sc)
3890 {
3891 	MPI2_PORT_ENABLE_REQUEST *request;
3892 	struct mpr_command *cm;
3893 
3894 	MPR_FUNCTRACE(sc);
3895 
3896 	if ((cm = mpr_alloc_command(sc)) == NULL)
3897 		return (EBUSY);
3898 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3899 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3900 	request->MsgFlags = 0;
3901 	request->VP_ID = 0;
3902 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3903 	cm->cm_complete = mprsas_portenable_complete;
3904 	cm->cm_data = NULL;
3905 	cm->cm_sge = NULL;
3906 
3907 	mpr_map_command(sc, cm);
3908 	mpr_dprint(sc, MPR_XINFO,
3909 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3910 	    cm, cm->cm_req, cm->cm_complete);
3911 	return (0);
3912 }
3913 
3914 static void
3915 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3916 {
3917 	MPI2_PORT_ENABLE_REPLY *reply;
3918 	struct mprsas_softc *sassc;
3919 
3920 	MPR_FUNCTRACE(sc);
3921 	sassc = sc->sassc;
3922 
3923 	/*
3924 	 * Currently there should be no way we can hit this case.  It only
3925 	 * happens when we have a failure to allocate chain frames, and
3926 	 * port enable commands don't have S/G lists.
3927 	 */
3928 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3929 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3930 		    "This should not happen!\n", __func__, cm->cm_flags);
3931 	}
3932 
3933 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3934 	if (reply == NULL)
3935 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3936 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3937 	    MPI2_IOCSTATUS_SUCCESS)
3938 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3939 
3940 	mpr_free_command(sc, cm);
3941 	if (sc->mpr_ich.ich_arg != NULL) {
3942 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3943 		config_intrhook_disestablish(&sc->mpr_ich);
3944 		sc->mpr_ich.ich_arg = NULL;
3945 	}
3946 
3947 	/*
3948 	 * Done waiting for port enable to complete.  Decrement the refcount.
3949 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3950 	 * take place.
3951 	 */
3952 	sc->wait_for_port_enable = 0;
3953 	sc->port_enable_complete = 1;
3954 	wakeup(&sc->port_enable_complete);
3955 	mprsas_startup_decrement(sassc);
3956 }
3957 
3958 int
3959 mprsas_check_id(struct mprsas_softc *sassc, int id)
3960 {
3961 	struct mpr_softc *sc = sassc->sc;
3962 	char *ids;
3963 	char *name;
3964 
3965 	ids = &sc->exclude_ids[0];
3966 	while((name = strsep(&ids, ",")) != NULL) {
3967 		if (name[0] == '\0')
3968 			continue;
3969 		if (strtol(name, NULL, 0) == (long)id)
3970 			return (1);
3971 	}
3972 
3973 	return (0);
3974 }
3975 
3976 void
3977 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3978 {
3979 	struct mprsas_softc *sassc;
3980 	struct mprsas_lun *lun, *lun_tmp;
3981 	struct mprsas_target *targ;
3982 	int i;
3983 
3984 	sassc = sc->sassc;
3985 	/*
3986 	 * The number of targets is based on IOC Facts, so free all of
3987 	 * the allocated LUNs for each target and then the target buffer
3988 	 * itself.
3989 	 */
3990 	for (i=0; i< maxtargets; i++) {
3991 		targ = &sassc->targets[i];
3992 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3993 			free(lun, M_MPR);
3994 		}
3995 	}
3996 	free(sassc->targets, M_MPR);
3997 
3998 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3999 	    M_MPR, M_WAITOK|M_ZERO);
4000 	if (!sassc->targets) {
4001 		panic("%s failed to alloc targets with error %d\n",
4002 		    __func__, ENOMEM);
4003 	}
4004 }
4005