xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI opcode; a non-zero entry gives the EEDP flags to set in
 * the SCSI IO request for that command.  Each row covers 16 opcodes.  The
 * protected opcodes are the 10/12/16-byte READ, WRITE, WRITE AND VERIFY,
 * VERIFY and WRITE SAME variants (see row comments below).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00-0x0F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10-0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30-0x3F */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x41 WRITE SAME(10) */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50-0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60-0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70-0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x93 WRITE SAME(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0-0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0-0xCF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0-0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0-0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0		/* 0xF0-0xFF */
};
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
175 void
176 mprsas_startup_increment(struct mprsas_softc *sassc)
177 {
178 	MPR_FUNCTRACE(sassc->sc);
179 
180 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 		if (sassc->startup_refcount++ == 0) {
182 			/* just starting, freeze the simq */
183 			mpr_dprint(sassc->sc, MPR_INIT,
184 			    "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
187 			xpt_hold_boot();
188 #endif
189 			xpt_freeze_simq(sassc->sim, 1);
190 		}
191 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 		    sassc->startup_refcount);
193 	}
194 }
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
206 void
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
208 {
209 	MPR_FUNCTRACE(sassc->sc);
210 
211 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 		if (--sassc->startup_refcount == 0) {
213 			/* finished all discovery-related actions, release
214 			 * the simq and rescan for the latest topology.
215 			 */
216 			mpr_dprint(sassc->sc, MPR_INIT,
217 			    "%s releasing simq\n", __func__);
218 			sassc->flags &= ~MPRSAS_IN_STARTUP;
219 			xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
222 			xpt_release_boot();
223 #else
224 			mprsas_rescan_target(sassc->sc, NULL);
225 #endif
226 		}
227 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 		    sassc->startup_refcount);
229 	}
230 }
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a high-priority command frame for a task-management request.
 * Returns NULL when none are available.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
246 void
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
248 {
249 	int target_id = 0xFFFFFFFF;
250 
251 	MPR_FUNCTRACE(sc);
252 	if (tm == NULL)
253 		return;
254 
255 	/*
256 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
257 	 * free the resources used for freezing the devq.  Must clear the
258 	 * INRESET flag as well or scsi I/O will not work.
259 	 */
260 	if (tm->cm_targ != NULL) {
261 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 		target_id = tm->cm_targ->tid;
263 	}
264 	if (tm->cm_ccb) {
265 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
266 		    target_id);
267 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
268 		xpt_free_path(tm->cm_ccb->ccb_h.path);
269 		xpt_free_ccb(tm->cm_ccb);
270 	}
271 
272 	mpr_free_high_priority_command(sc, tm);
273 }
274 
275 void
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
277 {
278 	struct mprsas_softc *sassc = sc->sassc;
279 	path_id_t pathid;
280 	target_id_t targetid;
281 	union ccb *ccb;
282 
283 	MPR_FUNCTRACE(sc);
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = xpt_alloc_ccb_nowait();
294 	if (ccb == NULL) {
295 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 		return;
297 	}
298 
299 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
302 		xpt_free_ccb(ccb);
303 		return;
304 	}
305 
306 	if (targetid == CAM_TARGET_WILDCARD)
307 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
308 	else
309 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
310 
311 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
312 	xpt_rescan(ccb);
313 }
314 
/*
 * Emit a debug log line for a command: the line is prefixed with the
 * command's CAM path (or a synthesized "noperiph" sim:bus:target:lun tuple
 * when no CCB is attached) and its SMID, followed by the caller's
 * printf-style message.  No-op unless at least one bit of 'level' is set
 * in the softc's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-size sbuf over 'str': long lines truncate, never allocate. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, also decode the CDB and length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB attached: fall back to sim/target/lun identifiers. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
360 static void
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
362 {
363 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 	struct mprsas_target *targ;
365 	uint16_t handle;
366 
367 	MPR_FUNCTRACE(sc);
368 
369 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
371 	targ = tm->cm_targ;
372 
373 	if (reply == NULL) {
374 		/* XXX retry the remove after the diag reset completes? */
375 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 		    "0x%04x\n", __func__, handle);
377 		mprsas_free_tm(sc, tm);
378 		return;
379 	}
380 
381 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
382 	    MPI2_IOCSTATUS_SUCCESS) {
383 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
384 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
385 	}
386 
387 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
388 	    le32toh(reply->TerminationCount));
389 	mpr_free_reply(sc, tm->cm_reply_data);
390 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391 
392 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
393 	    targ->tid, handle);
394 
395 	/*
396 	 * Don't clear target if remove fails because things will get confusing.
397 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 	 * this target id if possible, and so we can assign the same target id
399 	 * to this device if it comes back in the future.
400 	 */
401 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 	    MPI2_IOCSTATUS_SUCCESS) {
403 		targ = tm->cm_targ;
404 		targ->handle = 0x0;
405 		targ->encl_handle = 0x0;
406 		targ->encl_level_valid = 0x0;
407 		targ->encl_level = 0x0;
408 		targ->connector_name[0] = ' ';
409 		targ->connector_name[1] = ' ';
410 		targ->connector_name[2] = ' ';
411 		targ->connector_name[3] = ' ';
412 		targ->encl_slot = 0x0;
413 		targ->exp_dev_handle = 0x0;
414 		targ->phy_num = 0x0;
415 		targ->linkrate = 0x0;
416 		targ->devinfo = 0x0;
417 		targ->flags = 0x0;
418 		targ->scsi_req_desc_type = 0;
419 	}
420 
421 	mprsas_free_tm(sc, tm);
422 }
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
491 void
492 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
493 {
494 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
495 	struct mpr_softc *sc;
496 	struct mpr_command *cm;
497 	struct mprsas_target *targ = NULL;
498 
499 	MPR_FUNCTRACE(sassc->sc);
500 
501 	sc = sassc->sc;
502 
503 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
504 	if (targ == NULL) {
505 		/* FIXME: what is the action? */
506 		/* We don't know about this device? */
507 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
508 		    __func__, handle);
509 		return;
510 	}
511 
512 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
513 
514 	cm = mprsas_alloc_tm(sc);
515 	if (cm == NULL) {
516 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
517 		    __func__);
518 		return;
519 	}
520 
521 	mprsas_rescan_target(sc, targ);
522 
523 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
524 	memset(req, 0, sizeof(*req));
525 	req->DevHandle = htole16(targ->handle);
526 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
527 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
528 
529 	/* SAS Hard Link Reset / SATA Link Reset */
530 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531 
532 	cm->cm_targ = targ;
533 	cm->cm_data = NULL;
534 	cm->cm_desc.HighPriority.RequestFlags =
535 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
536 	cm->cm_complete = mprsas_remove_device;
537 	cm->cm_complete_data = (void *)(uintptr_t)handle;
538 
539 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
540 	    __func__, targ->tid);
541 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
542 
543 	mpr_map_command(sc, cm);
544 }
545 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  Validates the TM reply, then reuses the same
 * command frame to send a SAS IO UNIT CONTROL / OP_REMOVE_DEVICE request
 * for the handle, chaining to mprsas_remove_complete().  Any commands
 * still queued on the target (aborted by the reset) are completed back to
 * CAM with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data by the sender. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	/* A failed reset is logged but does not stop the removal sequence. */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * NOTE: 'tm' is reused as the loop iterator below; the remove command
	 * was already handed to the hardware via mpr_map_command() above.
	 * Fail back to CAM any I/O that was still queued on this target.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
621 static void
622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
623 {
624 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
625 	uint16_t handle;
626 	struct mprsas_target *targ;
627 	struct mprsas_lun *lun;
628 
629 	MPR_FUNCTRACE(sc);
630 
631 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
633 
634 	/*
635 	 * Currently there should be no way we can hit this case.  It only
636 	 * happens when we have a failure to allocate chain frames, and
637 	 * task management commands don't have S/G lists.
638 	 */
639 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
640 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
641 		    "handle %#04x! This should not happen!\n", __func__,
642 		    tm->cm_flags, handle);
643 		mprsas_free_tm(sc, tm);
644 		return;
645 	}
646 
647 	if (reply == NULL) {
648 		/* most likely a chip reset */
649 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
650 		    "0x%04x\n", __func__, handle);
651 		mprsas_free_tm(sc, tm);
652 		return;
653 	}
654 
655 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
656 	    __func__, handle, le16toh(reply->IOCStatus));
657 
658 	/*
659 	 * Don't clear target if remove fails because things will get confusing.
660 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
661 	 * this target id if possible, and so we can assign the same target id
662 	 * to this device if it comes back in the future.
663 	 */
664 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
665 	    MPI2_IOCSTATUS_SUCCESS) {
666 		targ = tm->cm_targ;
667 		targ->handle = 0x0;
668 		targ->encl_handle = 0x0;
669 		targ->encl_level_valid = 0x0;
670 		targ->encl_level = 0x0;
671 		targ->connector_name[0] = ' ';
672 		targ->connector_name[1] = ' ';
673 		targ->connector_name[2] = ' ';
674 		targ->connector_name[3] = ' ';
675 		targ->encl_slot = 0x0;
676 		targ->exp_dev_handle = 0x0;
677 		targ->phy_num = 0x0;
678 		targ->linkrate = 0x0;
679 		targ->devinfo = 0x0;
680 		targ->flags = 0x0;
681 		targ->scsi_req_desc_type = 0;
682 
683 		while (!SLIST_EMPTY(&targ->luns)) {
684 			lun = SLIST_FIRST(&targ->luns);
685 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
686 			free(lun, M_MPR);
687 		}
688 	}
689 
690 	mprsas_free_tm(sc, tm);
691 }
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 		}
718 	}
719 
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
722 
723 	return (0);
724 }
725 
/*
 * Attach the SAS/CAM layer for this controller: allocate the per-adapter
 * mprsas_softc and target array, create and register a CAM SIM/bus, start
 * the firmware-event taskqueue, freeze the simq until discovery completes,
 * and register for CAM async events (used for EEDP capability detection).
 * Returns 0 or an errno; on error mpr_detach_sas() unwinds whatever was
 * partially set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* M_WAITOK cannot fail; the NULL checks below are defensive only. */
	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
899 
/*
 * Tear down the SAS/CAM layer: deregister firmware events, drain/free the
 * event taskqueue (done before taking the softc lock so in-flight tasks
 * can complete), unwind any startup simq freeze, deregister the async
 * handler and CAM bus, and free the target array and softc.  Safe to call
 * on a partially attached instance (mpr_attach_sas error path); always
 * returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	/* Nothing attached; nothing to do. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* event mask 0 removes the registration for this path. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Startup freeze still held: release it so CAM can make progress. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records still hanging off the targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
962 
/*
 * Called when initial device discovery has finished.  Cancels the pending
 * discovery-timeout callout (if armed) and, when mapping events are being
 * tracked for this initialization, schedules the deferred missing-device
 * check of the mapping table.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	/* Discovery finished in time; stop the timeout watchdog. */
	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}
993 
/*
 * CAM action entry point for this SIM.  Dispatches the CCB on its function
 * code: XPT_SCSI_IO, XPT_RESET_DEV and XPT_SMP_IO are handed to helpers
 * that complete the CCB asynchronously (note the early returns); all other
 * cases set a status here and fall out to xpt_done().  Called with the
 * softc mutex held (asserted below).
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report SIM capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = (sc->chain_frame_size /
		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		/* Cache the final value for use elsewhere in the driver. */
		sc->maxio = cpi->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/*
		 * Translate the cached link rate code into a CTS bit rate;
		 * 0x08-0x0b correspond to 1.5/3/6/12 Gb/s links
		 * (150/300/600/1200 MB/s after encoding overhead).
		 */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown code: report no valid speed info. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completed asynchronously by the resetdev helper. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously when the firmware replies. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1146 
1147 static void
1148 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1149     target_id_t target_id, lun_id_t lun_id)
1150 {
1151 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1152 	struct cam_path *path;
1153 
1154 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1155 	    ac_code, target_id, (uintmax_t)lun_id);
1156 
1157 	if (xpt_create_path(&path, NULL,
1158 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1159 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1160 		    "notification\n");
1161 		return;
1162 	}
1163 
1164 	xpt_async(ac_code, path, NULL);
1165 	xpt_free_path(path);
1166 }
1167 
/*
 * Forcibly complete every command slot with a NULL reply.  Used during a
 * diag reset, when the hardware will never deliver replies for commands
 * that were in flight.  Each command is finished through its completion
 * callback and/or by waking a sleeping submitter; a non-free command with
 * neither mechanism is logged since that should never happen.  Called
 * with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NB: slot 0 is skipped — the loop starts at index 1. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Let pollers see the command as done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1215 
/*
 * Re-initialization path taken after a controller (diag) reset: put the
 * SIM back into startup/discovery mode, announce a bus reset to CAM,
 * flush all outstanding commands, and invalidate every cached target
 * handle so the targets are rediscovered with fresh handles.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1258 static void
1259 mprsas_tm_timeout(void *data)
1260 {
1261 	struct mpr_command *tm = data;
1262 	struct mpr_softc *sc = tm->cm_sc;
1263 
1264 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1265 
1266 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1267 	    "out\n", tm);
1268 	mpr_reinit(sc);
1269 }
1270 
1271 static void
1272 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1273 {
1274 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1275 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1276 	unsigned int cm_count = 0;
1277 	struct mpr_command *cm;
1278 	struct mprsas_target *targ;
1279 
1280 	callout_stop(&tm->cm_callout);
1281 
1282 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1283 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1284 	targ = tm->cm_targ;
1285 
1286 	/*
1287 	 * Currently there should be no way we can hit this case.  It only
1288 	 * happens when we have a failure to allocate chain frames, and
1289 	 * task management commands don't have S/G lists.
1290 	 */
1291 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1292 		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1293 		    "%s: cm_flags = %#x for LUN reset! "
1294 		    "This should not happen!\n", __func__, tm->cm_flags);
1295 		mprsas_free_tm(sc, tm);
1296 		return;
1297 	}
1298 
1299 	if (reply == NULL) {
1300 		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1301 		    tm);
1302 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1303 			/* this completion was due to a reset, just cleanup */
1304 			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1305 			    "reset, ignoring NULL LUN reset reply\n");
1306 			targ->tm = NULL;
1307 			mprsas_free_tm(sc, tm);
1308 		}
1309 		else {
1310 			/* we should have gotten a reply. */
1311 			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1312 			    "LUN reset attempt, resetting controller\n");
1313 			mpr_reinit(sc);
1314 		}
1315 		return;
1316 	}
1317 
1318 	mpr_dprint(sc, MPR_RECOVERY,
1319 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1320 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1321 	    le32toh(reply->TerminationCount));
1322 
1323 	/*
1324 	 * See if there are any outstanding commands for this LUN.
1325 	 * This could be made more efficient by using a per-LU data
1326 	 * structure of some sort.
1327 	 */
1328 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1329 		if (cm->cm_lun == tm->cm_lun)
1330 			cm_count++;
1331 	}
1332 
1333 	if (cm_count == 0) {
1334 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1335 		    "Finished recovery after LUN reset for target %u\n",
1336 		    targ->tid);
1337 
1338 		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1339 		    tm->cm_lun);
1340 
1341 		/*
1342 		 * We've finished recovery for this logical unit.  check and
1343 		 * see if some other logical unit has a timedout command
1344 		 * that needs to be processed.
1345 		 */
1346 		cm = TAILQ_FIRST(&targ->timedout_commands);
1347 		if (cm) {
1348 			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1349 			   "More commands to abort for target %u\n", targ->tid);
1350 			mprsas_send_abort(sc, tm, cm);
1351 		} else {
1352 			targ->tm = NULL;
1353 			mprsas_free_tm(sc, tm);
1354 		}
1355 	} else {
1356 		/* if we still have commands for this LUN, the reset
1357 		 * effectively failed, regardless of the status reported.
1358 		 * Escalate to a target reset.
1359 		 */
1360 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1361 		    "logical unit reset complete for target %u, but still "
1362 		    "have %u command(s), sending target reset\n", targ->tid,
1363 		    cm_count);
1364 		mprsas_send_reset(sc, tm,
1365 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1366 	}
1367 }
1368 
/*
 * Completion handler for a TARGET RESET task management command.  If the
 * target has no outstanding commands, recovery is finished and a BDR
 * async event is announced to CAM for the whole target; otherwise the
 * reset is considered failed (regardless of reported status) and the
 * whole controller is reinitialized.  A NULL reply is tolerated only
 * while a diag reset is in progress.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1445 
1446 #define MPR_RESET_TIMEOUT 30
1447 
/*
 * Issue a SCSI task management reset for the target associated with 'tm'.
 * 'type' selects either a logical unit reset or a target reset (via SAS
 * hard link reset / SATA link reset); each picks the matching completion
 * handler and prepares the target/LUN for the TM.  A timeout callout is
 * armed that escalates to a controller reset if the TM never completes.
 * Returns 0 on successful submission, -1 for a missing device handle or
 * an unsupported reset type, or the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* handle == 0 means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	/* Fill in the common TM request fields. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Extra locating info for enclosure-aware targets. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	/* TMs carry no data and go out on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* Escalate to a controller reset if the TM itself times out. */
	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1515 
1516 
/*
 * Completion handler for an ABORT TASK task management command.  If the
 * target's timed-out queue is empty, recovery for the target is done.
 * If the queue head is a different command than the one just aborted,
 * the abort succeeded and recovery continues with the next command.  If
 * the same command is still at the head, the abort failed and recovery
 * escalates to a logical unit reset.  A NULL reply is tolerated only
 * while a diag reset is in progress.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1597 
1598 #define MPR_ABORT_TIMEOUT 5
1599 
/*
 * Issue an ABORT TASK TM for timed-out command 'cm', using 'tm' as the
 * task management command slot.  Targets the aborted command by its SMID,
 * wires up mprsas_abort_complete() as the completion handler, and arms a
 * timeout that escalates to a controller reset if the abort itself never
 * completes.  Returns 0 on successful submission, -1 for a missing device
 * handle, or the error from mpr_map_command().
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err;

	targ = cm->cm_targ;
	/* handle == 0 means the device is gone; nothing to abort against. */
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* The SMID identifies which in-flight command to abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	/* TMs carry no data and go out on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* Escalate to a controller reset if the abort itself times out. */
	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1651 
/*
 * Callout handler for a timed-out SCSI I/O.  First polls the interrupt
 * handler in case the completion is merely pending; if the command really
 * is stuck, marks it CAM_CMD_TIMEOUT, queues it on the target's timed-out
 * list, and starts recovery by sending an abort — unless recovery for the
 * target is already in progress, in which case the command just waits its
 * turn, or no TM frame can be allocated, in which case the shortfall is
 * only logged.  Called with the softc mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* The poll above completed it; no recovery needed. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submission. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1740 
1741 /**
1742  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1743  *			     to SCSI Unmap.
1744  * Return 0 - for success,
1745  *	  1 - to immediately return back the command with success status to CAM
1746  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1747  *			   to FW without any translation.
1748  */
1749 static int
1750 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1751     union ccb *ccb, struct mprsas_target *targ)
1752 {
1753 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1754 	struct ccb_scsiio *csio;
1755 	struct unmap_parm_list *plist;
1756 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1757 	struct nvme_command *c;
1758 	int i, res;
1759 	uint16_t ndesc, list_len, data_length;
1760 	struct mpr_prp_page *prp_page_info;
1761 	uint64_t nvme_dsm_ranges_dma_handle;
1762 
1763 	csio = &ccb->csio;
1764 #if __FreeBSD_version >= 1100103
1765 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1766 #else
1767 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1768 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1769 		    ccb->csio.cdb_io.cdb_ptr[8]);
1770 	} else {
1771 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1772 		    ccb->csio.cdb_io.cdb_bytes[8]);
1773 	}
1774 #endif
1775 	if (!list_len) {
1776 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1777 		return -EINVAL;
1778 	}
1779 
1780 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1781 	if (!plist) {
1782 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1783 		    "save UNMAP data\n");
1784 		return -ENOMEM;
1785 	}
1786 
1787 	/* Copy SCSI unmap data to a local buffer */
1788 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1789 
1790 	/* return back the unmap command to CAM with success status,
1791 	 * if number of descripts is zero.
1792 	 */
1793 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1794 	if (!ndesc) {
1795 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1796 		    "UNMAP cmd is Zero\n");
1797 		res = 1;
1798 		goto out;
1799 	}
1800 
1801 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1802 	if (data_length > targ->MDTS) {
1803 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1804 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1805 		res = -EINVAL;
1806 		goto out;
1807 	}
1808 
1809 	prp_page_info = mpr_alloc_prp_page(sc);
1810 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1811 	    "UNMAP command.\n", __func__));
1812 
1813 	/*
1814 	 * Insert the allocated PRP page into the command's PRP page list. This
1815 	 * will be freed when the command is freed.
1816 	 */
1817 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1818 
1819 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1820 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1821 
1822 	bzero(nvme_dsm_ranges, data_length);
1823 
1824 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1825 	 * for each descriptors contained in SCSI UNMAP data.
1826 	 */
1827 	for (i = 0; i < ndesc; i++) {
1828 		nvme_dsm_ranges[i].length =
1829 		    htole32(be32toh(plist->desc[i].nlb));
1830 		nvme_dsm_ranges[i].starting_lba =
1831 		    htole64(be64toh(plist->desc[i].slba));
1832 		nvme_dsm_ranges[i].attributes = 0;
1833 	}
1834 
1835 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1836 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1837 	bzero(req, sizeof(*req));
1838 	req->DevHandle = htole16(targ->handle);
1839 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1840 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1841 	req->ErrorResponseBaseAddress.High =
1842 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1843 	req->ErrorResponseBaseAddress.Low =
1844 	    htole32(cm->cm_sense_busaddr);
1845 	req->ErrorResponseAllocationLength =
1846 	    htole16(sizeof(struct nvme_completion));
1847 	req->EncapsulatedCommandLength =
1848 	    htole16(sizeof(struct nvme_command));
1849 	req->DataLength = htole32(data_length);
1850 
1851 	/* Build NVMe DSM command */
1852 	c = (struct nvme_command *) req->NVMe_Command;
1853 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1854 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1855 	c->cdw10 = htole32(ndesc - 1);
1856 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1857 
1858 	cm->cm_length = data_length;
1859 	cm->cm_data = NULL;
1860 
1861 	cm->cm_complete = mprsas_scsiio_complete;
1862 	cm->cm_complete_data = ccb;
1863 	cm->cm_targ = targ;
1864 	cm->cm_lun = csio->ccb_h.target_lun;
1865 	cm->cm_ccb = ccb;
1866 
1867 	cm->cm_desc.Default.RequestFlags =
1868 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1869 
1870 	csio->ccb_h.qos.sim_data = sbinuptime();
1871 #if __FreeBSD_version >= 1000029
1872 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1873 	    mprsas_scsiio_timeout, cm, 0);
1874 #else //__FreeBSD_version < 1000029
1875 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1876 	    mprsas_scsiio_timeout, cm);
1877 #endif //__FreeBSD_version >= 1000029
1878 
1879 	targ->issued++;
1880 	targ->outstanding++;
1881 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1882 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1883 
1884 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1885 	    __func__, cm, ccb, targ->outstanding);
1886 
1887 	mpr_build_nvme_prp(sc, cm, req,
1888 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1889 	mpr_map_command(sc, cm);
1890 
1891 out:
1892 	free(plist, M_MPR);
1893 	return 0;
1894 }
1895 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate the CCB into an MPI2 SCSI IO request (or,
 * for UNMAP to an NVMe device, a native NVMe DSM command), arm the
 * per-command timeout, and hand the command to the hardware.  Called
 * with the softc mutex held; completion is reported asynchronously via
 * mprsas_scsiio_complete().
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means there is no device at this target. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members may not be addressed with direct SCSI I/O. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Fail new I/O once the driver has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of command slots (or a diagnostic reset is in flight): freeze
	 * the SIM queue and ask CAM to requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* Any other rc falls through to the normal SCSI IO path. */
	}

	/* Build the MPI2 SCSI IO request frame from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs record their additional CDB length (in dwords) here. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	/* Encode the LUN; fail the CCB if it cannot be represented. */
	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/* NOTE(review): IoFlags was already set to cdb_len above; harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	/* op_code_prot yields the EEDP flags for this opcode (0 = no EEDP). */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		/* lun is NULL here if the loop above found no match. */
		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Point the command at the CCB so bus_dma can map the data later. */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* SGL space left in the request frame: 32-dword frame, SGL at 24. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Start the per-command timeout before issuing to the hardware. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2209 
2210 /**
2211  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2212  */
2213 static void
2214 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2215     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2216 {
2217 	u32 response_info;
2218 	u8 *response_bytes;
2219 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2220 	    MPI2_IOCSTATUS_MASK;
2221 	u8 scsi_state = mpi_reply->SCSIState;
2222 	u8 scsi_status = mpi_reply->SCSIStatus;
2223 	char *desc_ioc_state = NULL;
2224 	char *desc_scsi_status = NULL;
2225 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2226 
2227 	if (log_info == 0x31170000)
2228 		return;
2229 
2230 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2231 	     ioc_status);
2232 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2233 	    scsi_status);
2234 
2235 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2236 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2237 	if (targ->encl_level_valid) {
2238 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2239 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2240 		    targ->connector_name);
2241 	}
2242 
2243 	/*
2244 	 * We can add more detail about underflow data here
2245 	 * TO-DO
2246 	 */
2247 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2248 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2249 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2250 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2251 
2252 	if (sc->mpr_debug & MPR_XINFO &&
2253 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2254 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2255 		scsi_sense_print(csio);
2256 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2257 	}
2258 
2259 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2260 		response_info = le32toh(mpi_reply->ResponseInfo);
2261 		response_bytes = (u8 *)&response_info;
2262 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2263 		    response_bytes[0],
2264 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2265 		    response_bytes[0]));
2266 	}
2267 }
2268 
2269 /** mprsas_nvme_trans_status_code
2270  *
2271  * Convert Native NVMe command error status to
2272  * equivalent SCSI error status.
2273  *
2274  * Returns appropriate scsi_status
2275  */
2276 static u8
2277 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2278     struct mpr_command *cm)
2279 {
2280 	u8 status = MPI2_SCSI_STATUS_GOOD;
2281 	int skey, asc, ascq;
2282 	union ccb *ccb = cm->cm_complete_data;
2283 	int returned_sense_len;
2284 
2285 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2286 	skey = SSD_KEY_ILLEGAL_REQUEST;
2287 	asc = SCSI_ASC_NO_SENSE;
2288 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2289 
2290 	switch (nvme_status.sct) {
2291 	case NVME_SCT_GENERIC:
2292 		switch (nvme_status.sc) {
2293 		case NVME_SC_SUCCESS:
2294 			status = MPI2_SCSI_STATUS_GOOD;
2295 			skey = SSD_KEY_NO_SENSE;
2296 			asc = SCSI_ASC_NO_SENSE;
2297 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2298 			break;
2299 		case NVME_SC_INVALID_OPCODE:
2300 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2301 			skey = SSD_KEY_ILLEGAL_REQUEST;
2302 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2303 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2304 			break;
2305 		case NVME_SC_INVALID_FIELD:
2306 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2307 			skey = SSD_KEY_ILLEGAL_REQUEST;
2308 			asc = SCSI_ASC_INVALID_CDB;
2309 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2310 			break;
2311 		case NVME_SC_DATA_TRANSFER_ERROR:
2312 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2313 			skey = SSD_KEY_MEDIUM_ERROR;
2314 			asc = SCSI_ASC_NO_SENSE;
2315 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2316 			break;
2317 		case NVME_SC_ABORTED_POWER_LOSS:
2318 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2319 			skey = SSD_KEY_ABORTED_COMMAND;
2320 			asc = SCSI_ASC_WARNING;
2321 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2322 			break;
2323 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2324 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2325 			skey = SSD_KEY_HARDWARE_ERROR;
2326 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2327 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2328 			break;
2329 		case NVME_SC_ABORTED_BY_REQUEST:
2330 		case NVME_SC_ABORTED_SQ_DELETION:
2331 		case NVME_SC_ABORTED_FAILED_FUSED:
2332 		case NVME_SC_ABORTED_MISSING_FUSED:
2333 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2334 			skey = SSD_KEY_ABORTED_COMMAND;
2335 			asc = SCSI_ASC_NO_SENSE;
2336 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2337 			break;
2338 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2339 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2340 			skey = SSD_KEY_ILLEGAL_REQUEST;
2341 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2342 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2343 			break;
2344 		case NVME_SC_LBA_OUT_OF_RANGE:
2345 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2346 			skey = SSD_KEY_ILLEGAL_REQUEST;
2347 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2348 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2349 			break;
2350 		case NVME_SC_CAPACITY_EXCEEDED:
2351 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2352 			skey = SSD_KEY_MEDIUM_ERROR;
2353 			asc = SCSI_ASC_NO_SENSE;
2354 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2355 			break;
2356 		case NVME_SC_NAMESPACE_NOT_READY:
2357 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2358 			skey = SSD_KEY_NOT_READY;
2359 			asc = SCSI_ASC_LUN_NOT_READY;
2360 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2361 			break;
2362 		}
2363 		break;
2364 	case NVME_SCT_COMMAND_SPECIFIC:
2365 		switch (nvme_status.sc) {
2366 		case NVME_SC_INVALID_FORMAT:
2367 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2368 			skey = SSD_KEY_ILLEGAL_REQUEST;
2369 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2370 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2371 			break;
2372 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2373 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2374 			skey = SSD_KEY_ILLEGAL_REQUEST;
2375 			asc = SCSI_ASC_INVALID_CDB;
2376 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2377 			break;
2378 		}
2379 		break;
2380 	case NVME_SCT_MEDIA_ERROR:
2381 		switch (nvme_status.sc) {
2382 		case NVME_SC_WRITE_FAULTS:
2383 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2384 			skey = SSD_KEY_MEDIUM_ERROR;
2385 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2386 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2387 			break;
2388 		case NVME_SC_UNRECOVERED_READ_ERROR:
2389 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2390 			skey = SSD_KEY_MEDIUM_ERROR;
2391 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2392 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2393 			break;
2394 		case NVME_SC_GUARD_CHECK_ERROR:
2395 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2396 			skey = SSD_KEY_MEDIUM_ERROR;
2397 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2398 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2399 			break;
2400 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2401 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2402 			skey = SSD_KEY_MEDIUM_ERROR;
2403 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2404 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2405 			break;
2406 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2407 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2408 			skey = SSD_KEY_MEDIUM_ERROR;
2409 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2410 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2411 			break;
2412 		case NVME_SC_COMPARE_FAILURE:
2413 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2414 			skey = SSD_KEY_MISCOMPARE;
2415 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2416 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2417 			break;
2418 		case NVME_SC_ACCESS_DENIED:
2419 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2420 			skey = SSD_KEY_ILLEGAL_REQUEST;
2421 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2422 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2423 			break;
2424 		}
2425 		break;
2426 	}
2427 
2428 	returned_sense_len = sizeof(struct scsi_sense_data);
2429 	if (returned_sense_len < ccb->csio.sense_len)
2430 		ccb->csio.sense_resid = ccb->csio.sense_len -
2431 		    returned_sense_len;
2432 	else
2433 		ccb->csio.sense_resid = 0;
2434 
2435 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2436 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2437 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2438 
2439 	return status;
2440 }
2441 
2442 /** mprsas_complete_nvme_unmap
2443  *
2444  * Complete native NVMe command issued using NVMe Encapsulated
2445  * Request Message.
2446  */
2447 static u8
2448 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2449 {
2450 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2451 	struct nvme_completion *nvme_completion = NULL;
2452 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2453 
2454 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2455 	if (le16toh(mpi_reply->ErrorResponseCount)){
2456 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2457 		scsi_status = mprsas_nvme_trans_status_code(
2458 		    nvme_completion->status, cm);
2459 	}
2460 	return scsi_status;
2461 }
2462 
2463 static void
2464 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2465 {
2466 	MPI2_SCSI_IO_REPLY *rep;
2467 	union ccb *ccb;
2468 	struct ccb_scsiio *csio;
2469 	struct mprsas_softc *sassc;
2470 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2471 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2472 	int dir = 0, i;
2473 	u16 alloc_len;
2474 	struct mprsas_target *target;
2475 	target_id_t target_id;
2476 
2477 	MPR_FUNCTRACE(sc);
2478 	mpr_dprint(sc, MPR_TRACE,
2479 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2480 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2481 	    cm->cm_targ->outstanding);
2482 
2483 	callout_stop(&cm->cm_callout);
2484 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2485 
2486 	sassc = sc->sassc;
2487 	ccb = cm->cm_complete_data;
2488 	csio = &ccb->csio;
2489 	target_id = csio->ccb_h.target_id;
2490 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2491 	/*
2492 	 * XXX KDM if the chain allocation fails, does it matter if we do
2493 	 * the sync and unload here?  It is simpler to do it in every case,
2494 	 * assuming it doesn't cause problems.
2495 	 */
2496 	if (cm->cm_data != NULL) {
2497 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2498 			dir = BUS_DMASYNC_POSTREAD;
2499 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2500 			dir = BUS_DMASYNC_POSTWRITE;
2501 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2502 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2503 	}
2504 
2505 	cm->cm_targ->completed++;
2506 	cm->cm_targ->outstanding--;
2507 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2508 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2509 
2510 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2511 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2512 		if (cm->cm_reply != NULL)
2513 			mprsas_log_command(cm, MPR_RECOVERY,
2514 			    "completed timedout cm %p ccb %p during recovery "
2515 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2516 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2517 			    rep->SCSIState, le32toh(rep->TransferCount));
2518 		else
2519 			mprsas_log_command(cm, MPR_RECOVERY,
2520 			    "completed timedout cm %p ccb %p during recovery\n",
2521 			    cm, cm->cm_ccb);
2522 	} else if (cm->cm_targ->tm != NULL) {
2523 		if (cm->cm_reply != NULL)
2524 			mprsas_log_command(cm, MPR_RECOVERY,
2525 			    "completed cm %p ccb %p during recovery "
2526 			    "ioc %x scsi %x state %x xfer %u\n",
2527 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2528 			    rep->SCSIStatus, rep->SCSIState,
2529 			    le32toh(rep->TransferCount));
2530 		else
2531 			mprsas_log_command(cm, MPR_RECOVERY,
2532 			    "completed cm %p ccb %p during recovery\n",
2533 			    cm, cm->cm_ccb);
2534 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2535 		mprsas_log_command(cm, MPR_RECOVERY,
2536 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2537 	}
2538 
2539 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2540 		/*
2541 		 * We ran into an error after we tried to map the command,
2542 		 * so we're getting a callback without queueing the command
2543 		 * to the hardware.  So we set the status here, and it will
2544 		 * be retained below.  We'll go through the "fast path",
2545 		 * because there can be no reply when we haven't actually
2546 		 * gone out to the hardware.
2547 		 */
2548 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2549 
2550 		/*
2551 		 * Currently the only error included in the mask is
2552 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2553 		 * chain frames.  We need to freeze the queue until we get
2554 		 * a command that completed without this error, which will
2555 		 * hopefully have some chain frames attached that we can
2556 		 * use.  If we wanted to get smarter about it, we would
2557 		 * only unfreeze the queue in this condition when we're
2558 		 * sure that we're getting some chain frames back.  That's
2559 		 * probably unnecessary.
2560 		 */
2561 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2562 			xpt_freeze_simq(sassc->sim, 1);
2563 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2564 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2565 			    "freezing SIM queue\n");
2566 		}
2567 	}
2568 
2569 	/*
2570 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2571 	 * flag, and use it in a few places in the rest of this function for
2572 	 * convenience. Use the macro if available.
2573 	 */
2574 #if __FreeBSD_version >= 1100103
2575 	scsi_cdb = scsiio_cdb_ptr(csio);
2576 #else
2577 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2578 		scsi_cdb = csio->cdb_io.cdb_ptr;
2579 	else
2580 		scsi_cdb = csio->cdb_io.cdb_bytes;
2581 #endif
2582 
2583 	/*
2584 	 * If this is a Start Stop Unit command and it was issued by the driver
2585 	 * during shutdown, decrement the refcount to account for all of the
2586 	 * commands that were sent.  All SSU commands should be completed before
2587 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2588 	 * is TRUE.
2589 	 */
2590 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2591 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2592 		sc->SSU_refcount--;
2593 	}
2594 
2595 	/* Take the fast path to completion */
2596 	if (cm->cm_reply == NULL) {
2597 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2598 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2599 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2600 			else {
2601 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2602 				csio->scsi_status = SCSI_STATUS_OK;
2603 			}
2604 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2605 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2606 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2607 				mpr_dprint(sc, MPR_XINFO,
2608 				    "Unfreezing SIM queue\n");
2609 			}
2610 		}
2611 
2612 		/*
2613 		 * There are two scenarios where the status won't be
2614 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2615 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2616 		 */
2617 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2618 			/*
2619 			 * Freeze the dev queue so that commands are
2620 			 * executed in the correct order after error
2621 			 * recovery.
2622 			 */
2623 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2624 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2625 		}
2626 		mpr_free_command(sc, cm);
2627 		xpt_done(ccb);
2628 		return;
2629 	}
2630 
2631 	target = &sassc->targets[target_id];
2632 	if (scsi_cdb[0] == UNMAP &&
2633 	    target->is_nvme &&
2634 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2635 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2636 		csio->scsi_status = rep->SCSIStatus;
2637 	}
2638 
2639 	mprsas_log_command(cm, MPR_XINFO,
2640 	    "ioc %x scsi %x state %x xfer %u\n",
2641 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2642 	    le32toh(rep->TransferCount));
2643 
2644 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2645 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2646 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2647 		/* FALLTHROUGH */
2648 	case MPI2_IOCSTATUS_SUCCESS:
2649 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2650 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2651 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2652 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2653 
2654 		/* Completion failed at the transport level. */
2655 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2656 		    MPI2_SCSI_STATE_TERMINATED)) {
2657 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2658 			break;
2659 		}
2660 
2661 		/* In a modern packetized environment, an autosense failure
2662 		 * implies that there's not much else that can be done to
2663 		 * recover the command.
2664 		 */
2665 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2666 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2667 			break;
2668 		}
2669 
2670 		/*
2671 		 * CAM doesn't care about SAS Response Info data, but if this is
2672 		 * the state check if TLR should be done.  If not, clear the
2673 		 * TLR_bits for the target.
2674 		 */
2675 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2676 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2677 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2678 			sc->mapping_table[target_id].TLR_bits =
2679 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2680 		}
2681 
2682 		/*
2683 		 * Intentionally override the normal SCSI status reporting
2684 		 * for these two cases.  These are likely to happen in a
2685 		 * multi-initiator environment, and we want to make sure that
2686 		 * CAM retries these commands rather than fail them.
2687 		 */
2688 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2689 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2690 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2691 			break;
2692 		}
2693 
2694 		/* Handle normal status and sense */
2695 		csio->scsi_status = rep->SCSIStatus;
2696 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2697 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2698 		else
2699 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2700 
2701 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2702 			int sense_len, returned_sense_len;
2703 
2704 			returned_sense_len = min(le32toh(rep->SenseCount),
2705 			    sizeof(struct scsi_sense_data));
2706 			if (returned_sense_len < csio->sense_len)
2707 				csio->sense_resid = csio->sense_len -
2708 				    returned_sense_len;
2709 			else
2710 				csio->sense_resid = 0;
2711 
2712 			sense_len = min(returned_sense_len,
2713 			    csio->sense_len - csio->sense_resid);
2714 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2715 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2716 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2717 		}
2718 
2719 		/*
2720 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2721 		 * and it's page code 0 (Supported Page List), and there is
2722 		 * inquiry data, and this is for a sequential access device, and
2723 		 * the device is an SSP target, and TLR is supported by the
2724 		 * controller, turn the TLR_bits value ON if page 0x90 is
2725 		 * supported.
2726 		 */
2727 		if ((scsi_cdb[0] == INQUIRY) &&
2728 		    (scsi_cdb[1] & SI_EVPD) &&
2729 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2730 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2731 		    (csio->data_ptr != NULL) &&
2732 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2733 		    (sc->control_TLR) &&
2734 		    (sc->mapping_table[target_id].device_info &
2735 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2736 			vpd_list = (struct scsi_vpd_supported_page_list *)
2737 			    csio->data_ptr;
2738 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2739 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2740 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2741 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2742 			alloc_len -= csio->resid;
2743 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2744 				if (vpd_list->list[i] == 0x90) {
2745 					*TLR_bits = TLR_on;
2746 					break;
2747 				}
2748 			}
2749 		}
2750 
2751 		/*
2752 		 * If this is a SATA direct-access end device, mark it so that
2753 		 * a SCSI StartStopUnit command will be sent to it when the
2754 		 * driver is being shutdown.
2755 		 */
2756 		if ((scsi_cdb[0] == INQUIRY) &&
2757 		    (csio->data_ptr != NULL) &&
2758 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2759 		    (sc->mapping_table[target_id].device_info &
2760 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2761 		    ((sc->mapping_table[target_id].device_info &
2762 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2763 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2764 			target = &sassc->targets[target_id];
2765 			target->supports_SSU = TRUE;
2766 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2767 			    target_id);
2768 		}
2769 		break;
2770 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2771 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2772 		/*
2773 		 * If devinfo is 0 this will be a volume.  In that case don't
2774 		 * tell CAM that the volume is not there.  We want volumes to
2775 		 * be enumerated until they are deleted/removed, not just
2776 		 * failed.
2777 		 */
2778 		if (cm->cm_targ->devinfo == 0)
2779 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2780 		else
2781 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2782 		break;
2783 	case MPI2_IOCSTATUS_INVALID_SGL:
2784 		mpr_print_scsiio_cmd(sc, cm);
2785 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2786 		break;
2787 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2788 		/*
2789 		 * This is one of the responses that comes back when an I/O
2790 		 * has been aborted.  If it is because of a timeout that we
2791 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2792 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2793 		 * command is the same (it gets retried, subject to the
2794 		 * retry counter), the only difference is what gets printed
2795 		 * on the console.
2796 		 */
2797 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2798 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2799 		else
2800 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2801 		break;
2802 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2803 		/* resid is ignored for this condition */
2804 		csio->resid = 0;
2805 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2806 		break;
2807 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2808 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2809 		/*
2810 		 * These can sometimes be transient transport-related
2811 		 * errors, and sometimes persistent drive-related errors.
2812 		 * We used to retry these without decrementing the retry
2813 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2814 		 * we hit a persistent drive problem that returns one of
2815 		 * these error codes, we would retry indefinitely.  So,
2816 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2817 		 * count and avoid infinite retries.  We're taking the
2818 		 * potential risk of flagging false failures in the event
2819 		 * of a topology-related error (e.g. a SAS expander problem
2820 		 * causes a command addressed to a drive to fail), but
2821 		 * avoiding getting into an infinite retry loop.
2822 		 */
2823 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2824 		mpr_dprint(sc, MPR_INFO,
2825 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2826 		    mpr_describe_table(mpr_iocstatus_string,
2827 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2828 		    target_id, cm->cm_desc.Default.SMID,
2829 		    le32toh(rep->IOCLogInfo));
2830 		mpr_dprint(sc, MPR_XINFO,
2831 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2832 		    rep->SCSIStatus, rep->SCSIState,
2833 		    le32toh(rep->TransferCount));
2834 		break;
2835 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2836 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2837 	case MPI2_IOCSTATUS_INVALID_VPID:
2838 	case MPI2_IOCSTATUS_INVALID_FIELD:
2839 	case MPI2_IOCSTATUS_INVALID_STATE:
2840 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2841 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2842 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2843 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2844 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2845 	default:
2846 		mprsas_log_command(cm, MPR_XINFO,
2847 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2848 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2849 		    rep->SCSIStatus, rep->SCSIState,
2850 		    le32toh(rep->TransferCount));
2851 		csio->resid = cm->cm_length;
2852 
2853 		if (scsi_cdb[0] == UNMAP &&
2854 		    target->is_nvme &&
2855 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2856 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2857 		else
2858 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2859 
2860 		break;
2861 	}
2862 
2863 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2864 
2865 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2866 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2867 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2868 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2869 		    "queue\n");
2870 	}
2871 
2872 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2873 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2874 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2875 	}
2876 
2877 	mpr_free_command(sc, cm);
2878 	xpt_done(ccb);
2879 }
2880 
2881 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands issued by
 * mprsas_send_smpcmd().  Translates the firmware's IOCStatus/SASStatus
 * and the SMP response frame result into a CAM CCB status, then tears
 * down the DMA mapping and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Recover the target SAS address (stored little-endian in the
	 * request frame by mprsas_send_smpcmd()) for the log message below.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of an SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2944 
/*
 * Build and dispatch an SMP passthrough request to the given SAS address.
 * The request/response buffers come from the XPT_SMP_IO CCB; they are
 * mapped with a two-element uio (one iovec for the request frame, one for
 * the response) so a single mpr_map_command() call covers both DMAs.
 * Completion is handled by mprsas_smpio_complete().
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	/* Newer CAM: the data layout is encoded in CAM_DATA_MASK. */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request frame, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3175 
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (either the device itself or its parent expander) and hand the request
 * off to mprsas_send_smpcmd().  Fails the CCB with CAM_DEV_NOT_THERE if
 * no usable SAS address can be found.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we'll need to figure out what the
	 * expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe code: look the parent up in the target table. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* Current probe code caches parent info on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3303 #endif //__FreeBSD_version >= 900026
3304 
3305 static void
3306 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3307 {
3308 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3309 	struct mpr_softc *sc;
3310 	struct mpr_command *tm;
3311 	struct mprsas_target *targ;
3312 
3313 	MPR_FUNCTRACE(sassc->sc);
3314 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3315 
3316 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3317 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3318 	sc = sassc->sc;
3319 	tm = mpr_alloc_command(sc);
3320 	if (tm == NULL) {
3321 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3322 		    "mprsas_action_resetdev\n");
3323 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3324 		xpt_done(ccb);
3325 		return;
3326 	}
3327 
3328 	targ = &sassc->targets[ccb->ccb_h.target_id];
3329 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3330 	req->DevHandle = htole16(targ->handle);
3331 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3332 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3333 
3334 	/* SAS Hard Link Reset / SATA Link Reset */
3335 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3336 
3337 	tm->cm_data = NULL;
3338 	tm->cm_desc.HighPriority.RequestFlags =
3339 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3340 	tm->cm_complete = mprsas_resetdev_complete;
3341 	tm->cm_complete_data = ccb;
3342 
3343 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3344 	    __func__, targ->tid);
3345 	tm->cm_targ = targ;
3346 	targ->flags |= MPRSAS_TARGET_INRESET;
3347 
3348 	mpr_map_command(sc, tm);
3349 }
3350 
3351 static void
3352 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3353 {
3354 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3355 	union ccb *ccb;
3356 
3357 	MPR_FUNCTRACE(sc);
3358 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3359 
3360 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3361 	ccb = tm->cm_complete_data;
3362 
3363 	/*
3364 	 * Currently there should be no way we can hit this case.  It only
3365 	 * happens when we have a failure to allocate chain frames, and
3366 	 * task management commands don't have S/G lists.
3367 	 */
3368 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3369 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3370 
3371 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3372 
3373 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3374 		    "handle %#04x! This should not happen!\n", __func__,
3375 		    tm->cm_flags, req->DevHandle);
3376 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3377 		goto bailout;
3378 	}
3379 
3380 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3381 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3382 
3383 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3384 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3385 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3386 		    CAM_LUN_WILDCARD);
3387 	}
3388 	else
3389 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3390 
3391 bailout:
3392 
3393 	mprsas_free_tm(sc, tm);
3394 	xpt_done(ccb);
3395 }
3396 
3397 static void
3398 mprsas_poll(struct cam_sim *sim)
3399 {
3400 	struct mprsas_softc *sassc;
3401 
3402 	sassc = cam_sim_softc(sim);
3403 
3404 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3405 		/* frequent debug messages during a panic just slow
3406 		 * everything down too much.
3407 		 */
3408 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3409 		    __func__);
3410 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3411 	}
3412 
3413 	mpr_intr_locked(sassc->sc);
3414 }
3415 
/*
 * CAM async event callback (registered in mpr_attach_sas()).  Tracks
 * READ CAPACITY(16) advertised-info changes so the driver can record,
 * per LUN, whether the media is formatted for EEDP (protection
 * information), and hooks newly found devices on older FreeBSD versions.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find (or create) the per-LUN record for this path. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an
		 * XPT_DEV_ADVINFO CCB (no SCSI command is issued here).
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3545 
3546 #if (__FreeBSD_version < 901503) || \
3547     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3548 static void
3549 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3550     struct ccb_getdev *cgd)
3551 {
3552 	struct mprsas_softc *sassc = sc->sassc;
3553 	struct ccb_scsiio *csio;
3554 	struct scsi_read_capacity_16 *scsi_cmd;
3555 	struct scsi_read_capacity_eedp *rcap_buf;
3556 	path_id_t pathid;
3557 	target_id_t targetid;
3558 	lun_id_t lunid;
3559 	union ccb *ccb;
3560 	struct cam_path *local_path;
3561 	struct mprsas_target *target;
3562 	struct mprsas_lun *lun;
3563 	uint8_t	found_lun;
3564 	char path_str[64];
3565 
3566 	pathid = cam_sim_path(sassc->sim);
3567 	targetid = xpt_path_target_id(path);
3568 	lunid = xpt_path_lun_id(path);
3569 
3570 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3571 	    "mprsas_check_eedp\n", targetid));
3572 	target = &sassc->targets[targetid];
3573 	if (target->handle == 0x0)
3574 		return;
3575 
3576 	/*
3577 	 * Determine if the device is EEDP capable.
3578 	 *
3579 	 * If this flag is set in the inquiry data, the device supports
3580 	 * protection information, and must support the 16 byte read capacity
3581 	 * command, otherwise continue without sending read cap 16.
3582 	 */
3583 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3584 		return;
3585 
3586 	/*
3587 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3588 	 * the LUN is formatted for EEDP support.
3589 	 */
3590 	ccb = xpt_alloc_ccb_nowait();
3591 	if (ccb == NULL) {
3592 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3593 		    "support.\n");
3594 		return;
3595 	}
3596 
3597 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3598 	    CAM_REQ_CMP) {
3599 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3600 		    "support.\n");
3601 		xpt_free_ccb(ccb);
3602 		return;
3603 	}
3604 
3605 	/*
3606 	 * If LUN is already in list, don't create a new one.
3607 	 */
3608 	found_lun = FALSE;
3609 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3610 		if (lun->lun_id == lunid) {
3611 			found_lun = TRUE;
3612 			break;
3613 		}
3614 	}
3615 	if (!found_lun) {
3616 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3617 		    M_NOWAIT | M_ZERO);
3618 		if (lun == NULL) {
3619 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3620 			    "EEDP support.\n");
3621 			xpt_free_path(local_path);
3622 			xpt_free_ccb(ccb);
3623 			return;
3624 		}
3625 		lun->lun_id = lunid;
3626 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3627 	}
3628 
3629 	xpt_path_string(local_path, path_str, sizeof(path_str));
3630 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3631 	    path_str, target->handle);
3632 
3633 	/*
3634 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3635 	 * mprsas_read_cap_done function will load the read cap info into the
3636 	 * LUN struct.
3637 	 */
3638 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3639 	    M_NOWAIT | M_ZERO);
3640 	if (rcap_buf == NULL) {
3641 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3642 		    "buffer for EEDP support.\n");
3643 		xpt_free_path(ccb->ccb_h.path);
3644 		xpt_free_ccb(ccb);
3645 		return;
3646 	}
3647 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3648 	csio = &ccb->csio;
3649 	csio->ccb_h.func_code = XPT_SCSI_IO;
3650 	csio->ccb_h.flags = CAM_DIR_IN;
3651 	csio->ccb_h.retry_count = 4;
3652 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3653 	csio->ccb_h.timeout = 60000;
3654 	csio->data_ptr = (uint8_t *)rcap_buf;
3655 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3656 	csio->sense_len = MPR_SENSE_LEN;
3657 	csio->cdb_len = sizeof(*scsi_cmd);
3658 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3659 
3660 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3661 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3662 	scsi_cmd->opcode = 0x9E;
3663 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3664 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3665 
3666 	ccb->ccb_h.ppriv_ptr1 = sassc;
3667 	xpt_action(ccb);
3668 }
3669 
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself when the SCSI command was
	 * generated internally by the driver, because an internally issued
	 * CCB never travels back through cam_periph.  This is currently the
	 * only place the driver issues a SCSI command internally; if more
	 * internal commands are added in the future, their completion paths
	 * must release the devq the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	/* Data buffer was allocated by the submitter and holds the READ
	 * CAPACITY(16) response. */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit (bit 0): the LUN was formatted with protection
		 * information, i.e. it supports EEDP. */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path; both were allocated solely for
	 * this internal command. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3738 #endif /* (__FreeBSD_version < 901503) || \
3739           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3740 
3741 void
3742 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3743     struct mprsas_target *target, lun_id_t lun_id)
3744 {
3745 	union ccb *ccb;
3746 	path_id_t path_id;
3747 
3748 	/*
3749 	 * Set the INRESET flag for this target so that no I/O will be sent to
3750 	 * the target until the reset has completed.  If an I/O request does
3751 	 * happen, the devq will be frozen.  The CCB holds the path which is
3752 	 * used to release the devq.  The devq is released and the CCB is freed
3753 	 * when the TM completes.
3754 	 */
3755 	ccb = xpt_alloc_ccb_nowait();
3756 	if (ccb) {
3757 		path_id = cam_sim_path(sc->sassc->sim);
3758 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3759 		    target->tid, lun_id) != CAM_REQ_CMP) {
3760 			xpt_free_ccb(ccb);
3761 		} else {
3762 			tm->cm_ccb = ccb;
3763 			tm->cm_targ = target;
3764 			target->flags |= MPRSAS_TARGET_INRESET;
3765 		}
3766 	}
3767 }
3768 
3769 int
3770 mprsas_startup(struct mpr_softc *sc)
3771 {
3772 	/*
3773 	 * Send the port enable message and set the wait_for_port_enable flag.
3774 	 * This flag helps to keep the simq frozen until all discovery events
3775 	 * are processed.
3776 	 */
3777 	sc->wait_for_port_enable = 1;
3778 	mprsas_send_portenable(sc);
3779 	return (0);
3780 }
3781 
3782 static int
3783 mprsas_send_portenable(struct mpr_softc *sc)
3784 {
3785 	MPI2_PORT_ENABLE_REQUEST *request;
3786 	struct mpr_command *cm;
3787 
3788 	MPR_FUNCTRACE(sc);
3789 
3790 	if ((cm = mpr_alloc_command(sc)) == NULL)
3791 		return (EBUSY);
3792 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3793 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3794 	request->MsgFlags = 0;
3795 	request->VP_ID = 0;
3796 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3797 	cm->cm_complete = mprsas_portenable_complete;
3798 	cm->cm_data = NULL;
3799 	cm->cm_sge = NULL;
3800 
3801 	mpr_map_command(sc, cm);
3802 	mpr_dprint(sc, MPR_XINFO,
3803 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3804 	    cm, cm->cm_req, cm->cm_complete);
3805 	return (0);
3806 }
3807 
3808 static void
3809 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3810 {
3811 	MPI2_PORT_ENABLE_REPLY *reply;
3812 	struct mprsas_softc *sassc;
3813 
3814 	MPR_FUNCTRACE(sc);
3815 	sassc = sc->sassc;
3816 
3817 	/*
3818 	 * Currently there should be no way we can hit this case.  It only
3819 	 * happens when we have a failure to allocate chain frames, and
3820 	 * port enable commands don't have S/G lists.
3821 	 */
3822 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3823 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3824 		    "This should not happen!\n", __func__, cm->cm_flags);
3825 	}
3826 
3827 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3828 	if (reply == NULL)
3829 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3830 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3831 	    MPI2_IOCSTATUS_SUCCESS)
3832 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3833 
3834 	mpr_free_command(sc, cm);
3835 	/*
3836 	 * Done waiting for port enable to complete.  Decrement the refcount.
3837 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3838 	 * take place.
3839 	 */
3840 	sc->wait_for_port_enable = 0;
3841 	sc->port_enable_complete = 1;
3842 	wakeup(&sc->port_enable_complete);
3843 	mprsas_startup_decrement(sassc);
3844 }
3845 
3846 int
3847 mprsas_check_id(struct mprsas_softc *sassc, int id)
3848 {
3849 	struct mpr_softc *sc = sassc->sc;
3850 	char *ids;
3851 	char *name;
3852 
3853 	ids = &sc->exclude_ids[0];
3854 	while((name = strsep(&ids, ",")) != NULL) {
3855 		if (name[0] == '\0')
3856 			continue;
3857 		if (strtol(name, NULL, 0) == (long)id)
3858 			return (1);
3859 	}
3860 
3861 	return (0);
3862 }
3863 
3864 void
3865 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3866 {
3867 	struct mprsas_softc *sassc;
3868 	struct mprsas_lun *lun, *lun_tmp;
3869 	struct mprsas_target *targ;
3870 	int i;
3871 
3872 	sassc = sc->sassc;
3873 	/*
3874 	 * The number of targets is based on IOC Facts, so free all of
3875 	 * the allocated LUNs for each target and then the target buffer
3876 	 * itself.
3877 	 */
3878 	for (i=0; i< maxtargets; i++) {
3879 		targ = &sassc->targets[i];
3880 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3881 			free(lun, M_MPR);
3882 		}
3883 	}
3884 	free(sassc->targets, M_MPR);
3885 
3886 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3887 	    M_MPR, M_WAITOK|M_ZERO);
3888 	if (!sassc->targets) {
3889 		panic("%s failed to alloc targets with error %d\n",
3890 		    __func__, ENOMEM);
3891 	}
3892 }
3893