xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 9e5787d2284e187abb5b654d924394a65772e004)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT3 */
37 
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73 
74 #include <dev/nvme/nvme.h>
75 
76 #include <dev/mpr/mpi/mpi2_type.h>
77 #include <dev/mpr/mpi/mpi2.h>
78 #include <dev/mpr/mpi/mpi2_ioc.h>
79 #include <dev/mpr/mpi/mpi2_sas.h>
80 #include <dev/mpr/mpi/mpi2_pci.h>
81 #include <dev/mpr/mpi/mpi2_cnfg.h>
82 #include <dev/mpr/mpi/mpi2_init.h>
83 #include <dev/mpr/mpi/mpi2_tool.h>
84 #include <dev/mpr/mpr_ioctl.h>
85 #include <dev/mpr/mprvar.h>
86 #include <dev/mpr/mpr_table.h>
87 #include <dev/mpr/mpr_sas.h>
88 
89 #define MPRSAS_DISCOVERY_TIMEOUT	20
90 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
95 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP protection-flag lookup table, indexed by SCSI opcode.  Non-zero
 * entries mark data-transfer commands that need EEDP flags in the SCSI IO
 * request: PRO_R expands to CHECK_REMOVE_OP (reads), PRO_W and PRO_V to
 * INSERT_OP (writes and verify-class commands).  Marked opcodes: 0x28/0x2a/
 * 0x2e/0x2f (READ/WRITE/WRITE&VERIFY/VERIFY 10), 0x41 (WRITE SAME(10)),
 * 0x88/0x8a/0x8e/0x8f (16-byte forms), 0x93 (WRITE SAME(16)), and
 * 0xa8/0xaa/0xae/0xaf (12-byte forms).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, /* 0x20 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x40 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, /* 0x80 */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x90 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, /* 0xa0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0 */
};
116 
117 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
118 
119 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
120 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mprsas_poll(struct cam_sim *sim);
123 static void mprsas_scsiio_timeout(void *data);
124 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
125 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
126 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
127 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
128 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130     struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132     struct cam_path *path, void *arg);
133 static int mprsas_send_portenable(struct mpr_softc *sc);
134 static void mprsas_portenable_complete(struct mpr_softc *sc,
135     struct mpr_command *cm);
136 
137 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
138 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
139     uint64_t sasaddr);
140 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
141 
142 struct mprsas_target *
143 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
144     uint16_t handle)
145 {
146 	struct mprsas_target *target;
147 	int i;
148 
149 	for (i = start; i < sassc->maxtargets; i++) {
150 		target = &sassc->targets[i];
151 		if (target->handle == handle)
152 			return (target);
153 	}
154 
155 	return (NULL);
156 }
157 
/* We need to freeze the simq during attach and diag reset to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end-of-discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
 */
/*
 * Take a discovery reference; the first reference (0 -> 1 transition)
 * freezes the simq and holds off the boot process so no SCSI I/O is
 * attempted before discovery has populated the device handles.  Only
 * meaningful while MPRSAS_IN_STARTUP is set.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
			xpt_hold_boot();
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
182 
183 void
184 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
185 {
186 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
187 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
188 		xpt_release_simq(sassc->sim, 1);
189 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
190 	}
191 }
192 
/*
 * Drop a discovery reference; on the final release (refcount reaches 0),
 * leave startup mode, unfreeze the simq, and let the boot process
 * continue.  Counterpart of mprsas_startup_increment().
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
			xpt_release_boot();
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
213 
/*
 * The firmware requires us to stop sending commands when we're doing task
 * management.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
221 struct mpr_command *
222 mprsas_alloc_tm(struct mpr_softc *sc)
223 {
224 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
225 	struct mpr_command *tm;
226 
227 	MPR_FUNCTRACE(sc);
228 	tm = mpr_alloc_high_priority_command(sc);
229 	if (tm == NULL)
230 		return (NULL);
231 
232 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
233 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
234 	return tm;
235 }
236 
/*
 * Release a task-management command obtained from mprsas_alloc_tm(),
 * undoing the devq freeze associated with the TM (presumably set up by
 * mprsas_prepare_for_tm() -- see the XXX note above mprsas_alloc_tm) and
 * clearing the target's INRESET flag so normal SCSI I/O can resume.
 * Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/* Sentinel prints as -1 via %d below when the TM has no target. */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		/* The path and CCB existed only to carry the devq freeze. */
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
265 
266 void
267 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
268 {
269 	struct mprsas_softc *sassc = sc->sassc;
270 	path_id_t pathid;
271 	target_id_t targetid;
272 	union ccb *ccb;
273 
274 	MPR_FUNCTRACE(sc);
275 	pathid = cam_sim_path(sassc->sim);
276 	if (targ == NULL)
277 		targetid = CAM_TARGET_WILDCARD;
278 	else
279 		targetid = targ - sassc->targets;
280 
281 	/*
282 	 * Allocate a CCB and schedule a rescan.
283 	 */
284 	ccb = xpt_alloc_ccb_nowait();
285 	if (ccb == NULL) {
286 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
287 		return;
288 	}
289 
290 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
291 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
293 		xpt_free_ccb(ccb);
294 		return;
295 	}
296 
297 	if (targetid == CAM_TARGET_WILDCARD)
298 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
299 	else
300 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
301 
302 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
303 	xpt_rescan(ccb);
304 }
305 
/*
 * Debug logging helper: prefix a printf-style message with the command's
 * CAM path string (plus decoded CDB and transfer length for SCSI I/O), or
 * with a "noperiph" sim/bus/target/lun tuple when no CCB is attached, and
 * the request SMID.  Emits nothing unless 'level' is enabled in the
 * adapter's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-buffer sbuf: output beyond sizeof(str) is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
350 
351 static void
352 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
353 {
354 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
355 	struct mprsas_target *targ;
356 	uint16_t handle;
357 
358 	MPR_FUNCTRACE(sc);
359 
360 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
361 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
362 	targ = tm->cm_targ;
363 
364 	if (reply == NULL) {
365 		/* XXX retry the remove after the diag reset completes? */
366 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
367 		    "0x%04x\n", __func__, handle);
368 		mprsas_free_tm(sc, tm);
369 		return;
370 	}
371 
372 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373 	    MPI2_IOCSTATUS_SUCCESS) {
374 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
375 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
376 	}
377 
378 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
379 	    le32toh(reply->TerminationCount));
380 	mpr_free_reply(sc, tm->cm_reply_data);
381 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382 
383 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
384 	    targ->tid, handle);
385 
386 	/*
387 	 * Don't clear target if remove fails because things will get confusing.
388 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 	 * this target id if possible, and so we can assign the same target id
390 	 * to this device if it comes back in the future.
391 	 */
392 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
393 	    MPI2_IOCSTATUS_SUCCESS) {
394 		targ = tm->cm_targ;
395 		targ->handle = 0x0;
396 		targ->encl_handle = 0x0;
397 		targ->encl_level_valid = 0x0;
398 		targ->encl_level = 0x0;
399 		targ->connector_name[0] = ' ';
400 		targ->connector_name[1] = ' ';
401 		targ->connector_name[2] = ' ';
402 		targ->connector_name[3] = ' ';
403 		targ->encl_slot = 0x0;
404 		targ->exp_dev_handle = 0x0;
405 		targ->phy_num = 0x0;
406 		targ->linkrate = 0x0;
407 		targ->devinfo = 0x0;
408 		targ->flags = 0x0;
409 		targ->scsi_req_desc_type = 0;
410 	}
411 
412 	mprsas_free_tm(sc, tm);
413 }
414 
415 
/*
 * There is no need to send MPI2_SAS_OP_REMOVE_DEVICE for a volume removal;
 * aside from that, volume deletion proceeds the same as bare-drive removal.
 */
420 void
421 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
422 {
423 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 	struct mpr_softc *sc;
425 	struct mpr_command *cm;
426 	struct mprsas_target *targ = NULL;
427 
428 	MPR_FUNCTRACE(sassc->sc);
429 	sc = sassc->sc;
430 
431 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
432 	if (targ == NULL) {
433 		/* FIXME: what is the action? */
434 		/* We don't know about this device? */
435 		mpr_dprint(sc, MPR_ERROR,
436 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
437 		return;
438 	}
439 
440 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
441 
442 	cm = mprsas_alloc_tm(sc);
443 	if (cm == NULL) {
444 		mpr_dprint(sc, MPR_ERROR,
445 		    "%s: command alloc failure\n", __func__);
446 		return;
447 	}
448 
449 	mprsas_rescan_target(sc, targ);
450 
451 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
452 	req->DevHandle = targ->handle;
453 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
454 
455 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
456 		/* SAS Hard Link Reset / SATA Link Reset */
457 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
458 	} else {
459 		/* PCIe Protocol Level Reset*/
460 		req->MsgFlags =
461 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
462 	}
463 
464 	cm->cm_targ = targ;
465 	cm->cm_data = NULL;
466 	cm->cm_complete = mprsas_remove_volume;
467 	cm->cm_complete_data = (void *)(uintptr_t)handle;
468 
469 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
470 	    __func__, targ->tid);
471 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
472 
473 	mpr_map_command(sc, cm);
474 }
475 
476 /*
477  * The firmware performs debounce on the link to avoid transient link errors
478  * and false removals.  When it does decide that link has been lost and a
479  * device needs to go away, it expects that the host will perform a target reset
480  * and then an op remove.  The reset has the side-effect of aborting any
481  * outstanding requests for the device, which is required for the op-remove to
482  * succeed.  It's not clear if the host should check for the device coming back
483  * alive after the reset.
484  */
/*
 * Kick off removal of the device with firmware handle 'handle': mark the
 * target in-removal, rescan it, and send a target-reset TM (which aborts
 * outstanding I/O).  The completion handler, mprsas_remove_device(),
 * issues the follow-up SAS_OP_REMOVE_DEVICE.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	/* TM frames come from the high-priority pool. */
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	/* Let CAM re-evaluate the target while removal proceeds. */
	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Stash the raw handle for the completion handler. */
	tm->cm_targ = targ;
	tm->cm_data = NULL;
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
535 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  Reuses the TM frame to build the
 * SAS_OP_REMOVE_DEVICE IO-unit control request, sending it immediately if
 * the target has no outstanding commands, or parking it in
 * targ->pending_remove_tm to be issued once they drain.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The raw device handle was stashed in cm_complete_data. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out, and that path will kick off
	 * the pending remove if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
612 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mprsas_remove_device().  On success, clears the target entry (leaving
 * devname/sasaddr for target-ID reuse) and frees its LUN list.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* The raw device handle was stashed in cm_complete_data. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Release the per-LUN state hanging off this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
712 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
713 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
714 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
715 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
716 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
717 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
718 		}
719 	}
720 
721 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
722 	    &sc->sassc->mprsas_eh);
723 
724 	return (0);
725 }
726 
/*
 * Allocate and initialize the SAS/CAM layer for one adapter: the
 * mprsas_softc, the target array, the SIM and its devq, the event-handling
 * taskqueue, and (best-effort) the async-event registration used for EEDP
 * detection.  Returns 0 on success or an errno; on failure,
 * mpr_detach_sas() tears down whatever was set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the SIM queue to the request pool, less high-priority reqs. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
845 
/*
 * Tear down everything mpr_attach_sas() created: event registration, the
 * event taskqueue, the async handler, the SIM/bus, the devq, per-target
 * LUN lists, and the sassc itself.  Also serves as the error path of
 * mpr_attach_sas(), so it tolerates partially-initialized state.
 * Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Drop any discovery references still held. */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release any simq freeze left over from startup. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN structures still hanging off the target entries. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
908 
909 void
910 mprsas_discovery_end(struct mprsas_softc *sassc)
911 {
912 	struct mpr_softc *sc = sassc->sc;
913 
914 	MPR_FUNCTRACE(sc);
915 
916 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
917 		callout_stop(&sassc->discovery_callout);
918 
919 	/*
920 	 * After discovery has completed, check the mapping table for any
921 	 * missing devices and update their missing counts. Only do this once
922 	 * whenever the driver is initialized so that missing counts aren't
923 	 * updated unnecessarily. Note that just because discovery has
924 	 * completed doesn't mean that events have been processed yet. The
925 	 * check_devices function is a callout timer that checks if ALL devices
926 	 * are missing. If so, it will wait a little longer for events to
927 	 * complete and keep resetting itself until some device in the mapping
928 	 * table is not missing, meaning that event processing has started.
929 	 */
930 	if (sc->track_mapping_events) {
931 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
932 		    "completed. Check for missing devices in the mapping "
933 		    "table.\n");
934 		callout_reset(&sc->device_check_callout,
935 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
936 		    sc);
937 	}
938 }
939 
/*
 * CAM SIM action entry point for the mpr(4) SAS SIM.  Dispatches on the
 * CCB function code; must be called with the softc mutex held.  CCBs that
 * are handled asynchronously (SCSI I/O, SMP I/O, device reset) return
 * early and are completed later; everything else is completed inline via
 * xpt_done() at the bottom.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report static HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/*
		 * Report per-target transport settings: negotiated SAS link
		 * rate (translated from the firmware link-rate code) and
		 * tagged-queueing support.
		 */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device is present at this target ID */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link-rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown/unnegotiated rate: mark speed invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the reset-device path. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by this SIM; report success to CAM. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously when the firmware replies. */
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Completed asynchronously when the firmware replies. */
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1067 
1068 static void
1069 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1070     target_id_t target_id, lun_id_t lun_id)
1071 {
1072 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1073 	struct cam_path *path;
1074 
1075 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1076 	    ac_code, target_id, (uintmax_t)lun_id);
1077 
1078 	if (xpt_create_path(&path, NULL,
1079 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1080 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1081 		    "notification\n");
1082 		return;
1083 	}
1084 
1085 	xpt_async(ac_code, path, NULL);
1086 	xpt_free_path(path);
1087 }
1088 
/*
 * Force-complete every non-free command after a diag reset.  The hardware
 * has dropped everything it had in flight, so each command is completed in
 * software with a NULL reply, either through its completion callback or by
 * waking its sleeper.  Must be called with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Commands flagged with a SATA ID timeout still own their
		 * data buffer; release it now that the reset has flushed
		 * the request.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/*
		 * NOTE(review): presumably polled submitters spin on
		 * MPR_CM_FLAGS_COMPLETE; setting it lets them observe
		 * the forced completion — confirm against mpr.c.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding at the firmware anymore. */
	sc->io_cmds_active = 0;
}
1143 
1144 void
1145 mprsas_handle_reinit(struct mpr_softc *sc)
1146 {
1147 	int i;
1148 
1149 	/* Go back into startup mode and freeze the simq, so that CAM
1150 	 * doesn't send any commands until after we've rediscovered all
1151 	 * targets and found the proper device handles for them.
1152 	 *
1153 	 * After the reset, portenable will trigger discovery, and after all
1154 	 * discovery-related activities have finished, the simq will be
1155 	 * released.
1156 	 */
1157 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1158 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1159 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1160 	mprsas_startup_increment(sc->sassc);
1161 
1162 	/* notify CAM of a bus reset */
1163 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1164 	    CAM_LUN_WILDCARD);
1165 
1166 	/* complete and cleanup after all outstanding commands */
1167 	mprsas_complete_all_commands(sc);
1168 
1169 	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1170 	    __func__, sc->sassc->startup_refcount);
1171 
1172 	/* zero all the target handles, since they may change after the
1173 	 * reset, and we have to rediscover all the targets and use the new
1174 	 * handles.
1175 	 */
1176 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1177 		if (sc->sassc->targets[i].outstanding != 0)
1178 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1179 			    i, sc->sassc->targets[i].outstanding);
1180 		sc->sassc->targets[i].handle = 0x0;
1181 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1182 		sc->sassc->targets[i].outstanding = 0;
1183 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1184 	}
1185 }
1186 static void
1187 mprsas_tm_timeout(void *data)
1188 {
1189 	struct mpr_command *tm = data;
1190 	struct mpr_softc *sc = tm->cm_sc;
1191 
1192 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1193 
1194 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1195 	    "out\n", tm);
1196 
1197 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1198 	    ("command not inqueue\n"));
1199 
1200 	tm->cm_state = MPR_CM_STATE_BUSY;
1201 	mpr_reinit(sc);
1202 }
1203 
/*
 * Completion handler for a LUN-reset task-management request.  If the LUN
 * has no commands left, recovery for it is finished and we either move on
 * to the next timed-out command on the target or free the TM.  If commands
 * remain, the LUN reset effectively failed and we escalate to a target
 * reset (or a controller reinit for NVMe devices without custom TM
 * handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout watchdog is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Let CAM know a BDR-equivalent was delivered to this LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse this TM frame to abort the next command. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1304 
/*
 * Completion handler for a target-reset task-management request.  If the
 * target has no outstanding commands, recovery is finished; otherwise the
 * reset effectively failed and we escalate to a full controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout watchdog is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		/* Notify CAM that a BDR was sent to this target. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1381 
1382 #define MPR_RESET_TIMEOUT 30
1383 
/*
 * Build and issue a LUN-reset or target-reset task-management request on
 * the TM command 'tm'.  'type' selects which reset; any other type is
 * rejected.  Arms a timeout callout that escalates to a controller reinit
 * if the TM never completes.  Returns 0 on successful submission,
 * non-zero (or -1 for invalid input) on failure.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	/* No device handle means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe devices may specify their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Extra locating info for enclosure-aware setups, if available. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Watchdog: if the TM never completes, reinit the controller. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1458 
1459 
/*
 * Completion handler for an abort-task task-management request.  Walks
 * the recovery ladder: when no timed-out commands remain, recovery is
 * done; when the abort succeeded but more timed-out commands exist, the
 * next abort is sent; when the aborted command is still at the head of
 * the timed-out list, the abort failed and we escalate to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout watchdog is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1540 
1541 #define MPR_ABORT_TIMEOUT 5
1542 
/*
 * Build and issue an ABORT_TASK task-management request on 'tm' to abort
 * the timed-out command 'cm'.  Arms a timeout callout that escalates to a
 * controller reinit if the abort itself never completes.  Returns 0 on
 * successful submission, non-zero (or -1 for a missing handle) otherwise.
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	/* No device handle means the device is gone; cannot abort. */
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies which in-flight request to abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* NVMe devices (without custom TM handling) use their own timeout. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	/* Watchdog: if the abort never completes, reinit the controller. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1596 
/*
 * Callout handler for a SCSI I/O that exceeded its CAM timeout.  Marks
 * the command timed out, queues it on the target's timed-out list, and
 * starts recovery by sending an abort — unless a TM for this target is
 * already in flight, in which case the command just waits its turn.
 * Called with the softc mutex held (callout was armed with the lock).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data holds the submission time; compute elapsed time. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1685 
1686 /**
1687  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1688  *			     to SCSI Unmap.
1689  * Return 0 - for success,
1690  *	  1 - to immediately return back the command with success status to CAM
1691  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1692  *			   to FW without any translation.
1693  */
1694 static int
1695 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1696     union ccb *ccb, struct mprsas_target *targ)
1697 {
1698 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1699 	struct ccb_scsiio *csio;
1700 	struct unmap_parm_list *plist;
1701 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1702 	struct nvme_command *c;
1703 	int i, res;
1704 	uint16_t ndesc, list_len, data_length;
1705 	struct mpr_prp_page *prp_page_info;
1706 	uint64_t nvme_dsm_ranges_dma_handle;
1707 
1708 	csio = &ccb->csio;
1709 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1710 	if (!list_len) {
1711 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1712 		return -EINVAL;
1713 	}
1714 
1715 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1716 	if (!plist) {
1717 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1718 		    "save UNMAP data\n");
1719 		return -ENOMEM;
1720 	}
1721 
1722 	/* Copy SCSI unmap data to a local buffer */
1723 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1724 
1725 	/* return back the unmap command to CAM with success status,
1726 	 * if number of descripts is zero.
1727 	 */
1728 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1729 	if (!ndesc) {
1730 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1731 		    "UNMAP cmd is Zero\n");
1732 		res = 1;
1733 		goto out;
1734 	}
1735 
1736 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1737 	if (data_length > targ->MDTS) {
1738 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1739 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1740 		res = -EINVAL;
1741 		goto out;
1742 	}
1743 
1744 	prp_page_info = mpr_alloc_prp_page(sc);
1745 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1746 	    "UNMAP command.\n", __func__));
1747 
1748 	/*
1749 	 * Insert the allocated PRP page into the command's PRP page list. This
1750 	 * will be freed when the command is freed.
1751 	 */
1752 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1753 
1754 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1755 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1756 
1757 	bzero(nvme_dsm_ranges, data_length);
1758 
1759 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1760 	 * for each descriptors contained in SCSI UNMAP data.
1761 	 */
1762 	for (i = 0; i < ndesc; i++) {
1763 		nvme_dsm_ranges[i].length =
1764 		    htole32(be32toh(plist->desc[i].nlb));
1765 		nvme_dsm_ranges[i].starting_lba =
1766 		    htole64(be64toh(plist->desc[i].slba));
1767 		nvme_dsm_ranges[i].attributes = 0;
1768 	}
1769 
1770 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1771 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1772 	bzero(req, sizeof(*req));
1773 	req->DevHandle = htole16(targ->handle);
1774 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1775 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1776 	req->ErrorResponseBaseAddress.High =
1777 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1778 	req->ErrorResponseBaseAddress.Low =
1779 	    htole32(cm->cm_sense_busaddr);
1780 	req->ErrorResponseAllocationLength =
1781 	    htole16(sizeof(struct nvme_completion));
1782 	req->EncapsulatedCommandLength =
1783 	    htole16(sizeof(struct nvme_command));
1784 	req->DataLength = htole32(data_length);
1785 
1786 	/* Build NVMe DSM command */
1787 	c = (struct nvme_command *) req->NVMe_Command;
1788 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1789 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1790 	c->cdw10 = htole32(ndesc - 1);
1791 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1792 
1793 	cm->cm_length = data_length;
1794 	cm->cm_data = NULL;
1795 
1796 	cm->cm_complete = mprsas_scsiio_complete;
1797 	cm->cm_complete_data = ccb;
1798 	cm->cm_targ = targ;
1799 	cm->cm_lun = csio->ccb_h.target_lun;
1800 	cm->cm_ccb = ccb;
1801 
1802 	cm->cm_desc.Default.RequestFlags =
1803 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1804 
1805 	csio->ccb_h.qos.sim_data = sbinuptime();
1806 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1807 	    mprsas_scsiio_timeout, cm, 0);
1808 
1809 	targ->issued++;
1810 	targ->outstanding++;
1811 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1812 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1813 
1814 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1815 	    __func__, cm, ccb, targ->outstanding);
1816 
1817 	mpr_build_nvme_prp(sc, cm, req,
1818 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1819 	mpr_map_command(sc, cm);
1820 
1821 out:
1822 	free(plist, M_MPR);
1823 	return 0;
1824 }
1825 
1826 static void
1827 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1828 {
1829 	MPI2_SCSI_IO_REQUEST *req;
1830 	struct ccb_scsiio *csio;
1831 	struct mpr_softc *sc;
1832 	struct mprsas_target *targ;
1833 	struct mprsas_lun *lun;
1834 	struct mpr_command *cm;
1835 	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1836 	uint16_t eedp_flags;
1837 	uint32_t mpi_control;
1838 	int rc;
1839 
1840 	sc = sassc->sc;
1841 	MPR_FUNCTRACE(sc);
1842 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1843 
1844 	csio = &ccb->csio;
1845 	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1846 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1847 	     csio->ccb_h.target_id));
1848 	targ = &sassc->targets[csio->ccb_h.target_id];
1849 	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1850 	if (targ->handle == 0x0) {
1851 		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1852 		    __func__, csio->ccb_h.target_id);
1853 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1854 		xpt_done(ccb);
1855 		return;
1856 	}
1857 	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1858 		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1859 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1860 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1861 		xpt_done(ccb);
1862 		return;
1863 	}
1864 	/*
1865 	 * Sometimes, it is possible to get a command that is not "In
1866 	 * Progress" and was actually aborted by the upper layer.  Check for
1867 	 * this here and complete the command without error.
1868 	 */
1869 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1870 		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1871 		    "target %u\n", __func__, csio->ccb_h.target_id);
1872 		xpt_done(ccb);
1873 		return;
1874 	}
1875 	/*
1876 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1877 	 * that the volume has timed out.  We want volumes to be enumerated
1878 	 * until they are deleted/removed, not just failed. In either event,
1879 	 * we're removing the target due to a firmware event telling us
1880 	 * the device is now gone (as opposed to some transient event). Since
1881 	 * we're opting to remove failed devices from the OS's view, we need
1882 	 * to propagate that status up the stack.
1883 	 */
1884 	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1885 		if (targ->devinfo == 0)
1886 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1887 		else
1888 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1889 		xpt_done(ccb);
1890 		return;
1891 	}
1892 
1893 	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1894 		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1895 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1896 		xpt_done(ccb);
1897 		return;
1898 	}
1899 
1900 	/*
1901 	 * If target has a reset in progress, freeze the devq and return.  The
1902 	 * devq will be released when the TM reset is finished.
1903 	 */
1904 	if (targ->flags & MPRSAS_TARGET_INRESET) {
1905 		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1906 		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1907 		    __func__, targ->tid);
1908 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1909 		xpt_done(ccb);
1910 		return;
1911 	}
1912 
1913 	cm = mpr_alloc_command(sc);
1914 	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1915 		if (cm != NULL) {
1916 			mpr_free_command(sc, cm);
1917 		}
1918 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1919 			xpt_freeze_simq(sassc->sim, 1);
1920 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1921 		}
1922 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1923 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1924 		xpt_done(ccb);
1925 		return;
1926 	}
1927 
1928 	/* For NVME device's issue UNMAP command directly to NVME drives by
1929 	 * constructing equivalent native NVMe DataSetManagement command.
1930 	 */
1931 	scsi_opcode = scsiio_cdb_ptr(csio)[0];
1932 	if (scsi_opcode == UNMAP &&
1933 	    targ->is_nvme &&
1934 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
1935 		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
1936 		if (rc == 1) { /* return command to CAM with success status */
1937 			mpr_free_command(sc, cm);
1938 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1939 			xpt_done(ccb);
1940 			return;
1941 		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
1942 			return;
1943 	}
1944 
1945 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1946 	bzero(req, sizeof(*req));
1947 	req->DevHandle = htole16(targ->handle);
1948 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1949 	req->MsgFlags = 0;
1950 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1951 	req->SenseBufferLength = MPR_SENSE_LEN;
1952 	req->SGLFlags = 0;
1953 	req->ChainOffset = 0;
1954 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1955 	req->SGLOffset1= 0;
1956 	req->SGLOffset2= 0;
1957 	req->SGLOffset3= 0;
1958 	req->SkipCount = 0;
1959 	req->DataLength = htole32(csio->dxfer_len);
1960 	req->BidirectionalDataLength = 0;
1961 	req->IoFlags = htole16(csio->cdb_len);
1962 	req->EEDPFlags = 0;
1963 
1964 	/* Note: BiDirectional transfers are not supported */
1965 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1966 	case CAM_DIR_IN:
1967 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1968 		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1969 		break;
1970 	case CAM_DIR_OUT:
1971 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1972 		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1973 		break;
1974 	case CAM_DIR_NONE:
1975 	default:
1976 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1977 		break;
1978 	}
1979 
1980 	if (csio->cdb_len == 32)
1981 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1982 	/*
1983 	 * It looks like the hardware doesn't require an explicit tag
1984 	 * number for each transaction.  SAM Task Management not supported
1985 	 * at the moment.
1986 	 */
1987 	switch (csio->tag_action) {
1988 	case MSG_HEAD_OF_Q_TAG:
1989 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1990 		break;
1991 	case MSG_ORDERED_Q_TAG:
1992 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1993 		break;
1994 	case MSG_ACA_TASK:
1995 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1996 		break;
1997 	case CAM_TAG_ACTION_NONE:
1998 	case MSG_SIMPLE_Q_TAG:
1999 	default:
2000 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2001 		break;
2002 	}
2003 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2004 	req->Control = htole32(mpi_control);
2005 
2006 	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2007 		mpr_free_command(sc, cm);
2008 		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2009 		xpt_done(ccb);
2010 		return;
2011 	}
2012 
2013 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2014 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2015 	else {
2016 		KASSERT(csio->cdb_len <= IOCDBLEN,
2017 		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2018 		    "is not set", csio->cdb_len));
2019 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2020 	}
2021 	req->IoFlags = htole16(csio->cdb_len);
2022 
2023 	/*
2024 	 * Check if EEDP is supported and enabled.  If it is then check if the
2025 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
2026 	 * is formatted for EEDP support.  If all of this is true, set CDB up
2027 	 * for EEDP transfer.
2028 	 */
2029 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2030 	if (sc->eedp_enabled && eedp_flags) {
2031 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
2032 			if (lun->lun_id == csio->ccb_h.target_lun) {
2033 				break;
2034 			}
2035 		}
2036 
2037 		if ((lun != NULL) && (lun->eedp_formatted)) {
2038 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
2039 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2040 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2041 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2042 			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2043 				eedp_flags |=
2044 				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2045 			}
2046 			req->EEDPFlags = htole16(eedp_flags);
2047 
2048 			/*
2049 			 * If CDB less than 32, fill in Primary Ref Tag with
2050 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
2051 			 * already there.  Also, set protection bit.  FreeBSD
2052 			 * currently does not support CDBs bigger than 16, but
2053 			 * the code doesn't hurt, and will be here for the
2054 			 * future.
2055 			 */
2056 			if (csio->cdb_len != 32) {
2057 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2058 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2059 				    PrimaryReferenceTag;
2060 				for (i = 0; i < 4; i++) {
2061 					*ref_tag_addr =
2062 					    req->CDB.CDB32[lba_byte + i];
2063 					ref_tag_addr++;
2064 				}
2065 				req->CDB.EEDP32.PrimaryReferenceTag =
2066 				    htole32(req->
2067 				    CDB.EEDP32.PrimaryReferenceTag);
2068 				req->CDB.EEDP32.PrimaryApplicationTagMask =
2069 				    0xFFFF;
2070 				req->CDB.CDB32[1] =
2071 				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
2072 			} else {
2073 				eedp_flags |=
2074 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2075 				req->EEDPFlags = htole16(eedp_flags);
2076 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2077 				    0x1F) | 0x20;
2078 			}
2079 		}
2080 	}
2081 
2082 	cm->cm_length = csio->dxfer_len;
2083 	if (cm->cm_length != 0) {
2084 		cm->cm_data = ccb;
2085 		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2086 	} else {
2087 		cm->cm_data = NULL;
2088 	}
2089 	cm->cm_sge = &req->SGL;
2090 	cm->cm_sglsize = (32 - 24) * 4;
2091 	cm->cm_complete = mprsas_scsiio_complete;
2092 	cm->cm_complete_data = ccb;
2093 	cm->cm_targ = targ;
2094 	cm->cm_lun = csio->ccb_h.target_lun;
2095 	cm->cm_ccb = ccb;
2096 	/*
2097 	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2098 	 * and set descriptor type.
2099 	 */
2100 	if (targ->scsi_req_desc_type ==
2101 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2102 		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2103 		cm->cm_desc.FastPathSCSIIO.RequestFlags =
2104 		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2105 		if (!sc->atomic_desc_capable) {
2106 			cm->cm_desc.FastPathSCSIIO.DevHandle =
2107 			    htole16(targ->handle);
2108 		}
2109 	} else {
2110 		cm->cm_desc.SCSIIO.RequestFlags =
2111 		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2112 		if (!sc->atomic_desc_capable)
2113 			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
2114 	}
2115 
2116 	csio->ccb_h.qos.sim_data = sbinuptime();
2117 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2118 	    mprsas_scsiio_timeout, cm, 0);
2119 
2120 	targ->issued++;
2121 	targ->outstanding++;
2122 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2123 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2124 
2125 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2126 	    __func__, cm, ccb, targ->outstanding);
2127 
2128 	mpr_map_command(sc, cm);
2129 	return;
2130 }
2131 
2132 /**
2133  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2134  */
2135 static void
2136 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2137     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2138 {
2139 	u32 response_info;
2140 	u8 *response_bytes;
2141 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2142 	    MPI2_IOCSTATUS_MASK;
2143 	u8 scsi_state = mpi_reply->SCSIState;
2144 	u8 scsi_status = mpi_reply->SCSIStatus;
2145 	char *desc_ioc_state = NULL;
2146 	char *desc_scsi_status = NULL;
2147 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2148 
2149 	if (log_info == 0x31170000)
2150 		return;
2151 
2152 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2153 	     ioc_status);
2154 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2155 	    scsi_status);
2156 
2157 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2158 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2159 	if (targ->encl_level_valid) {
2160 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2161 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2162 		    targ->connector_name);
2163 	}
2164 
2165 	/*
2166 	 * We can add more detail about underflow data here
2167 	 * TO-DO
2168 	 */
2169 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2170 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2171 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2172 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2173 
2174 	if (sc->mpr_debug & MPR_XINFO &&
2175 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2176 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2177 		scsi_sense_print(csio);
2178 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2179 	}
2180 
2181 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2182 		response_info = le32toh(mpi_reply->ResponseInfo);
2183 		response_bytes = (u8 *)&response_info;
2184 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2185 		    response_bytes[0],
2186 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2187 		    response_bytes[0]));
2188 	}
2189 }
2190 
/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to
 * equivalent SCSI error status (status byte plus sense key / ASC / ASCQ),
 * and store fixed-format autosense data into the command's CCB.
 *
 * @param nvme_status	raw NVMe completion status word (SCT + SC fields)
 * @param cm		command whose cm_complete_data CCB receives the
 *			translated sense data
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any status not matched below:
	 * CHECK CONDITION with ILLEGAL REQUEST and no further qualifiers.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		/* Generic command status: transport/format/abort conditions. */
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort flavors map to the same TASK ABORTED result. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		/* Command-specific status codes. */
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		/* Media and data integrity errors (incl. T10 PI checks). */
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * Report how much of the caller's sense buffer remains unused
	 * after accounting for a full fixed-format sense structure.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Build fixed-format sense data and mark the autosense valid. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2367 
2368 /** mprsas_complete_nvme_unmap
2369  *
2370  * Complete native NVMe command issued using NVMe Encapsulated
2371  * Request Message.
2372  */
2373 static u8
2374 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2375 {
2376 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2377 	struct nvme_completion *nvme_completion = NULL;
2378 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2379 
2380 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2381 	if (le16toh(mpi_reply->ErrorResponseCount)){
2382 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2383 		scsi_status = mprsas_nvme_trans_status_code(
2384 		    nvme_completion->status, cm);
2385 	}
2386 	return scsi_status;
2387 }
2388 
2389 static void
2390 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2391 {
2392 	MPI2_SCSI_IO_REPLY *rep;
2393 	union ccb *ccb;
2394 	struct ccb_scsiio *csio;
2395 	struct mprsas_softc *sassc;
2396 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2397 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2398 	int dir = 0, i;
2399 	u16 alloc_len;
2400 	struct mprsas_target *target;
2401 	target_id_t target_id;
2402 
2403 	MPR_FUNCTRACE(sc);
2404 	mpr_dprint(sc, MPR_TRACE,
2405 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2406 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2407 	    cm->cm_targ->outstanding);
2408 
2409 	callout_stop(&cm->cm_callout);
2410 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2411 
2412 	sassc = sc->sassc;
2413 	ccb = cm->cm_complete_data;
2414 	csio = &ccb->csio;
2415 	target_id = csio->ccb_h.target_id;
2416 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2417 	/*
2418 	 * XXX KDM if the chain allocation fails, does it matter if we do
2419 	 * the sync and unload here?  It is simpler to do it in every case,
2420 	 * assuming it doesn't cause problems.
2421 	 */
2422 	if (cm->cm_data != NULL) {
2423 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2424 			dir = BUS_DMASYNC_POSTREAD;
2425 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2426 			dir = BUS_DMASYNC_POSTWRITE;
2427 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2428 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2429 	}
2430 
2431 	cm->cm_targ->completed++;
2432 	cm->cm_targ->outstanding--;
2433 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2434 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2435 
2436 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2437 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2438 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2439 		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2440 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2441 		if (cm->cm_reply != NULL)
2442 			mprsas_log_command(cm, MPR_RECOVERY,
2443 			    "completed timedout cm %p ccb %p during recovery "
2444 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2445 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2446 			    rep->SCSIState, le32toh(rep->TransferCount));
2447 		else
2448 			mprsas_log_command(cm, MPR_RECOVERY,
2449 			    "completed timedout cm %p ccb %p during recovery\n",
2450 			    cm, cm->cm_ccb);
2451 	} else if (cm->cm_targ->tm != NULL) {
2452 		if (cm->cm_reply != NULL)
2453 			mprsas_log_command(cm, MPR_RECOVERY,
2454 			    "completed cm %p ccb %p during recovery "
2455 			    "ioc %x scsi %x state %x xfer %u\n",
2456 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2457 			    rep->SCSIStatus, rep->SCSIState,
2458 			    le32toh(rep->TransferCount));
2459 		else
2460 			mprsas_log_command(cm, MPR_RECOVERY,
2461 			    "completed cm %p ccb %p during recovery\n",
2462 			    cm, cm->cm_ccb);
2463 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2464 		mprsas_log_command(cm, MPR_RECOVERY,
2465 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2466 	}
2467 
2468 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2469 		/*
2470 		 * We ran into an error after we tried to map the command,
2471 		 * so we're getting a callback without queueing the command
2472 		 * to the hardware.  So we set the status here, and it will
2473 		 * be retained below.  We'll go through the "fast path",
2474 		 * because there can be no reply when we haven't actually
2475 		 * gone out to the hardware.
2476 		 */
2477 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2478 
2479 		/*
2480 		 * Currently the only error included in the mask is
2481 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2482 		 * chain frames.  We need to freeze the queue until we get
2483 		 * a command that completed without this error, which will
2484 		 * hopefully have some chain frames attached that we can
2485 		 * use.  If we wanted to get smarter about it, we would
2486 		 * only unfreeze the queue in this condition when we're
2487 		 * sure that we're getting some chain frames back.  That's
2488 		 * probably unnecessary.
2489 		 */
2490 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2491 			xpt_freeze_simq(sassc->sim, 1);
2492 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2493 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2494 			    "freezing SIM queue\n");
2495 		}
2496 	}
2497 
2498 	/*
2499 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2500 	 * flag, and use it in a few places in the rest of this function for
2501 	 * convenience. Use the macro if available.
2502 	 */
2503 	scsi_cdb = scsiio_cdb_ptr(csio);
2504 
2505 	/*
2506 	 * If this is a Start Stop Unit command and it was issued by the driver
2507 	 * during shutdown, decrement the refcount to account for all of the
2508 	 * commands that were sent.  All SSU commands should be completed before
2509 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2510 	 * is TRUE.
2511 	 */
2512 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2513 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2514 		sc->SSU_refcount--;
2515 	}
2516 
2517 	/* Take the fast path to completion */
2518 	if (cm->cm_reply == NULL) {
2519 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2520 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2521 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2522 			else {
2523 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2524 				csio->scsi_status = SCSI_STATUS_OK;
2525 			}
2526 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2527 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2528 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2529 				mpr_dprint(sc, MPR_XINFO,
2530 				    "Unfreezing SIM queue\n");
2531 			}
2532 		}
2533 
2534 		/*
2535 		 * There are two scenarios where the status won't be
2536 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2537 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2538 		 */
2539 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2540 			/*
2541 			 * Freeze the dev queue so that commands are
2542 			 * executed in the correct order after error
2543 			 * recovery.
2544 			 */
2545 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2546 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2547 		}
2548 		mpr_free_command(sc, cm);
2549 		xpt_done(ccb);
2550 		return;
2551 	}
2552 
2553 	target = &sassc->targets[target_id];
2554 	if (scsi_cdb[0] == UNMAP &&
2555 	    target->is_nvme &&
2556 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2557 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2558 		csio->scsi_status = rep->SCSIStatus;
2559 	}
2560 
2561 	mprsas_log_command(cm, MPR_XINFO,
2562 	    "ioc %x scsi %x state %x xfer %u\n",
2563 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2564 	    le32toh(rep->TransferCount));
2565 
2566 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2567 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2568 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2569 		/* FALLTHROUGH */
2570 	case MPI2_IOCSTATUS_SUCCESS:
2571 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2572 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2573 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2574 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2575 
2576 		/* Completion failed at the transport level. */
2577 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2578 		    MPI2_SCSI_STATE_TERMINATED)) {
2579 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2580 			break;
2581 		}
2582 
2583 		/* In a modern packetized environment, an autosense failure
2584 		 * implies that there's not much else that can be done to
2585 		 * recover the command.
2586 		 */
2587 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2588 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2589 			break;
2590 		}
2591 
2592 		/*
2593 		 * CAM doesn't care about SAS Response Info data, but if this is
2594 		 * the state check if TLR should be done.  If not, clear the
2595 		 * TLR_bits for the target.
2596 		 */
2597 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2598 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2599 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2600 			sc->mapping_table[target_id].TLR_bits =
2601 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2602 		}
2603 
2604 		/*
2605 		 * Intentionally override the normal SCSI status reporting
2606 		 * for these two cases.  These are likely to happen in a
2607 		 * multi-initiator environment, and we want to make sure that
2608 		 * CAM retries these commands rather than fail them.
2609 		 */
2610 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2611 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2612 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2613 			break;
2614 		}
2615 
2616 		/* Handle normal status and sense */
2617 		csio->scsi_status = rep->SCSIStatus;
2618 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2619 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2620 		else
2621 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2622 
2623 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2624 			int sense_len, returned_sense_len;
2625 
2626 			returned_sense_len = min(le32toh(rep->SenseCount),
2627 			    sizeof(struct scsi_sense_data));
2628 			if (returned_sense_len < csio->sense_len)
2629 				csio->sense_resid = csio->sense_len -
2630 				    returned_sense_len;
2631 			else
2632 				csio->sense_resid = 0;
2633 
2634 			sense_len = min(returned_sense_len,
2635 			    csio->sense_len - csio->sense_resid);
2636 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2637 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2638 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2639 		}
2640 
2641 		/*
2642 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2643 		 * and it's page code 0 (Supported Page List), and there is
2644 		 * inquiry data, and this is for a sequential access device, and
2645 		 * the device is an SSP target, and TLR is supported by the
2646 		 * controller, turn the TLR_bits value ON if page 0x90 is
2647 		 * supported.
2648 		 */
2649 		if ((scsi_cdb[0] == INQUIRY) &&
2650 		    (scsi_cdb[1] & SI_EVPD) &&
2651 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2652 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2653 		    (csio->data_ptr != NULL) &&
2654 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2655 		    (sc->control_TLR) &&
2656 		    (sc->mapping_table[target_id].device_info &
2657 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2658 			vpd_list = (struct scsi_vpd_supported_page_list *)
2659 			    csio->data_ptr;
2660 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2661 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2662 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2663 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2664 			alloc_len -= csio->resid;
2665 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2666 				if (vpd_list->list[i] == 0x90) {
2667 					*TLR_bits = TLR_on;
2668 					break;
2669 				}
2670 			}
2671 		}
2672 
2673 		/*
2674 		 * If this is a SATA direct-access end device, mark it so that
2675 		 * a SCSI StartStopUnit command will be sent to it when the
2676 		 * driver is being shutdown.
2677 		 */
2678 		if ((scsi_cdb[0] == INQUIRY) &&
2679 		    (csio->data_ptr != NULL) &&
2680 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2681 		    (sc->mapping_table[target_id].device_info &
2682 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2683 		    ((sc->mapping_table[target_id].device_info &
2684 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2685 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2686 			target = &sassc->targets[target_id];
2687 			target->supports_SSU = TRUE;
2688 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2689 			    target_id);
2690 		}
2691 		break;
2692 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2693 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2694 		/*
2695 		 * If devinfo is 0 this will be a volume.  In that case don't
2696 		 * tell CAM that the volume is not there.  We want volumes to
2697 		 * be enumerated until they are deleted/removed, not just
2698 		 * failed.
2699 		 */
2700 		if (cm->cm_targ->devinfo == 0)
2701 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2702 		else
2703 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2704 		break;
2705 	case MPI2_IOCSTATUS_INVALID_SGL:
2706 		mpr_print_scsiio_cmd(sc, cm);
2707 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2708 		break;
2709 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2710 		/*
2711 		 * This is one of the responses that comes back when an I/O
2712 		 * has been aborted.  If it is because of a timeout that we
2713 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2714 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2715 		 * command is the same (it gets retried, subject to the
2716 		 * retry counter), the only difference is what gets printed
2717 		 * on the console.
2718 		 */
2719 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2720 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2721 		else
2722 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2723 		break;
2724 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2725 		/* resid is ignored for this condition */
2726 		csio->resid = 0;
2727 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2728 		break;
2729 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2730 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2731 		/*
2732 		 * These can sometimes be transient transport-related
2733 		 * errors, and sometimes persistent drive-related errors.
2734 		 * We used to retry these without decrementing the retry
2735 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2736 		 * we hit a persistent drive problem that returns one of
2737 		 * these error codes, we would retry indefinitely.  So,
2738 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2739 		 * count and avoid infinite retries.  We're taking the
2740 		 * potential risk of flagging false failures in the event
2741 		 * of a topology-related error (e.g. a SAS expander problem
2742 		 * causes a command addressed to a drive to fail), but
2743 		 * avoiding getting into an infinite retry loop. However,
2744 		 * if we get them while were moving a device, we should
2745 		 * fail the request as 'not there' because the device
2746 		 * is effectively gone.
2747 		 */
2748 		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2749 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2750 		else
2751 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2752 		mpr_dprint(sc, MPR_INFO,
2753 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2754 		    mpr_describe_table(mpr_iocstatus_string,
2755 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2756 		    target_id, cm->cm_desc.Default.SMID,
2757 		    le32toh(rep->IOCLogInfo),
2758 		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2759 		mpr_dprint(sc, MPR_XINFO,
2760 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2761 		    rep->SCSIStatus, rep->SCSIState,
2762 		    le32toh(rep->TransferCount));
2763 		break;
2764 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2765 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2766 	case MPI2_IOCSTATUS_INVALID_VPID:
2767 	case MPI2_IOCSTATUS_INVALID_FIELD:
2768 	case MPI2_IOCSTATUS_INVALID_STATE:
2769 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2770 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2771 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2772 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2773 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2774 	default:
2775 		mprsas_log_command(cm, MPR_XINFO,
2776 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2777 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2778 		    rep->SCSIStatus, rep->SCSIState,
2779 		    le32toh(rep->TransferCount));
2780 		csio->resid = cm->cm_length;
2781 
2782 		if (scsi_cdb[0] == UNMAP &&
2783 		    target->is_nvme &&
2784 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2785 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2786 		else
2787 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2788 
2789 		break;
2790 	}
2791 
2792 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2793 
2794 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2795 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2796 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2797 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2798 		    "queue\n");
2799 	}
2800 
2801 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2802 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2803 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2804 	}
2805 
2806 	/*
2807 	 * Check to see if we're removing the device. If so, and this is the
2808 	 * last command on the queue, proceed with the deferred removal of the
2809 	 * device.  Note, for removing a volume, this won't trigger because
2810 	 * pending_remove_tm will be NULL.
2811 	 */
2812 	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2813 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2814 		    cm->cm_targ->pending_remove_tm != NULL) {
2815 			mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
2816 			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2817 			cm->cm_targ->pending_remove_tm = NULL;
2818 		}
2819 	}
2820 
2821 	mpr_free_command(sc, cm);
2822 	xpt_done(ccb);
2823 }
2824 
/*
 * Completion handler for SMP passthrough commands submitted by
 * mprsas_send_smpcmd().  Validates the firmware reply, translates it into
 * a CAM status on the user's CCB, unmaps the request/response buffers,
 * and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* No reply frame, e.g. the command was aborted; fail the CCB. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Reassemble the 64-bit SAS address from the little-endian request
	 * frame, purely for the debug messages below.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* smp_response[2] holds the SMP function result (SMP_FR_*). */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2887 
2888 static void
2889 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2890 {
2891 	struct mpr_command *cm;
2892 	uint8_t *request, *response;
2893 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2894 	struct mpr_softc *sc;
2895 	struct sglist *sg;
2896 	int error;
2897 
2898 	sc = sassc->sc;
2899 	sg = NULL;
2900 	error = 0;
2901 
2902 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2903 	case CAM_DATA_PADDR:
2904 	case CAM_DATA_SG_PADDR:
2905 		/*
2906 		 * XXX We don't yet support physical addresses here.
2907 		 */
2908 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2909 		    "supported\n", __func__);
2910 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2911 		xpt_done(ccb);
2912 		return;
2913 	case CAM_DATA_SG:
2914 		/*
2915 		 * The chip does not support more than one buffer for the
2916 		 * request or response.
2917 		 */
2918 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2919 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2920 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2921 			    "response buffer segments not supported for SMP\n",
2922 			    __func__);
2923 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2924 			xpt_done(ccb);
2925 			return;
2926 		}
2927 
2928 		/*
2929 		 * The CAM_SCATTER_VALID flag was originally implemented
2930 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2931 		 * We have two.  So, just take that flag to mean that we
2932 		 * might have S/G lists, and look at the S/G segment count
2933 		 * to figure out whether that is the case for each individual
2934 		 * buffer.
2935 		 */
2936 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2937 			bus_dma_segment_t *req_sg;
2938 
2939 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2940 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2941 		} else
2942 			request = ccb->smpio.smp_request;
2943 
2944 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2945 			bus_dma_segment_t *rsp_sg;
2946 
2947 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2948 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2949 		} else
2950 			response = ccb->smpio.smp_response;
2951 		break;
2952 	case CAM_DATA_VADDR:
2953 		request = ccb->smpio.smp_request;
2954 		response = ccb->smpio.smp_response;
2955 		break;
2956 	default:
2957 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2958 		xpt_done(ccb);
2959 		return;
2960 	}
2961 
2962 	cm = mpr_alloc_command(sc);
2963 	if (cm == NULL) {
2964 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
2965 		    __func__);
2966 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2967 		xpt_done(ccb);
2968 		return;
2969 	}
2970 
2971 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2972 	bzero(req, sizeof(*req));
2973 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2974 
2975 	/* Allow the chip to use any route to this SAS address. */
2976 	req->PhysicalPort = 0xff;
2977 
2978 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2979 	req->SGLFlags =
2980 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2981 
2982 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2983 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2984 
2985 	mpr_init_sge(cm, req, &req->SGL);
2986 
2987 	/*
2988 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2989 	 * do one map command, and one busdma call in there.
2990 	 */
2991 	cm->cm_uio.uio_iov = cm->cm_iovec;
2992 	cm->cm_uio.uio_iovcnt = 2;
2993 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2994 
2995 	/*
2996 	 * The read/write flag isn't used by busdma, but set it just in
2997 	 * case.  This isn't exactly accurate, either, since we're going in
2998 	 * both directions.
2999 	 */
3000 	cm->cm_uio.uio_rw = UIO_WRITE;
3001 
3002 	cm->cm_iovec[0].iov_base = request;
3003 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3004 	cm->cm_iovec[1].iov_base = response;
3005 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3006 
3007 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3008 			       cm->cm_iovec[1].iov_len;
3009 
3010 	/*
3011 	 * Trigger a warning message in mpr_data_cb() for the user if we
3012 	 * wind up exceeding two S/G segments.  The chip expects one
3013 	 * segment for the request and another for the response.
3014 	 */
3015 	cm->cm_max_segs = 2;
3016 
3017 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3018 	cm->cm_complete = mprsas_smpio_complete;
3019 	cm->cm_complete_data = ccb;
3020 
3021 	/*
3022 	 * Tell the mapping code that we're using a uio, and that this is
3023 	 * an SMP passthrough request.  There is a little special-case
3024 	 * logic there (in mpr_data_cb()) to handle the bidirectional
3025 	 * transfer.
3026 	 */
3027 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3028 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3029 
3030 	/* The chip data format is little endian. */
3031 	req->SASAddress.High = htole32(sasaddr >> 32);
3032 	req->SASAddress.Low = htole32(sasaddr);
3033 
3034 	/*
3035 	 * XXX Note that we don't have a timeout/abort mechanism here.
3036 	 * From the manual, it looks like task management requests only
3037 	 * work for SCSI IO and SATA passthrough requests.  We may need to
3038 	 * have a mechanism to retry requests in the event of a chip reset
3039 	 * at least.  Hopefully the chip will insure that any errors short
3040 	 * of that are relayed back to the driver.
3041 	 */
3042 	error = mpr_map_command(sc, cm);
3043 	if ((error != 0) && (error != EINPROGRESS)) {
3044 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3045 		    "mpr_map_command()\n", __func__, error);
3046 		goto bailout_error;
3047 	}
3048 
3049 	return;
3050 
3051 bailout_error:
3052 	mpr_free_command(sc, cm);
3053 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3054 	xpt_done(ccb);
3055 	return;
3056 }
3057 
/*
 * Handle an XPT_SMP_IO CCB: work out which SAS address the SMP request
 * should be routed to -- the target itself if it contains an embedded SMP
 * target, otherwise its parent device (typically the attached expander) --
 * and hand the CCB to mprsas_send_smpcmd().  Completes the CCB with an
 * error status if no usable SAS address can be found.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* Use the parent info cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3185 
/*
 * Handle an XPT_RESET_DEV CCB by sending a target-reset task management
 * request to the device.  The TM completes asynchronously in
 * mprsas_resetdev_complete(), which finishes the CCB.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/*
	 * Pick the reset flavor: NVMe devices get a PCIe protocol-level
	 * reset unless the controller uses the custom (SAS-style) TM
	 * handling for them.
	 */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	/* Task management requests carry no data payload. */
	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Block further I/O to the target until the reset completes. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3234 
3235 static void
3236 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3237 {
3238 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3239 	union ccb *ccb;
3240 
3241 	MPR_FUNCTRACE(sc);
3242 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3243 
3244 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3245 	ccb = tm->cm_complete_data;
3246 
3247 	/*
3248 	 * Currently there should be no way we can hit this case.  It only
3249 	 * happens when we have a failure to allocate chain frames, and
3250 	 * task management commands don't have S/G lists.
3251 	 */
3252 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3253 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3254 
3255 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3256 
3257 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3258 		    "handle %#04x! This should not happen!\n", __func__,
3259 		    tm->cm_flags, req->DevHandle);
3260 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3261 		goto bailout;
3262 	}
3263 
3264 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3265 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3266 
3267 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3268 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3269 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3270 		    CAM_LUN_WILDCARD);
3271 	}
3272 	else
3273 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3274 
3275 bailout:
3276 
3277 	mprsas_free_tm(sc, tm);
3278 	xpt_done(ccb);
3279 }
3280 
3281 static void
3282 mprsas_poll(struct cam_sim *sim)
3283 {
3284 	struct mprsas_softc *sassc;
3285 
3286 	sassc = cam_sim_softc(sim);
3287 
3288 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3289 		/* frequent debug messages during a panic just slow
3290 		 * everything down too much.
3291 		 */
3292 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3293 		    __func__);
3294 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3295 	}
3296 
3297 	mpr_intr_locked(sassc->sc);
3298 }
3299 
/*
 * CAM asynchronous event callback.  The only event acted on is
 * AC_ADVINFO_CHANGED with a long read-capacity buffer type: the cached
 * read-capacity data is fetched via XPT_DEV_ADVINFO and used to record
 * per-LUN EEDP (protection information) state for later I/O setup.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look up the LUN record; allocate one if it's new. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data for this LUN so
		 * we can inspect its protection bits.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Protection types 1 and 3 are treated as EEDP-formatted;
		 * type 2 (and anything unrecognized) is not supported.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	case AC_FOUND_DEVICE:
	default:
		break;
	}
}
3399 
3400 /*
3401  * Set the INRESET flag for this target so that no I/O will be sent to
3402  * the target until the reset has completed.  If an I/O request does
3403  * happen, the devq will be frozen.  The CCB holds the path which is
3404  * used to release the devq.  The devq is released and the CCB is freed
3405  * when the TM completes.
3406  */
3407 void
3408 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3409     struct mprsas_target *target, lun_id_t lun_id)
3410 {
3411 	union ccb *ccb;
3412 	path_id_t path_id;
3413 
3414 	ccb = xpt_alloc_ccb_nowait();
3415 	if (ccb) {
3416 		path_id = cam_sim_path(sc->sassc->sim);
3417 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3418 		    target->tid, lun_id) != CAM_REQ_CMP) {
3419 			xpt_free_ccb(ccb);
3420 		} else {
3421 			tm->cm_ccb = ccb;
3422 			tm->cm_targ = target;
3423 			target->flags |= MPRSAS_TARGET_INRESET;
3424 		}
3425 	}
3426 }
3427 
/*
 * Kick off device discovery.  Always returns 0.
 */
int
mprsas_startup(struct mpr_softc *sc)
{
	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mprsas_send_portenable(sc);
	return (0);
}
3440 
3441 static int
3442 mprsas_send_portenable(struct mpr_softc *sc)
3443 {
3444 	MPI2_PORT_ENABLE_REQUEST *request;
3445 	struct mpr_command *cm;
3446 
3447 	MPR_FUNCTRACE(sc);
3448 
3449 	if ((cm = mpr_alloc_command(sc)) == NULL)
3450 		return (EBUSY);
3451 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3452 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3453 	request->MsgFlags = 0;
3454 	request->VP_ID = 0;
3455 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3456 	cm->cm_complete = mprsas_portenable_complete;
3457 	cm->cm_data = NULL;
3458 	cm->cm_sge = NULL;
3459 
3460 	mpr_map_command(sc, cm);
3461 	mpr_dprint(sc, MPR_XINFO,
3462 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3463 	    cm, cm->cm_req, cm->cm_complete);
3464 	return (0);
3465 }
3466 
3467 static void
3468 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3469 {
3470 	MPI2_PORT_ENABLE_REPLY *reply;
3471 	struct mprsas_softc *sassc;
3472 
3473 	MPR_FUNCTRACE(sc);
3474 	sassc = sc->sassc;
3475 
3476 	/*
3477 	 * Currently there should be no way we can hit this case.  It only
3478 	 * happens when we have a failure to allocate chain frames, and
3479 	 * port enable commands don't have S/G lists.
3480 	 */
3481 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3482 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3483 		    "This should not happen!\n", __func__, cm->cm_flags);
3484 	}
3485 
3486 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3487 	if (reply == NULL)
3488 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3489 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3490 	    MPI2_IOCSTATUS_SUCCESS)
3491 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3492 
3493 	mpr_free_command(sc, cm);
3494 	/*
3495 	 * Done waiting for port enable to complete.  Decrement the refcount.
3496 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3497 	 * take place.
3498 	 */
3499 	sc->wait_for_port_enable = 0;
3500 	sc->port_enable_complete = 1;
3501 	wakeup(&sc->port_enable_complete);
3502 	mprsas_startup_decrement(sassc);
3503 }
3504 
3505 int
3506 mprsas_check_id(struct mprsas_softc *sassc, int id)
3507 {
3508 	struct mpr_softc *sc = sassc->sc;
3509 	char *ids;
3510 	char *name;
3511 
3512 	ids = &sc->exclude_ids[0];
3513 	while((name = strsep(&ids, ",")) != NULL) {
3514 		if (name[0] == '\0')
3515 			continue;
3516 		if (strtol(name, NULL, 0) == (long)id)
3517 			return (1);
3518 	}
3519 
3520 	return (0);
3521 }
3522 
3523 void
3524 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3525 {
3526 	struct mprsas_softc *sassc;
3527 	struct mprsas_lun *lun, *lun_tmp;
3528 	struct mprsas_target *targ;
3529 	int i;
3530 
3531 	sassc = sc->sassc;
3532 	/*
3533 	 * The number of targets is based on IOC Facts, so free all of
3534 	 * the allocated LUNs for each target and then the target buffer
3535 	 * itself.
3536 	 */
3537 	for (i=0; i< maxtargets; i++) {
3538 		targ = &sassc->targets[i];
3539 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3540 			free(lun, M_MPR);
3541 		}
3542 	}
3543 	free(sassc->targets, M_MPR);
3544 
3545 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3546 	    M_MPR, M_WAITOK|M_ZERO);
3547 }
3548