xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 78bc019d220e05abb5b12f678f9b4a847019bbcc)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT3 */
37 
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73 
74 #include <dev/nvme/nvme.h>
75 
76 #include <dev/mpr/mpi/mpi2_type.h>
77 #include <dev/mpr/mpi/mpi2.h>
78 #include <dev/mpr/mpi/mpi2_ioc.h>
79 #include <dev/mpr/mpi/mpi2_sas.h>
80 #include <dev/mpr/mpi/mpi2_pci.h>
81 #include <dev/mpr/mpi/mpi2_cnfg.h>
82 #include <dev/mpr/mpi/mpi2_init.h>
83 #include <dev/mpr/mpi/mpi2_tool.h>
84 #include <dev/mpr/mpr_ioctl.h>
85 #include <dev/mpr/mprvar.h>
86 #include <dev/mpr/mpr_table.h>
87 #include <dev/mpr/mpr_sas.h>
88 
89 #define MPRSAS_DISCOVERY_TIMEOUT	20
90 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
95 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/* 0x00 - 0x0F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x30 - 0x3F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x4F */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90 - 0x9F */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xAF */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xB0 - 0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xC0 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116 
117 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
118 
119 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
120 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mprsas_poll(struct cam_sim *sim);
123 static void mprsas_scsiio_timeout(void *data);
124 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
125 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
126 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
127 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
128 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130     struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132     struct cam_path *path, void *arg);
133 static int mprsas_send_portenable(struct mpr_softc *sc);
134 static void mprsas_portenable_complete(struct mpr_softc *sc,
135     struct mpr_command *cm);
136 
137 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
138 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
139     uint64_t sasaddr);
140 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
141 
142 struct mprsas_target *
143 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
144     uint16_t handle)
145 {
146 	struct mprsas_target *target;
147 	int i;
148 
149 	for (i = start; i < sassc->maxtargets; i++) {
150 		target = &sassc->targets[i];
151 		if (target->handle == handle)
152 			return (target);
153 	}
154 
155 	return (NULL);
156 }
157 
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159  * commands before device handles have been found by discovery.  Since
160  * discovery involves reading config pages and possibly sending commands,
161  * discovery actions may continue even after we receive the end of discovery
162  * event, so refcount discovery actions instead of assuming we can unfreeze
163  * the simq when we get the event.
164  */
165 void
166 mprsas_startup_increment(struct mprsas_softc *sassc)
167 {
168 	MPR_FUNCTRACE(sassc->sc);
169 
170 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
171 		if (sassc->startup_refcount++ == 0) {
172 			/* just starting, freeze the simq */
173 			mpr_dprint(sassc->sc, MPR_INIT,
174 			    "%s freezing simq\n", __func__);
175 			xpt_hold_boot();
176 			xpt_freeze_simq(sassc->sim, 1);
177 		}
178 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
179 		    sassc->startup_refcount);
180 	}
181 }
182 
183 void
184 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
185 {
186 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
187 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
188 		xpt_release_simq(sassc->sim, 1);
189 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
190 	}
191 }
192 
193 void
194 mprsas_startup_decrement(struct mprsas_softc *sassc)
195 {
196 	MPR_FUNCTRACE(sassc->sc);
197 
198 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
199 		if (--sassc->startup_refcount == 0) {
200 			/* finished all discovery-related actions, release
201 			 * the simq and rescan for the latest topology.
202 			 */
203 			mpr_dprint(sassc->sc, MPR_INIT,
204 			    "%s releasing simq\n", __func__);
205 			sassc->flags &= ~MPRSAS_IN_STARTUP;
206 			xpt_release_simq(sassc->sim, 1);
207 			xpt_release_boot();
208 		}
209 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
210 		    sassc->startup_refcount);
211 	}
212 }
213 
214 /*
215  * The firmware requires us to stop sending commands when we're doing task
216  * management.
217  * use.
218  * XXX The logic for serializing the device has been made lazy and moved to
219  * mprsas_prepare_for_tm().
220  */
221 struct mpr_command *
222 mprsas_alloc_tm(struct mpr_softc *sc)
223 {
224 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
225 	struct mpr_command *tm;
226 
227 	MPR_FUNCTRACE(sc);
228 	tm = mpr_alloc_high_priority_command(sc);
229 	if (tm == NULL)
230 		return (NULL);
231 
232 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
233 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
234 	return tm;
235 }
236 
237 void
238 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
239 {
240 
241 	MPR_FUNCTRACE(sc);
242 	if (tm == NULL)
243 		return;
244 
245 	/*
246 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
247 	 * free the resources used for freezing the devq.  Must clear the
248 	 * INRESET flag as well or scsi I/O will not work.
249 	 */
250 	if (tm->cm_ccb) {
251 		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
252 		    "Unfreezing devq for target ID %d\n",
253 		    tm->cm_targ->tid);
254 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
255 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
256 		xpt_free_path(tm->cm_ccb->ccb_h.path);
257 		xpt_free_ccb(tm->cm_ccb);
258 	}
259 
260 	mpr_free_high_priority_command(sc, tm);
261 }
262 
263 void
264 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
265 {
266 	struct mprsas_softc *sassc = sc->sassc;
267 	path_id_t pathid;
268 	target_id_t targetid;
269 	union ccb *ccb;
270 
271 	MPR_FUNCTRACE(sc);
272 	pathid = cam_sim_path(sassc->sim);
273 	if (targ == NULL)
274 		targetid = CAM_TARGET_WILDCARD;
275 	else
276 		targetid = targ - sassc->targets;
277 
278 	/*
279 	 * Allocate a CCB and schedule a rescan.
280 	 */
281 	ccb = xpt_alloc_ccb_nowait();
282 	if (ccb == NULL) {
283 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
284 		return;
285 	}
286 
287 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
288 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
289 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
290 		xpt_free_ccb(ccb);
291 		return;
292 	}
293 
294 	if (targetid == CAM_TARGET_WILDCARD)
295 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
296 	else
297 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
298 
299 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
300 	xpt_rescan(ccb);
301 }
302 
/*
 * Format and emit a debug message for a command.  The message is prefixed
 * with the command's CAM path (plus the CDB and transfer length for SCSI
 * I/O) when a CCB is attached, or a synthetic "noperiph" SIM path string
 * otherwise, followed by the request SMID.  Suppressed entirely unless one
 * of the bits in 'level' is enabled in the adapter's mpr_debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Build the message in the fixed on-stack buffer via sbuf. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CAM path; add CDB info for SCSI I/O. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: synthesize a sim:bus:target:lun prefix instead. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
347 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Volumes need no REMOVE_DEVICE
 * follow-up, so on success the target slot is simply cleared here and the
 * TM command is freed.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/* A NULL reply here means a diag reset stole the reply frame. */
	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
411 
412 /*
413  * Retry mprsas_prepare_remove() if some previous attempt failed to allocate
414  * high priority command due to limit reached.
415  */
416 void
417 mprsas_prepare_remove_retry(struct mprsas_softc *sassc)
418 {
419 	struct mprsas_target *target;
420 	int i;
421 
422 	if ((sassc->flags & MPRSAS_TOREMOVE) == 0)
423 		return;
424 
425 	for (i = 0; i < sassc->maxtargets; i++) {
426 		target = &sassc->targets[i];
427 		if ((target->flags & MPRSAS_TARGET_TOREMOVE) == 0)
428 			continue;
429 		if (TAILQ_EMPTY(&sassc->sc->high_priority_req_list))
430 			return;
431 		target->flags &= ~MPRSAS_TARGET_TOREMOVE;
432 		if (target->flags & MPR_TARGET_FLAGS_VOLUME)
433 			mprsas_prepare_volume_remove(sassc, target->handle);
434 		else
435 			mprsas_prepare_remove(sassc, target->handle);
436 	}
437 	sassc->flags &= ~MPRSAS_TOREMOVE;
438 }
439 
440 /*
441  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
442  * Otherwise Volume Delete is same as Bare Drive Removal.
443  */
444 void
445 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
446 {
447 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
448 	struct mpr_softc *sc;
449 	struct mpr_command *cm;
450 	struct mprsas_target *targ = NULL;
451 
452 	MPR_FUNCTRACE(sassc->sc);
453 	sc = sassc->sc;
454 
455 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
456 	if (targ == NULL) {
457 		/* FIXME: what is the action? */
458 		/* We don't know about this device? */
459 		mpr_dprint(sc, MPR_ERROR,
460 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
461 		return;
462 	}
463 
464 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
465 
466 	cm = mprsas_alloc_tm(sc);
467 	if (cm == NULL) {
468 		targ->flags |= MPRSAS_TARGET_TOREMOVE;
469 		sassc->flags |= MPRSAS_TOREMOVE;
470 		return;
471 	}
472 
473 	mprsas_rescan_target(sc, targ);
474 
475 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
476 	req->DevHandle = targ->handle;
477 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
478 
479 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
480 		/* SAS Hard Link Reset / SATA Link Reset */
481 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
482 	} else {
483 		/* PCIe Protocol Level Reset*/
484 		req->MsgFlags =
485 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
486 	}
487 
488 	cm->cm_targ = targ;
489 	cm->cm_data = NULL;
490 	cm->cm_complete = mprsas_remove_volume;
491 	cm->cm_complete_data = (void *)(uintptr_t)handle;
492 
493 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
494 	    __func__, targ->tid);
495 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
496 
497 	mpr_map_command(sc, cm);
498 }
499 
500 /*
501  * The firmware performs debounce on the link to avoid transient link errors
502  * and false removals.  When it does decide that link has been lost and a
503  * device needs to go away, it expects that the host will perform a target reset
504  * and then an op remove.  The reset has the side-effect of aborting any
505  * outstanding requests for the device, which is required for the op-remove to
506  * succeed.  It's not clear if the host should check for the device coming back
507  * alive after the reset.
508  */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		/*
		 * High-priority pool exhausted: mark the target so that
		 * mprsas_prepare_remove_retry() retries the removal later.
		 */
		targ->flags |= MPRSAS_TARGET_TOREMOVE;
		sassc->flags |= MPRSAS_TOREMOVE;
		return;
	}

	mprsas_rescan_target(sc, targ);

	/* IOC fields are little-endian; swap the handle accordingly. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/*
	 * mprsas_remove_device() runs on completion and issues the
	 * follow-up MPI2_SAS_OP_REMOVE_DEVICE for this handle.
	 */
	tm->cm_targ = targ;
	tm->cm_data = NULL;
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
559 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  On completion of the reset, the same command
 * is reused to issue the MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control
 * request, either immediately (no commands pending on the target) or
 * deferred via targ->pending_remove_tm until the target's queue drains.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off from
	 * there if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO,
		    "No pending commands: starting remove_device for target %u handle 0x%04x\n",
		    targ->tid, handle);
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
638 
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE request issued by
 * mprsas_remove_device().  On success, clears the target's state and
 * frees any remaining per-LUN records so the slot can be reused if the
 * same device returns later.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Release any LUN records accumulated for this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
718 
719 static int
720 mprsas_register_events(struct mpr_softc *sc)
721 {
722 	uint8_t events[16];
723 
724 	bzero(events, 16);
725 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
726 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
727 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
728 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
729 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
730 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
731 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
732 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
733 	setbit(events, MPI2_EVENT_IR_VOLUME);
734 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
735 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
736 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
737 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
738 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
739 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
740 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
741 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
742 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
743 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
744 		}
745 	}
746 
747 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
748 	    &sc->sassc->mprsas_eh);
749 
750 	return (0);
751 }
752 
/*
 * Attach the SAS/CAM layer to an mpr controller: allocate the per-SAS
 * softc and target table, create the CAM SIM and its queue, start the
 * firmware-event taskqueue, register the SCSI bus, and hold boot until
 * initial discovery completes.  Failure to register the EEDP async
 * handler is non-fatal.  Returns 0 on success or an errno; on error the
 * partial attach is torn down via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the SIMQ to what's left after high-priority reservations. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	mpr_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
869 
/*
 * Tear down the SAS/CAM layer: deregister events and the async handler,
 * drain the event taskqueue (unlocked, to avoid deadlock), release any
 * startup freezes so CAM doesn't wedge after a partial attach, then
 * deregister the bus and free the SIM, SIMQ, per-target LUN lists, and
 * the softc.  Safe to call on a partially attached instance.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free per-target LUN records before discarding the target table. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
932 
933 void
934 mprsas_discovery_end(struct mprsas_softc *sassc)
935 {
936 	struct mpr_softc *sc = sassc->sc;
937 
938 	MPR_FUNCTRACE(sc);
939 
940 	/*
941 	 * After discovery has completed, check the mapping table for any
942 	 * missing devices and update their missing counts. Only do this once
943 	 * whenever the driver is initialized so that missing counts aren't
944 	 * updated unnecessarily. Note that just because discovery has
945 	 * completed doesn't mean that events have been processed yet. The
946 	 * check_devices function is a callout timer that checks if ALL devices
947 	 * are missing. If so, it will wait a little longer for events to
948 	 * complete and keep resetting itself until some device in the mapping
949 	 * table is not missing, meaning that event processing has started.
950 	 */
951 	if (sc->track_mapping_events) {
952 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
953 		    "completed. Check for missing devices in the mapping "
954 		    "table.\n");
955 		callout_reset(&sc->device_check_callout,
956 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
957 		    sc);
958 	}
959 }
960 
/*
 * CAM action entry point for this SIM.  Dispatches the CCB on its
 * function code.  Requests handled inline fall through to the common
 * xpt_done() at the bottom; requests with asynchronous completion paths
 * (XPT_RESET_DEV, XPT_SCSI_IO, XPT_SMP_IO) return early and are
 * completed by their own handlers.  Called with the softc mutex held.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the SIM/HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/*
		 * Report per-target transport settings: link rate from the
		 * last-known PHY negotiation and tagged queueing enabled.
		 */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device is present at this ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the MPI link rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the TM completion path. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by mprsas_scsiio_complete(). */
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Completed asynchronously by the SMP completion path. */
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1088 
1089 static void
1090 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1091     target_id_t target_id, lun_id_t lun_id)
1092 {
1093 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1094 	struct cam_path *path;
1095 
1096 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1097 	    ac_code, target_id, (uintmax_t)lun_id);
1098 
1099 	if (xpt_create_path(&path, NULL,
1100 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1101 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1102 		    "notification\n");
1103 		return;
1104 	}
1105 
1106 	xpt_async(ac_code, path, NULL);
1107 	xpt_free_path(path);
1108 }
1109 
/*
 * Force completion of every in-flight command after a diag reset.  The
 * controller has forgotten all outstanding I/O, so each non-free command
 * is completed with a NULL reply: completion callbacks are invoked and
 * sleeping waiters are woken so nothing remains blocked.  Called with
 * the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Release the driver-allocated data buffer left behind by
		 * a timed-out SATA identify request (per the flag name).
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Polled commands only need the completion flag set. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		/*
		 * Complete via callback if one is registered, otherwise
		 * wake a sleeping submitter; track whether either worked.
		 */
		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding at the hardware any more. */
	sc->io_cmds_active = 0;
}
1164 
1165 void
1166 mprsas_handle_reinit(struct mpr_softc *sc)
1167 {
1168 	int i;
1169 
1170 	/* Go back into startup mode and freeze the simq, so that CAM
1171 	 * doesn't send any commands until after we've rediscovered all
1172 	 * targets and found the proper device handles for them.
1173 	 *
1174 	 * After the reset, portenable will trigger discovery, and after all
1175 	 * discovery-related activities have finished, the simq will be
1176 	 * released.
1177 	 */
1178 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1179 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1180 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1181 	mprsas_startup_increment(sc->sassc);
1182 
1183 	/* notify CAM of a bus reset */
1184 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1185 	    CAM_LUN_WILDCARD);
1186 
1187 	/* complete and cleanup after all outstanding commands */
1188 	mprsas_complete_all_commands(sc);
1189 
1190 	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1191 	    __func__, sc->sassc->startup_refcount);
1192 
1193 	/* zero all the target handles, since they may change after the
1194 	 * reset, and we have to rediscover all the targets and use the new
1195 	 * handles.
1196 	 */
1197 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1198 		if (sc->sassc->targets[i].outstanding != 0)
1199 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1200 			    i, sc->sassc->targets[i].outstanding);
1201 		sc->sassc->targets[i].handle = 0x0;
1202 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1203 		sc->sassc->targets[i].outstanding = 0;
1204 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1205 	}
1206 }
1207 static void
1208 mprsas_tm_timeout(void *data)
1209 {
1210 	struct mpr_command *tm = data;
1211 	struct mpr_softc *sc = tm->cm_sc;
1212 
1213 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1214 
1215 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1216 	    "out\n", tm);
1217 
1218 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1219 	    ("command not inqueue, state = %u\n", tm->cm_state));
1220 
1221 	tm->cm_state = MPR_CM_STATE_BUSY;
1222 	mpr_reinit(sc);
1223 }
1224 
/*
 * Completion handler for a logical unit reset TM.  If no commands remain
 * outstanding on the LUN, recovery for it is done and either the next
 * timed-out command on this target is aborted or the TM is released.
 * If commands remain, the LUN reset effectively failed and recovery
 * escalates to a target reset (or a controller reset for NVMe devices
 * without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed; disarm its timeout. */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Let CAM know a BDR-equivalent went out to this LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			/* Reuse this TM for the next abort. */
			mprsas_send_abort(sc, tm, cm);
		} else {
			/* Recovery done; release the target's TM slot. */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1323 
/*
 * Completion handler for a target reset TM.  If the target has no
 * outstanding commands left, recovery is finished and CAM is notified
 * with AC_SENT_BDR; otherwise the target reset effectively failed and
 * recovery escalates to a full controller reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm its timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		/* Recovery done; release the target's TM slot. */
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1400 
1401 #define MPR_RESET_TIMEOUT 30
1402 
/*
 * Issue a task management reset for the target attached to 'tm'.
 * 'type' selects a logical unit reset or a target reset; the matching
 * completion handler and TM preparation are installed accordingly, the
 * TM timeout callout is armed, and the command is mapped to hardware.
 * Returns 0 on success, -1 for a missing device handle or unexpected
 * type, or the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	/*
	 * Select the reset method and timeout per transport: link reset
	 * for SAS/SATA (and NVMe with custom TM handling), PCIe
	 * protocol-level reset otherwise, with an optional per-target
	 * timeout override for NVMe.
	 */
	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Arm the TM timeout before handing the command to hardware. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1477 
/*
 * Completion handler for an ABORT_TASK TM.  On success with no more
 * timed-out commands, recovery for the target is finished.  If other
 * timed-out commands remain, either the next one is aborted (when the
 * aborted command actually completed) or recovery escalates to a logical
 * unit reset (when the original command is still at the head of the
 * timed-out queue, i.e. the abort did not take effect).
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm its timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1558 
1559 #define MPR_ABORT_TIMEOUT 5
1560 
1561 static int
1562 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1563     struct mpr_command *cm)
1564 {
1565 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1566 	struct mprsas_target *targ;
1567 	int err, timeout;
1568 
1569 	targ = cm->cm_targ;
1570 	if (targ->handle == 0) {
1571 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1572 		   "%s null devhandle for target_id %d\n",
1573 		    __func__, cm->cm_ccb->ccb_h.target_id);
1574 		return -1;
1575 	}
1576 
1577 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1578 	    "Aborting command %p\n", cm);
1579 
1580 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1581 	req->DevHandle = htole16(targ->handle);
1582 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1583 
1584 	/* XXX Need to handle invalid LUNs */
1585 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1586 
1587 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1588 
1589 	tm->cm_data = NULL;
1590 	tm->cm_complete = mprsas_abort_complete;
1591 	tm->cm_complete_data = (void *)tm;
1592 	tm->cm_targ = cm->cm_targ;
1593 	tm->cm_lun = cm->cm_lun;
1594 
1595 	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1596 		timeout	= MPR_ABORT_TIMEOUT;
1597 	else
1598 		timeout = sc->nvme_abort_timeout;
1599 
1600 	callout_reset(&tm->cm_callout, timeout * hz,
1601 	    mprsas_tm_timeout, tm);
1602 
1603 	targ->aborts++;
1604 
1605 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1606 
1607 	err = mpr_map_command(sc, tm);
1608 	if (err)
1609 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1610 		    "error %d sending abort for cm %p SMID %u\n",
1611 		    err, cm, req->TaskMID);
1612 	return err;
1613 }
1614 
/*
 * Callout handler for a SCSI I/O command that exceeded its CAM timeout.
 * First runs the interrupt handler in case the completion is merely
 * pending; otherwise the command is marked timed out, queued on the
 * target's timed-out list, and recovery begins by sending an abort —
 * either via an existing TM already working this target or a freshly
 * allocated one.  Called with the softc mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		/* The interrupt pass completed it; nothing left to do. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* ccb_h.qos.sim_data holds the submission timestamp (sbinuptime). */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1702 
1703 /**
1704  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1705  *			     to SCSI Unmap.
1706  * Return 0 - for success,
1707  *	  1 - to immediately return back the command with success status to CAM
1708  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1709  *			   to FW without any translation.
1710  */
1711 static int
1712 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1713     union ccb *ccb, struct mprsas_target *targ)
1714 {
1715 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1716 	struct ccb_scsiio *csio;
1717 	struct unmap_parm_list *plist;
1718 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1719 	struct nvme_command *c;
1720 	int i, res;
1721 	uint16_t ndesc, list_len, data_length;
1722 	struct mpr_prp_page *prp_page_info;
1723 	uint64_t nvme_dsm_ranges_dma_handle;
1724 
1725 	csio = &ccb->csio;
1726 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1727 	if (!list_len) {
1728 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1729 		return -EINVAL;
1730 	}
1731 
1732 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1733 	if (!plist) {
1734 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1735 		    "save UNMAP data\n");
1736 		return -ENOMEM;
1737 	}
1738 
1739 	/* Copy SCSI unmap data to a local buffer */
1740 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1741 
1742 	/* return back the unmap command to CAM with success status,
1743 	 * if number of descripts is zero.
1744 	 */
1745 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1746 	if (!ndesc) {
1747 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1748 		    "UNMAP cmd is Zero\n");
1749 		res = 1;
1750 		goto out;
1751 	}
1752 
1753 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1754 	if (data_length > targ->MDTS) {
1755 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1756 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1757 		res = -EINVAL;
1758 		goto out;
1759 	}
1760 
1761 	prp_page_info = mpr_alloc_prp_page(sc);
1762 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1763 	    "UNMAP command.\n", __func__));
1764 
1765 	/*
1766 	 * Insert the allocated PRP page into the command's PRP page list. This
1767 	 * will be freed when the command is freed.
1768 	 */
1769 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1770 
1771 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1772 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1773 
1774 	bzero(nvme_dsm_ranges, data_length);
1775 
1776 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1777 	 * for each descriptors contained in SCSI UNMAP data.
1778 	 */
1779 	for (i = 0; i < ndesc; i++) {
1780 		nvme_dsm_ranges[i].length =
1781 		    htole32(be32toh(plist->desc[i].nlb));
1782 		nvme_dsm_ranges[i].starting_lba =
1783 		    htole64(be64toh(plist->desc[i].slba));
1784 		nvme_dsm_ranges[i].attributes = 0;
1785 	}
1786 
1787 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1788 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1789 	bzero(req, sizeof(*req));
1790 	req->DevHandle = htole16(targ->handle);
1791 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1792 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1793 	req->ErrorResponseBaseAddress.High =
1794 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1795 	req->ErrorResponseBaseAddress.Low =
1796 	    htole32(cm->cm_sense_busaddr);
1797 	req->ErrorResponseAllocationLength =
1798 	    htole16(sizeof(struct nvme_completion));
1799 	req->EncapsulatedCommandLength =
1800 	    htole16(sizeof(struct nvme_command));
1801 	req->DataLength = htole32(data_length);
1802 
1803 	/* Build NVMe DSM command */
1804 	c = (struct nvme_command *) req->NVMe_Command;
1805 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1806 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1807 	c->cdw10 = htole32(ndesc - 1);
1808 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1809 
1810 	cm->cm_length = data_length;
1811 	cm->cm_data = NULL;
1812 
1813 	cm->cm_complete = mprsas_scsiio_complete;
1814 	cm->cm_complete_data = ccb;
1815 	cm->cm_targ = targ;
1816 	cm->cm_lun = csio->ccb_h.target_lun;
1817 	cm->cm_ccb = ccb;
1818 
1819 	cm->cm_desc.Default.RequestFlags =
1820 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1821 
1822 	csio->ccb_h.qos.sim_data = sbinuptime();
1823 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1824 	    mprsas_scsiio_timeout, cm, 0);
1825 
1826 	targ->issued++;
1827 	targ->outstanding++;
1828 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1829 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1830 
1831 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1832 	    __func__, cm, ccb, targ->outstanding);
1833 
1834 	mpr_build_nvme_prp(sc, cm, req,
1835 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1836 	mpr_map_command(sc, cm);
1837 	res = 0;
1838 
1839 out:
1840 	free(plist, M_MPR);
1841 	return (res);
1842 }
1843 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, translate the CAM
 * request into an MPI2 SCSI IO request (or, for UNMAP to an NVMe drive,
 * a native NVMe DataSetManagement command), set up EEDP when the LUN is
 * formatted for it, and queue the command to the controller.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/*
	 * A zero handle means the target has no valid firmware device
	 * handle.  If a diag reset is in progress the handle is only
	 * transiently invalid, so freeze and requeue; otherwise the
	 * device is really gone.
	 */
	if (targ->handle == 0x0) {
		if (targ->flags & MPRSAS_TARGET_INDIAGRESET) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not addressable via direct SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Refuse new I/O once the controller is shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Getting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command frames (or a diag reset has started): freeze
	 * the SIM queue and ask CAM to retry later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/*
	 * For NVMe devices, issue the UNMAP command directly to the drive
	 * by constructing the equivalent native NVMe DataSetManagement
	 * command.  rc == 1: nothing to do, complete with success;
	 * rc == 0: an NVMe Encapsulated Request was issued; any other rc
	 * falls through to the normal SCSI IO path.
	 */
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
	}

	/* Build the MPI2 SCSI IO request in the command frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry their extra length in the AddCDBLen field. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Propagate CAM command priority and the per-target TLR setting. */
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM put it (pointer or inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/* NOTE(review): IoFlags was already set above; this is redundant. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN this request addresses (NULL if not found). */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole32(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 (16-byte CDB) or 2. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT (001b) in byte 1. */
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data is mapped later directly from the CCB. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* Embedded SGL space: request dwords 24..31 (see SGLOffset0). */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout using the CCB's timeout value (ms). */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2161 
2162 /**
2163  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2164  */
2165 static void
2166 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2167     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2168 {
2169 	u32 response_info;
2170 	u8 *response_bytes;
2171 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2172 	    MPI2_IOCSTATUS_MASK;
2173 	u8 scsi_state = mpi_reply->SCSIState;
2174 	u8 scsi_status = mpi_reply->SCSIStatus;
2175 	char *desc_ioc_state = NULL;
2176 	char *desc_scsi_status = NULL;
2177 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2178 
2179 	if (log_info == 0x31170000)
2180 		return;
2181 
2182 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2183 	     ioc_status);
2184 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2185 	    scsi_status);
2186 
2187 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2188 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2189 	if (targ->encl_level_valid) {
2190 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2191 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2192 		    targ->connector_name);
2193 	}
2194 
2195 	/*
2196 	 * We can add more detail about underflow data here
2197 	 * TO-DO
2198 	 */
2199 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2200 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2201 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2202 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2203 
2204 	if (sc->mpr_debug & MPR_XINFO &&
2205 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2206 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2207 		scsi_sense_print(csio);
2208 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2209 	}
2210 
2211 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2212 		response_info = le32toh(mpi_reply->ResponseInfo);
2213 		response_bytes = (u8 *)&response_info;
2214 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2215 		    response_bytes[0],
2216 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2217 		    response_bytes[0]));
2218 	}
2219 }
2220 
/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to
 * equivalent SCSI error status.
 *
 * Also builds fixed-format autosense data (sense key/ASC/ASCQ derived
 * from the NVMe status) into the CCB and marks it valid.
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any status not explicitly mapped below:
	 * CHECK CONDITION / ILLEGAL REQUEST with no additional sense.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants map to the same TASK ABORTED sense. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/* Compute the sense residual against the fixed sense data size. */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Fill in fixed-format sense data and flag it valid for CAM. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2397 
2398 /** mprsas_complete_nvme_unmap
2399  *
2400  * Complete native NVMe command issued using NVMe Encapsulated
2401  * Request Message.
2402  */
2403 static u8
2404 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2405 {
2406 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2407 	struct nvme_completion *nvme_completion = NULL;
2408 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2409 
2410 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2411 	if (le16toh(mpi_reply->ErrorResponseCount)){
2412 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2413 		scsi_status = mprsas_nvme_trans_status_code(
2414 		    nvme_completion->status, cm);
2415 	}
2416 	return scsi_status;
2417 }
2418 
2419 static void
2420 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2421 {
2422 	MPI2_SCSI_IO_REPLY *rep;
2423 	union ccb *ccb;
2424 	struct ccb_scsiio *csio;
2425 	struct mprsas_softc *sassc;
2426 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2427 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2428 	int dir = 0, i;
2429 	u16 alloc_len;
2430 	struct mprsas_target *target;
2431 	target_id_t target_id;
2432 
2433 	MPR_FUNCTRACE(sc);
2434 
2435 	callout_stop(&cm->cm_callout);
2436 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2437 
2438 	sassc = sc->sassc;
2439 	ccb = cm->cm_complete_data;
2440 	csio = &ccb->csio;
2441 	target_id = csio->ccb_h.target_id;
2442 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2443 	mpr_dprint(sc, MPR_TRACE,
2444 	    "cm %p SMID %u ccb %p reply %p outstanding %u csio->scsi_status 0x%x,"
2445 	    "csio->dxfer_len 0x%x, csio->msg_le 0x%xn\n", cm,
2446 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2447 	    cm->cm_targ->outstanding, csio->scsi_status,
2448 	    csio->dxfer_len, csio->msg_len);
2449 	/*
2450 	 * XXX KDM if the chain allocation fails, does it matter if we do
2451 	 * the sync and unload here?  It is simpler to do it in every case,
2452 	 * assuming it doesn't cause problems.
2453 	 */
2454 	if (cm->cm_data != NULL) {
2455 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2456 			dir = BUS_DMASYNC_POSTREAD;
2457 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2458 			dir = BUS_DMASYNC_POSTWRITE;
2459 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2460 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2461 	}
2462 
2463 	cm->cm_targ->completed++;
2464 	cm->cm_targ->outstanding--;
2465 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2466 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2467 
2468 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2469 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2470 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2471 		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2472 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2473 		if (cm->cm_reply != NULL)
2474 			mprsas_log_command(cm, MPR_RECOVERY,
2475 			    "completed timedout cm %p ccb %p during recovery "
2476 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2477 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2478 			    rep->SCSIState, le32toh(rep->TransferCount));
2479 		else
2480 			mprsas_log_command(cm, MPR_RECOVERY,
2481 			    "completed timedout cm %p ccb %p during recovery\n",
2482 			    cm, cm->cm_ccb);
2483 	} else if (cm->cm_targ->tm != NULL) {
2484 		if (cm->cm_reply != NULL)
2485 			mprsas_log_command(cm, MPR_RECOVERY,
2486 			    "completed cm %p ccb %p during recovery "
2487 			    "ioc %x scsi %x state %x xfer %u\n",
2488 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2489 			    rep->SCSIStatus, rep->SCSIState,
2490 			    le32toh(rep->TransferCount));
2491 		else
2492 			mprsas_log_command(cm, MPR_RECOVERY,
2493 			    "completed cm %p ccb %p during recovery\n",
2494 			    cm, cm->cm_ccb);
2495 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2496 		mprsas_log_command(cm, MPR_RECOVERY,
2497 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2498 	}
2499 
2500 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2501 		/*
2502 		 * We ran into an error after we tried to map the command,
2503 		 * so we're getting a callback without queueing the command
2504 		 * to the hardware.  So we set the status here, and it will
2505 		 * be retained below.  We'll go through the "fast path",
2506 		 * because there can be no reply when we haven't actually
2507 		 * gone out to the hardware.
2508 		 */
2509 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2510 
2511 		/*
2512 		 * Currently the only error included in the mask is
2513 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2514 		 * chain frames.  We need to freeze the queue until we get
2515 		 * a command that completed without this error, which will
2516 		 * hopefully have some chain frames attached that we can
2517 		 * use.  If we wanted to get smarter about it, we would
2518 		 * only unfreeze the queue in this condition when we're
2519 		 * sure that we're getting some chain frames back.  That's
2520 		 * probably unnecessary.
2521 		 */
2522 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2523 			xpt_freeze_simq(sassc->sim, 1);
2524 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2525 			mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2526 			    "Error sending command, freezing SIM queue\n");
2527 		}
2528 	}
2529 
2530 	/*
2531 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2532 	 * flag, and use it in a few places in the rest of this function for
2533 	 * convenience. Use the macro if available.
2534 	 */
2535 	scsi_cdb = scsiio_cdb_ptr(csio);
2536 
2537 	/*
2538 	 * If this is a Start Stop Unit command and it was issued by the driver
2539 	 * during shutdown, decrement the refcount to account for all of the
2540 	 * commands that were sent.  All SSU commands should be completed before
2541 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2542 	 * is TRUE.
2543 	 */
2544 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2545 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2546 		sc->SSU_refcount--;
2547 	}
2548 
2549 	/* Take the fast path to completion */
2550 	if (cm->cm_reply == NULL) {
2551 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2552 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2553 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2554 			else {
2555 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2556 				csio->scsi_status = SCSI_STATUS_OK;
2557 			}
2558 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2559 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2560 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2561 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2562 				    "Unfreezing SIM queue\n");
2563 			}
2564 		}
2565 
2566 		/*
2567 		 * There are two scenarios where the status won't be
2568 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2569 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2570 		 */
2571 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2572 			/*
2573 			 * Freeze the dev queue so that commands are
2574 			 * executed in the correct order after error
2575 			 * recovery.
2576 			 */
2577 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2578 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2579 		}
2580 		mpr_free_command(sc, cm);
2581 		xpt_done(ccb);
2582 		return;
2583 	}
2584 
2585 	target = &sassc->targets[target_id];
2586 	if (scsi_cdb[0] == UNMAP &&
2587 	    target->is_nvme &&
2588 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2589 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2590 		csio->scsi_status = rep->SCSIStatus;
2591 	}
2592 
2593 	mprsas_log_command(cm, MPR_XINFO,
2594 	    "ioc %x scsi %x state %x xfer %u\n",
2595 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2596 	    le32toh(rep->TransferCount));
2597 
2598 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2599 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2600 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2601 		/* FALLTHROUGH */
2602 	case MPI2_IOCSTATUS_SUCCESS:
2603 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2604 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2605 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2606 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2607 
2608 		/* Completion failed at the transport level. */
2609 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2610 		    MPI2_SCSI_STATE_TERMINATED)) {
2611 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2612 			break;
2613 		}
2614 
2615 		/* In a modern packetized environment, an autosense failure
2616 		 * implies that there's not much else that can be done to
2617 		 * recover the command.
2618 		 */
2619 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2620 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2621 			break;
2622 		}
2623 
2624 		/*
2625 		 * CAM doesn't care about SAS Response Info data, but if this is
2626 		 * the state check if TLR should be done.  If not, clear the
2627 		 * TLR_bits for the target.
2628 		 */
2629 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2630 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2631 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2632 			sc->mapping_table[target_id].TLR_bits =
2633 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2634 		}
2635 
2636 		/*
2637 		 * Intentionally override the normal SCSI status reporting
2638 		 * for these two cases.  These are likely to happen in a
2639 		 * multi-initiator environment, and we want to make sure that
2640 		 * CAM retries these commands rather than fail them.
2641 		 */
2642 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2643 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2644 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2645 			break;
2646 		}
2647 
2648 		/* Handle normal status and sense */
2649 		csio->scsi_status = rep->SCSIStatus;
2650 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2651 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2652 		else
2653 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2654 
2655 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2656 			int sense_len, returned_sense_len;
2657 
2658 			returned_sense_len = min(le32toh(rep->SenseCount),
2659 			    sizeof(struct scsi_sense_data));
2660 			if (returned_sense_len < csio->sense_len)
2661 				csio->sense_resid = csio->sense_len -
2662 				    returned_sense_len;
2663 			else
2664 				csio->sense_resid = 0;
2665 
2666 			sense_len = min(returned_sense_len,
2667 			    csio->sense_len - csio->sense_resid);
2668 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2669 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2670 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2671 		}
2672 
2673 		/*
2674 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2675 		 * and it's page code 0 (Supported Page List), and there is
2676 		 * inquiry data, and this is for a sequential access device, and
2677 		 * the device is an SSP target, and TLR is supported by the
2678 		 * controller, turn the TLR_bits value ON if page 0x90 is
2679 		 * supported.
2680 		 */
2681 		if ((scsi_cdb[0] == INQUIRY) &&
2682 		    (scsi_cdb[1] & SI_EVPD) &&
2683 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2684 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2685 		    (csio->data_ptr != NULL) &&
2686 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2687 		    (sc->control_TLR) &&
2688 		    (sc->mapping_table[target_id].device_info &
2689 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2690 			vpd_list = (struct scsi_vpd_supported_page_list *)
2691 			    csio->data_ptr;
2692 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2693 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2694 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2695 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2696 			alloc_len -= csio->resid;
2697 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2698 				if (vpd_list->list[i] == 0x90) {
2699 					*TLR_bits = TLR_on;
2700 					break;
2701 				}
2702 			}
2703 		}
2704 
2705 		/*
2706 		 * If this is a SATA direct-access end device, mark it so that
2707 		 * a SCSI StartStopUnit command will be sent to it when the
2708 		 * driver is being shutdown.
2709 		 */
2710 		if ((scsi_cdb[0] == INQUIRY) &&
2711 		    (csio->data_ptr != NULL) &&
2712 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2713 		    (sc->mapping_table[target_id].device_info &
2714 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2715 		    ((sc->mapping_table[target_id].device_info &
2716 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2717 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2718 			target = &sassc->targets[target_id];
2719 			target->supports_SSU = TRUE;
2720 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2721 			    target_id);
2722 		}
2723 		break;
2724 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2725 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2726 		/*
2727 		 * If devinfo is 0 this will be a volume.  In that case don't
2728 		 * tell CAM that the volume is not there.  We want volumes to
2729 		 * be enumerated until they are deleted/removed, not just
2730 		 * failed.
2731 		 */
2732 		if (cm->cm_targ->devinfo == 0)
2733 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2734 		else
2735 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2736 		break;
2737 	case MPI2_IOCSTATUS_INVALID_SGL:
2738 		mpr_print_scsiio_cmd(sc, cm);
2739 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2740 		break;
2741 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2742 		/*
2743 		 * This is one of the responses that comes back when an I/O
2744 		 * has been aborted.  If it is because of a timeout that we
2745 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2746 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2747 		 * command is the same (it gets retried, subject to the
2748 		 * retry counter), the only difference is what gets printed
2749 		 * on the console.
2750 		 */
2751 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2752 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2753 		else
2754 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2755 		break;
2756 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2757 		/* resid is ignored for this condition */
2758 		csio->resid = 0;
2759 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2760 		break;
2761 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2762 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2763 		/*
2764 		 * These can sometimes be transient transport-related
2765 		 * errors, and sometimes persistent drive-related errors.
2766 		 * We used to retry these without decrementing the retry
2767 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2768 		 * we hit a persistent drive problem that returns one of
2769 		 * these error codes, we would retry indefinitely.  So,
2770 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2771 		 * count and avoid infinite retries.  We're taking the
2772 		 * potential risk of flagging false failures in the event
2773 		 * of a topology-related error (e.g. a SAS expander problem
2774 		 * causes a command addressed to a drive to fail), but
2775 		 * avoiding getting into an infinite retry loop. However,
2776 		 * if we get them while were moving a device, we should
2777 		 * fail the request as 'not there' because the device
2778 		 * is effectively gone.
2779 		 */
2780 		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2781 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2782 		else
2783 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2784 		mpr_dprint(sc, MPR_INFO,
2785 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2786 		    mpr_describe_table(mpr_iocstatus_string,
2787 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2788 		    target_id, cm->cm_desc.Default.SMID,
2789 		    le32toh(rep->IOCLogInfo),
2790 		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2791 		mpr_dprint(sc, MPR_XINFO,
2792 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2793 		    rep->SCSIStatus, rep->SCSIState,
2794 		    le32toh(rep->TransferCount));
2795 		break;
2796 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2797 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2798 	case MPI2_IOCSTATUS_INVALID_VPID:
2799 	case MPI2_IOCSTATUS_INVALID_FIELD:
2800 	case MPI2_IOCSTATUS_INVALID_STATE:
2801 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2802 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2803 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2804 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2805 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2806 	default:
2807 		mprsas_log_command(cm, MPR_XINFO,
2808 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2809 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2810 		    rep->SCSIStatus, rep->SCSIState,
2811 		    le32toh(rep->TransferCount));
2812 		csio->resid = cm->cm_length;
2813 
2814 		if (scsi_cdb[0] == UNMAP &&
2815 		    target->is_nvme &&
2816 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2817 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2818 		else
2819 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2820 
2821 		break;
2822 	}
2823 
2824 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2825 
2826 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2827 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2828 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2829 		mpr_dprint(sc, MPR_INFO, "Command completed, unfreezing SIM "
2830 		    "queue\n");
2831 	}
2832 
2833 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2834 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2835 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2836 	}
2837 
2838 	/*
2839 	 * Check to see if we're removing the device. If so, and this is the
2840 	 * last command on the queue, proceed with the deferred removal of the
2841 	 * device.  Note, for removing a volume, this won't trigger because
2842 	 * pending_remove_tm will be NULL.
2843 	 */
2844 	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2845 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2846 		    cm->cm_targ->pending_remove_tm != NULL) {
2847 			mpr_dprint(sc, MPR_INFO,
2848 			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
2849 			    cm->cm_targ->tid, cm->cm_targ->handle);
2850 			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2851 			cm->cm_targ->pending_remove_tm = NULL;
2852 		}
2853 	}
2854 
2855 	mpr_free_command(sc, cm);
2856 	xpt_done(ccb);
2857 }
2858 
/*
 * Completion handler for SMP passthrough commands submitted by
 * mprsas_send_smpcmd().  Translates the firmware reply into a CAM CCB
 * status, tears down the bi-directional DMA mapping, frees the command,
 * and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	/* The CCB was stashed here when the request was built. */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	/* No reply frame means the command never completed normally. */
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Reassemble the 64-bit SAS address from the little-endian halves
	 * stored in the original request (used only for logging below).
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* The SMP function result lives in byte 2 of the response frame. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2921 
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * expander at 'sasaddr'.  The request and response buffers are mapped as
 * a two-element uio (one segment each); completion is handled
 * asynchronously in mprsas_smpio_complete().  On any failure the CCB is
 * completed here with an appropriate error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * Locate the virtual addresses of the request and response
	 * buffers, depending on how CAM handed them to us.
	 */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		/* Plain virtual addresses; use them directly. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3089 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself, if it contains an embedded SMP target, or
 * its parent expander) and hand the request off to mprsas_send_smpcmd().
 * On failure the CCB is completed here with an error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe path: look the parent up in the target table. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe path: parent info is cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3215 
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task management command and
 * issue a target reset for the addressed device.  Completion is handled
 * asynchronously in mprsas_resetdev_complete().  Must be called with the
 * softc mutex held.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* Pick the reset flavor appropriate for the device type. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Freeze the devq so no new I/O reaches the target during reset. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3264 
3265 static void
3266 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3267 {
3268 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3269 	union ccb *ccb;
3270 
3271 	MPR_FUNCTRACE(sc);
3272 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3273 
3274 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3275 	ccb = tm->cm_complete_data;
3276 
3277 	/*
3278 	 * Currently there should be no way we can hit this case.  It only
3279 	 * happens when we have a failure to allocate chain frames, and
3280 	 * task management commands don't have S/G lists.
3281 	 */
3282 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3283 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3284 
3285 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3286 
3287 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3288 		    "handle %#04x! This should not happen!\n", __func__,
3289 		    tm->cm_flags, req->DevHandle);
3290 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3291 		goto bailout;
3292 	}
3293 
3294 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3295 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3296 
3297 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3298 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3299 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3300 		    CAM_LUN_WILDCARD);
3301 	}
3302 	else
3303 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3304 
3305 bailout:
3306 
3307 	mprsas_free_tm(sc, tm);
3308 	xpt_done(ccb);
3309 }
3310 
3311 static void
3312 mprsas_poll(struct cam_sim *sim)
3313 {
3314 	struct mprsas_softc *sassc;
3315 
3316 	sassc = cam_sim_softc(sim);
3317 
3318 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3319 		/* frequent debug messages during a panic just slow
3320 		 * everything down too much.
3321 		 */
3322 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3323 		    __func__);
3324 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3325 	}
3326 
3327 	mpr_intr_locked(sassc->sc);
3328 }
3329 
/*
 * CAM async event callback.  The driver registers for AC_ADVINFO_CHANGED
 * so it can learn when a LUN's long read capacity data changes, which is
 * how it discovers whether a LUN is formatted for EEDP (protection
 * information) and what its block size is.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	mpr_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing record of this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN; create a record for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data for this LUN
		 * via an XPT_DEV_ADVINFO CCB issued inline.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * EEDP is usable only for protection types 1 and 3; type 2
		 * (and anything unknown) is treated as unformatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mpr_unlock(sc);
}
3431 
3432 /*
3433  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3434  * the target until the reset has completed.  The CCB holds the path which
3435  * is used to release the devq.  The devq is released and the CCB is freed
3436  * when the TM completes.
3437  * We only need to do this when we're entering reset, not at each time we
3438  * need to send an abort (which will happen if multiple commands timeout
3439  * while we're sending the abort). We do not release the queue for each
3440  * command we complete (just at the end when we free the tm), so freezing
3441  * it each time doesn't make sense.
3442  */
3443 void
3444 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3445     struct mprsas_target *target, lun_id_t lun_id)
3446 {
3447 	union ccb *ccb;
3448 	path_id_t path_id;
3449 
3450 	ccb = xpt_alloc_ccb_nowait();
3451 	if (ccb) {
3452 		path_id = cam_sim_path(sc->sassc->sim);
3453 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3454 		    target->tid, lun_id) != CAM_REQ_CMP) {
3455 			xpt_free_ccb(ccb);
3456 		} else {
3457 			tm->cm_ccb = ccb;
3458 			tm->cm_targ = target;
3459 			if ((target->flags & MPRSAS_TARGET_INRESET) == 0) {
3460 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
3461 				    "%s: Freezing devq for target ID %d\n",
3462 				    __func__, target->tid);
3463 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3464 				target->flags |= MPRSAS_TARGET_INRESET;
3465 			}
3466 		}
3467 	}
3468 }
3469 
3470 int
3471 mprsas_startup(struct mpr_softc *sc)
3472 {
3473 	/*
3474 	 * Send the port enable message and set the wait_for_port_enable flag.
3475 	 * This flag helps to keep the simq frozen until all discovery events
3476 	 * are processed.
3477 	 */
3478 	sc->wait_for_port_enable = 1;
3479 	mprsas_send_portenable(sc);
3480 	return (0);
3481 }
3482 
3483 static int
3484 mprsas_send_portenable(struct mpr_softc *sc)
3485 {
3486 	MPI2_PORT_ENABLE_REQUEST *request;
3487 	struct mpr_command *cm;
3488 
3489 	MPR_FUNCTRACE(sc);
3490 
3491 	if ((cm = mpr_alloc_command(sc)) == NULL)
3492 		return (EBUSY);
3493 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3494 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3495 	request->MsgFlags = 0;
3496 	request->VP_ID = 0;
3497 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3498 	cm->cm_complete = mprsas_portenable_complete;
3499 	cm->cm_data = NULL;
3500 	cm->cm_sge = NULL;
3501 
3502 	mpr_map_command(sc, cm);
3503 	mpr_dprint(sc, MPR_XINFO,
3504 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3505 	    cm, cm->cm_req, cm->cm_complete);
3506 	return (0);
3507 }
3508 
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mprsas_send_portenable().  Logs any failure, then wakes up waiters
 * and drops the startup refcount so the bus rescan can proceed once
 * discovery is done.
 */
static void
mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mprsas_softc *sassc;

	MPR_FUNCTRACE(sc);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
		    "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* A failed port enable is logged but not fatal here. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");

	mpr_free_command(sc, cm);
	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	wakeup(&sc->port_enable_complete);
	mprsas_startup_decrement(sassc);
}
3546 
3547 int
3548 mprsas_check_id(struct mprsas_softc *sassc, int id)
3549 {
3550 	struct mpr_softc *sc = sassc->sc;
3551 	char *ids;
3552 	char *name;
3553 
3554 	ids = &sc->exclude_ids[0];
3555 	while((name = strsep(&ids, ",")) != NULL) {
3556 		if (name[0] == '\0')
3557 			continue;
3558 		if (strtol(name, NULL, 0) == (long)id)
3559 			return (1);
3560 	}
3561 
3562 	return (0);
3563 }
3564 
3565 void
3566 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3567 {
3568 	struct mprsas_softc *sassc;
3569 	struct mprsas_lun *lun, *lun_tmp;
3570 	struct mprsas_target *targ;
3571 	int i;
3572 
3573 	sassc = sc->sassc;
3574 	/*
3575 	 * The number of targets is based on IOC Facts, so free all of
3576 	 * the allocated LUNs for each target and then the target buffer
3577 	 * itself.
3578 	 */
3579 	for (i=0; i< maxtargets; i++) {
3580 		targ = &sassc->targets[i];
3581 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3582 			free(lun, M_MPR);
3583 		}
3584 	}
3585 	free(sassc->targets, M_MPR);
3586 
3587 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3588 	    M_MPR, M_WAITOK|M_ZERO);
3589 }
3590