xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision bdcbfde31e8e9b343f113a1956384bdf30d1ed62)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 /* Communications core for Avago Technologies (LSI) MPT3 */
35 
36 /* TODO Move headers to mprvar */
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/module.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/bio.h>
46 #include <sys/malloc.h>
47 #include <sys/uio.h>
48 #include <sys/sysctl.h>
49 #include <sys/endian.h>
50 #include <sys/queue.h>
51 #include <sys/kthread.h>
52 #include <sys/taskqueue.h>
53 #include <sys/sbuf.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <machine/stdarg.h>
60 
61 #include <cam/cam.h>
62 #include <cam/cam_ccb.h>
63 #include <cam/cam_debug.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt_sim.h>
66 #include <cam/cam_xpt_periph.h>
67 #include <cam/cam_periph.h>
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 #include <cam/scsi/smp_all.h>
71 
72 #include <dev/nvme/nvme.h>
73 
74 #include <dev/mpr/mpi/mpi2_type.h>
75 #include <dev/mpr/mpi/mpi2.h>
76 #include <dev/mpr/mpi/mpi2_ioc.h>
77 #include <dev/mpr/mpi/mpi2_sas.h>
78 #include <dev/mpr/mpi/mpi2_pci.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
86 
87 #define MPRSAS_DISCOVERY_TIMEOUT	20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
89 
90 /*
91  * static array to check SCSI OpCode for EEDP protection bits
92  */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI CDB opcode; a non-zero entry gives the EEDP flags to set
 * for that opcode.  The non-zero entries correspond to the standard SCSI
 * READ/WRITE/WRITE AND VERIFY/VERIFY commands (10-, 12- and 16-byte forms)
 * and WRITE SAME(10)/(16).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE AND VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE AND VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE AND VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
114 
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
116 
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
123 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
124 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
125 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
126 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
127 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
128     struct mpr_command *cm);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 static int mprsas_send_portenable(struct mpr_softc *sc);
132 static void mprsas_portenable_complete(struct mpr_softc *sc,
133     struct mpr_command *cm);
134 
135 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
136 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
137     uint64_t sasaddr);
138 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
139 
140 struct mprsas_target *
141 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
142     uint16_t handle)
143 {
144 	struct mprsas_target *target;
145 	int i;
146 
147 	for (i = start; i < sassc->maxtargets; i++) {
148 		target = &sassc->targets[i];
149 		if (target->handle == handle)
150 			return (target);
151 	}
152 
153 	return (NULL);
154 }
155 
156 /* we need to freeze the simq during attach and diag reset, to avoid failing
157  * commands before device handles have been found by discovery.  Since
158  * discovery involves reading config pages and possibly sending commands,
159  * discovery actions may continue even after we receive the end of discovery
160  * event, so refcount discovery actions instead of assuming we can unfreeze
161  * the simq when we get the event.
162  */
/*
 * Take a discovery reference.  The first reference freezes the SIM queue
 * and holds off boot; balanced by mprsas_startup_decrement().  Only has
 * effect while MPRSAS_IN_STARTUP is set.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
			xpt_hold_boot();
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
180 
181 void
182 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
183 {
184 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
185 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
186 		xpt_release_simq(sassc->sim, 1);
187 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
188 	}
189 }
190 
/*
 * Drop a discovery reference taken by mprsas_startup_increment().  When the
 * last reference is released, startup mode ends: the SIM queue is released
 * and boot is allowed to continue.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
			xpt_release_boot();
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
211 
212 /*
213  * The firmware requires us to stop sending commands when we're doing task
214  * management.
215  * use.
216  * XXX The logic for serializing the device has been made lazy and moved to
217  * mprsas_prepare_for_tm().
218  */
219 struct mpr_command *
220 mprsas_alloc_tm(struct mpr_softc *sc)
221 {
222 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
223 	struct mpr_command *tm;
224 
225 	MPR_FUNCTRACE(sc);
226 	tm = mpr_alloc_high_priority_command(sc);
227 	if (tm == NULL)
228 		return (NULL);
229 
230 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
232 	return tm;
233 }
234 
/*
 * Release a task management command allocated with mprsas_alloc_tm().  If a
 * CCB was attached (mprsas_prepare_for_tm() froze the devq), the devq is
 * released and the CCB/path are freed before the command is returned to the
 * high-priority pool.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "Unfreezing devq for target ID %d\n",
		    tm->cm_targ->tid);
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
260 
261 void
262 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
263 {
264 	struct mprsas_softc *sassc = sc->sassc;
265 	path_id_t pathid;
266 	target_id_t targetid;
267 	union ccb *ccb;
268 
269 	MPR_FUNCTRACE(sc);
270 	pathid = cam_sim_path(sassc->sim);
271 	if (targ == NULL)
272 		targetid = CAM_TARGET_WILDCARD;
273 	else
274 		targetid = targ - sassc->targets;
275 
276 	/*
277 	 * Allocate a CCB and schedule a rescan.
278 	 */
279 	ccb = xpt_alloc_ccb_nowait();
280 	if (ccb == NULL) {
281 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
282 		return;
283 	}
284 
285 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
286 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
287 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
288 		xpt_free_ccb(ccb);
289 		return;
290 	}
291 
292 	if (targetid == CAM_TARGET_WILDCARD)
293 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
294 	else
295 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
296 
297 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
298 	xpt_rescan(ccb);
299 }
300 
/*
 * Log a printf-style message at debug level 'level', prefixed with
 * identifying information for the command: the CAM path and CDB when a CCB
 * is attached, otherwise a synthesized sim/bus/target/lun tuple, always
 * followed by the command's SMID.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];	/* on-stack storage backing the sbuf */

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_sbuf(cm->cm_ccb->csio.ccb_h.path, &sb);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB attached; identify the command by sim coordinates. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
342 
343 static void
344 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
345 {
346 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
347 	struct mprsas_target *targ;
348 	uint16_t handle;
349 
350 	MPR_FUNCTRACE(sc);
351 
352 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
353 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
354 	targ = tm->cm_targ;
355 
356 	if (reply == NULL) {
357 		/* XXX retry the remove after the diag reset completes? */
358 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
359 		    "0x%04x\n", __func__, handle);
360 		mprsas_free_tm(sc, tm);
361 		return;
362 	}
363 
364 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
365 	    MPI2_IOCSTATUS_SUCCESS) {
366 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
367 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
368 	}
369 
370 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
371 	    le32toh(reply->TerminationCount));
372 	mpr_free_reply(sc, tm->cm_reply_data);
373 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
374 
375 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
376 	    targ->tid, handle);
377 
378 	/*
379 	 * Don't clear target if remove fails because things will get confusing.
380 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
381 	 * this target id if possible, and so we can assign the same target id
382 	 * to this device if it comes back in the future.
383 	 */
384 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
385 	    MPI2_IOCSTATUS_SUCCESS) {
386 		targ = tm->cm_targ;
387 		targ->handle = 0x0;
388 		targ->encl_handle = 0x0;
389 		targ->encl_level_valid = 0x0;
390 		targ->encl_level = 0x0;
391 		targ->connector_name[0] = ' ';
392 		targ->connector_name[1] = ' ';
393 		targ->connector_name[2] = ' ';
394 		targ->connector_name[3] = ' ';
395 		targ->encl_slot = 0x0;
396 		targ->exp_dev_handle = 0x0;
397 		targ->phy_num = 0x0;
398 		targ->linkrate = 0x0;
399 		targ->devinfo = 0x0;
400 		targ->flags = 0x0;
401 		targ->scsi_req_desc_type = 0;
402 	}
403 
404 	mprsas_free_tm(sc, tm);
405 }
406 
407 /*
408  * Retry mprsas_prepare_remove() if some previous attempt failed to allocate
409  * high priority command due to limit reached.
410  */
411 void
412 mprsas_prepare_remove_retry(struct mprsas_softc *sassc)
413 {
414 	struct mprsas_target *target;
415 	int i;
416 
417 	if ((sassc->flags & MPRSAS_TOREMOVE) == 0)
418 		return;
419 
420 	for (i = 0; i < sassc->maxtargets; i++) {
421 		target = &sassc->targets[i];
422 		if ((target->flags & MPRSAS_TARGET_TOREMOVE) == 0)
423 			continue;
424 		if (TAILQ_EMPTY(&sassc->sc->high_priority_req_list))
425 			return;
426 		target->flags &= ~MPRSAS_TARGET_TOREMOVE;
427 		if (target->flags & MPR_TARGET_FLAGS_VOLUME)
428 			mprsas_prepare_volume_remove(sassc, target->handle);
429 		else
430 			mprsas_prepare_remove(sassc, target->handle);
431 	}
432 	sassc->flags &= ~MPRSAS_TOREMOVE;
433 }
434 
435 /*
436  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437  * Otherwise Volume Delete is same as Bare Drive Removal.
438  */
439 void
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
441 {
442 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 	struct mpr_softc *sc;
444 	struct mpr_command *cm;
445 	struct mprsas_target *targ = NULL;
446 
447 	MPR_FUNCTRACE(sassc->sc);
448 	sc = sassc->sc;
449 
450 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
451 	if (targ == NULL) {
452 		/* FIXME: what is the action? */
453 		/* We don't know about this device? */
454 		mpr_dprint(sc, MPR_ERROR,
455 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456 		return;
457 	}
458 
459 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
460 
461 	cm = mprsas_alloc_tm(sc);
462 	if (cm == NULL) {
463 		targ->flags |= MPRSAS_TARGET_TOREMOVE;
464 		sassc->flags |= MPRSAS_TOREMOVE;
465 		return;
466 	}
467 
468 	mprsas_rescan_target(sc, targ);
469 
470 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 	req->DevHandle = targ->handle;
472 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
473 
474 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 		/* SAS Hard Link Reset / SATA Link Reset */
476 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477 	} else {
478 		/* PCIe Protocol Level Reset*/
479 		req->MsgFlags =
480 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
481 	}
482 
483 	cm->cm_targ = targ;
484 	cm->cm_data = NULL;
485 	cm->cm_complete = mprsas_remove_volume;
486 	cm->cm_complete_data = (void *)(uintptr_t)handle;
487 
488 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 	    __func__, targ->tid);
490 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491 
492 	mpr_map_command(sc, cm);
493 }
494 
495 /*
496  * The firmware performs debounce on the link to avoid transient link errors
497  * and false removals.  When it does decide that link has been lost and a
498  * device needs to go away, it expects that the host will perform a target reset
499  * and then an op remove.  The reset has the side-effect of aborting any
500  * outstanding requests for the device, which is required for the op-remove to
501  * succeed.  It's not clear if the host should check for the device coming back
502  * alive after the reset.
503  */
/*
 * Begin removal of a bare drive: send a target-reset TM whose completion
 * (mprsas_remove_device) follows up with MPI2_SAS_OP_REMOVE_DEVICE.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		/*
		 * Out of high-priority commands; mark the target so
		 * mprsas_prepare_remove_retry() picks it up later.
		 */
		targ->flags |= MPRSAS_TARGET_TOREMOVE;
		sassc->flags |= MPRSAS_TOREMOVE;
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_targ = targ;
	tm->cm_data = NULL;
	/* Completion continues with the OP_REMOVE_DEVICE step. */
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
554 
/*
 * Completion handler for the target-reset TM sent by mprsas_prepare_remove().
 * Frees the TM reply and reuses the same command to send the
 * SAS_IO_UNIT_CONTROL REMOVE_DEVICE operation — immediately when no
 * commands are outstanding on the target, otherwise deferred via
 * targ->pending_remove_tm until the outstanding commands drain.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO,
		    "No pending commands: starting remove_device for target %u handle 0x%04x\n",
		    targ->tid, handle);
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
633 
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL REMOVE_DEVICE operation.
 * On success, clears the target's identifying state — but deliberately not
 * the devname/sasaddr, so the same target ID can be re-used if the device
 * returns — and frees any per-LUN records.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free all LUN records attached to the departed target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
713 
714 static int
715 mprsas_register_events(struct mpr_softc *sc)
716 {
717 	uint8_t events[16];
718 
719 	bzero(events, 16);
720 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
721 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
722 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
723 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
724 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
725 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
726 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
727 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
728 	setbit(events, MPI2_EVENT_IR_VOLUME);
729 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
730 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
731 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
732 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
733 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
734 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
735 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
736 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
737 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
738 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
739 		}
740 	}
741 
742 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
743 	    &sc->sassc->mprsas_eh);
744 
745 	return (0);
746 }
747 
/*
 * CAM attach: allocate the SAS softc and target array, create the SIM and
 * its request queue, start the event-handling taskqueue, register the SCSI
 * bus, freeze the SIM queue until discovery completes, and register for CAM
 * async and firmware events.  Returns 0 or an errno; on error,
 * mpr_detach_sas() is called to tear down whatever was constructed.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Slots offered to CAM: total minus high-priority minus one spare. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	mpr_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
864 
/*
 * CAM detach: undo everything mpr_attach_sas() constructed.  Safe to call
 * on a partially attached softc — each teardown step checks whether its
 * resource exists.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN records still attached to the targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
927 
/*
 * Called when firmware discovery finishes; schedules the one-shot missing-
 * device check of the mapping table (see comment below for why a callout
 * rather than an immediate check).
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}
955 
/*
 * CAM action entry point for this SIM.  Dispatches on the CCB function
 * code.  Requests completed inline (path inquiry, transport settings,
 * geometry, and the faked-success abort/reset codes) fall through to the
 * xpt_done() at the bottom; XPT_RESET_DEV, XPT_SCSI_IO and XPT_SMP_IO are
 * handed to helpers that complete the CCB themselves, so those cases
 * return directly.  Called with the softc mutex held.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		/* Describe this HBA's capabilities to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/*
		 * Map the stored link rate code to a CAM bitrate; the
		 * 0x08..0x0b codes correspond to 1.5/3/6/12 Gb/s links.
		 */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown link rate code: report speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Helper completes the CCB itself; don't fall to xpt_done(). */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper completes the CCB itself; don't fall to xpt_done(). */
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Helper completes the CCB itself; don't fall to xpt_done(). */
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1083 
1084 static void
1085 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1086     target_id_t target_id, lun_id_t lun_id)
1087 {
1088 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1089 	struct cam_path *path;
1090 
1091 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1092 	    ac_code, target_id, (uintmax_t)lun_id);
1093 
1094 	if (xpt_create_path(&path, NULL,
1095 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1096 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1097 		    "notification\n");
1098 		return;
1099 	}
1100 
1101 	xpt_async(ac_code, path, NULL);
1102 	xpt_free_path(path);
1103 }
1104 
/*
 * Force-complete every in-flight command with a NULL reply.  Used during
 * diag reset handling: once the IOC is re-initialized, no outstanding
 * request will ever get a real reply.  Each command is finished through
 * its completion callback or by waking its sleeping submitter; a command
 * matching neither path is logged since that should never happen.
 * Called with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * A SATA identify that timed out left its data buffer
		 * behind for us to free; its originator has moved on.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Let polling waiters observe the command as complete. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding in the hardware any more. */
	sc->io_cmds_active = 0;
}
1159 
/*
 * Prepare the SAS layer for a controller re-initialization (diag reset):
 * re-enter startup mode (freezing the simq via the startup refcount),
 * announce a bus reset to CAM, flush all outstanding commands, then
 * invalidate every target's device handles so targets are rediscovered
 * with fresh handles once the reset completes.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* Flags new I/O for requeue until rediscovery finishes. */
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1202 static void
1203 mprsas_tm_timeout(void *data)
1204 {
1205 	struct mpr_command *tm = data;
1206 	struct mpr_softc *sc = tm->cm_sc;
1207 
1208 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1209 
1210 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1211 	    "out\n", tm);
1212 
1213 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1214 	    ("command not inqueue, state = %u\n", tm->cm_state));
1215 
1216 	tm->cm_state = MPR_CM_STATE_BUSY;
1217 	mpr_reinit(sc);
1218 }
1219 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management request.
 * If no commands remain queued on the LUN, recovery for it succeeded:
 * announce AC_SENT_BDR and either start aborting the next timed-out
 * command on the target or release the TM.  If commands are still
 * queued, the reset effectively failed regardless of the reported
 * status, so recovery escalates to a target reset (or straight to a
 * controller reset for NVMe devices without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* Disarm the escalation timer set by mprsas_send_reset(). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			/* The TM is reused for the next abort. */
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1318 
/*
 * Completion handler for a TARGET_RESET task management request.  If
 * the target has no outstanding commands left, recovery is complete:
 * announce AC_SENT_BDR for all LUNs and release the TM.  If commands
 * are still outstanding, the reset effectively failed regardless of the
 * reported status, and the only remaining escalation is a controller
 * reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* Disarm the escalation timer set by mprsas_send_reset(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1395 
1396 #define MPR_RESET_TIMEOUT 30
1397 
/*
 * Build and submit a LOGICAL_UNIT_RESET or TARGET_RESET task management
 * request for tm's target.  The per-type completion handler continues
 * the recovery escalation; mprsas_tm_timeout() fires a controller reset
 * if no reply arrives within the timeout.  Returns 0 on successful
 * submission, -1 for a missing device handle or unknown reset type, or
 * the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe devices may carry their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Arm the escalation timer before submitting the request. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1472 
/*
 * Completion handler for an ABORT_TASK task management request.  Based
 * on the target's remaining timed-out commands: none left means abort
 * recovery is done; a different command at the head means this abort
 * worked and the next one is issued; the same command still at the head
 * means the abort failed, so recovery escalates to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* Disarm the escalation timer set by mprsas_send_abort(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		/* The TM is reused for the next abort. */
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1553 
1554 #define MPR_ABORT_TIMEOUT 5
1555 
1556 static int
1557 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1558     struct mpr_command *cm)
1559 {
1560 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1561 	struct mprsas_target *targ;
1562 	int err, timeout;
1563 
1564 	targ = cm->cm_targ;
1565 	if (targ->handle == 0) {
1566 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1567 		   "%s null devhandle for target_id %d\n",
1568 		    __func__, cm->cm_ccb->ccb_h.target_id);
1569 		return -1;
1570 	}
1571 
1572 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1573 	    "Aborting command %p\n", cm);
1574 
1575 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1576 	req->DevHandle = htole16(targ->handle);
1577 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1578 
1579 	/* XXX Need to handle invalid LUNs */
1580 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1581 
1582 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1583 
1584 	tm->cm_data = NULL;
1585 	tm->cm_complete = mprsas_abort_complete;
1586 	tm->cm_complete_data = (void *)tm;
1587 	tm->cm_targ = cm->cm_targ;
1588 	tm->cm_lun = cm->cm_lun;
1589 
1590 	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1591 		timeout	= MPR_ABORT_TIMEOUT;
1592 	else
1593 		timeout = sc->nvme_abort_timeout;
1594 
1595 	callout_reset(&tm->cm_callout, timeout * hz,
1596 	    mprsas_tm_timeout, tm);
1597 
1598 	targ->aborts++;
1599 
1600 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1601 
1602 	err = mpr_map_command(sc, tm);
1603 	if (err)
1604 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1605 		    "error %d sending abort for cm %p SMID %u\n",
1606 		    err, cm, req->TaskMID);
1607 	return err;
1608 }
1609 
/*
 * Callout handler for a SCSI I/O that has exceeded its CCB timeout.
 * First polls the interrupt handler in case the completion is merely
 * pending; a command that really timed out is marked CAM_CMD_TIMEOUT,
 * queued on the target's timed-out list, and — if no recovery is already
 * in progress for the target — an abort TM is allocated and sent to
 * start recovery.  Runs with the softc mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		/* The poll above completed it; nothing more to do. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* sim_data records the submission time (see the scsiio setup). */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1697 
/**
 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
 *			     to SCSI Unmap.
 * Return 0 - for success,
 *	  1 - to immediately return back the command with success status to CAM
 *	  negative value - to fallback to firmware path i.e. issue scsi unmap
 *			   to FW without any translation.
 */
static int
mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
    union ccb *ccb, struct mprsas_target *targ)
{
	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct unmap_parm_list *plist;
	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
	struct nvme_command *c;
	int i, res;
	uint16_t ndesc, list_len, data_length;
	struct mpr_prp_page *prp_page_info;
	uint64_t nvme_dsm_ranges_dma_handle;

	csio = &ccb->csio;
	/* PARAMETER LIST LENGTH is big-endian in CDB bytes 7-8. */
	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
	if (!list_len) {
		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
		return -EINVAL;
	}

	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
	if (!plist) {
		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
		    "save UNMAP data\n");
		return -ENOMEM;
	}

	/* Copy SCSI unmap data to a local buffer */
	bcopy(csio->data_ptr, plist, csio->dxfer_len);

	/* return back the unmap command to CAM with success status,
	 * if number of descripts is zero.
	 */
	/* Each UNMAP block descriptor is 16 bytes, hence the >> 4. */
	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc) {
		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
		    "UNMAP cmd is Zero\n");
		res = 1;
		goto out;
	}

	data_length = ndesc * sizeof(struct nvme_dsm_range);
	if (data_length > targ->MDTS) {
		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
		    "Device's MDTS: %d\n", data_length, targ->MDTS);
		res = -EINVAL;
		goto out;
	}

	prp_page_info = mpr_alloc_prp_page(sc);
	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
	    "UNMAP command.\n", __func__));

	/*
	 * Insert the allocated PRP page into the command's PRP page list. This
	 * will be freed when the command is freed.
	 */
	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);

	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;

	bzero(nvme_dsm_ranges, data_length);

	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
	 * for each descriptors contained in SCSI UNMAP data.
	 */
	for (i = 0; i < ndesc; i++) {
		/* SCSI UNMAP fields are big-endian; NVMe is little-endian. */
		nvme_dsm_ranges[i].length =
		    htole32(be32toh(plist->desc[i].nlb));
		nvme_dsm_ranges[i].starting_lba =
		    htole64(be64toh(plist->desc[i].slba));
		nvme_dsm_ranges[i].attributes = 0;
	}

	/* Build MPI2.6's NVMe Encapsulated Request Message */
	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
	req->Flags = MPI26_NVME_FLAGS_WRITE;
	/* Sense buffer doubles as the NVMe error response buffer. */
	req->ErrorResponseBaseAddress.High =
	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
	req->ErrorResponseBaseAddress.Low =
	    htole32(cm->cm_sense_busaddr);
	req->ErrorResponseAllocationLength =
	    htole16(sizeof(struct nvme_completion));
	req->EncapsulatedCommandLength =
	    htole16(sizeof(struct nvme_command));
	req->DataLength = htole32(data_length);

	/* Build NVMe DSM command */
	c = (struct nvme_command *) req->NVMe_Command;
	c->opc = NVME_OPC_DATASET_MANAGEMENT;
	/* LUNs map to NVMe namespaces, which are 1-based. */
	c->nsid = htole32(csio->ccb_h.target_lun + 1);
	/* cdw10 holds the zero-based range count. */
	c->cdw10 = htole32(ndesc - 1);
	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);

	cm->cm_length = data_length;
	cm->cm_data = NULL;

	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	cm->cm_desc.Default.RequestFlags =
	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;

	/* Record submission time and arm the I/O timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_build_nvme_prp(sc, cm, req,
	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
	mpr_map_command(sc, cm);
	res = 0;

out:
	free(plist, M_MPR);
	return (res);
}
1838 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target state, build an
 * MPI2 SCSI IO request (or, for UNMAP on an NVMe drive, a native NVMe
 * DataSetManagement request), fill in EEDP protection information when the
 * LUN is formatted for it, and queue the command to the controller.
 * Completion is delivered through mprsas_scsiio_complete().
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is not (or no longer) mapped. */
	if (targ->handle == 0x0) {
		if (targ->flags & MPRSAS_TARGET_INDIAGRESET) {
			/*
			 * Diag reset in progress: freeze the devq and
			 * requeue so the I/O is retried after the reset.
			 */
			mpr_dprint(sc, MPR_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members only accept I/O through the RAID volume. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Reject new I/O once the driver has started shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Getting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command frames (or a diag reset just started): freeze the
	 * SIM queue and ask CAM to retry the request later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVMe devices, issue UNMAP directly to the drive by
	 * constructing the equivalent native NVMe DataSetManagement command.
	 */
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* rc < 0: fall through and issue as a regular SCSI IO */
	}

	/* Build the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 additional 32-bit CDB words. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Propagate the CCB's command priority and the target's TLR setting. */
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever the CCB keeps it (pointer or inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/* NOTE(review): IoFlags was already set to cdb_len above; this
	 * re-store is redundant but harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN is found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole32(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 in 16-byte CDBs,
				 * byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT to 001b. */
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Defer data mapping to mpr_map_command() via the CCB. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* Bytes between the SGL offset (word 24) and end of frame (word 32). */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout using the CCB's timeout (ms). */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2156 
2157 /**
2158  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2159  */
2160 static void
2161 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2162     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2163 {
2164 	u32 response_info;
2165 	u8 *response_bytes;
2166 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2167 	    MPI2_IOCSTATUS_MASK;
2168 	u8 scsi_state = mpi_reply->SCSIState;
2169 	u8 scsi_status = mpi_reply->SCSIStatus;
2170 	char *desc_ioc_state = NULL;
2171 	char *desc_scsi_status = NULL;
2172 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2173 
2174 	if (log_info == 0x31170000)
2175 		return;
2176 
2177 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2178 	     ioc_status);
2179 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2180 	    scsi_status);
2181 
2182 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2183 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2184 	if (targ->encl_level_valid) {
2185 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2186 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2187 		    targ->connector_name);
2188 	}
2189 
2190 	/*
2191 	 * We can add more detail about underflow data here
2192 	 * TO-DO
2193 	 */
2194 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2195 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2196 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2197 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2198 
2199 	if (sc->mpr_debug & MPR_XINFO &&
2200 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2201 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2202 		scsi_sense_print(csio);
2203 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2204 	}
2205 
2206 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2207 		response_info = le32toh(mpi_reply->ResponseInfo);
2208 		response_bytes = (u8 *)&response_info;
2209 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2210 		    response_bytes[0],
2211 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2212 		    response_bytes[0]));
2213 	}
2214 }
2215 
2216 /** mprsas_nvme_trans_status_code
2217  *
2218  * Convert Native NVMe command error status to
2219  * equivalent SCSI error status.
2220  *
2221  * Returns appropriate scsi_status
2222  */
2223 static u8
2224 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2225     struct mpr_command *cm)
2226 {
2227 	u8 status = MPI2_SCSI_STATUS_GOOD;
2228 	int skey, asc, ascq;
2229 	union ccb *ccb = cm->cm_complete_data;
2230 	int returned_sense_len;
2231 	uint8_t sct, sc;
2232 
2233 	sct = NVME_STATUS_GET_SCT(nvme_status);
2234 	sc = NVME_STATUS_GET_SC(nvme_status);
2235 
2236 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2237 	skey = SSD_KEY_ILLEGAL_REQUEST;
2238 	asc = SCSI_ASC_NO_SENSE;
2239 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2240 
2241 	switch (sct) {
2242 	case NVME_SCT_GENERIC:
2243 		switch (sc) {
2244 		case NVME_SC_SUCCESS:
2245 			status = MPI2_SCSI_STATUS_GOOD;
2246 			skey = SSD_KEY_NO_SENSE;
2247 			asc = SCSI_ASC_NO_SENSE;
2248 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2249 			break;
2250 		case NVME_SC_INVALID_OPCODE:
2251 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2252 			skey = SSD_KEY_ILLEGAL_REQUEST;
2253 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2254 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2255 			break;
2256 		case NVME_SC_INVALID_FIELD:
2257 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2258 			skey = SSD_KEY_ILLEGAL_REQUEST;
2259 			asc = SCSI_ASC_INVALID_CDB;
2260 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2261 			break;
2262 		case NVME_SC_DATA_TRANSFER_ERROR:
2263 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2264 			skey = SSD_KEY_MEDIUM_ERROR;
2265 			asc = SCSI_ASC_NO_SENSE;
2266 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2267 			break;
2268 		case NVME_SC_ABORTED_POWER_LOSS:
2269 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2270 			skey = SSD_KEY_ABORTED_COMMAND;
2271 			asc = SCSI_ASC_WARNING;
2272 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2273 			break;
2274 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2275 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2276 			skey = SSD_KEY_HARDWARE_ERROR;
2277 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2278 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2279 			break;
2280 		case NVME_SC_ABORTED_BY_REQUEST:
2281 		case NVME_SC_ABORTED_SQ_DELETION:
2282 		case NVME_SC_ABORTED_FAILED_FUSED:
2283 		case NVME_SC_ABORTED_MISSING_FUSED:
2284 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2285 			skey = SSD_KEY_ABORTED_COMMAND;
2286 			asc = SCSI_ASC_NO_SENSE;
2287 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2288 			break;
2289 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2290 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2291 			skey = SSD_KEY_ILLEGAL_REQUEST;
2292 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2293 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2294 			break;
2295 		case NVME_SC_LBA_OUT_OF_RANGE:
2296 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2297 			skey = SSD_KEY_ILLEGAL_REQUEST;
2298 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2299 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2300 			break;
2301 		case NVME_SC_CAPACITY_EXCEEDED:
2302 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2303 			skey = SSD_KEY_MEDIUM_ERROR;
2304 			asc = SCSI_ASC_NO_SENSE;
2305 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2306 			break;
2307 		case NVME_SC_NAMESPACE_NOT_READY:
2308 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2309 			skey = SSD_KEY_NOT_READY;
2310 			asc = SCSI_ASC_LUN_NOT_READY;
2311 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2312 			break;
2313 		}
2314 		break;
2315 	case NVME_SCT_COMMAND_SPECIFIC:
2316 		switch (sc) {
2317 		case NVME_SC_INVALID_FORMAT:
2318 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2319 			skey = SSD_KEY_ILLEGAL_REQUEST;
2320 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2321 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2322 			break;
2323 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2324 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2325 			skey = SSD_KEY_ILLEGAL_REQUEST;
2326 			asc = SCSI_ASC_INVALID_CDB;
2327 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2328 			break;
2329 		}
2330 		break;
2331 	case NVME_SCT_MEDIA_ERROR:
2332 		switch (sc) {
2333 		case NVME_SC_WRITE_FAULTS:
2334 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2335 			skey = SSD_KEY_MEDIUM_ERROR;
2336 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2337 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2338 			break;
2339 		case NVME_SC_UNRECOVERED_READ_ERROR:
2340 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2341 			skey = SSD_KEY_MEDIUM_ERROR;
2342 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2343 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2344 			break;
2345 		case NVME_SC_GUARD_CHECK_ERROR:
2346 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2347 			skey = SSD_KEY_MEDIUM_ERROR;
2348 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2349 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2350 			break;
2351 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2352 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2353 			skey = SSD_KEY_MEDIUM_ERROR;
2354 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2355 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2356 			break;
2357 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2358 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2359 			skey = SSD_KEY_MEDIUM_ERROR;
2360 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2361 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2362 			break;
2363 		case NVME_SC_COMPARE_FAILURE:
2364 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2365 			skey = SSD_KEY_MISCOMPARE;
2366 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2367 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2368 			break;
2369 		case NVME_SC_ACCESS_DENIED:
2370 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2371 			skey = SSD_KEY_ILLEGAL_REQUEST;
2372 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2373 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2374 			break;
2375 		}
2376 		break;
2377 	}
2378 
2379 	returned_sense_len = sizeof(struct scsi_sense_data);
2380 	if (returned_sense_len < ccb->csio.sense_len)
2381 		ccb->csio.sense_resid = ccb->csio.sense_len -
2382 		    returned_sense_len;
2383 	else
2384 		ccb->csio.sense_resid = 0;
2385 
2386 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2387 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2388 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2389 
2390 	return status;
2391 }
2392 
2393 /** mprsas_complete_nvme_unmap
2394  *
2395  * Complete native NVMe command issued using NVMe Encapsulated
2396  * Request Message.
2397  */
2398 static u8
2399 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2400 {
2401 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2402 	struct nvme_completion *nvme_completion = NULL;
2403 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2404 
2405 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2406 	if (le16toh(mpi_reply->ErrorResponseCount)){
2407 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2408 		scsi_status = mprsas_nvme_trans_status_code(
2409 		    nvme_completion->status, cm);
2410 	}
2411 	return scsi_status;
2412 }
2413 
2414 static void
2415 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2416 {
2417 	MPI2_SCSI_IO_REPLY *rep;
2418 	union ccb *ccb;
2419 	struct ccb_scsiio *csio;
2420 	struct mprsas_softc *sassc;
2421 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2422 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2423 	int dir = 0, i;
2424 	u16 alloc_len;
2425 	struct mprsas_target *target;
2426 	target_id_t target_id;
2427 
2428 	MPR_FUNCTRACE(sc);
2429 
2430 	callout_stop(&cm->cm_callout);
2431 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2432 
2433 	sassc = sc->sassc;
2434 	ccb = cm->cm_complete_data;
2435 	csio = &ccb->csio;
2436 	target_id = csio->ccb_h.target_id;
2437 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2438 	mpr_dprint(sc, MPR_TRACE,
2439 	    "cm %p SMID %u ccb %p reply %p outstanding %u csio->scsi_status 0x%x,"
2440 	    "csio->dxfer_len 0x%x, csio->msg_le 0x%xn\n", cm,
2441 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2442 	    cm->cm_targ->outstanding, csio->scsi_status,
2443 	    csio->dxfer_len, csio->msg_len);
2444 	/*
2445 	 * XXX KDM if the chain allocation fails, does it matter if we do
2446 	 * the sync and unload here?  It is simpler to do it in every case,
2447 	 * assuming it doesn't cause problems.
2448 	 */
2449 	if (cm->cm_data != NULL) {
2450 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2451 			dir = BUS_DMASYNC_POSTREAD;
2452 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2453 			dir = BUS_DMASYNC_POSTWRITE;
2454 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2455 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2456 	}
2457 
2458 	cm->cm_targ->completed++;
2459 	cm->cm_targ->outstanding--;
2460 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2461 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2462 
2463 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2464 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2465 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2466 		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2467 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2468 		if (cm->cm_reply != NULL)
2469 			mprsas_log_command(cm, MPR_RECOVERY,
2470 			    "completed timedout cm %p ccb %p during recovery "
2471 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2472 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2473 			    rep->SCSIState, le32toh(rep->TransferCount));
2474 		else
2475 			mprsas_log_command(cm, MPR_RECOVERY,
2476 			    "completed timedout cm %p ccb %p during recovery\n",
2477 			    cm, cm->cm_ccb);
2478 	} else if (cm->cm_targ->tm != NULL) {
2479 		if (cm->cm_reply != NULL)
2480 			mprsas_log_command(cm, MPR_RECOVERY,
2481 			    "completed cm %p ccb %p during recovery "
2482 			    "ioc %x scsi %x state %x xfer %u\n",
2483 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2484 			    rep->SCSIStatus, rep->SCSIState,
2485 			    le32toh(rep->TransferCount));
2486 		else
2487 			mprsas_log_command(cm, MPR_RECOVERY,
2488 			    "completed cm %p ccb %p during recovery\n",
2489 			    cm, cm->cm_ccb);
2490 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2491 		mprsas_log_command(cm, MPR_RECOVERY,
2492 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2493 	}
2494 
2495 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2496 		/*
2497 		 * We ran into an error after we tried to map the command,
2498 		 * so we're getting a callback without queueing the command
2499 		 * to the hardware.  So we set the status here, and it will
2500 		 * be retained below.  We'll go through the "fast path",
2501 		 * because there can be no reply when we haven't actually
2502 		 * gone out to the hardware.
2503 		 */
2504 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2505 
2506 		/*
2507 		 * Currently the only error included in the mask is
2508 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2509 		 * chain frames.  We need to freeze the queue until we get
2510 		 * a command that completed without this error, which will
2511 		 * hopefully have some chain frames attached that we can
2512 		 * use.  If we wanted to get smarter about it, we would
2513 		 * only unfreeze the queue in this condition when we're
2514 		 * sure that we're getting some chain frames back.  That's
2515 		 * probably unnecessary.
2516 		 */
2517 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2518 			xpt_freeze_simq(sassc->sim, 1);
2519 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2520 			mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2521 			    "Error sending command, freezing SIM queue\n");
2522 		}
2523 	}
2524 
2525 	/*
2526 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2527 	 * flag, and use it in a few places in the rest of this function for
2528 	 * convenience. Use the macro if available.
2529 	 */
2530 	scsi_cdb = scsiio_cdb_ptr(csio);
2531 
2532 	/*
2533 	 * If this is a Start Stop Unit command and it was issued by the driver
2534 	 * during shutdown, decrement the refcount to account for all of the
2535 	 * commands that were sent.  All SSU commands should be completed before
2536 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2537 	 * is TRUE.
2538 	 */
2539 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2540 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2541 		sc->SSU_refcount--;
2542 	}
2543 
2544 	/* Take the fast path to completion */
2545 	if (cm->cm_reply == NULL) {
2546 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2547 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2548 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2549 			else {
2550 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2551 				csio->scsi_status = SCSI_STATUS_OK;
2552 			}
2553 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2554 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2555 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2556 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2557 				    "Unfreezing SIM queue\n");
2558 			}
2559 		}
2560 
2561 		/*
2562 		 * There are two scenarios where the status won't be
2563 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2564 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2565 		 */
2566 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2567 			/*
2568 			 * Freeze the dev queue so that commands are
2569 			 * executed in the correct order after error
2570 			 * recovery.
2571 			 */
2572 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2573 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2574 		}
2575 		mpr_free_command(sc, cm);
2576 		xpt_done(ccb);
2577 		return;
2578 	}
2579 
2580 	target = &sassc->targets[target_id];
2581 	if (scsi_cdb[0] == UNMAP &&
2582 	    target->is_nvme &&
2583 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2584 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2585 		csio->scsi_status = rep->SCSIStatus;
2586 	}
2587 
2588 	mprsas_log_command(cm, MPR_XINFO,
2589 	    "ioc %x scsi %x state %x xfer %u\n",
2590 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2591 	    le32toh(rep->TransferCount));
2592 
2593 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2594 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2595 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2596 		/* FALLTHROUGH */
2597 	case MPI2_IOCSTATUS_SUCCESS:
2598 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2599 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2600 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2601 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2602 
2603 		/* Completion failed at the transport level. */
2604 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2605 		    MPI2_SCSI_STATE_TERMINATED)) {
2606 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2607 			break;
2608 		}
2609 
2610 		/* In a modern packetized environment, an autosense failure
2611 		 * implies that there's not much else that can be done to
2612 		 * recover the command.
2613 		 */
2614 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2615 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2616 			break;
2617 		}
2618 
2619 		/*
2620 		 * CAM doesn't care about SAS Response Info data, but if this is
2621 		 * the state check if TLR should be done.  If not, clear the
2622 		 * TLR_bits for the target.
2623 		 */
2624 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2625 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2626 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2627 			sc->mapping_table[target_id].TLR_bits =
2628 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2629 		}
2630 
2631 		/*
2632 		 * Intentionally override the normal SCSI status reporting
2633 		 * for these two cases.  These are likely to happen in a
2634 		 * multi-initiator environment, and we want to make sure that
2635 		 * CAM retries these commands rather than fail them.
2636 		 */
2637 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2638 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2639 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2640 			break;
2641 		}
2642 
2643 		/* Handle normal status and sense */
2644 		csio->scsi_status = rep->SCSIStatus;
2645 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2646 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2647 		else
2648 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2649 
2650 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2651 			int sense_len, returned_sense_len;
2652 
2653 			returned_sense_len = min(le32toh(rep->SenseCount),
2654 			    sizeof(struct scsi_sense_data));
2655 			if (returned_sense_len < csio->sense_len)
2656 				csio->sense_resid = csio->sense_len -
2657 				    returned_sense_len;
2658 			else
2659 				csio->sense_resid = 0;
2660 
2661 			sense_len = min(returned_sense_len,
2662 			    csio->sense_len - csio->sense_resid);
2663 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2664 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2665 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2666 		}
2667 
2668 		/*
2669 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2670 		 * and it's page code 0 (Supported Page List), and there is
2671 		 * inquiry data, and this is for a sequential access device, and
2672 		 * the device is an SSP target, and TLR is supported by the
2673 		 * controller, turn the TLR_bits value ON if page 0x90 is
2674 		 * supported.
2675 		 */
2676 		if ((scsi_cdb[0] == INQUIRY) &&
2677 		    (scsi_cdb[1] & SI_EVPD) &&
2678 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2679 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2680 		    (csio->data_ptr != NULL) &&
2681 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2682 		    (sc->control_TLR) &&
2683 		    (sc->mapping_table[target_id].device_info &
2684 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2685 			vpd_list = (struct scsi_vpd_supported_page_list *)
2686 			    csio->data_ptr;
2687 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2688 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2689 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2690 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2691 			alloc_len -= csio->resid;
2692 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2693 				if (vpd_list->list[i] == 0x90) {
2694 					*TLR_bits = TLR_on;
2695 					break;
2696 				}
2697 			}
2698 		}
2699 
2700 		/*
2701 		 * If this is a SATA direct-access end device, mark it so that
2702 		 * a SCSI StartStopUnit command will be sent to it when the
2703 		 * driver is being shutdown.
2704 		 */
2705 		if ((scsi_cdb[0] == INQUIRY) &&
2706 		    (csio->data_ptr != NULL) &&
2707 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2708 		    (sc->mapping_table[target_id].device_info &
2709 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2710 		    ((sc->mapping_table[target_id].device_info &
2711 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2712 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2713 			target = &sassc->targets[target_id];
2714 			target->supports_SSU = TRUE;
2715 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2716 			    target_id);
2717 		}
2718 		break;
2719 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2720 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2721 		/*
2722 		 * If devinfo is 0 this will be a volume.  In that case don't
2723 		 * tell CAM that the volume is not there.  We want volumes to
2724 		 * be enumerated until they are deleted/removed, not just
2725 		 * failed.
2726 		 */
2727 		if (cm->cm_targ->devinfo == 0)
2728 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2729 		else
2730 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2731 		break;
2732 	case MPI2_IOCSTATUS_INVALID_SGL:
2733 		mpr_print_scsiio_cmd(sc, cm);
2734 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2735 		break;
2736 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2737 		/*
2738 		 * This is one of the responses that comes back when an I/O
2739 		 * has been aborted.  If it is because of a timeout that we
2740 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2741 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2742 		 * command is the same (it gets retried, subject to the
2743 		 * retry counter), the only difference is what gets printed
2744 		 * on the console.
2745 		 */
2746 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2747 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2748 		else
2749 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2750 		break;
2751 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2752 		/* resid is ignored for this condition */
2753 		csio->resid = 0;
2754 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2755 		break;
2756 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2757 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2758 		/*
2759 		 * These can sometimes be transient transport-related
2760 		 * errors, and sometimes persistent drive-related errors.
2761 		 * We used to retry these without decrementing the retry
2762 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2763 		 * we hit a persistent drive problem that returns one of
2764 		 * these error codes, we would retry indefinitely.  So,
2765 		 * return CAM_REQ_CMP_ERR so that we decrement the retry
2766 		 * count and avoid infinite retries.  We're taking the
2767 		 * potential risk of flagging false failures in the event
2768 		 * of a topology-related error (e.g. a SAS expander problem
2769 		 * causes a command addressed to a drive to fail), but
2770 		 * avoiding getting into an infinite retry loop. However,
2771 		 * if we get them while were removing a device, we should
2772 		 * fail the request as 'not there' because the device
2773 		 * is effectively gone.
2774 		 */
2775 		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2776 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2777 		else
2778 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2779 		mpr_dprint(sc, MPR_INFO,
2780 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2781 		    mpr_describe_table(mpr_iocstatus_string,
2782 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2783 		    target_id, cm->cm_desc.Default.SMID,
2784 		    le32toh(rep->IOCLogInfo),
2785 		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2786 		mpr_dprint(sc, MPR_XINFO,
2787 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2788 		    rep->SCSIStatus, rep->SCSIState,
2789 		    le32toh(rep->TransferCount));
2790 		break;
2791 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2792 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2793 	case MPI2_IOCSTATUS_INVALID_VPID:
2794 	case MPI2_IOCSTATUS_INVALID_FIELD:
2795 	case MPI2_IOCSTATUS_INVALID_STATE:
2796 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2797 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2798 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2799 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2800 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2801 	default:
2802 		mprsas_log_command(cm, MPR_XINFO,
2803 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2804 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2805 		    rep->SCSIStatus, rep->SCSIState,
2806 		    le32toh(rep->TransferCount));
2807 		csio->resid = cm->cm_length;
2808 
2809 		if (scsi_cdb[0] == UNMAP &&
2810 		    target->is_nvme &&
2811 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2812 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2813 		else
2814 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2815 
2816 		break;
2817 	}
2818 
2819 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2820 
2821 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2822 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2823 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2824 		mpr_dprint(sc, MPR_INFO, "Command completed, unfreezing SIM "
2825 		    "queue\n");
2826 	}
2827 
2828 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2829 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2830 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2831 	}
2832 
2833 	/*
2834 	 * Check to see if we're removing the device. If so, and this is the
2835 	 * last command on the queue, proceed with the deferred removal of the
2836 	 * device.  Note, for removing a volume, this won't trigger because
2837 	 * pending_remove_tm will be NULL.
2838 	 */
2839 	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2840 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2841 		    cm->cm_targ->pending_remove_tm != NULL) {
2842 			mpr_dprint(sc, MPR_INFO,
2843 			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
2844 			    cm->cm_targ->tid, cm->cm_targ->handle);
2845 			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2846 			cm->cm_targ->pending_remove_tm = NULL;
2847 		}
2848 	}
2849 
2850 	mpr_free_command(sc, cm);
2851 	xpt_done(ccb);
2852 }
2853 
/*
 * Completion handler for SMP passthrough commands.  Translates the MPI
 * SMP_PASSTHROUGH reply into a CAM status on the CCB, then syncs and
 * unloads the DMA map that covered the request/response buffers and
 * finishes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* A NULL reply means the command was terminated by a reset. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of an SMP response frame is the SMP function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2916 
/*
 * Build and send an SMP passthrough request to the given SAS address on
 * behalf of an XPT_SMP_IO CCB.  The request and response buffers are
 * described through a two-element uio so a single mpr_map_command() call
 * maps both for DMA; completion is handled by mprsas_smpio_complete().
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;	/* kernel VAs of the SMP buffers */
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/* Locate the request and response buffers from the CCB. */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3084 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself, or the expander it hangs off of) and hand
 * the request to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out which expander the
	 * request should be routed to.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe code: look the parent up by its device handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * New probe code: the parent's devinfo and SAS address are
		 * cached in the target structure at discovery time.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3210 
/*
 * Handle an XPT_RESET_DEV CCB by sending a target-reset task management
 * request to the device.  For NVMe devices (unless the controller uses
 * the custom NVMe TM handling) a PCIe protocol-level reset is requested
 * instead of a SAS/SATA link reset.  Completion is handled by
 * mprsas_resetdev_complete().
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	/* TM commands come from a dedicated pool; NULL means it's empty. */
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Freeze the devq so no new I/O reaches the target mid-reset. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3259 
3260 static void
3261 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3262 {
3263 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3264 	union ccb *ccb;
3265 
3266 	MPR_FUNCTRACE(sc);
3267 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3268 
3269 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3270 	ccb = tm->cm_complete_data;
3271 
3272 	/*
3273 	 * Currently there should be no way we can hit this case.  It only
3274 	 * happens when we have a failure to allocate chain frames, and
3275 	 * task management commands don't have S/G lists.
3276 	 */
3277 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3278 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3279 
3280 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3281 
3282 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3283 		    "handle %#04x! This should not happen!\n", __func__,
3284 		    tm->cm_flags, req->DevHandle);
3285 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3286 		goto bailout;
3287 	}
3288 
3289 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3290 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3291 
3292 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3293 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3294 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3295 		    CAM_LUN_WILDCARD);
3296 	}
3297 	else
3298 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3299 
3300 bailout:
3301 
3302 	mprsas_free_tm(sc, tm);
3303 	xpt_done(ccb);
3304 }
3305 
3306 static void
3307 mprsas_poll(struct cam_sim *sim)
3308 {
3309 	struct mprsas_softc *sassc;
3310 
3311 	sassc = cam_sim_softc(sim);
3312 
3313 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3314 		/* frequent debug messages during a panic just slow
3315 		 * everything down too much.
3316 		 */
3317 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3318 		    __func__);
3319 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3320 	}
3321 
3322 	mpr_intr_locked(sassc->sc);
3323 }
3324 
/*
 * CAM async event callback.  The only event of interest is
 * AC_ADVINFO_CHANGED with a CDAI_TYPE_RCAPLONG buffer: re-read the long
 * READ CAPACITY data for the LUN and record whether it is formatted
 * with EEDP (T10 protection information), which the I/O path uses when
 * building protection-enabled requests.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	mpr_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the existing per-LUN record, if any. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN; create a record for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long READ CAPACITY data via an
		 * XPT_DEV_ADVINFO CCB issued on CAM's own path.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Protection types 1 and 3 are usable by the driver; type
		 * 2 (and anything unknown) is treated as unformatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mpr_unlock(sc);
}
3426 
3427 /*
3428  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3429  * the target until the reset has completed.  The CCB holds the path which
3430  * is used to release the devq.  The devq is released and the CCB is freed
3431  * when the TM completes.
3432  * We only need to do this when we're entering reset, not at each time we
3433  * need to send an abort (which will happen if multiple commands timeout
3434  * while we're sending the abort). We do not release the queue for each
3435  * command we complete (just at the end when we free the tm), so freezing
3436  * it each time doesn't make sense.
3437  */
3438 void
3439 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3440     struct mprsas_target *target, lun_id_t lun_id)
3441 {
3442 	union ccb *ccb;
3443 	path_id_t path_id;
3444 
3445 	ccb = xpt_alloc_ccb_nowait();
3446 	if (ccb) {
3447 		path_id = cam_sim_path(sc->sassc->sim);
3448 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3449 		    target->tid, lun_id) != CAM_REQ_CMP) {
3450 			xpt_free_ccb(ccb);
3451 		} else {
3452 			tm->cm_ccb = ccb;
3453 			tm->cm_targ = target;
3454 			if ((target->flags & MPRSAS_TARGET_INRESET) == 0) {
3455 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
3456 				    "%s: Freezing devq for target ID %d\n",
3457 				    __func__, target->tid);
3458 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3459 				target->flags |= MPRSAS_TARGET_INRESET;
3460 			}
3461 		}
3462 	}
3463 }
3464 
3465 int
3466 mprsas_startup(struct mpr_softc *sc)
3467 {
3468 	/*
3469 	 * Send the port enable message and set the wait_for_port_enable flag.
3470 	 * This flag helps to keep the simq frozen until all discovery events
3471 	 * are processed.
3472 	 */
3473 	sc->wait_for_port_enable = 1;
3474 	mprsas_send_portenable(sc);
3475 	return (0);
3476 }
3477 
3478 static int
3479 mprsas_send_portenable(struct mpr_softc *sc)
3480 {
3481 	MPI2_PORT_ENABLE_REQUEST *request;
3482 	struct mpr_command *cm;
3483 
3484 	MPR_FUNCTRACE(sc);
3485 
3486 	if ((cm = mpr_alloc_command(sc)) == NULL)
3487 		return (EBUSY);
3488 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3489 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3490 	request->MsgFlags = 0;
3491 	request->VP_ID = 0;
3492 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3493 	cm->cm_complete = mprsas_portenable_complete;
3494 	cm->cm_data = NULL;
3495 	cm->cm_sge = NULL;
3496 
3497 	mpr_map_command(sc, cm);
3498 	mpr_dprint(sc, MPR_XINFO,
3499 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3500 	    cm, cm->cm_req, cm->cm_complete);
3501 	return (0);
3502 }
3503 
3504 static void
3505 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3506 {
3507 	MPI2_PORT_ENABLE_REPLY *reply;
3508 	struct mprsas_softc *sassc;
3509 
3510 	MPR_FUNCTRACE(sc);
3511 	sassc = sc->sassc;
3512 
3513 	/*
3514 	 * Currently there should be no way we can hit this case.  It only
3515 	 * happens when we have a failure to allocate chain frames, and
3516 	 * port enable commands don't have S/G lists.
3517 	 */
3518 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3519 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3520 		    "This should not happen!\n", __func__, cm->cm_flags);
3521 	}
3522 
3523 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3524 	if (reply == NULL)
3525 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3526 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3527 	    MPI2_IOCSTATUS_SUCCESS)
3528 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3529 
3530 	mpr_free_command(sc, cm);
3531 	/*
3532 	 * Done waiting for port enable to complete.  Decrement the refcount.
3533 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3534 	 * take place.
3535 	 */
3536 	sc->wait_for_port_enable = 0;
3537 	sc->port_enable_complete = 1;
3538 	wakeup(&sc->port_enable_complete);
3539 	mprsas_startup_decrement(sassc);
3540 }
3541 
3542 int
3543 mprsas_check_id(struct mprsas_softc *sassc, int id)
3544 {
3545 	struct mpr_softc *sc = sassc->sc;
3546 	char *ids;
3547 	char *name;
3548 
3549 	ids = &sc->exclude_ids[0];
3550 	while((name = strsep(&ids, ",")) != NULL) {
3551 		if (name[0] == '\0')
3552 			continue;
3553 		if (strtol(name, NULL, 0) == (long)id)
3554 			return (1);
3555 	}
3556 
3557 	return (0);
3558 }
3559 
3560 void
3561 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3562 {
3563 	struct mprsas_softc *sassc;
3564 	struct mprsas_lun *lun, *lun_tmp;
3565 	struct mprsas_target *targ;
3566 	int i;
3567 
3568 	sassc = sc->sassc;
3569 	/*
3570 	 * The number of targets is based on IOC Facts, so free all of
3571 	 * the allocated LUNs for each target and then the target buffer
3572 	 * itself.
3573 	 */
3574 	for (i=0; i< maxtargets; i++) {
3575 		targ = &sassc->targets[i];
3576 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3577 			free(lun, M_MPR);
3578 		}
3579 	}
3580 	free(sassc->targets, M_MPR);
3581 
3582 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3583 	    M_MPR, M_WAITOK|M_ZERO);
3584 }
3585