xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 2008043f386721d58158e37e0d7e50df8095942d)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 /* Communications core for Avago Technologies (LSI) MPT3 */
35 
36 /* TODO Move headers to mprvar */
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/module.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/bio.h>
46 #include <sys/malloc.h>
47 #include <sys/uio.h>
48 #include <sys/sysctl.h>
49 #include <sys/endian.h>
50 #include <sys/queue.h>
51 #include <sys/kthread.h>
52 #include <sys/taskqueue.h>
53 #include <sys/sbuf.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <machine/stdarg.h>
60 
61 #include <cam/cam.h>
62 #include <cam/cam_ccb.h>
63 #include <cam/cam_debug.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt_sim.h>
66 #include <cam/cam_xpt_periph.h>
67 #include <cam/cam_periph.h>
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 #include <cam/scsi/smp_all.h>
71 
72 #include <dev/nvme/nvme.h>
73 
74 #include <dev/mpr/mpi/mpi2_type.h>
75 #include <dev/mpr/mpi/mpi2.h>
76 #include <dev/mpr/mpi/mpi2_ioc.h>
77 #include <dev/mpr/mpi/mpi2_sas.h>
78 #include <dev/mpr/mpi/mpi2_pci.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
86 
87 #define MPRSAS_DISCOVERY_TIMEOUT	20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
89 
90 /*
91  * static array to check SCSI OpCode for EEDP protection bits
92  */
93 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE&VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE&VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE&VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
114 
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
116 
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
123 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
124 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
125 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
126 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
127 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
128     struct mpr_command *cm);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 static int mprsas_send_portenable(struct mpr_softc *sc);
132 static void mprsas_portenable_complete(struct mpr_softc *sc,
133     struct mpr_command *cm);
134 
135 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
136 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
137     uint64_t sasaddr);
138 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
139 
140 struct mprsas_target *
141 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
142     uint16_t handle)
143 {
144 	struct mprsas_target *target;
145 	int i;
146 
147 	for (i = start; i < sassc->maxtargets; i++) {
148 		target = &sassc->targets[i];
149 		if (target->handle == handle)
150 			return (target);
151 	}
152 
153 	return (NULL);
154 }
155 
156 /* we need to freeze the simq during attach and diag reset, to avoid failing
157  * commands before device handles have been found by discovery.  Since
158  * discovery involves reading config pages and possibly sending commands,
159  * discovery actions may continue even after we receive the end of discovery
160  * event, so refcount discovery actions instead of assuming we can unfreeze
161  * the simq when we get the event.
162  */
163 void
164 mprsas_startup_increment(struct mprsas_softc *sassc)
165 {
166 	MPR_FUNCTRACE(sassc->sc);
167 
168 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
169 		if (sassc->startup_refcount++ == 0) {
170 			/* just starting, freeze the simq */
171 			mpr_dprint(sassc->sc, MPR_INIT,
172 			    "%s freezing simq\n", __func__);
173 			xpt_hold_boot();
174 			xpt_freeze_simq(sassc->sim, 1);
175 		}
176 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
177 		    sassc->startup_refcount);
178 	}
179 }
180 
181 void
182 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
183 {
184 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
185 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
186 		xpt_release_simq(sassc->sim, 1);
187 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
188 	}
189 }
190 
191 void
192 mprsas_startup_decrement(struct mprsas_softc *sassc)
193 {
194 	MPR_FUNCTRACE(sassc->sc);
195 
196 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
197 		if (--sassc->startup_refcount == 0) {
198 			/* finished all discovery-related actions, release
199 			 * the simq and rescan for the latest topology.
200 			 */
201 			mpr_dprint(sassc->sc, MPR_INIT,
202 			    "%s releasing simq\n", __func__);
203 			sassc->flags &= ~MPRSAS_IN_STARTUP;
204 			xpt_release_simq(sassc->sim, 1);
205 			xpt_release_boot();
206 		}
207 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
208 		    sassc->startup_refcount);
209 	}
210 }
211 
/*
 * The firmware requires us to stop sending commands when we're doing task
 * management, so task-management requests are allocated from a dedicated
 * high-priority command pool.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
219 struct mpr_command *
220 mprsas_alloc_tm(struct mpr_softc *sc)
221 {
222 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
223 	struct mpr_command *tm;
224 
225 	MPR_FUNCTRACE(sc);
226 	tm = mpr_alloc_high_priority_command(sc);
227 	if (tm == NULL)
228 		return (NULL);
229 
230 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
232 	return tm;
233 }
234 
/*
 * Release a task-management command obtained from mprsas_alloc_tm().
 * Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "Unfreezing devq for target ID %d\n",
		    tm->cm_targ->tid);
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		/* Release the freeze, then tear down the path/CCB that
		 * carried it; the order matters since the path is still
		 * referenced by the release. */
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	/* Return the command to the high-priority pool. */
	mpr_free_high_priority_command(sc, tm);
}
260 
261 void
262 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
263 {
264 	struct mprsas_softc *sassc = sc->sassc;
265 	path_id_t pathid;
266 	target_id_t targetid;
267 	union ccb *ccb;
268 
269 	MPR_FUNCTRACE(sc);
270 	pathid = cam_sim_path(sassc->sim);
271 	if (targ == NULL)
272 		targetid = CAM_TARGET_WILDCARD;
273 	else
274 		targetid = targ - sassc->targets;
275 
276 	/*
277 	 * Allocate a CCB and schedule a rescan.
278 	 */
279 	ccb = xpt_alloc_ccb_nowait();
280 	if (ccb == NULL) {
281 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
282 		return;
283 	}
284 
285 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
286 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
287 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
288 		xpt_free_ccb(ccb);
289 		return;
290 	}
291 
292 	if (targetid == CAM_TARGET_WILDCARD)
293 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
294 	else
295 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
296 
297 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
298 	xpt_rescan(ccb);
299 }
300 
/*
 * printf-style debug logging for a command, gated on the driver debug
 * level.  Prefixes the message with the CAM path (or a "noperiph" tuple
 * when no CCB is attached) and the request SMID.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Use the CCB's path to identify the device. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, also decode the CDB and length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: synthesize a sim:bus:target:lun prefix. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
345 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Unlike bare-drive removal, no
 * follow-up SAS_OP_REMOVE_DEVICE is sent for volumes; on success the
 * target bookkeeping is simply cleared.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* Handle was stashed by the submitter as the completion cookie. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Return the reply frame now; NULL cm_reply so the generic
	 * completion path doesn't free it a second time. */
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
409 
410 /*
411  * Retry mprsas_prepare_remove() if some previous attempt failed to allocate
412  * high priority command due to limit reached.
413  */
414 void
415 mprsas_prepare_remove_retry(struct mprsas_softc *sassc)
416 {
417 	struct mprsas_target *target;
418 	int i;
419 
420 	if ((sassc->flags & MPRSAS_TOREMOVE) == 0)
421 		return;
422 
423 	for (i = 0; i < sassc->maxtargets; i++) {
424 		target = &sassc->targets[i];
425 		if ((target->flags & MPRSAS_TARGET_TOREMOVE) == 0)
426 			continue;
427 		if (TAILQ_EMPTY(&sassc->sc->high_priority_req_list))
428 			return;
429 		target->flags &= ~MPRSAS_TARGET_TOREMOVE;
430 		if (target->flags & MPR_TARGET_FLAGS_VOLUME)
431 			mprsas_prepare_volume_remove(sassc, target->handle);
432 		else
433 			mprsas_prepare_remove(sassc, target->handle);
434 	}
435 	sassc->flags &= ~MPRSAS_TOREMOVE;
436 }
437 
438 /*
439  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
440  * Otherwise Volume Delete is same as Bare Drive Removal.
441  */
442 void
443 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
444 {
445 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
446 	struct mpr_softc *sc;
447 	struct mpr_command *cm;
448 	struct mprsas_target *targ = NULL;
449 
450 	MPR_FUNCTRACE(sassc->sc);
451 	sc = sassc->sc;
452 
453 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
454 	if (targ == NULL) {
455 		/* FIXME: what is the action? */
456 		/* We don't know about this device? */
457 		mpr_dprint(sc, MPR_ERROR,
458 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459 		return;
460 	}
461 
462 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
463 
464 	cm = mprsas_alloc_tm(sc);
465 	if (cm == NULL) {
466 		targ->flags |= MPRSAS_TARGET_TOREMOVE;
467 		sassc->flags |= MPRSAS_TOREMOVE;
468 		return;
469 	}
470 
471 	mprsas_rescan_target(sc, targ);
472 
473 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
474 	req->DevHandle = targ->handle;
475 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
476 
477 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
478 		/* SAS Hard Link Reset / SATA Link Reset */
479 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
480 	} else {
481 		/* PCIe Protocol Level Reset*/
482 		req->MsgFlags =
483 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
484 	}
485 
486 	cm->cm_targ = targ;
487 	cm->cm_data = NULL;
488 	cm->cm_complete = mprsas_remove_volume;
489 	cm->cm_complete_data = (void *)(uintptr_t)handle;
490 
491 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
492 	    __func__, targ->tid);
493 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
494 
495 	mpr_map_command(sc, cm);
496 }
497 
498 /*
499  * The firmware performs debounce on the link to avoid transient link errors
500  * and false removals.  When it does decide that link has been lost and a
501  * device needs to go away, it expects that the host will perform a target reset
502  * and then an op remove.  The reset has the side-effect of aborting any
503  * outstanding requests for the device, which is required for the op-remove to
504  * succeed.  It's not clear if the host should check for the device coming back
505  * alive after the reset.
506  */
507 void
508 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
509 {
510 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
511 	struct mpr_softc *sc;
512 	struct mpr_command *tm;
513 	struct mprsas_target *targ = NULL;
514 
515 	MPR_FUNCTRACE(sassc->sc);
516 
517 	sc = sassc->sc;
518 
519 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
520 	if (targ == NULL) {
521 		/* FIXME: what is the action? */
522 		/* We don't know about this device? */
523 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
524 		    __func__, handle);
525 		return;
526 	}
527 
528 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
529 
530 	tm = mprsas_alloc_tm(sc);
531 	if (tm == NULL) {
532 		targ->flags |= MPRSAS_TARGET_TOREMOVE;
533 		sassc->flags |= MPRSAS_TOREMOVE;
534 		return;
535 	}
536 
537 	mprsas_rescan_target(sc, targ);
538 
539 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
540 	req->DevHandle = htole16(targ->handle);
541 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
542 
543 	/* SAS Hard Link Reset / SATA Link Reset */
544 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
545 
546 	tm->cm_targ = targ;
547 	tm->cm_data = NULL;
548 	tm->cm_complete = mprsas_remove_device;
549 	tm->cm_complete_data = (void *)(uintptr_t)handle;
550 
551 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
552 	    __func__, targ->tid);
553 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
554 
555 	mpr_map_command(sc, tm);
556 }
557 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_remove().  On success, the same command is rebuilt as a
 * SAS IO Unit Control REMOVE_DEVICE request.  The op-remove is only
 * mapped immediately when the target has no outstanding commands;
 * otherwise it is parked in targ->pending_remove_tm until they drain.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* Handle was stashed by the submitter as the completion cookie. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Return the reply frame; NULL cm_reply so it isn't freed twice. */
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO,
		    "No pending commands: starting remove_device for target %u handle 0x%04x\n",
		    targ->tid, handle);
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Deferred; issued when the last pending command completes. */
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
636 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request built by
 * mprsas_remove_device().  On success, wipes the target's firmware
 * identity and frees its LUN list so the slot can be reused.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Drain and free the per-target LUN list. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
716 
717 static int
718 mprsas_register_events(struct mpr_softc *sc)
719 {
720 	uint8_t events[16];
721 
722 	bzero(events, 16);
723 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
724 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
725 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
726 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
727 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
728 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
729 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
730 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
731 	setbit(events, MPI2_EVENT_IR_VOLUME);
732 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
733 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
734 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
735 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
736 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
737 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
738 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
739 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
740 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
741 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
742 		}
743 	}
744 
745 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
746 	    &sc->sassc->mprsas_eh);
747 
748 	return (0);
749 }
750 
/*
 * Attach the CAM/SAS layer to an mpr controller: allocate the sassc,
 * the target table, the SIM queue and SIM, register the bus, freeze the
 * simq until discovery completes, and hook up async-event handlers.
 * Returns 0 on success or an errno; on failure, mpr_detach_sas() is
 * called to unwind partial allocations.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Leave the high-priority commands and one spare out of the simq. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Freezes the simq until mprsas_startup_decrement() drains it. */
	mprsas_startup_increment(sassc);

	mpr_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
867 
/*
 * Tear down the CAM/SAS layer.  Tolerates partially-completed attach
 * (called from the mpr_attach_sas() error path): every resource is
 * NULL-checked before release.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Drop any discovery refs still held so the simq gets released. */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Attach froze the simq before ever incrementing the refcount. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records still attached to targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
930 
931 void
932 mprsas_discovery_end(struct mprsas_softc *sassc)
933 {
934 	struct mpr_softc *sc = sassc->sc;
935 
936 	MPR_FUNCTRACE(sc);
937 
938 	/*
939 	 * After discovery has completed, check the mapping table for any
940 	 * missing devices and update their missing counts. Only do this once
941 	 * whenever the driver is initialized so that missing counts aren't
942 	 * updated unnecessarily. Note that just because discovery has
943 	 * completed doesn't mean that events have been processed yet. The
944 	 * check_devices function is a callout timer that checks if ALL devices
945 	 * are missing. If so, it will wait a little longer for events to
946 	 * complete and keep resetting itself until some device in the mapping
947 	 * table is not missing, meaning that event processing has started.
948 	 */
949 	if (sc->track_mapping_events) {
950 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
951 		    "completed. Check for missing devices in the mapping "
952 		    "table.\n");
953 		callout_reset(&sc->device_check_callout,
954 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
955 		    sc);
956 	}
957 }
958 
/*
 * CAM action entry point for this SIM.  Called with the controller mutex
 * held.  Requests handled inline fall through to the xpt_done() at the
 * bottom of the switch; requests queued to the hardware (XPT_SCSI_IO,
 * XPT_SMP_IO, XPT_RESET_DEV) return directly and are completed later by
 * their own completion handlers.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities, limits, and identity to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/*
		 * Report the negotiated link rate for the target, mapping the
		 * firmware's link-rate code to a bitrate in Kb/s.
		 */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means the target slot is unused/gone. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown link rate code: report speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Queued to hardware; completion handler calls xpt_done(). */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by this SIM; report success to keep CAM
		 * moving. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1086 
1087 static void
1088 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1089     target_id_t target_id, lun_id_t lun_id)
1090 {
1091 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1092 	struct cam_path *path;
1093 
1094 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1095 	    ac_code, target_id, (uintmax_t)lun_id);
1096 
1097 	if (xpt_create_path(&path, NULL,
1098 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1099 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1100 		    "notification\n");
1101 		return;
1102 	}
1103 
1104 	xpt_async(ac_code, path, NULL);
1105 	xpt_free_path(path);
1106 }
1107 
/*
 * Force-complete every in-use command after a diag reset, handing each a
 * NULL reply.  Commands with a completion callback are completed through
 * it, polled commands are marked COMPLETE, and sleeping waiters are
 * woken.  Called with the controller mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Free the identify buffer left over from a timed-out SATA
		 * passthrough; nothing will consume it now.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1162 
/*
 * Re-arm the SAS layer after a controller diag reset: go back into
 * startup mode (freezing the simq), announce a bus reset to CAM, flush
 * all outstanding commands back with NULL replies, and invalidate every
 * target's device handle so rediscovery can assign new ones.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1205 static void
1206 mprsas_tm_timeout(void *data)
1207 {
1208 	struct mpr_command *tm = data;
1209 	struct mpr_softc *sc = tm->cm_sc;
1210 
1211 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1212 
1213 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1214 	    "out\n", tm);
1215 
1216 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1217 	    ("command not inqueue, state = %u\n", tm->cm_state));
1218 
1219 	tm->cm_state = MPR_CM_STATE_BUSY;
1220 	mpr_reinit(sc);
1221 }
1222 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management request.
 * If the LUN still has outstanding commands after the reset, recovery
 * escalates to a target reset (or a controller reset for NVMe devices
 * without custom TM handling).  Otherwise recovery for this LUN is done
 * and the TM is either reused to abort the next timed-out command on
 * the target or freed.
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* Cancel the escalation timer armed by mprsas_send_reset(). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a device reset was delivered to this LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1321 
/*
 * Completion handler for a TARGET_RESET task management request.  A
 * target reset is the last per-device escalation step: if the target
 * still has outstanding commands afterwards, the only remaining option
 * is a full controller reset via mpr_reinit().
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* Cancel the escalation timer armed by mprsas_send_reset(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		/* Announce the reset to CAM for every LUN on the target. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1398 
1399 #define MPR_RESET_TIMEOUT 30
1400 
/*
 * Build and issue a SCSI task management reset request of the given
 * type (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated
 * high-priority command 'tm'.  Arms a timeout that escalates to a
 * controller reset via mprsas_tm_timeout() if no reply arrives.
 *
 * Returns 0 on success, -1 on invalid handle/type, or the error from
 * mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe devices may specify their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1475 
/*
 * Completion handler for an ABORT_TASK task management request.  On
 * success, either continue aborting the target's remaining timed-out
 * commands or finish recovery; if the aborted command did not actually
 * complete (it is still at the head of the timedout list), escalate to
 * a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* Cancel the escalation timer armed by mprsas_send_abort(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1556 
1557 #define MPR_ABORT_TIMEOUT 5
1558 
1559 static int
1560 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1561     struct mpr_command *cm)
1562 {
1563 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1564 	struct mprsas_target *targ;
1565 	int err, timeout;
1566 
1567 	targ = cm->cm_targ;
1568 	if (targ->handle == 0) {
1569 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1570 		   "%s null devhandle for target_id %d\n",
1571 		    __func__, cm->cm_ccb->ccb_h.target_id);
1572 		return -1;
1573 	}
1574 
1575 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1576 	    "Aborting command %p\n", cm);
1577 
1578 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1579 	req->DevHandle = htole16(targ->handle);
1580 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1581 
1582 	/* XXX Need to handle invalid LUNs */
1583 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1584 
1585 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1586 
1587 	tm->cm_data = NULL;
1588 	tm->cm_complete = mprsas_abort_complete;
1589 	tm->cm_complete_data = (void *)tm;
1590 	tm->cm_targ = cm->cm_targ;
1591 	tm->cm_lun = cm->cm_lun;
1592 
1593 	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1594 		timeout	= MPR_ABORT_TIMEOUT;
1595 	else
1596 		timeout = sc->nvme_abort_timeout;
1597 
1598 	callout_reset(&tm->cm_callout, timeout * hz,
1599 	    mprsas_tm_timeout, tm);
1600 
1601 	targ->aborts++;
1602 
1603 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1604 
1605 	err = mpr_map_command(sc, tm);
1606 	if (err)
1607 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1608 		    "error %d sending abort for cm %p SMID %u\n",
1609 		    err, cm, req->TaskMID);
1610 	return err;
1611 }
1612 
/*
 * Callout handler for a timed-out SCSI I/O command.  Marks the command
 * as timed out, queues it on the target's timedout list, and starts
 * recovery by sending an abort — unless the target is already in
 * recovery, in which case the command just waits its turn.  Runs with
 * the controller mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submission. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1700 
1701 /**
1702  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1703  *			     to SCSI Unmap.
1704  * Return 0 - for success,
1705  *	  1 - to immediately return back the command with success status to CAM
1706  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1707  *			   to FW without any translation.
1708  */
1709 static int
1710 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1711     union ccb *ccb, struct mprsas_target *targ)
1712 {
1713 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1714 	struct ccb_scsiio *csio;
1715 	struct unmap_parm_list *plist;
1716 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1717 	struct nvme_command *c;
1718 	int i, res;
1719 	uint16_t ndesc, list_len, data_length;
1720 	struct mpr_prp_page *prp_page_info;
1721 	uint64_t nvme_dsm_ranges_dma_handle;
1722 
1723 	csio = &ccb->csio;
1724 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1725 	if (!list_len) {
1726 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1727 		return -EINVAL;
1728 	}
1729 
1730 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1731 	if (!plist) {
1732 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1733 		    "save UNMAP data\n");
1734 		return -ENOMEM;
1735 	}
1736 
1737 	/* Copy SCSI unmap data to a local buffer */
1738 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1739 
1740 	/* return back the unmap command to CAM with success status,
1741 	 * if number of descripts is zero.
1742 	 */
1743 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1744 	if (!ndesc) {
1745 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1746 		    "UNMAP cmd is Zero\n");
1747 		res = 1;
1748 		goto out;
1749 	}
1750 
1751 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1752 	if (data_length > targ->MDTS) {
1753 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1754 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1755 		res = -EINVAL;
1756 		goto out;
1757 	}
1758 
1759 	prp_page_info = mpr_alloc_prp_page(sc);
1760 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1761 	    "UNMAP command.\n", __func__));
1762 
1763 	/*
1764 	 * Insert the allocated PRP page into the command's PRP page list. This
1765 	 * will be freed when the command is freed.
1766 	 */
1767 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1768 
1769 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1770 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1771 
1772 	bzero(nvme_dsm_ranges, data_length);
1773 
1774 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1775 	 * for each descriptors contained in SCSI UNMAP data.
1776 	 */
1777 	for (i = 0; i < ndesc; i++) {
1778 		nvme_dsm_ranges[i].length =
1779 		    htole32(be32toh(plist->desc[i].nlb));
1780 		nvme_dsm_ranges[i].starting_lba =
1781 		    htole64(be64toh(plist->desc[i].slba));
1782 		nvme_dsm_ranges[i].attributes = 0;
1783 	}
1784 
1785 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1786 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1787 	bzero(req, sizeof(*req));
1788 	req->DevHandle = htole16(targ->handle);
1789 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1790 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1791 	req->ErrorResponseBaseAddress.High =
1792 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1793 	req->ErrorResponseBaseAddress.Low =
1794 	    htole32(cm->cm_sense_busaddr);
1795 	req->ErrorResponseAllocationLength =
1796 	    htole16(sizeof(struct nvme_completion));
1797 	req->EncapsulatedCommandLength =
1798 	    htole16(sizeof(struct nvme_command));
1799 	req->DataLength = htole32(data_length);
1800 
1801 	/* Build NVMe DSM command */
1802 	c = (struct nvme_command *) req->NVMe_Command;
1803 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1804 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1805 	c->cdw10 = htole32(ndesc - 1);
1806 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1807 
1808 	cm->cm_length = data_length;
1809 	cm->cm_data = NULL;
1810 
1811 	cm->cm_complete = mprsas_scsiio_complete;
1812 	cm->cm_complete_data = ccb;
1813 	cm->cm_targ = targ;
1814 	cm->cm_lun = csio->ccb_h.target_lun;
1815 	cm->cm_ccb = ccb;
1816 
1817 	cm->cm_desc.Default.RequestFlags =
1818 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1819 
1820 	csio->ccb_h.qos.sim_data = sbinuptime();
1821 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1822 	    mprsas_scsiio_timeout, cm, 0);
1823 
1824 	targ->issued++;
1825 	targ->outstanding++;
1826 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1827 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1828 
1829 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1830 	    __func__, cm, ccb, targ->outstanding);
1831 
1832 	mpr_build_nvme_prp(sc, cm, req,
1833 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1834 	mpr_map_command(sc, cm);
1835 	res = 0;
1836 
1837 out:
1838 	free(plist, M_MPR);
1839 	return (res);
1840 }
1841 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target's state,
 * allocate a driver command, translate the CAM SCSI I/O into an MPI2
 * SCSI IO request (or a native NVMe DataSetManagement command for an
 * UNMAP directed at an NVMe drive), arm the per-command timeout, and
 * hand the command to mpr_map_command() for DMA mapping and submission
 * to the IOC.
 *
 * Called with the controller mutex held.  On any validation or
 * resource failure the CCB is completed immediately with an
 * appropriate CAM status, freezing the devq or simq where CAM needs
 * to requeue the I/O in order.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/*
	 * A zero device handle means the target has no usable firmware
	 * handle.  If that is because a diag reset is in progress, freeze
	 * the devq and requeue so the I/O is retried after the reset
	 * completes; otherwise fail the I/O as "device not there".
	 */
	if (targ->handle == 0x0) {
		if (targ->flags & MPRSAS_TARGET_INDIAGRESET) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members must not receive direct SCSI I/O. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Reject new I/O once the controller has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Geting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free commands (or a diag reset just started): freeze the simq
	 * and ask CAM to requeue; the queue is released on a later
	 * successful completion (or by recovery).
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 * mprsas_build_nvme_unmap() returns 1 when the UNMAP was a no-op
	 * (complete with success), 0 when an encapsulated NVMe request was
	 * issued, and a negative/other value to fall through to the normal
	 * SCSI IO path.
	 */
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
	}

	/* Build the MPI2 SCSI IO request in the command's request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* Additional CDB length is encoded in units of 4 bytes. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Fold in the CCB's command priority and the target's TLR setting. */
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever the CCB carries it. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no LUN in the list matches. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole32(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs, byte 2
				 * of 6/10/12-byte CDBs; copy its low 32 bits
				 * into the reference tag byte by byte. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT (0x20) in byte 1. */
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				/* 32-byte CDB carries the protect bits in
				 * byte 10 instead. */
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/*
	 * For data transfers, hand the CCB itself to the mapping layer;
	 * MPR_CM_FLAGS_USE_CCB tells it to pull the buffers from the CCB.
	 */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* SGL occupies the request frame from 32-bit word 24 to word 32. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Stamp submit time for CAM QoS and arm the I/O timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2159 
2160 /**
2161  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2162  */
2163 static void
2164 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2165     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2166 {
2167 	u32 response_info;
2168 	u8 *response_bytes;
2169 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2170 	    MPI2_IOCSTATUS_MASK;
2171 	u8 scsi_state = mpi_reply->SCSIState;
2172 	u8 scsi_status = mpi_reply->SCSIStatus;
2173 	char *desc_ioc_state = NULL;
2174 	char *desc_scsi_status = NULL;
2175 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2176 
2177 	if (log_info == 0x31170000)
2178 		return;
2179 
2180 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2181 	     ioc_status);
2182 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2183 	    scsi_status);
2184 
2185 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2186 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2187 	if (targ->encl_level_valid) {
2188 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2189 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2190 		    targ->connector_name);
2191 	}
2192 
2193 	/*
2194 	 * We can add more detail about underflow data here
2195 	 * TO-DO
2196 	 */
2197 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2198 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2199 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2200 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2201 
2202 	if (sc->mpr_debug & MPR_XINFO &&
2203 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2204 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2205 		scsi_sense_print(csio);
2206 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2207 	}
2208 
2209 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2210 		response_info = le32toh(mpi_reply->ResponseInfo);
2211 		response_bytes = (u8 *)&response_info;
2212 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2213 		    response_bytes[0],
2214 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2215 		    response_bytes[0]));
2216 	}
2217 }
2218 
/** mprsas_nvme_trans_status_code
 *
 * Convert a native NVMe completion status (Status Code Type and Status
 * Code packed in nvme_status) into an equivalent SCSI status byte, and
 * synthesize fixed-format sense data (sense key / ASC / ASCQ) into the
 * command's CCB so the result looks like a normal autosense completion
 * to CAM.
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the packed NVMe status into its type and code fields. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any SCT/SC pair not explicitly mapped
	 * below: CHECK CONDITION / ILLEGAL REQUEST with no specific cause.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	/* Generic command status codes. */
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants map to the same TASK ABORTED result. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	/* Command-specific status codes. */
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	/* Media and data-integrity error status codes. */
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * Compute sense_resid as if a full fixed sense structure was
	 * returned by the device.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Build fixed-format sense in the CCB and mark autosense valid. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2395 
2396 /** mprsas_complete_nvme_unmap
2397  *
2398  * Complete native NVMe command issued using NVMe Encapsulated
2399  * Request Message.
2400  */
2401 static u8
2402 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2403 {
2404 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2405 	struct nvme_completion *nvme_completion = NULL;
2406 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2407 
2408 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2409 	if (le16toh(mpi_reply->ErrorResponseCount)){
2410 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2411 		scsi_status = mprsas_nvme_trans_status_code(
2412 		    nvme_completion->status, cm);
2413 	}
2414 	return scsi_status;
2415 }
2416 
2417 static void
2418 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2419 {
2420 	MPI2_SCSI_IO_REPLY *rep;
2421 	union ccb *ccb;
2422 	struct ccb_scsiio *csio;
2423 	struct mprsas_softc *sassc;
2424 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2425 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2426 	int dir = 0, i;
2427 	u16 alloc_len;
2428 	struct mprsas_target *target;
2429 	target_id_t target_id;
2430 
2431 	MPR_FUNCTRACE(sc);
2432 
2433 	callout_stop(&cm->cm_callout);
2434 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2435 
2436 	sassc = sc->sassc;
2437 	ccb = cm->cm_complete_data;
2438 	csio = &ccb->csio;
2439 	target_id = csio->ccb_h.target_id;
2440 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2441 	mpr_dprint(sc, MPR_TRACE,
2442 	    "cm %p SMID %u ccb %p reply %p outstanding %u csio->scsi_status 0x%x,"
2443 	    "csio->dxfer_len 0x%x, csio->msg_le 0x%xn\n", cm,
2444 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2445 	    cm->cm_targ->outstanding, csio->scsi_status,
2446 	    csio->dxfer_len, csio->msg_len);
2447 	/*
2448 	 * XXX KDM if the chain allocation fails, does it matter if we do
2449 	 * the sync and unload here?  It is simpler to do it in every case,
2450 	 * assuming it doesn't cause problems.
2451 	 */
2452 	if (cm->cm_data != NULL) {
2453 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2454 			dir = BUS_DMASYNC_POSTREAD;
2455 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2456 			dir = BUS_DMASYNC_POSTWRITE;
2457 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2458 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2459 	}
2460 
2461 	cm->cm_targ->completed++;
2462 	cm->cm_targ->outstanding--;
2463 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2464 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2465 
2466 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2467 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2468 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2469 		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2470 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2471 		if (cm->cm_reply != NULL)
2472 			mprsas_log_command(cm, MPR_RECOVERY,
2473 			    "completed timedout cm %p ccb %p during recovery "
2474 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2475 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2476 			    rep->SCSIState, le32toh(rep->TransferCount));
2477 		else
2478 			mprsas_log_command(cm, MPR_RECOVERY,
2479 			    "completed timedout cm %p ccb %p during recovery\n",
2480 			    cm, cm->cm_ccb);
2481 	} else if (cm->cm_targ->tm != NULL) {
2482 		if (cm->cm_reply != NULL)
2483 			mprsas_log_command(cm, MPR_RECOVERY,
2484 			    "completed cm %p ccb %p during recovery "
2485 			    "ioc %x scsi %x state %x xfer %u\n",
2486 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2487 			    rep->SCSIStatus, rep->SCSIState,
2488 			    le32toh(rep->TransferCount));
2489 		else
2490 			mprsas_log_command(cm, MPR_RECOVERY,
2491 			    "completed cm %p ccb %p during recovery\n",
2492 			    cm, cm->cm_ccb);
2493 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2494 		mprsas_log_command(cm, MPR_RECOVERY,
2495 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2496 	}
2497 
2498 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2499 		/*
2500 		 * We ran into an error after we tried to map the command,
2501 		 * so we're getting a callback without queueing the command
2502 		 * to the hardware.  So we set the status here, and it will
2503 		 * be retained below.  We'll go through the "fast path",
2504 		 * because there can be no reply when we haven't actually
2505 		 * gone out to the hardware.
2506 		 */
2507 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2508 
2509 		/*
2510 		 * Currently the only error included in the mask is
2511 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2512 		 * chain frames.  We need to freeze the queue until we get
2513 		 * a command that completed without this error, which will
2514 		 * hopefully have some chain frames attached that we can
2515 		 * use.  If we wanted to get smarter about it, we would
2516 		 * only unfreeze the queue in this condition when we're
2517 		 * sure that we're getting some chain frames back.  That's
2518 		 * probably unnecessary.
2519 		 */
2520 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2521 			xpt_freeze_simq(sassc->sim, 1);
2522 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2523 			mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2524 			    "Error sending command, freezing SIM queue\n");
2525 		}
2526 	}
2527 
2528 	/*
2529 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2530 	 * flag, and use it in a few places in the rest of this function for
2531 	 * convenience. Use the macro if available.
2532 	 */
2533 	scsi_cdb = scsiio_cdb_ptr(csio);
2534 
2535 	/*
2536 	 * If this is a Start Stop Unit command and it was issued by the driver
2537 	 * during shutdown, decrement the refcount to account for all of the
2538 	 * commands that were sent.  All SSU commands should be completed before
2539 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2540 	 * is TRUE.
2541 	 */
2542 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2543 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2544 		sc->SSU_refcount--;
2545 	}
2546 
2547 	/* Take the fast path to completion */
2548 	if (cm->cm_reply == NULL) {
2549 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2550 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2551 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2552 			else {
2553 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2554 				csio->scsi_status = SCSI_STATUS_OK;
2555 			}
2556 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2557 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2558 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2559 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2560 				    "Unfreezing SIM queue\n");
2561 			}
2562 		}
2563 
2564 		/*
2565 		 * There are two scenarios where the status won't be
2566 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2567 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2568 		 */
2569 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2570 			/*
2571 			 * Freeze the dev queue so that commands are
2572 			 * executed in the correct order after error
2573 			 * recovery.
2574 			 */
2575 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2576 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2577 		}
2578 		mpr_free_command(sc, cm);
2579 		xpt_done(ccb);
2580 		return;
2581 	}
2582 
2583 	target = &sassc->targets[target_id];
2584 	if (scsi_cdb[0] == UNMAP &&
2585 	    target->is_nvme &&
2586 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2587 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2588 		csio->scsi_status = rep->SCSIStatus;
2589 	}
2590 
2591 	mprsas_log_command(cm, MPR_XINFO,
2592 	    "ioc %x scsi %x state %x xfer %u\n",
2593 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2594 	    le32toh(rep->TransferCount));
2595 
2596 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2597 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2598 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2599 		/* FALLTHROUGH */
2600 	case MPI2_IOCSTATUS_SUCCESS:
2601 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2602 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2603 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2604 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2605 
2606 		/* Completion failed at the transport level. */
2607 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2608 		    MPI2_SCSI_STATE_TERMINATED)) {
2609 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2610 			break;
2611 		}
2612 
2613 		/* In a modern packetized environment, an autosense failure
2614 		 * implies that there's not much else that can be done to
2615 		 * recover the command.
2616 		 */
2617 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2618 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2619 			break;
2620 		}
2621 
2622 		/*
2623 		 * CAM doesn't care about SAS Response Info data, but if this is
2624 		 * the state check if TLR should be done.  If not, clear the
2625 		 * TLR_bits for the target.
2626 		 */
2627 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2628 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2629 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2630 			sc->mapping_table[target_id].TLR_bits =
2631 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2632 		}
2633 
2634 		/*
2635 		 * Intentionally override the normal SCSI status reporting
2636 		 * for these two cases.  These are likely to happen in a
2637 		 * multi-initiator environment, and we want to make sure that
2638 		 * CAM retries these commands rather than fail them.
2639 		 */
2640 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2641 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2642 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2643 			break;
2644 		}
2645 
2646 		/* Handle normal status and sense */
2647 		csio->scsi_status = rep->SCSIStatus;
2648 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2649 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2650 		else
2651 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2652 
2653 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2654 			int sense_len, returned_sense_len;
2655 
2656 			returned_sense_len = min(le32toh(rep->SenseCount),
2657 			    sizeof(struct scsi_sense_data));
2658 			if (returned_sense_len < csio->sense_len)
2659 				csio->sense_resid = csio->sense_len -
2660 				    returned_sense_len;
2661 			else
2662 				csio->sense_resid = 0;
2663 
2664 			sense_len = min(returned_sense_len,
2665 			    csio->sense_len - csio->sense_resid);
2666 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2667 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2668 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2669 		}
2670 
2671 		/*
2672 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2673 		 * and it's page code 0 (Supported Page List), and there is
2674 		 * inquiry data, and this is for a sequential access device, and
2675 		 * the device is an SSP target, and TLR is supported by the
2676 		 * controller, turn the TLR_bits value ON if page 0x90 is
2677 		 * supported.
2678 		 */
2679 		if ((scsi_cdb[0] == INQUIRY) &&
2680 		    (scsi_cdb[1] & SI_EVPD) &&
2681 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2682 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2683 		    (csio->data_ptr != NULL) &&
2684 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2685 		    (sc->control_TLR) &&
2686 		    (sc->mapping_table[target_id].device_info &
2687 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2688 			vpd_list = (struct scsi_vpd_supported_page_list *)
2689 			    csio->data_ptr;
2690 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2691 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2692 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2693 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2694 			alloc_len -= csio->resid;
2695 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2696 				if (vpd_list->list[i] == 0x90) {
2697 					*TLR_bits = TLR_on;
2698 					break;
2699 				}
2700 			}
2701 		}
2702 
2703 		/*
2704 		 * If this is a SATA direct-access end device, mark it so that
2705 		 * a SCSI StartStopUnit command will be sent to it when the
2706 		 * driver is being shutdown.
2707 		 */
2708 		if ((scsi_cdb[0] == INQUIRY) &&
2709 		    (csio->data_ptr != NULL) &&
2710 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2711 		    (sc->mapping_table[target_id].device_info &
2712 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2713 		    ((sc->mapping_table[target_id].device_info &
2714 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2715 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2716 			target = &sassc->targets[target_id];
2717 			target->supports_SSU = TRUE;
2718 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2719 			    target_id);
2720 		}
2721 		break;
2722 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2723 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2724 		/*
2725 		 * If devinfo is 0 this will be a volume.  In that case don't
2726 		 * tell CAM that the volume is not there.  We want volumes to
2727 		 * be enumerated until they are deleted/removed, not just
2728 		 * failed.
2729 		 */
2730 		if (cm->cm_targ->devinfo == 0)
2731 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2732 		else
2733 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2734 		break;
2735 	case MPI2_IOCSTATUS_INVALID_SGL:
2736 		mpr_print_scsiio_cmd(sc, cm);
2737 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2738 		break;
2739 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2740 		/*
2741 		 * This is one of the responses that comes back when an I/O
2742 		 * has been aborted.  If it is because of a timeout that we
2743 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2744 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2745 		 * command is the same (it gets retried, subject to the
2746 		 * retry counter), the only difference is what gets printed
2747 		 * on the console.
2748 		 */
2749 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2750 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2751 		else
2752 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2753 		break;
2754 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2755 		/* resid is ignored for this condition */
2756 		csio->resid = 0;
2757 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2758 		break;
2759 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2760 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2761 		/*
2762 		 * These can sometimes be transient transport-related
2763 		 * errors, and sometimes persistent drive-related errors.
2764 		 * We used to retry these without decrementing the retry
2765 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2766 		 * we hit a persistent drive problem that returns one of
2767 		 * these error codes, we would retry indefinitely.  So,
2768 		 * return CAM_REQ_CMP_ERR so that we decrement the retry
2769 		 * count and avoid infinite retries.  We're taking the
2770 		 * potential risk of flagging false failures in the event
2771 		 * of a topology-related error (e.g. a SAS expander problem
2772 		 * causes a command addressed to a drive to fail), but
2773 		 * avoiding getting into an infinite retry loop. However,
2774 		 * if we get them while were removing a device, we should
2775 		 * fail the request as 'not there' because the device
2776 		 * is effectively gone.
2777 		 */
2778 		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2779 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2780 		else
2781 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2782 		mpr_dprint(sc, MPR_INFO,
2783 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2784 		    mpr_describe_table(mpr_iocstatus_string,
2785 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2786 		    target_id, cm->cm_desc.Default.SMID,
2787 		    le32toh(rep->IOCLogInfo),
2788 		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2789 		mpr_dprint(sc, MPR_XINFO,
2790 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2791 		    rep->SCSIStatus, rep->SCSIState,
2792 		    le32toh(rep->TransferCount));
2793 		break;
2794 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2795 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2796 	case MPI2_IOCSTATUS_INVALID_VPID:
2797 	case MPI2_IOCSTATUS_INVALID_FIELD:
2798 	case MPI2_IOCSTATUS_INVALID_STATE:
2799 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2800 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2801 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2802 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2803 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2804 	default:
2805 		mprsas_log_command(cm, MPR_XINFO,
2806 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2807 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2808 		    rep->SCSIStatus, rep->SCSIState,
2809 		    le32toh(rep->TransferCount));
2810 		csio->resid = cm->cm_length;
2811 
2812 		if (scsi_cdb[0] == UNMAP &&
2813 		    target->is_nvme &&
2814 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2815 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2816 		else
2817 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2818 
2819 		break;
2820 	}
2821 
2822 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2823 
2824 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2825 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2826 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2827 		mpr_dprint(sc, MPR_INFO, "Command completed, unfreezing SIM "
2828 		    "queue\n");
2829 	}
2830 
2831 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2832 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2833 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2834 	}
2835 
2836 	/*
2837 	 * Check to see if we're removing the device. If so, and this is the
2838 	 * last command on the queue, proceed with the deferred removal of the
2839 	 * device.  Note, for removing a volume, this won't trigger because
2840 	 * pending_remove_tm will be NULL.
2841 	 */
2842 	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2843 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2844 		    cm->cm_targ->pending_remove_tm != NULL) {
2845 			mpr_dprint(sc, MPR_INFO,
2846 			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
2847 			    cm->cm_targ->tid, cm->cm_targ->handle);
2848 			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2849 			cm->cm_targ->pending_remove_tm = NULL;
2850 		}
2851 	}
2852 
2853 	mpr_free_command(sc, cm);
2854 	xpt_done(ccb);
2855 }
2856 
/*
 * Completion handler for SMP passthrough commands built by
 * mprsas_send_smpcmd().  Decode the MPI2 SMP passthrough reply, set the
 * CAM status on the originating XPT_SMP_IO CCB accordingly, then tear
 * down the DMA mapping and complete the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Recover the destination SAS address from the request frame; it is
	 * only used for the debug messages below.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame holds the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2919 
/*
 * Translate an XPT_SMP_IO CCB into an MPI2 SMP passthrough request and
 * send it to the chip.  The request and response buffers are described
 * as a two-element uio (one segment each) so that a single
 * mpr_map_command()/busdma operation maps both directions; completion
 * is handled by mprsas_smpio_complete().  Physical-address CCB data is
 * not supported.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * Resolve virtual addresses for the request and response buffers
	 * from whichever data-representation the CCB uses.
	 */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3087 
/*
 * Entry point for XPT_SMP_IO CCBs.  Determine which SAS address should
 * receive the SMP request -- the target itself when it contains an
 * embedded SMP target, otherwise the target's parent (normally the
 * attached expander) -- and pass the CCB to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3213 
3214 static void
3215 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3216 {
3217 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3218 	struct mpr_softc *sc;
3219 	struct mpr_command *tm;
3220 	struct mprsas_target *targ;
3221 
3222 	MPR_FUNCTRACE(sassc->sc);
3223 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3224 
3225 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3226 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3227 	sc = sassc->sc;
3228 	tm = mprsas_alloc_tm(sc);
3229 	if (tm == NULL) {
3230 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3231 		    "mprsas_action_resetdev\n");
3232 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3233 		xpt_done(ccb);
3234 		return;
3235 	}
3236 
3237 	targ = &sassc->targets[ccb->ccb_h.target_id];
3238 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3239 	req->DevHandle = htole16(targ->handle);
3240 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3241 
3242 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3243 		/* SAS Hard Link Reset / SATA Link Reset */
3244 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3245 	} else {
3246 		/* PCIe Protocol Level Reset*/
3247 		req->MsgFlags =
3248 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3249 	}
3250 
3251 	tm->cm_data = NULL;
3252 	tm->cm_complete = mprsas_resetdev_complete;
3253 	tm->cm_complete_data = ccb;
3254 
3255 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3256 	    __func__, targ->tid);
3257 	tm->cm_targ = targ;
3258 
3259 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3260 	mpr_map_command(sc, tm);
3261 }
3262 
3263 static void
3264 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3265 {
3266 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3267 	union ccb *ccb;
3268 
3269 	MPR_FUNCTRACE(sc);
3270 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3271 
3272 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3273 	ccb = tm->cm_complete_data;
3274 
3275 	/*
3276 	 * Currently there should be no way we can hit this case.  It only
3277 	 * happens when we have a failure to allocate chain frames, and
3278 	 * task management commands don't have S/G lists.
3279 	 */
3280 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3281 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3282 
3283 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3284 
3285 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3286 		    "handle %#04x! This should not happen!\n", __func__,
3287 		    tm->cm_flags, req->DevHandle);
3288 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3289 		goto bailout;
3290 	}
3291 
3292 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3293 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3294 
3295 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3296 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3297 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3298 		    CAM_LUN_WILDCARD);
3299 	}
3300 	else
3301 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3302 
3303 bailout:
3304 
3305 	mprsas_free_tm(sc, tm);
3306 	xpt_done(ccb);
3307 }
3308 
3309 static void
3310 mprsas_poll(struct cam_sim *sim)
3311 {
3312 	struct mprsas_softc *sassc;
3313 
3314 	sassc = cam_sim_softc(sim);
3315 
3316 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3317 		/* frequent debug messages during a panic just slow
3318 		 * everything down too much.
3319 		 */
3320 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3321 		    __func__);
3322 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3323 	}
3324 
3325 	mpr_intr_locked(sassc->sc);
3326 }
3327 
/*
 * CAM asynchronous event callback.  Only AC_ADVINFO_CHANGED events for
 * long read-capacity data are handled: re-fetch the cached READ
 * CAPACITY(16) information via XPT_DEV_ADVINFO and update the per-LUN
 * EEDP state (eedp_formatted / eedp_block_size).
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	mpr_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look up our record of this LUN; create one if it's new. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data for this LUN via
		 * an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Protection types 1 and 3 enable EEDP handling for the
		 * LUN; type 2 and unknown types do not.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mpr_unlock(sc);
}
3429 
3430 /*
3431  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3432  * the target until the reset has completed.  The CCB holds the path which
3433  * is used to release the devq.  The devq is released and the CCB is freed
3434  * when the TM completes.
3435  * We only need to do this when we're entering reset, not at each time we
3436  * need to send an abort (which will happen if multiple commands timeout
3437  * while we're sending the abort). We do not release the queue for each
3438  * command we complete (just at the end when we free the tm), so freezing
3439  * it each time doesn't make sense.
3440  */
3441 void
3442 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3443     struct mprsas_target *target, lun_id_t lun_id)
3444 {
3445 	union ccb *ccb;
3446 	path_id_t path_id;
3447 
3448 	ccb = xpt_alloc_ccb_nowait();
3449 	if (ccb) {
3450 		path_id = cam_sim_path(sc->sassc->sim);
3451 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3452 		    target->tid, lun_id) != CAM_REQ_CMP) {
3453 			xpt_free_ccb(ccb);
3454 		} else {
3455 			tm->cm_ccb = ccb;
3456 			tm->cm_targ = target;
3457 			if ((target->flags & MPRSAS_TARGET_INRESET) == 0) {
3458 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
3459 				    "%s: Freezing devq for target ID %d\n",
3460 				    __func__, target->tid);
3461 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3462 				target->flags |= MPRSAS_TARGET_INRESET;
3463 			}
3464 		}
3465 	}
3466 }
3467 
3468 int
3469 mprsas_startup(struct mpr_softc *sc)
3470 {
3471 	/*
3472 	 * Send the port enable message and set the wait_for_port_enable flag.
3473 	 * This flag helps to keep the simq frozen until all discovery events
3474 	 * are processed.
3475 	 */
3476 	sc->wait_for_port_enable = 1;
3477 	mprsas_send_portenable(sc);
3478 	return (0);
3479 }
3480 
3481 static int
3482 mprsas_send_portenable(struct mpr_softc *sc)
3483 {
3484 	MPI2_PORT_ENABLE_REQUEST *request;
3485 	struct mpr_command *cm;
3486 
3487 	MPR_FUNCTRACE(sc);
3488 
3489 	if ((cm = mpr_alloc_command(sc)) == NULL)
3490 		return (EBUSY);
3491 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3492 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3493 	request->MsgFlags = 0;
3494 	request->VP_ID = 0;
3495 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3496 	cm->cm_complete = mprsas_portenable_complete;
3497 	cm->cm_data = NULL;
3498 	cm->cm_sge = NULL;
3499 
3500 	mpr_map_command(sc, cm);
3501 	mpr_dprint(sc, MPR_XINFO,
3502 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3503 	    cm, cm->cm_req, cm->cm_complete);
3504 	return (0);
3505 }
3506 
3507 static void
3508 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3509 {
3510 	MPI2_PORT_ENABLE_REPLY *reply;
3511 	struct mprsas_softc *sassc;
3512 
3513 	MPR_FUNCTRACE(sc);
3514 	sassc = sc->sassc;
3515 
3516 	/*
3517 	 * Currently there should be no way we can hit this case.  It only
3518 	 * happens when we have a failure to allocate chain frames, and
3519 	 * port enable commands don't have S/G lists.
3520 	 */
3521 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3522 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3523 		    "This should not happen!\n", __func__, cm->cm_flags);
3524 	}
3525 
3526 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3527 	if (reply == NULL)
3528 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3529 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3530 	    MPI2_IOCSTATUS_SUCCESS)
3531 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3532 
3533 	mpr_free_command(sc, cm);
3534 	/*
3535 	 * Done waiting for port enable to complete.  Decrement the refcount.
3536 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3537 	 * take place.
3538 	 */
3539 	sc->wait_for_port_enable = 0;
3540 	sc->port_enable_complete = 1;
3541 	wakeup(&sc->port_enable_complete);
3542 	mprsas_startup_decrement(sassc);
3543 }
3544 
3545 int
3546 mprsas_check_id(struct mprsas_softc *sassc, int id)
3547 {
3548 	struct mpr_softc *sc = sassc->sc;
3549 	char *ids;
3550 	char *name;
3551 
3552 	ids = &sc->exclude_ids[0];
3553 	while((name = strsep(&ids, ",")) != NULL) {
3554 		if (name[0] == '\0')
3555 			continue;
3556 		if (strtol(name, NULL, 0) == (long)id)
3557 			return (1);
3558 	}
3559 
3560 	return (0);
3561 }
3562 
3563 void
3564 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3565 {
3566 	struct mprsas_softc *sassc;
3567 	struct mprsas_lun *lun, *lun_tmp;
3568 	struct mprsas_target *targ;
3569 	int i;
3570 
3571 	sassc = sc->sassc;
3572 	/*
3573 	 * The number of targets is based on IOC Facts, so free all of
3574 	 * the allocated LUNs for each target and then the target buffer
3575 	 * itself.
3576 	 */
3577 	for (i=0; i< maxtargets; i++) {
3578 		targ = &sassc->targets[i];
3579 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3580 			free(lun, M_MPR);
3581 		}
3582 	}
3583 	free(sassc->targets, M_MPR);
3584 
3585 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3586 	    M_MPR, M_WAITOK|M_ZERO);
3587 }
3588