xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision c2a55efd74cccb3d4e7b9037b240ad062c203bb8)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 /* Communications core for Avago Technologies (LSI) MPT3 */
34 
35 /* TODO Move headers to mprvar */
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/selinfo.h>
41 #include <sys/module.h>
42 #include <sys/bus.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/malloc.h>
46 #include <sys/uio.h>
47 #include <sys/sysctl.h>
48 #include <sys/endian.h>
49 #include <sys/queue.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/sbuf.h>
53 #include <sys/stdarg.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <cam/cam.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_debug.h>
62 #include <cam/cam_sim.h>
63 #include <cam/cam_xpt_sim.h>
64 #include <cam/cam_xpt_periph.h>
65 #include <cam/cam_periph.h>
66 #include <cam/scsi/scsi_all.h>
67 #include <cam/scsi/scsi_message.h>
68 #include <cam/scsi/smp_all.h>
69 
70 #include <dev/nvme/nvme.h>
71 
72 #include <dev/mpr/mpi/mpi2_type.h>
73 #include <dev/mpr/mpi/mpi2.h>
74 #include <dev/mpr/mpi/mpi2_ioc.h>
75 #include <dev/mpr/mpi/mpi2_sas.h>
76 #include <dev/mpr/mpi/mpi2_pci.h>
77 #include <dev/mpr/mpi/mpi2_cnfg.h>
78 #include <dev/mpr/mpi/mpi2_init.h>
79 #include <dev/mpr/mpi/mpi2_tool.h>
80 #include <dev/mpr/mpr_ioctl.h>
81 #include <dev/mpr/mprvar.h>
82 #include <dev/mpr/mpr_table.h>
83 #include <dev/mpr/mpr_sas.h>
84 
85 #define MPRSAS_DISCOVERY_TIMEOUT	20
86 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
87 
88 #include <sys/sdt.h>
89 
90 /* SDT Probes */
91 SDT_PROBE_DEFINE4(cam, , mpr, complete, "union ccb *",
92     "struct mpr_command *", "u_int", "u32");
93 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by CDB opcode; a non-zero entry selects the EEDP operation to
 * request for that command.  The populated entries are the 10-, 12- and
 * 16-byte READ, WRITE, WRITE AND VERIFY, VERIFY and WRITE SAME opcodes.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP	/* verify uses the same flag as write */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00-0x0f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10-0x1f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e W&V(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30-0x3f */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x41 WRITE SAME(10) */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50-0x5f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60-0x6f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70-0x7f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e W&V(16), 0x8f VERIFY(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x93 WRITE SAME(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae W&V(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0-0xbf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0-0xcf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0-0xdf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0-0xef */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0-0xff */
};
118 
119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
120 
121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mprsas_poll(struct cam_sim *sim);
125 static void mprsas_scsiio_timeout(void *data);
126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
132     struct mpr_command *cm);
133 static void mprsas_async(void *callback_arg, uint32_t code,
134     struct cam_path *path, void *arg);
135 static int mprsas_send_portenable(struct mpr_softc *sc);
136 static void mprsas_portenable_complete(struct mpr_softc *sc,
137     struct mpr_command *cm);
138 
139 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
140 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
141     uint64_t sasaddr);
142 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
143 
144 struct mprsas_target *
145 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
146     uint16_t handle)
147 {
148 	struct mprsas_target *target;
149 	int i;
150 
151 	for (i = start; i < sassc->maxtargets; i++) {
152 		target = &sassc->targets[i];
153 		if (target->handle == handle)
154 			return (target);
155 	}
156 
157 	return (NULL);
158 }
159 
160 /* we need to freeze the simq during attach and diag reset, to avoid failing
161  * commands before device handles have been found by discovery.  Since
162  * discovery involves reading config pages and possibly sending commands,
163  * discovery actions may continue even after we receive the end of discovery
164  * event, so refcount discovery actions instead of assuming we can unfreeze
165  * the simq when we get the event.
166  */
167 void
168 mprsas_startup_increment(struct mprsas_softc *sassc)
169 {
170 	MPR_FUNCTRACE(sassc->sc);
171 
172 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
173 		if (sassc->startup_refcount++ == 0) {
174 			/* just starting, freeze the simq */
175 			mpr_dprint(sassc->sc, MPR_INIT,
176 			    "%s freezing simq\n", __func__);
177 			xpt_hold_boot();
178 			xpt_freeze_simq(sassc->sim, 1);
179 		}
180 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
181 		    sassc->startup_refcount);
182 	}
183 }
184 
185 void
186 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
187 {
188 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
189 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
190 		xpt_release_simq(sassc->sim, 1);
191 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
192 	}
193 }
194 
/*
 * Drop one discovery reference taken by mprsas_startup_increment().  When
 * the last reference is released during startup, discovery is done: clear
 * MPRSAS_IN_STARTUP, release the frozen simq, and allow boot to proceed.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
			xpt_release_boot();
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
215 
/*
 * The firmware requires us to stop sending commands when we're doing task
 * management, so a dedicated high-priority command pool is reserved for TM
 * use.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
223 struct mpr_command *
224 mprsas_alloc_tm(struct mpr_softc *sc)
225 {
226 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
227 	struct mpr_command *tm;
228 
229 	MPR_FUNCTRACE(sc);
230 	tm = mpr_alloc_high_priority_command(sc);
231 	if (tm == NULL)
232 		return (NULL);
233 
234 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
235 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
236 	return tm;
237 }
238 
/*
 * Release a task management command allocated by mprsas_alloc_tm().  If a
 * CCB was attached to the TM, undo the devq freeze and the INRESET target
 * marking, then free the CCB's path and the CCB itself before returning
 * the command to the high-priority pool.  NULL 'tm' is a no-op.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "Unfreezing devq for target ID %d\n",
		    tm->cm_targ->tid);
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
264 
265 void
266 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
267 {
268 	struct mprsas_softc *sassc = sc->sassc;
269 	path_id_t pathid;
270 	target_id_t targetid;
271 	union ccb *ccb;
272 
273 	MPR_FUNCTRACE(sc);
274 	pathid = cam_sim_path(sassc->sim);
275 	if (targ == NULL)
276 		targetid = CAM_TARGET_WILDCARD;
277 	else
278 		targetid = targ - sassc->targets;
279 
280 	/*
281 	 * Allocate a CCB and schedule a rescan.
282 	 */
283 	ccb = xpt_alloc_ccb_nowait();
284 	if (ccb == NULL) {
285 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
286 		return;
287 	}
288 
289 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
290 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
291 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
292 		xpt_free_ccb(ccb);
293 		return;
294 	}
295 
296 	if (targetid == CAM_TARGET_WILDCARD)
297 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
298 	else
299 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
300 
301 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
302 	xpt_rescan(ccb);
303 }
304 
/*
 * printf-style debug logging for a command.  The message is prefixed with
 * the command's CAM path and, for SCSI I/O, the decoded CDB and transfer
 * length; when no CCB is attached, a "noperiph" sim/bus/target/lun tuple
 * is synthesized instead.  The command's SMID is always included.  Does
 * nothing unless the requested debug level is enabled.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];		/* message is formatted entirely on the stack */

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_sbuf(cm->cm_ccb->csio.ccb_h.path, &sb);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB attached: build a path-like prefix from the sim. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
346 
347 static void
348 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
349 {
350 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
351 	struct mprsas_target *targ;
352 	uint16_t handle;
353 
354 	MPR_FUNCTRACE(sc);
355 
356 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
357 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
358 	targ = tm->cm_targ;
359 
360 	if (reply == NULL) {
361 		/* XXX retry the remove after the diag reset completes? */
362 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
363 		    "0x%04x\n", __func__, handle);
364 		mprsas_free_tm(sc, tm);
365 		return;
366 	}
367 
368 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
369 	    MPI2_IOCSTATUS_SUCCESS) {
370 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
371 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
372 	}
373 
374 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
375 	    le32toh(reply->TerminationCount));
376 	mpr_free_reply(sc, tm->cm_reply_data);
377 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
378 
379 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
380 	    targ->tid, handle);
381 
382 	/*
383 	 * Don't clear target if remove fails because things will get confusing.
384 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
385 	 * this target id if possible, and so we can assign the same target id
386 	 * to this device if it comes back in the future.
387 	 */
388 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
389 	    MPI2_IOCSTATUS_SUCCESS) {
390 		targ = tm->cm_targ;
391 		targ->handle = 0x0;
392 		targ->encl_handle = 0x0;
393 		targ->encl_level_valid = 0x0;
394 		targ->encl_level = 0x0;
395 		targ->connector_name[0] = ' ';
396 		targ->connector_name[1] = ' ';
397 		targ->connector_name[2] = ' ';
398 		targ->connector_name[3] = ' ';
399 		targ->encl_slot = 0x0;
400 		targ->exp_dev_handle = 0x0;
401 		targ->phy_num = 0x0;
402 		targ->linkrate = 0x0;
403 		targ->devinfo = 0x0;
404 		targ->flags = 0x0;
405 		targ->scsi_req_desc_type = 0;
406 	}
407 
408 	mprsas_free_tm(sc, tm);
409 }
410 
411 /*
412  * Retry mprsas_prepare_remove() if some previous attempt failed to allocate
413  * high priority command due to limit reached.
414  */
415 void
416 mprsas_prepare_remove_retry(struct mprsas_softc *sassc)
417 {
418 	struct mprsas_target *target;
419 	int i;
420 
421 	if ((sassc->flags & MPRSAS_TOREMOVE) == 0)
422 		return;
423 
424 	for (i = 0; i < sassc->maxtargets; i++) {
425 		target = &sassc->targets[i];
426 		if ((target->flags & MPRSAS_TARGET_TOREMOVE) == 0)
427 			continue;
428 		if (TAILQ_EMPTY(&sassc->sc->high_priority_req_list))
429 			return;
430 		target->flags &= ~MPRSAS_TARGET_TOREMOVE;
431 		if (target->flags & MPR_TARGET_FLAGS_VOLUME)
432 			mprsas_prepare_volume_remove(sassc, target->handle);
433 		else
434 			mprsas_prepare_remove(sassc, target->handle);
435 	}
436 	sassc->flags &= ~MPRSAS_TOREMOVE;
437 }
438 
439 /*
440  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
441  * Otherwise Volume Delete is same as Bare Drive Removal.
442  */
443 void
444 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
445 {
446 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
447 	struct mpr_softc *sc;
448 	struct mpr_command *cm;
449 	struct mprsas_target *targ = NULL;
450 
451 	MPR_FUNCTRACE(sassc->sc);
452 	sc = sassc->sc;
453 
454 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
455 	if (targ == NULL) {
456 		/* FIXME: what is the action? */
457 		/* We don't know about this device? */
458 		mpr_dprint(sc, MPR_ERROR,
459 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
460 		return;
461 	}
462 
463 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
464 
465 	cm = mprsas_alloc_tm(sc);
466 	if (cm == NULL) {
467 		targ->flags |= MPRSAS_TARGET_TOREMOVE;
468 		sassc->flags |= MPRSAS_TOREMOVE;
469 		return;
470 	}
471 
472 	mprsas_rescan_target(sc, targ);
473 
474 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
475 	req->DevHandle = targ->handle;
476 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
477 
478 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
479 		/* SAS Hard Link Reset / SATA Link Reset */
480 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
481 	} else {
482 		/* PCIe Protocol Level Reset*/
483 		req->MsgFlags =
484 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
485 	}
486 
487 	cm->cm_targ = targ;
488 	cm->cm_data = NULL;
489 	cm->cm_complete = mprsas_remove_volume;
490 	cm->cm_complete_data = (void *)(uintptr_t)handle;
491 
492 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
493 	    __func__, targ->tid);
494 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
495 
496 	mpr_map_command(sc, cm);
497 }
498 
499 /*
500  * The firmware performs debounce on the link to avoid transient link errors
501  * and false removals.  When it does decide that link has been lost and a
502  * device needs to go away, it expects that the host will perform a target reset
503  * and then an op remove.  The reset has the side-effect of aborting any
504  * outstanding requests for the device, which is required for the op-remove to
505  * succeed.  It's not clear if the host should check for the device coming back
506  * alive after the reset.
507  */
/*
 * Begin removal of a bare (non-volume) device: mark the target as
 * in-removal, schedule a CAM rescan so the departure is noticed, and send
 * a target-reset TM.  The reset aborts outstanding I/O for the device;
 * mprsas_remove_device() then follows up with SAS_OP_REMOVE_DEVICE.  If
 * no high-priority command is available, the removal is deferred to
 * mprsas_prepare_remove_retry().
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		/* Defer; mprsas_prepare_remove_retry() will pick this up. */
		targ->flags |= MPRSAS_TARGET_TOREMOVE;
		sassc->flags |= MPRSAS_TOREMOVE;
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_targ = targ;
	tm->cm_data = NULL;
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
558 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  Reuses the TM command frame to issue the
 * follow-up SAS IO unit control REMOVE_DEVICE, either immediately (no
 * commands still outstanding on the target) or deferred via
 * targ->pending_remove_tm until the reset-aborted commands drain.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO,
		    "No pending commands: starting remove_device for target %u handle 0x%04x\n",
		    targ->tid, handle);
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Deferred: sent once the target's command list drains. */
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
637 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE issued by
 * mprsas_remove_device().  On IOC success, clear the target's identity
 * fields and free its LUN list; devname and sasaddr are deliberately kept
 * (see comment below).  The TM is freed in all cases.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Drop any per-LUN state accumulated for the old device. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
717 
718 static int
719 mprsas_register_events(struct mpr_softc *sc)
720 {
721 	uint8_t events[16];
722 
723 	bzero(events, 16);
724 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
725 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
726 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
727 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
728 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
729 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
730 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
731 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
732 	setbit(events, MPI2_EVENT_IR_VOLUME);
733 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
734 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
735 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
736 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
737 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
738 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
739 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
740 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
741 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
742 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
743 		}
744 	}
745 
746 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
747 	    &sc->sassc->mprsas_eh);
748 
749 	return (0);
750 }
751 
/*
 * Attach the SAS/CAM layer: allocate the per-SAS softc and target array,
 * create the CAM SIM and its queue, start the firmware-event taskqueue,
 * register the SCSI bus, and hold off boot until discovery completes.
 * Also registers for AC_ADVINFO_CHANGED async events (used to learn
 * device EEDP capabilities); failure there is non-fatal — EEDP is simply
 * disabled.  Returns 0 or an errno; on error, tears down partial state
 * via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/*
	 * Size the SIM queue to the request pool, excluding the commands
	 * reserved for high-priority (TM) use.
	 */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	mpr_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
868 
/*
 * Detach the SAS/CAM layer, undoing mpr_attach_sas().  Each teardown step
 * is guarded, so this is safe to call on a partially-attached instance —
 * which is exactly how the attach error path bails out.  Returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * not held so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* An empty event mask unhooks the previously-registered handler. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN state left on the targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
931 
932 void
933 mprsas_discovery_end(struct mprsas_softc *sassc)
934 {
935 	struct mpr_softc *sc = sassc->sc;
936 
937 	MPR_FUNCTRACE(sc);
938 
939 	/*
940 	 * After discovery has completed, check the mapping table for any
941 	 * missing devices and update their missing counts. Only do this once
942 	 * whenever the driver is initialized so that missing counts aren't
943 	 * updated unnecessarily. Note that just because discovery has
944 	 * completed doesn't mean that events have been processed yet. The
945 	 * check_devices function is a callout timer that checks if ALL devices
946 	 * are missing. If so, it will wait a little longer for events to
947 	 * complete and keep resetting itself until some device in the mapping
948 	 * table is not missing, meaning that event processing has started.
949 	 */
950 	if (sc->track_mapping_events) {
951 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
952 		    "completed. Check for missing devices in the mapping "
953 		    "table.\n");
954 		callout_reset(&sc->device_check_callout,
955 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
956 		    sc);
957 	}
958 }
959 
/*
 * CAM SIM action routine: dispatch an incoming CCB by its function
 * code.  Runs with the mpr mutex held (asserted below).  Requests
 * handled synchronously fall through to the xpt_done() at the bottom;
 * SCSI I/O, SMP I/O, and device resets return early and complete their
 * CCBs asynchronously.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link speed, TQ). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No device handle means nothing at this target ID. */
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware link-rate code into a CAM bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:	/* 1.5 Gb/s */
			sas->bitrate = 150000;
			break;
		case 0x09:	/* 3 Gb/s */
			sas->bitrate = 300000;
			break;
		case 0x0a:	/* 6 Gb/s */
			sas->bitrate = 600000;
			break;
		case 0x0b:	/* 12 Gb/s */
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completes the CCB itself; skip the xpt_done() below. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1087 
1088 static void
1089 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1090     target_id_t target_id, lun_id_t lun_id)
1091 {
1092 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1093 	struct cam_path *path;
1094 
1095 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1096 	    ac_code, target_id, (uintmax_t)lun_id);
1097 
1098 	if (xpt_create_path(&path, NULL,
1099 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1100 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1101 		    "notification\n");
1102 		return;
1103 	}
1104 
1105 	xpt_async(ac_code, path, NULL);
1106 	xpt_free_path(path);
1107 }
1108 
/*
 * Complete or wake up every command that is still active, with a NULL
 * reply frame.  Used during diag-reset recovery, when any replies the
 * firmware still owed us will never arrive.  Runs with the mpr mutex
 * held (asserted below).
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * A SATA ID command that timed out had its data buffer kept
		 * alive for the firmware; free it here since no completion
		 * path will reclaim it now.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Polled waiters spin on MPR_CM_FLAGS_COMPLETE. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding on the hardware any more. */
	sc->io_cmds_active = 0;
}
1163 
/*
 * Prepare driver and CAM state for rediscovery after a controller
 * reinit (diag reset): re-enter startup/discovery mode, announce a bus
 * reset to CAM, flush all outstanding commands, and invalidate every
 * target's device handles so they get repopulated by the post-reset
 * discovery pass.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1206 static void
1207 mprsas_tm_timeout(void *data)
1208 {
1209 	struct mpr_command *tm = data;
1210 	struct mpr_softc *sc = tm->cm_sc;
1211 
1212 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1213 
1214 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1215 	    "out\n", tm);
1216 
1217 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1218 	    ("command not inqueue, state = %u\n", tm->cm_state));
1219 
1220 	tm->cm_state = MPR_CM_STATE_BUSY;
1221 	mpr_reinit(sc);
1222 }
1223 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management command.
 * If no commands remain outstanding on the LUN, announce the reset to
 * CAM and either continue aborting the target's next timed-out command
 * or release the TM.  If commands are still outstanding, the reset is
 * treated as failed and recovery escalates to a target reset (or a
 * controller reset for NVMe devices without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* Cancel the escalation timeout armed by mprsas_send_reset(). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1322 
/*
 * Completion handler for a TARGET_RESET task management command.  If
 * the target has no commands outstanding, recovery is complete: the
 * reset is announced to CAM and the TM released.  If commands are
 * still outstanding the reset effectively failed, and recovery
 * escalates to a full controller reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* Cancel the escalation timeout armed by mprsas_send_reset(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1399 
1400 #define MPR_RESET_TIMEOUT 30
1401 
/*
 * Issue a task management reset ('type' is LOGICAL_UNIT_RESET or
 * TARGET_RESET) to the device backing 'tm'.  Arms a timeout callout
 * (mprsas_tm_timeout) that forces a controller reset if the firmware
 * never replies.  Returns -1 for a missing device handle or an
 * unexpected reset type, otherwise the result of mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe may have a per-target override of the TM timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Escalate to a controller reset if no reply arrives in time. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1476 
/*
 * Completion handler for an ABORT_TASK task management command.  If no
 * timed-out commands remain for the target, recovery is finished and
 * the TM is released.  If other commands remain, keep aborting them;
 * if the aborted command itself is still at the head of the timed-out
 * list (it never completed), escalate to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* Cancel the escalation timeout armed by mprsas_send_abort(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1557 
1558 #define MPR_ABORT_TIMEOUT 5
1559 
/*
 * Send an ABORT_TASK TM for the timed-out command 'cm', using the
 * pre-allocated TM command 'tm'.  Arms a timeout callout
 * (mprsas_tm_timeout) that forces a controller reset if the firmware
 * never replies.  Returns -1 if the target has no device handle,
 * otherwise the result of mpr_map_command().
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies which firmware request to abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	/* Escalate to a controller reset if no reply arrives in time. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1613 
/*
 * Callout handler for a timed-out SCSI I/O.  Marks the command as
 * timed out, queues it on the target's timedout_commands list, and
 * starts abort-based recovery (one TM per target).  If a TM is already
 * active for the target, the command simply waits its turn on the
 * timed-out list.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		/* The interrupt handler completed it: not a real timeout. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* ccb_h.qos.sim_data was stamped with sbinuptime() at submit. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1701 
1702 /**
1703  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1704  *			     to SCSI Unmap.
1705  * Return 0 - for success,
1706  *	  1 - to immediately return back the command with success status to CAM
1707  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1708  *			   to FW without any translation.
1709  */
1710 static int
1711 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1712     union ccb *ccb, struct mprsas_target *targ)
1713 {
1714 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1715 	struct ccb_scsiio *csio;
1716 	struct unmap_parm_list *plist;
1717 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1718 	struct nvme_command *c;
1719 	int i, res;
1720 	uint16_t ndesc, list_len, data_length;
1721 	struct mpr_prp_page *prp_page_info;
1722 	uint64_t nvme_dsm_ranges_dma_handle;
1723 
1724 	csio = &ccb->csio;
1725 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1726 	if (!list_len) {
1727 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1728 		return -EINVAL;
1729 	}
1730 
1731 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1732 	if (!plist) {
1733 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1734 		    "save UNMAP data\n");
1735 		return -ENOMEM;
1736 	}
1737 
1738 	/* Copy SCSI unmap data to a local buffer */
1739 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1740 
1741 	/* return back the unmap command to CAM with success status,
1742 	 * if number of descripts is zero.
1743 	 */
1744 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1745 	if (!ndesc) {
1746 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1747 		    "UNMAP cmd is Zero\n");
1748 		res = 1;
1749 		goto out;
1750 	}
1751 
1752 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1753 	if (data_length > targ->MDTS) {
1754 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1755 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1756 		res = -EINVAL;
1757 		goto out;
1758 	}
1759 
1760 	prp_page_info = mpr_alloc_prp_page(sc);
1761 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1762 	    "UNMAP command.\n", __func__));
1763 
1764 	/*
1765 	 * Insert the allocated PRP page into the command's PRP page list. This
1766 	 * will be freed when the command is freed.
1767 	 */
1768 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1769 
1770 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1771 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1772 
1773 	bzero(nvme_dsm_ranges, data_length);
1774 
1775 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1776 	 * for each descriptors contained in SCSI UNMAP data.
1777 	 */
1778 	for (i = 0; i < ndesc; i++) {
1779 		nvme_dsm_ranges[i].length =
1780 		    htole32(be32toh(plist->desc[i].nlb));
1781 		nvme_dsm_ranges[i].starting_lba =
1782 		    htole64(be64toh(plist->desc[i].slba));
1783 		nvme_dsm_ranges[i].attributes = 0;
1784 	}
1785 
1786 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1787 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1788 	bzero(req, sizeof(*req));
1789 	req->DevHandle = htole16(targ->handle);
1790 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1791 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1792 	req->ErrorResponseBaseAddress.High =
1793 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1794 	req->ErrorResponseBaseAddress.Low =
1795 	    htole32(cm->cm_sense_busaddr);
1796 	req->ErrorResponseAllocationLength =
1797 	    htole16(sizeof(struct nvme_completion));
1798 	req->EncapsulatedCommandLength =
1799 	    htole16(sizeof(struct nvme_command));
1800 	req->DataLength = htole32(data_length);
1801 
1802 	/* Build NVMe DSM command */
1803 	c = (struct nvme_command *) req->NVMe_Command;
1804 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1805 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1806 	c->cdw10 = htole32(ndesc - 1);
1807 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1808 
1809 	cm->cm_length = data_length;
1810 	cm->cm_data = NULL;
1811 
1812 	cm->cm_complete = mprsas_scsiio_complete;
1813 	cm->cm_complete_data = ccb;
1814 	cm->cm_targ = targ;
1815 	cm->cm_lun = csio->ccb_h.target_lun;
1816 	cm->cm_ccb = ccb;
1817 
1818 	cm->cm_desc.Default.RequestFlags =
1819 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1820 
1821 	csio->ccb_h.qos.sim_data = sbinuptime();
1822 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1823 	    mprsas_scsiio_timeout, cm, 0);
1824 
1825 	targ->issued++;
1826 	targ->outstanding++;
1827 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1828 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1829 
1830 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1831 	    __func__, cm, ccb, targ->outstanding);
1832 
1833 	mpr_build_nvme_prp(sc, cm, req,
1834 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1835 	mpr_map_command(sc, cm);
1836 	res = 0;
1837 
1838 out:
1839 	free(plist, M_MPR);
1840 	return (res);
1841 }
1842 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target state, build an MPI2
 * SCSI IO request (or issue a native NVMe DataSetManagement command for
 * UNMAP on NVMe drives), and queue it to the controller.  Completion is
 * delivered asynchronously via mprsas_scsiio_complete().
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/*
	 * A zero handle means the target is not currently mapped to the
	 * hardware.  During a diag reset, requeue the request with the devq
	 * frozen so it is retried once the reset finishes; otherwise fail it.
	 */
	if (targ->handle == 0x0) {
		if (targ->flags & MPRSAS_TARGET_INDIAGRESET) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* Direct SCSI IO to a RAID component device is not supported. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* No new I/O is accepted once the controller is shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Getting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
		    "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of command frames, or a diag reset has just started: freeze
	 * the SIM queue and ask CAM to requeue the CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/*
	 * For NVMe devices, issue UNMAP directly to the drive by
	 * constructing the equivalent native NVMe DataSetManagement
	 * command.  Only attempted when the data is virtually addressed.
	 */
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* rc < 0: fall through and send as a regular SCSI IO */
	}

	/* Build the MPI2 SCSI IO request in the command's request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* A 32-byte CDB needs 4 additional CDB length units flagged. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Fold in the CCB's command priority and the target's TLR setting. */
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	/* Encode the LUN; fail the CCB if the LUN cannot be represented. */
	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever the CAM_CDB_POINTER flag says it is. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if the LUN was not found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole32(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Data is mapped later from the CCB itself (MPR_CM_FLAGS_USE_CCB). */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* Embedded SGL space: request frame 32-bit words 24..31, in bytes. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout before handing the I/O to hardware. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2160 
2161 /**
2162  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2163  */
2164 static void
2165 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2166     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2167 {
2168 	u32 response_info;
2169 	u8 *response_bytes;
2170 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2171 	    MPI2_IOCSTATUS_MASK;
2172 	u8 scsi_state = mpi_reply->SCSIState;
2173 	u8 scsi_status = mpi_reply->SCSIStatus;
2174 	char *desc_ioc_state = NULL;
2175 	char *desc_scsi_status = NULL;
2176 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2177 
2178 	if (log_info == 0x31170000)
2179 		return;
2180 
2181 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2182 	     ioc_status);
2183 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2184 	    scsi_status);
2185 
2186 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2187 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2188 	if (targ->encl_level_valid) {
2189 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2190 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2191 		    targ->connector_name);
2192 	}
2193 
2194 	/*
2195 	 * We can add more detail about underflow data here
2196 	 * TO-DO
2197 	 */
2198 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2199 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2200 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2201 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2202 
2203 	if (sc->mpr_debug & MPR_XINFO &&
2204 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2205 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2206 		scsi_sense_print(csio);
2207 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2208 	}
2209 
2210 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2211 		response_info = le32toh(mpi_reply->ResponseInfo);
2212 		response_bytes = (u8 *)&response_info;
2213 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2214 		    response_bytes[0],
2215 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2216 		    response_bytes[0]));
2217 	}
2218 }
2219 
/** mprsas_nvme_trans_status_code
 *
 * Convert a native NVMe command error status to the equivalent SCSI
 * error status, and fill fixed-format autosense data (sense key, ASC,
 * ASCQ) into the command's CCB.  Always sets CAM_AUTOSNS_VALID on the
 * CCB.
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any status not matched below:
	 * CHECK CONDITION / ILLEGAL REQUEST with no additional sense.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		/* Generic command status codes. */
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants map to the same TASK ABORTED sense. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		/* Command-specific status codes. */
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		/* Media and data-integrity status codes. */
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * The fixed-format sense built below always occupies
	 * sizeof(struct scsi_sense_data); compute the CCB's sense residual
	 * accordingly.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Build fixed-format sense data and mark autosense as valid. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2396 
2397 /** mprsas_complete_nvme_unmap
2398  *
2399  * Complete native NVMe command issued using NVMe Encapsulated
2400  * Request Message.
2401  */
2402 static u8
2403 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2404 {
2405 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2406 	struct nvme_completion *nvme_completion = NULL;
2407 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2408 
2409 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2410 	if (le16toh(mpi_reply->ErrorResponseCount)){
2411 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2412 		scsi_status = mprsas_nvme_trans_status_code(
2413 		    nvme_completion->status, cm);
2414 	}
2415 	return scsi_status;
2416 }
2417 
2418 static void
2419 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2420 {
2421 	MPI2_SCSI_IO_REPLY *rep;
2422 	union ccb *ccb;
2423 	struct ccb_scsiio *csio;
2424 	struct mprsas_softc *sassc;
2425 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2426 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2427 	int dir = 0, i;
2428 	u16 alloc_len;
2429 	struct mprsas_target *target;
2430 	target_id_t target_id;
2431 
2432 	MPR_FUNCTRACE(sc);
2433 
2434 	callout_stop(&cm->cm_callout);
2435 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2436 
2437 	sassc = sc->sassc;
2438 	ccb = cm->cm_complete_data;
2439 	csio = &ccb->csio;
2440 	target_id = csio->ccb_h.target_id;
2441 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2442 	mpr_dprint(sc, MPR_TRACE,
2443 	    "cm %p SMID %u ccb %p reply %p outstanding %u csio->scsi_status 0x%x,"
2444 	    "csio->dxfer_len 0x%x, csio->msg_le 0x%xn\n", cm,
2445 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2446 	    cm->cm_targ->outstanding, csio->scsi_status,
2447 	    csio->dxfer_len, csio->msg_len);
2448 	/*
2449 	 * XXX KDM if the chain allocation fails, does it matter if we do
2450 	 * the sync and unload here?  It is simpler to do it in every case,
2451 	 * assuming it doesn't cause problems.
2452 	 */
2453 	if (cm->cm_data != NULL) {
2454 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2455 			dir = BUS_DMASYNC_POSTREAD;
2456 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2457 			dir = BUS_DMASYNC_POSTWRITE;
2458 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2459 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2460 	}
2461 
2462 	cm->cm_targ->completed++;
2463 	cm->cm_targ->outstanding--;
2464 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2465 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2466 
2467 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2468 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2469 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2470 		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2471 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2472 		if (cm->cm_reply != NULL)
2473 			mprsas_log_command(cm, MPR_RECOVERY,
2474 			    "completed timedout cm %p ccb %p during recovery "
2475 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2476 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2477 			    rep->SCSIState, le32toh(rep->TransferCount));
2478 		else
2479 			mprsas_log_command(cm, MPR_RECOVERY,
2480 			    "completed timedout cm %p ccb %p during recovery\n",
2481 			    cm, cm->cm_ccb);
2482 	} else if (cm->cm_targ->tm != NULL) {
2483 		if (cm->cm_reply != NULL)
2484 			mprsas_log_command(cm, MPR_RECOVERY,
2485 			    "completed cm %p ccb %p during recovery "
2486 			    "ioc %x scsi %x state %x xfer %u\n",
2487 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2488 			    rep->SCSIStatus, rep->SCSIState,
2489 			    le32toh(rep->TransferCount));
2490 		else
2491 			mprsas_log_command(cm, MPR_RECOVERY,
2492 			    "completed cm %p ccb %p during recovery\n",
2493 			    cm, cm->cm_ccb);
2494 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2495 		mprsas_log_command(cm, MPR_RECOVERY,
2496 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2497 	}
2498 
2499 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2500 		/*
2501 		 * We ran into an error after we tried to map the command,
2502 		 * so we're getting a callback without queueing the command
2503 		 * to the hardware.  So we set the status here, and it will
2504 		 * be retained below.  We'll go through the "fast path",
2505 		 * because there can be no reply when we haven't actually
2506 		 * gone out to the hardware.
2507 		 */
2508 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2509 
2510 		/*
2511 		 * Currently the only error included in the mask is
2512 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2513 		 * chain frames.  We need to freeze the queue until we get
2514 		 * a command that completed without this error, which will
2515 		 * hopefully have some chain frames attached that we can
2516 		 * use.  If we wanted to get smarter about it, we would
2517 		 * only unfreeze the queue in this condition when we're
2518 		 * sure that we're getting some chain frames back.  That's
2519 		 * probably unnecessary.
2520 		 */
2521 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2522 			xpt_freeze_simq(sassc->sim, 1);
2523 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2524 			mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2525 			    "Error sending command, freezing SIM queue\n");
2526 		}
2527 	}
2528 
2529 	/*
2530 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2531 	 * flag, and use it in a few places in the rest of this function for
2532 	 * convenience. Use the macro if available.
2533 	 */
2534 	scsi_cdb = scsiio_cdb_ptr(csio);
2535 
2536 	/*
2537 	 * If this is a Start Stop Unit command and it was issued by the driver
2538 	 * during shutdown, decrement the refcount to account for all of the
2539 	 * commands that were sent.  All SSU commands should be completed before
2540 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2541 	 * is TRUE.
2542 	 */
2543 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2544 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2545 		sc->SSU_refcount--;
2546 	}
2547 
2548 	SDT_PROBE4(cam, , mpr, complete, ccb, cm, sassc->flags,
2549 	    sc->mapping_table[target_id].device_info);
2550 
2551 	/* Take the fast path to completion */
2552 	if (cm->cm_reply == NULL) {
2553 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2554 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2555 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2556 			else {
2557 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2558 				csio->scsi_status = SCSI_STATUS_OK;
2559 			}
2560 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2561 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2562 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2563 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
2564 				    "Unfreezing SIM queue\n");
2565 			}
2566 		}
2567 
2568 		/*
2569 		 * There are two scenarios where the status won't be
2570 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2571 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2572 		 */
2573 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2574 			/*
2575 			 * Freeze the dev queue so that commands are
2576 			 * executed in the correct order after error
2577 			 * recovery.
2578 			 */
2579 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2580 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2581 		}
2582 		mpr_free_command(sc, cm);
2583 		xpt_done(ccb);
2584 		return;
2585 	}
2586 
2587 	target = &sassc->targets[target_id];
2588 	if (scsi_cdb[0] == UNMAP &&
2589 	    target->is_nvme &&
2590 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2591 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2592 		csio->scsi_status = rep->SCSIStatus;
2593 	}
2594 
2595 	mprsas_log_command(cm, MPR_XINFO,
2596 	    "ioc %x scsi %x state %x xfer %u\n",
2597 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2598 	    le32toh(rep->TransferCount));
2599 
2600 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2601 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2602 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2603 		/* FALLTHROUGH */
2604 	case MPI2_IOCSTATUS_SUCCESS:
2605 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2606 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2607 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2608 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2609 
2610 		/* Completion failed at the transport level. */
2611 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2612 		    MPI2_SCSI_STATE_TERMINATED)) {
2613 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2614 			break;
2615 		}
2616 
2617 		/* In a modern packetized environment, an autosense failure
2618 		 * implies that there's not much else that can be done to
2619 		 * recover the command.
2620 		 */
2621 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2622 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2623 			break;
2624 		}
2625 
2626 		/*
2627 		 * CAM doesn't care about SAS Response Info data, but if this is
2628 		 * the state check if TLR should be done.  If not, clear the
2629 		 * TLR_bits for the target.
2630 		 */
2631 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2632 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2633 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2634 			sc->mapping_table[target_id].TLR_bits =
2635 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2636 		}
2637 
2638 		/*
2639 		 * Intentionally override the normal SCSI status reporting
2640 		 * for these two cases.  These are likely to happen in a
2641 		 * multi-initiator environment, and we want to make sure that
2642 		 * CAM retries these commands rather than fail them.
2643 		 */
2644 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2645 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2646 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2647 			break;
2648 		}
2649 
2650 		/* Handle normal status and sense */
2651 		csio->scsi_status = rep->SCSIStatus;
2652 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2653 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2654 		else
2655 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2656 
2657 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2658 			int sense_len, returned_sense_len;
2659 
2660 			returned_sense_len = min(le32toh(rep->SenseCount),
2661 			    sizeof(struct scsi_sense_data));
2662 			if (returned_sense_len < csio->sense_len)
2663 				csio->sense_resid = csio->sense_len -
2664 				    returned_sense_len;
2665 			else
2666 				csio->sense_resid = 0;
2667 
2668 			sense_len = min(returned_sense_len,
2669 			    csio->sense_len - csio->sense_resid);
2670 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2671 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2672 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2673 		}
2674 
2675 		/*
2676 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2677 		 * and it's page code 0 (Supported Page List), and there is
2678 		 * inquiry data, and this is for a sequential access device, and
2679 		 * the device is an SSP target, and TLR is supported by the
2680 		 * controller, turn the TLR_bits value ON if page 0x90 is
2681 		 * supported.
2682 		 */
2683 		if ((scsi_cdb[0] == INQUIRY) &&
2684 		    (scsi_cdb[1] & SI_EVPD) &&
2685 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2686 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2687 		    (csio->data_ptr != NULL) &&
2688 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2689 		    (sc->control_TLR) &&
2690 		    (sc->mapping_table[target_id].device_info &
2691 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2692 			vpd_list = (struct scsi_vpd_supported_page_list *)
2693 			    csio->data_ptr;
2694 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2695 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2696 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2697 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2698 			alloc_len -= csio->resid;
2699 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2700 				if (vpd_list->list[i] == 0x90) {
2701 					*TLR_bits = TLR_on;
2702 					break;
2703 				}
2704 			}
2705 		}
2706 
2707 		/*
2708 		 * If this is a SATA direct-access end device, mark it so that
2709 		 * a SCSI StartStopUnit command will be sent to it when the
2710 		 * driver is being shutdown.
2711 		 */
2712 		if ((scsi_cdb[0] == INQUIRY) &&
2713 		    (csio->data_ptr != NULL) &&
2714 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2715 		    (sc->mapping_table[target_id].device_info &
2716 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2717 		    ((sc->mapping_table[target_id].device_info &
2718 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2719 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2720 			target = &sassc->targets[target_id];
2721 			target->supports_SSU = TRUE;
2722 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2723 			    target_id);
2724 		}
2725 		break;
2726 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2727 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2728 		/*
2729 		 * If devinfo is 0 this will be a volume.  In that case don't
2730 		 * tell CAM that the volume is not there.  We want volumes to
2731 		 * be enumerated until they are deleted/removed, not just
2732 		 * failed.
2733 		 */
2734 		if (cm->cm_targ->devinfo == 0)
2735 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2736 		else
2737 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2738 		break;
2739 	case MPI2_IOCSTATUS_INVALID_SGL:
2740 		mpr_print_scsiio_cmd(sc, cm);
2741 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2742 		break;
2743 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2744 		/*
2745 		 * This is one of the responses that comes back when an I/O
2746 		 * has been aborted.  If it is because of a timeout that we
2747 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2748 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2749 		 * command is the same (it gets retried, subject to the
2750 		 * retry counter), the only difference is what gets printed
2751 		 * on the console.
2752 		 */
2753 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2754 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2755 		else
2756 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2757 		break;
2758 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2759 		/* resid is ignored for this condition */
2760 		csio->resid = 0;
2761 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2762 		break;
2763 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2764 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2765 		/*
2766 		 * These can sometimes be transient transport-related
2767 		 * errors, and sometimes persistent drive-related errors.
2768 		 * We used to retry these without decrementing the retry
2769 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2770 		 * we hit a persistent drive problem that returns one of
2771 		 * these error codes, we would retry indefinitely.  So,
2772 		 * return CAM_REQ_CMP_ERR so that we decrement the retry
2773 		 * count and avoid infinite retries.  We're taking the
2774 		 * potential risk of flagging false failures in the event
2775 		 * of a topology-related error (e.g. a SAS expander problem
2776 		 * causes a command addressed to a drive to fail), but
2777 		 * avoiding getting into an infinite retry loop. However,
2778 		 * if we get them while were removing a device, we should
2779 		 * fail the request as 'not there' because the device
2780 		 * is effectively gone.
2781 		 */
2782 		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2783 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2784 		else
2785 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2786 		mpr_dprint(sc, MPR_INFO,
2787 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2788 		    mpr_describe_table(mpr_iocstatus_string,
2789 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2790 		    target_id, cm->cm_desc.Default.SMID,
2791 		    le32toh(rep->IOCLogInfo),
2792 		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2793 		mpr_dprint(sc, MPR_XINFO,
2794 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2795 		    rep->SCSIStatus, rep->SCSIState,
2796 		    le32toh(rep->TransferCount));
2797 		break;
2798 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2799 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2800 	case MPI2_IOCSTATUS_INVALID_VPID:
2801 	case MPI2_IOCSTATUS_INVALID_FIELD:
2802 	case MPI2_IOCSTATUS_INVALID_STATE:
2803 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2804 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2805 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2806 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2807 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2808 	default:
2809 		mprsas_log_command(cm, MPR_XINFO,
2810 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2811 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2812 		    rep->SCSIStatus, rep->SCSIState,
2813 		    le32toh(rep->TransferCount));
2814 		csio->resid = cm->cm_length;
2815 
2816 		if (scsi_cdb[0] == UNMAP &&
2817 		    target->is_nvme &&
2818 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2819 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2820 		else
2821 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2822 
2823 		break;
2824 	}
2825 
2826 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2827 
2828 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2829 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2830 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2831 		mpr_dprint(sc, MPR_INFO, "Command completed, unfreezing SIM "
2832 		    "queue\n");
2833 	}
2834 
2835 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2836 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2837 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2838 	}
2839 
2840 	/*
2841 	 * Check to see if we're removing the device. If so, and this is the
2842 	 * last command on the queue, proceed with the deferred removal of the
2843 	 * device.  Note, for removing a volume, this won't trigger because
2844 	 * pending_remove_tm will be NULL.
2845 	 */
2846 	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2847 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2848 		    cm->cm_targ->pending_remove_tm != NULL) {
2849 			mpr_dprint(sc, MPR_INFO,
2850 			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
2851 			    cm->cm_targ->tid, cm->cm_targ->handle);
2852 			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2853 			cm->cm_targ->pending_remove_tm = NULL;
2854 		}
2855 	}
2856 
2857 	mpr_free_command(sc, cm);
2858 	xpt_done(ccb);
2859 }
2860 
/*
 * Completion handler for SMP passthrough commands queued by
 * mprsas_send_smpcmd().  Translates the firmware reply into a CAM
 * status on the CCB saved in cm_complete_data, then tears down the DMA
 * mapping and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Recover the destination SAS address from the request, for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Both the IOC status and the SAS transport status must be good. */
	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/*
	 * Byte 2 of the SMP response frame carries the function result;
	 * anything other than ACCEPTED is reported to CAM as an SMP error.
	 */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2923 
/*
 * Build and dispatch an MPI SMP passthrough request for an XPT_SMP_IO
 * CCB.  The request and response buffers are handed to the chip as a
 * two-element uio (one S/G segment each, bidirectional transfer);
 * completion is handled by mprsas_smpio_complete().  On any setup
 * failure the CCB is completed immediately with an error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/* Locate virtual addresses for the request and response buffers. */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request frame, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3091 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * to address (either the device itself, or its parent expander) and
 * hand the request off to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* The parent must itself be an SMP target (expander). */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3217 
/*
 * Handle an XPT_RESET_DEV CCB by sending a target reset task management
 * request to the firmware.  mprsas_resetdev_complete() finishes the CCB
 * when the TM reply arrives.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	/* TM frames come from a dedicated pool; failure is non-fatal. */
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* Choose the reset flavor appropriate for the transport. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Freeze the devq so no new I/O reaches the target mid-reset. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3266 
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Completes the saved CCB with CAM_REQ_CMP
 * (and announces a bus device reset to CAM) only if the firmware
 * reports the TM as complete.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): MPI2 appears to define ResponseCode as a single
	 * byte, which would make the le32toh() conversions below suspect
	 * on big-endian hosts -- confirm against the
	 * MPI2_SCSI_TASK_MANAGE_REPLY layout in the MPI headers.
	 */
	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}
3312 
3313 static void
3314 mprsas_poll(struct cam_sim *sim)
3315 {
3316 	struct mprsas_softc *sassc;
3317 
3318 	sassc = cam_sim_softc(sim);
3319 
3320 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3321 		/* frequent debug messages during a panic just slow
3322 		 * everything down too much.
3323 		 */
3324 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3325 		    __func__);
3326 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3327 	}
3328 
3329 	mpr_intr_locked(sassc->sc);
3330 }
3331 
/*
 * CAM async event callback.  The only event of interest is
 * AC_ADVINFO_CHANGED with a long read capacity buffer: re-fetch the
 * cached READ CAPACITY(16) data for the LUN and record whether it is
 * formatted for EEDP (protection type 1 or 3), creating a LUN record
 * for the target if one does not exist yet.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	mpr_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look up the LUN record for this path. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* No record yet: allocate one so EEDP state can be kept. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/* Fetch the cached READ CAPACITY(16) data via XPT_DEV_ADVINFO. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Protection types 1 and 3 are usable for EEDP; type 2 (and
		 * anything unknown) is treated as not formatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mpr_unlock(sc);
}
3433 
3434 /*
3435  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3436  * the target until the reset has completed.  The CCB holds the path which
3437  * is used to release the devq.  The devq is released and the CCB is freed
3438  * when the TM completes.
3439  * We only need to do this when we're entering reset, not at each time we
3440  * need to send an abort (which will happen if multiple commands timeout
3441  * while we're sending the abort). We do not release the queue for each
3442  * command we complete (just at the end when we free the tm), so freezing
3443  * it each time doesn't make sense.
3444  */
3445 void
3446 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3447     struct mprsas_target *target, lun_id_t lun_id)
3448 {
3449 	union ccb *ccb;
3450 	path_id_t path_id;
3451 
3452 	ccb = xpt_alloc_ccb_nowait();
3453 	if (ccb) {
3454 		path_id = cam_sim_path(sc->sassc->sim);
3455 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3456 		    target->tid, lun_id) != CAM_REQ_CMP) {
3457 			xpt_free_ccb(ccb);
3458 		} else {
3459 			tm->cm_ccb = ccb;
3460 			tm->cm_targ = target;
3461 			if ((target->flags & MPRSAS_TARGET_INRESET) == 0) {
3462 				mpr_dprint(sc, MPR_XINFO | MPR_RECOVERY,
3463 				    "%s: Freezing devq for target ID %d\n",
3464 				    __func__, target->tid);
3465 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3466 				target->flags |= MPRSAS_TARGET_INRESET;
3467 			}
3468 		}
3469 	}
3470 }
3471 
3472 int
3473 mprsas_startup(struct mpr_softc *sc)
3474 {
3475 	/*
3476 	 * Send the port enable message and set the wait_for_port_enable flag.
3477 	 * This flag helps to keep the simq frozen until all discovery events
3478 	 * are processed.
3479 	 */
3480 	sc->wait_for_port_enable = 1;
3481 	mprsas_send_portenable(sc);
3482 	return (0);
3483 }
3484 
3485 static int
3486 mprsas_send_portenable(struct mpr_softc *sc)
3487 {
3488 	MPI2_PORT_ENABLE_REQUEST *request;
3489 	struct mpr_command *cm;
3490 
3491 	MPR_FUNCTRACE(sc);
3492 
3493 	if ((cm = mpr_alloc_command(sc)) == NULL)
3494 		return (EBUSY);
3495 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3496 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3497 	request->MsgFlags = 0;
3498 	request->VP_ID = 0;
3499 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3500 	cm->cm_complete = mprsas_portenable_complete;
3501 	cm->cm_data = NULL;
3502 	cm->cm_sge = NULL;
3503 
3504 	mpr_map_command(sc, cm);
3505 	mpr_dprint(sc, MPR_XINFO,
3506 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3507 	    cm, cm->cm_req, cm->cm_complete);
3508 	return (0);
3509 }
3510 
3511 static void
3512 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3513 {
3514 	MPI2_PORT_ENABLE_REPLY *reply;
3515 	struct mprsas_softc *sassc;
3516 
3517 	MPR_FUNCTRACE(sc);
3518 	sassc = sc->sassc;
3519 
3520 	/*
3521 	 * Currently there should be no way we can hit this case.  It only
3522 	 * happens when we have a failure to allocate chain frames, and
3523 	 * port enable commands don't have S/G lists.
3524 	 */
3525 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3526 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3527 		    "This should not happen!\n", __func__, cm->cm_flags);
3528 	}
3529 
3530 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3531 	if (reply == NULL)
3532 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3533 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3534 	    MPI2_IOCSTATUS_SUCCESS)
3535 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3536 
3537 	mpr_free_command(sc, cm);
3538 	/*
3539 	 * Done waiting for port enable to complete.  Decrement the refcount.
3540 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3541 	 * take place.
3542 	 */
3543 	sc->wait_for_port_enable = 0;
3544 	sc->port_enable_complete = 1;
3545 	wakeup(&sc->port_enable_complete);
3546 	mprsas_startup_decrement(sassc);
3547 }
3548 
3549 int
3550 mprsas_check_id(struct mprsas_softc *sassc, int id)
3551 {
3552 	struct mpr_softc *sc = sassc->sc;
3553 	char *ids;
3554 	char *name;
3555 
3556 	ids = &sc->exclude_ids[0];
3557 	while((name = strsep(&ids, ",")) != NULL) {
3558 		if (name[0] == '\0')
3559 			continue;
3560 		if (strtol(name, NULL, 0) == (long)id)
3561 			return (1);
3562 	}
3563 
3564 	return (0);
3565 }
3566 
3567 void
3568 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3569 {
3570 	struct mprsas_softc *sassc;
3571 	struct mprsas_lun *lun, *lun_tmp;
3572 	struct mprsas_target *targ;
3573 	int i;
3574 
3575 	sassc = sc->sassc;
3576 	/*
3577 	 * The number of targets is based on IOC Facts, so free all of
3578 	 * the allocated LUNs for each target and then the target buffer
3579 	 * itself.
3580 	 */
3581 	for (i=0; i< maxtargets; i++) {
3582 		targ = &sassc->targets[i];
3583 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3584 			free(lun, M_MPR);
3585 		}
3586 	}
3587 	free(sassc->targets, M_MPR);
3588 
3589 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3590 	    M_MPR, M_WAITOK|M_ZERO);
3591 }
3592