xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 2c9a9dfc187d171de6b92654d71b977f067ed641)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT3 */
37 
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/nvme/nvme.h>
77 
78 #include <dev/mpr/mpi/mpi2_type.h>
79 #include <dev/mpr/mpi/mpi2.h>
80 #include <dev/mpr/mpi/mpi2_ioc.h>
81 #include <dev/mpr/mpi/mpi2_sas.h>
82 #include <dev/mpr/mpi/mpi2_pci.h>
83 #include <dev/mpr/mpi/mpi2_cnfg.h>
84 #include <dev/mpr/mpi/mpi2_init.h>
85 #include <dev/mpr/mpi/mpi2_tool.h>
86 #include <dev/mpr/mpr_ioctl.h>
87 #include <dev/mpr/mprvar.h>
88 #include <dev/mpr/mpr_table.h>
89 #include <dev/mpr/mpr_sas.h>
90 
91 #define MPRSAS_DISCOVERY_TIMEOUT	20
92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93 
94 /*
95  * static array to check SCSI OpCode for EEDP protection bits
96  */
97 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
117 };
118 
/* Malloc type used for all SAS-layer allocations in this file. */
MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");

/* Forward declarations: CAM SIM entry points and command completion
 * handlers implemented below. */
static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
static void mprsas_poll(struct cam_sim *sim);
static void mprsas_scsiio_timeout(void *data);
static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm);
static void mprsas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
/* EEDP probing helpers are only needed on old CAM versions that lack
 * AC_ADVINFO_CHANGED async notifications. */
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mprsas_read_cap_done(struct cam_periph *periph,
    union ccb *done_ccb);
#endif
static int mprsas_send_portenable(struct mpr_softc *sc);
static void mprsas_portenable_complete(struct mpr_softc *sc,
    struct mpr_command *cm);

/* SMP passthrough support requires smp_all.h, first shipped in 9.0. */
#if __FreeBSD_version >= 900026
static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr);
static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
152 
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155     uint16_t handle)
156 {
157 	struct mprsas_target *target;
158 	int i;
159 
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
163 			return (target);
164 	}
165 
166 	return (NULL);
167 }
168 
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery.  Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
175  */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/* Discovery refcounting only applies while attach/diag-reset
	 * startup is in progress; otherwise this is a no-op. */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
/* xpt_hold_boot() first appeared in 9.2.5 / 10.0.39-era CAM. */
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
196 
197 void
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 {
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204 	}
205 }
206 
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/* Mirror of mprsas_startup_increment(): the last decrement during
	 * startup releases the simq frozen by the first increment. */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
/* With xpt_hold_boot() support CAM rescans on release; otherwise kick
 * off a full-bus rescan ourselves. */
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
232 
/*
 * The firmware requires us to stop sending commands when we're doing task
 * management.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
240 struct mpr_command *
241 mprsas_alloc_tm(struct mpr_softc *sc)
242 {
243 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244 	struct mpr_command *tm;
245 
246 	MPR_FUNCTRACE(sc);
247 	tm = mpr_alloc_high_priority_command(sc);
248 	if (tm == NULL)
249 		return (NULL);
250 
251 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
253 	return tm;
254 }
255 
256 void
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
258 {
259 	int target_id = 0xFFFFFFFF;
260 
261 	MPR_FUNCTRACE(sc);
262 	if (tm == NULL)
263 		return;
264 
265 	/*
266 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
267 	 * free the resources used for freezing the devq.  Must clear the
268 	 * INRESET flag as well or scsi I/O will not work.
269 	 */
270 	if (tm->cm_targ != NULL) {
271 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
272 		target_id = tm->cm_targ->tid;
273 	}
274 	if (tm->cm_ccb) {
275 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
276 		    target_id);
277 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
278 		xpt_free_path(tm->cm_ccb->ccb_h.path);
279 		xpt_free_ccb(tm->cm_ccb);
280 	}
281 
282 	mpr_free_high_priority_command(sc, tm);
283 }
284 
285 void
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
287 {
288 	struct mprsas_softc *sassc = sc->sassc;
289 	path_id_t pathid;
290 	target_id_t targetid;
291 	union ccb *ccb;
292 
293 	MPR_FUNCTRACE(sc);
294 	pathid = cam_sim_path(sassc->sim);
295 	if (targ == NULL)
296 		targetid = CAM_TARGET_WILDCARD;
297 	else
298 		targetid = targ - sassc->targets;
299 
300 	/*
301 	 * Allocate a CCB and schedule a rescan.
302 	 */
303 	ccb = xpt_alloc_ccb_nowait();
304 	if (ccb == NULL) {
305 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
306 		return;
307 	}
308 
309 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
310 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
312 		xpt_free_ccb(ccb);
313 		return;
314 	}
315 
316 	if (targetid == CAM_TARGET_WILDCARD)
317 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
318 	else
319 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
320 
321 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
322 	xpt_rescan(ccb);
323 }
324 
/*
 * Log a printf-style message about command 'cm', prefixed with its CAM
 * path (or a synthesized "noperiph" sim:bus:target:lun tuple when no CCB
 * is attached) and its SMID.  Emits nothing unless 'level' is enabled in
 * the softc's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer 'str'. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the device path; for SCSI I/O also include
		 * the decoded CDB and the transfer length. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB attached: fall back to sim/bus/target/lun info
		 * taken from the command itself. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
369 
370 static void
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
372 {
373 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 	struct mprsas_target *targ;
375 	uint16_t handle;
376 
377 	MPR_FUNCTRACE(sc);
378 
379 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
381 	targ = tm->cm_targ;
382 
383 	if (reply == NULL) {
384 		/* XXX retry the remove after the diag reset completes? */
385 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 		    "0x%04x\n", __func__, handle);
387 		mprsas_free_tm(sc, tm);
388 		return;
389 	}
390 
391 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 	    MPI2_IOCSTATUS_SUCCESS) {
393 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
395 	}
396 
397 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 	    le32toh(reply->TerminationCount));
399 	mpr_free_reply(sc, tm->cm_reply_data);
400 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
401 
402 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
403 	    targ->tid, handle);
404 
405 	/*
406 	 * Don't clear target if remove fails because things will get confusing.
407 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 	 * this target id if possible, and so we can assign the same target id
409 	 * to this device if it comes back in the future.
410 	 */
411 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 	    MPI2_IOCSTATUS_SUCCESS) {
413 		targ = tm->cm_targ;
414 		targ->handle = 0x0;
415 		targ->encl_handle = 0x0;
416 		targ->encl_level_valid = 0x0;
417 		targ->encl_level = 0x0;
418 		targ->connector_name[0] = ' ';
419 		targ->connector_name[1] = ' ';
420 		targ->connector_name[2] = ' ';
421 		targ->connector_name[3] = ' ';
422 		targ->encl_slot = 0x0;
423 		targ->exp_dev_handle = 0x0;
424 		targ->phy_num = 0x0;
425 		targ->linkrate = 0x0;
426 		targ->devinfo = 0x0;
427 		targ->flags = 0x0;
428 		targ->scsi_req_desc_type = 0;
429 	}
430 
431 	mprsas_free_tm(sc, tm);
432 }
433 
434 
/*
 * Volume removal does not require an MPI2_SAS_OP_REMOVE_DEVICE request;
 * otherwise deleting a volume is handled the same as bare-drive removal.
 */
439 void
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
441 {
442 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 	struct mpr_softc *sc;
444 	struct mpr_command *cm;
445 	struct mprsas_target *targ = NULL;
446 
447 	MPR_FUNCTRACE(sassc->sc);
448 	sc = sassc->sc;
449 
450 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
451 	if (targ == NULL) {
452 		/* FIXME: what is the action? */
453 		/* We don't know about this device? */
454 		mpr_dprint(sc, MPR_ERROR,
455 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456 		return;
457 	}
458 
459 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
460 
461 	cm = mprsas_alloc_tm(sc);
462 	if (cm == NULL) {
463 		mpr_dprint(sc, MPR_ERROR,
464 		    "%s: command alloc failure\n", __func__);
465 		return;
466 	}
467 
468 	mprsas_rescan_target(sc, targ);
469 
470 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 	req->DevHandle = targ->handle;
472 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
473 
474 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 		/* SAS Hard Link Reset / SATA Link Reset */
476 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477 	} else {
478 		/* PCIe Protocol Level Reset*/
479 		req->MsgFlags =
480 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
481 	}
482 
483 	cm->cm_targ = targ;
484 	cm->cm_data = NULL;
485 	cm->cm_complete = mprsas_remove_volume;
486 	cm->cm_complete_data = (void *)(uintptr_t)handle;
487 
488 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 	    __func__, targ->tid);
490 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491 
492 	mpr_map_command(sc, cm);
493 }
494 
495 /*
496  * The firmware performs debounce on the link to avoid transient link errors
497  * and false removals.  When it does decide that link has been lost and a
498  * device needs to go away, it expects that the host will perform a target reset
499  * and then an op remove.  The reset has the side-effect of aborting any
500  * outstanding requests for the device, which is required for the op-remove to
501  * succeed.  It's not clear if the host should check for the device coming back
502  * alive after the reset.
503  */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	/* Flag the target so in-flight I/O is handled appropriately while
	 * the removal sequence (reset + op-remove) is in progress. */
	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* On completion, mprsas_remove_device() reuses this command to
	 * send the follow-up SAS_OP_REMOVE_DEVICE request. */
	tm->cm_targ = targ;
	tm->cm_data = NULL;
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
555 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_remove().  On any reply (success or not) the command is
 * reused in place to send the SAS_IO_UNIT_CONTROL / REMOVE_DEVICE request,
 * whose completion is handled by mprsas_remove_complete().
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* Complete any commands still queued on the target with
	 * CAM_DEV_NOT_THERE; the reset should have aborted them in the
	 * firmware, but make sure CAM sees a completion for each. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		tm->cm_state = MPR_CM_STATE_BUSY;
		mprsas_scsiio_complete(sc, tm);
	}
}
631 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mprsas_remove_device().  On success, clears the target's bookkeeping
 * (but keeps devname/sasaddr — see comment below) and frees its LUN list.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the per-LUN records attached to this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
703 
704 static int
705 mprsas_register_events(struct mpr_softc *sc)
706 {
707 	uint8_t events[16];
708 
709 	bzero(events, 16);
710 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
711 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
712 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
713 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
714 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
715 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
716 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
717 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
718 	setbit(events, MPI2_EVENT_IR_VOLUME);
719 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
720 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
721 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
722 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
723 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
724 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
725 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
726 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
727 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
728 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
729 		}
730 	}
731 
732 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
733 	    &sc->sassc->mprsas_eh);
734 
735 	return (0);
736 }
737 
738 int
739 mpr_attach_sas(struct mpr_softc *sc)
740 {
741 	struct mprsas_softc *sassc;
742 	cam_status status;
743 	int unit, error = 0, reqs;
744 
745 	MPR_FUNCTRACE(sc);
746 	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
747 
748 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
749 	if (!sassc) {
750 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
751 		    "Cannot allocate SAS subsystem memory\n");
752 		return (ENOMEM);
753 	}
754 
755 	/*
756 	 * XXX MaxTargets could change during a reinit.  Since we don't
757 	 * resize the targets[] array during such an event, cache the value
758 	 * of MaxTargets here so that we don't get into trouble later.  This
759 	 * should move into the reinit logic.
760 	 */
761 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
762 	sassc->targets = malloc(sizeof(struct mprsas_target) *
763 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
764 	if (!sassc->targets) {
765 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
766 		    "Cannot allocate SAS target memory\n");
767 		free(sassc, M_MPR);
768 		return (ENOMEM);
769 	}
770 	sc->sassc = sassc;
771 	sassc->sc = sc;
772 
773 	reqs = sc->num_reqs - sc->num_prireqs - 1;
774 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
775 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
776 		error = ENOMEM;
777 		goto out;
778 	}
779 
780 	unit = device_get_unit(sc->mpr_dev);
781 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
782 	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
783 	if (sassc->sim == NULL) {
784 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
785 		error = EINVAL;
786 		goto out;
787 	}
788 
789 	TAILQ_INIT(&sassc->ev_queue);
790 
791 	/* Initialize taskqueue for Event Handling */
792 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
793 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
794 	    taskqueue_thread_enqueue, &sassc->ev_tq);
795 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
796 	    device_get_nameunit(sc->mpr_dev));
797 
798 	mpr_lock(sc);
799 
800 	/*
801 	 * XXX There should be a bus for every port on the adapter, but since
802 	 * we're just going to fake the topology for now, we'll pretend that
803 	 * everything is just a target on a single bus.
804 	 */
805 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
806 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
807 		    "Error %d registering SCSI bus\n", error);
808 		mpr_unlock(sc);
809 		goto out;
810 	}
811 
812 	/*
813 	 * Assume that discovery events will start right away.
814 	 *
815 	 * Hold off boot until discovery is complete.
816 	 */
817 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
818 	sc->sassc->startup_refcount = 0;
819 	mprsas_startup_increment(sassc);
820 
821 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
822 
823 	/*
824 	 * Register for async events so we can determine the EEDP
825 	 * capabilities of devices.
826 	 */
827 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
828 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
829 	    CAM_LUN_WILDCARD);
830 	if (status != CAM_REQ_CMP) {
831 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
832 		    "Error %#x creating sim path\n", status);
833 		sassc->path = NULL;
834 	} else {
835 		int event;
836 
837 #if (__FreeBSD_version >= 1000006) || \
838     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
839 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
840 #else
841 		event = AC_FOUND_DEVICE;
842 #endif
843 
844 		/*
845 		 * Prior to the CAM locking improvements, we can't call
846 		 * xpt_register_async() with a particular path specified.
847 		 *
848 		 * If a path isn't specified, xpt_register_async() will
849 		 * generate a wildcard path and acquire the XPT lock while
850 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
851 		 * It will then drop the XPT lock once that is done.
852 		 *
853 		 * If a path is specified for xpt_register_async(), it will
854 		 * not acquire and drop the XPT lock around the call to
855 		 * xpt_action().  xpt_action() asserts that the caller
856 		 * holds the SIM lock, so the SIM lock has to be held when
857 		 * calling xpt_register_async() when the path is specified.
858 		 *
859 		 * But xpt_register_async calls xpt_for_all_devices(),
860 		 * which calls xptbustraverse(), which will acquire each
861 		 * SIM lock.  When it traverses our particular bus, it will
862 		 * necessarily acquire the SIM lock, which will lead to a
863 		 * recursive lock acquisition.
864 		 *
865 		 * The CAM locking changes fix this problem by acquiring
866 		 * the XPT topology lock around bus traversal in
867 		 * xptbustraverse(), so the caller can hold the SIM lock
868 		 * and it does not cause a recursive lock acquisition.
869 		 *
870 		 * These __FreeBSD_version values are approximate, especially
871 		 * for stable/10, which is two months later than the actual
872 		 * change.
873 		 */
874 
875 #if (__FreeBSD_version < 1000703) || \
876     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
877 		mpr_unlock(sc);
878 		status = xpt_register_async(event, mprsas_async, sc,
879 					    NULL);
880 		mpr_lock(sc);
881 #else
882 		status = xpt_register_async(event, mprsas_async, sc,
883 					    sassc->path);
884 #endif
885 
886 		if (status != CAM_REQ_CMP) {
887 			mpr_dprint(sc, MPR_ERROR,
888 			    "Error %#x registering async handler for "
889 			    "AC_ADVINFO_CHANGED events\n", status);
890 			xpt_free_path(sassc->path);
891 			sassc->path = NULL;
892 		}
893 	}
894 	if (status != CAM_REQ_CMP) {
895 		/*
896 		 * EEDP use is the exception, not the rule.
897 		 * Warn the user, but do not fail to attach.
898 		 */
899 		mpr_printf(sc, "EEDP capabilities disabled.\n");
900 	}
901 
902 	mpr_unlock(sc);
903 
904 	mprsas_register_events(sc);
905 out:
906 	if (error)
907 		mpr_detach_sas(sc);
908 
909 	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
910 	return (error);
911 }
912 
/*
 * Tear down the SAS/CAM layer, undoing mpr_attach_sas() in reverse order.
 * Safe to call on a partially-attached softc (each resource is checked
 * before being freed).  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If attach bailed while startup held the simq frozen, release it
	 * so CAM isn't left wedged. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN records hanging off each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
975 
976 void
977 mprsas_discovery_end(struct mprsas_softc *sassc)
978 {
979 	struct mpr_softc *sc = sassc->sc;
980 
981 	MPR_FUNCTRACE(sc);
982 
983 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
984 		callout_stop(&sassc->discovery_callout);
985 
986 	/*
987 	 * After discovery has completed, check the mapping table for any
988 	 * missing devices and update their missing counts. Only do this once
989 	 * whenever the driver is initialized so that missing counts aren't
990 	 * updated unnecessarily. Note that just because discovery has
991 	 * completed doesn't mean that events have been processed yet. The
992 	 * check_devices function is a callout timer that checks if ALL devices
993 	 * are missing. If so, it will wait a little longer for events to
994 	 * complete and keep resetting itself until some device in the mapping
995 	 * table is not missing, meaning that event processing has started.
996 	 */
997 	if (sc->track_mapping_events) {
998 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
999 		    "completed. Check for missing devices in the mapping "
1000 		    "table.\n");
1001 		callout_reset(&sc->device_check_callout,
1002 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
1003 		    sc);
1004 	}
1005 }
1006 
/*
 * CAM action entry point for this SIM: dispatch an incoming CCB to the
 * appropriate handler.  Called with the controller mutex held (asserted
 * below).  Handlers that complete the CCB asynchronously return early;
 * all other cases fall through to xpt_done().
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits back to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* PIM_NOSCAN only exists on newer CAM versions. */
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport (link speed) settings. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device present at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware link rate code into kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate code: mark the speed invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* The resetdev handler completes the CCB itself. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* SCSI I/O is completed asynchronously by the I/O path. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1141 
1142 static void
1143 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1144     target_id_t target_id, lun_id_t lun_id)
1145 {
1146 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1147 	struct cam_path *path;
1148 
1149 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1150 	    ac_code, target_id, (uintmax_t)lun_id);
1151 
1152 	if (xpt_create_path(&path, NULL,
1153 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1154 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1155 		    "notification\n");
1156 		return;
1157 	}
1158 
1159 	xpt_async(ac_code, path, NULL);
1160 	xpt_free_path(path);
1161 }
1162 
/*
 * Force-complete every non-free command with a NULL reply.  Used during a
 * diag reset, after which the firmware will never answer anything that was
 * in flight.  Called with the controller mutex held (asserted below).
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Free the buffer allocated for a timed-out SATA ID cmd. */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Polled commands watch for COMPLETE instead of sleeping. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		/* Either run the completion callback or wake the sleeper. */
		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* No I/O can be outstanding after the sweep above. */
	sc->io_cmds_active = 0;
}
1217 
/*
 * Driver-side recovery after a controller (diag) reset: re-enter startup
 * mode, announce a bus reset to CAM, flush all outstanding commands, and
 * invalidate every cached device handle so rediscovery can repopulate
 * them.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1260 static void
1261 mprsas_tm_timeout(void *data)
1262 {
1263 	struct mpr_command *tm = data;
1264 	struct mpr_softc *sc = tm->cm_sc;
1265 
1266 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1267 
1268 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1269 	    "out\n", tm);
1270 
1271 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1272 	    ("command not inqueue\n"));
1273 
1274 	tm->cm_state = MPR_CM_STATE_BUSY;
1275 	mpr_reinit(sc);
1276 }
1277 
/*
 * Completion handler for a LUN reset task management request.  If the
 * LUN has no remaining outstanding commands, recovery for it is done and
 * any other timed-out command on the target is aborted next; otherwise
 * the reset is considered failed and recovery escalates to a target
 * reset (or a controller reset for NVMe without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed, so disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a BDR was sent so peripherals can react. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1378 
/*
 * Completion handler for a target reset task management request.  If the
 * target has no more outstanding commands, recovery is finished and CAM
 * is notified of the BDR; otherwise the reset is considered failed and
 * recovery escalates to a full controller reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1455 
1456 #define MPR_RESET_TIMEOUT 30
1457 
/*
 * Build and send a reset task management request ('type' selects LUN
 * reset or target reset) for the target attached to 'tm'.  Arms a
 * watchdog that escalates to a diag reset if the TM itself times out.
 * Returns 0 on successful submission, -1 for a missing device handle or
 * an unexpected type, or the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe targets may provide their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Watchdog: mprsas_tm_timeout() triggers a diag reset on expiry. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1532 
1533 
/*
 * Completion handler for an ABORT_TASK task management request.  If no
 * timed-out commands remain, recovery for the target is done; if other
 * timed-out commands are queued, the next one is aborted; if the aborted
 * command itself is still at the head of the timed-out list, the abort
 * failed and recovery escalates to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1614 
1615 #define MPR_ABORT_TIMEOUT 5
1616 
/*
 * Build and send an ABORT_TASK task management request for the timed-out
 * command 'cm', using the TM command 'tm'.  Arms a watchdog that
 * escalates to a diag reset if the abort itself times out.  Returns 0 on
 * successful submission, -1 for a missing device handle, or the error
 * from mpr_map_command().
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* The firmware identifies the command to abort by its SMID. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* NVMe targets may use a device-specific abort timeout. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	/* Watchdog: mprsas_tm_timeout() triggers a diag reset on expiry. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1670 
/*
 * Callout handler for a timed-out SCSI I/O command.  Marks the command
 * as timed out, queues it on the target's timed-out list, and starts (or
 * joins) abort-based recovery for the target.  Called with the
 * controller mutex held (asserted below).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data holds the submit timestamp set at dispatch. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1759 
1760 /**
1761  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1762  *			     to SCSI Unmap.
1763  * Return 0 - for success,
1764  *	  1 - to immediately return back the command with success status to CAM
1765  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1766  *			   to FW without any translation.
1767  */
1768 static int
1769 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1770     union ccb *ccb, struct mprsas_target *targ)
1771 {
1772 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1773 	struct ccb_scsiio *csio;
1774 	struct unmap_parm_list *plist;
1775 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1776 	struct nvme_command *c;
1777 	int i, res;
1778 	uint16_t ndesc, list_len, data_length;
1779 	struct mpr_prp_page *prp_page_info;
1780 	uint64_t nvme_dsm_ranges_dma_handle;
1781 
1782 	csio = &ccb->csio;
1783 #if __FreeBSD_version >= 1100103
1784 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1785 #else
1786 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1787 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1788 		    ccb->csio.cdb_io.cdb_ptr[8]);
1789 	} else {
1790 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1791 		    ccb->csio.cdb_io.cdb_bytes[8]);
1792 	}
1793 #endif
1794 	if (!list_len) {
1795 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1796 		return -EINVAL;
1797 	}
1798 
1799 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1800 	if (!plist) {
1801 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1802 		    "save UNMAP data\n");
1803 		return -ENOMEM;
1804 	}
1805 
1806 	/* Copy SCSI unmap data to a local buffer */
1807 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1808 
1809 	/* return back the unmap command to CAM with success status,
1810 	 * if number of descripts is zero.
1811 	 */
1812 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1813 	if (!ndesc) {
1814 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1815 		    "UNMAP cmd is Zero\n");
1816 		res = 1;
1817 		goto out;
1818 	}
1819 
1820 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1821 	if (data_length > targ->MDTS) {
1822 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1823 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1824 		res = -EINVAL;
1825 		goto out;
1826 	}
1827 
1828 	prp_page_info = mpr_alloc_prp_page(sc);
1829 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1830 	    "UNMAP command.\n", __func__));
1831 
1832 	/*
1833 	 * Insert the allocated PRP page into the command's PRP page list. This
1834 	 * will be freed when the command is freed.
1835 	 */
1836 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1837 
1838 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1839 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1840 
1841 	bzero(nvme_dsm_ranges, data_length);
1842 
1843 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1844 	 * for each descriptors contained in SCSI UNMAP data.
1845 	 */
1846 	for (i = 0; i < ndesc; i++) {
1847 		nvme_dsm_ranges[i].length =
1848 		    htole32(be32toh(plist->desc[i].nlb));
1849 		nvme_dsm_ranges[i].starting_lba =
1850 		    htole64(be64toh(plist->desc[i].slba));
1851 		nvme_dsm_ranges[i].attributes = 0;
1852 	}
1853 
1854 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1855 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1856 	bzero(req, sizeof(*req));
1857 	req->DevHandle = htole16(targ->handle);
1858 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1859 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1860 	req->ErrorResponseBaseAddress.High =
1861 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1862 	req->ErrorResponseBaseAddress.Low =
1863 	    htole32(cm->cm_sense_busaddr);
1864 	req->ErrorResponseAllocationLength =
1865 	    htole16(sizeof(struct nvme_completion));
1866 	req->EncapsulatedCommandLength =
1867 	    htole16(sizeof(struct nvme_command));
1868 	req->DataLength = htole32(data_length);
1869 
1870 	/* Build NVMe DSM command */
1871 	c = (struct nvme_command *) req->NVMe_Command;
1872 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1873 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1874 	c->cdw10 = htole32(ndesc - 1);
1875 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1876 
1877 	cm->cm_length = data_length;
1878 	cm->cm_data = NULL;
1879 
1880 	cm->cm_complete = mprsas_scsiio_complete;
1881 	cm->cm_complete_data = ccb;
1882 	cm->cm_targ = targ;
1883 	cm->cm_lun = csio->ccb_h.target_lun;
1884 	cm->cm_ccb = ccb;
1885 
1886 	cm->cm_desc.Default.RequestFlags =
1887 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1888 
1889 	csio->ccb_h.qos.sim_data = sbinuptime();
1890 #if __FreeBSD_version >= 1000029
1891 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1892 	    mprsas_scsiio_timeout, cm, 0);
1893 #else //__FreeBSD_version < 1000029
1894 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1895 	    mprsas_scsiio_timeout, cm);
1896 #endif //__FreeBSD_version >= 1000029
1897 
1898 	targ->issued++;
1899 	targ->outstanding++;
1900 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1901 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1902 
1903 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1904 	    __func__, cm, ccb, targ->outstanding);
1905 
1906 	mpr_build_nvme_prp(sc, cm, req,
1907 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1908 	mpr_map_command(sc, cm);
1909 
1910 out:
1911 	free(plist, M_MPR);
1912 	return 0;
1913 }
1914 
/*
 * mprsas_action_scsiio
 *
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, build an MPI2 SCSI IO request (or a native NVMe DSM
 * request for UNMAP commands sent to NVMe drives), configure EEDP and TLR
 * as needed, and queue the request to the controller.  Completion is
 * delivered asynchronously via mprsas_scsiio_complete().  Must be called
 * with the mpr mutex held; errors are reported by setting the CCB status
 * and calling xpt_done() before returning.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means there is no device at this target. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * RAID component members are addressed through the RAID volume, not
	 * directly; refuse plain SCSI I/O to them.
	 */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Reject new I/O once the controller has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free commands (or a diag reset is in flight): freeze the SIM
	 * queue and ask CAM to requeue the CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/*
		 * NOTE(review): any other rc falls through and issues the
		 * UNMAP as a regular SCSI IO using the same cm — confirm
		 * that is the intended fallback for build failures.
		 */
	}

	/* Build the MPI2 SCSI IO request in the command's request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* A 32-byte CDB carries 4 extra dwords beyond the base 16 bytes. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Apply this target's current Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from the CCB, honoring the CAM_CDB_POINTER flag. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN this I/O is addressed to (NULL if unknown). */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs,
				 * byte 2 of 10/12-byte CDBs. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT (001b) in byte 1. */
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data is mapped later from the CCB itself. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* SGL occupies the request frame from dword 24 to dword 32. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Record submit time and arm the per-command timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2228 
2229 /**
2230  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2231  */
2232 static void
2233 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2234     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2235 {
2236 	u32 response_info;
2237 	u8 *response_bytes;
2238 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2239 	    MPI2_IOCSTATUS_MASK;
2240 	u8 scsi_state = mpi_reply->SCSIState;
2241 	u8 scsi_status = mpi_reply->SCSIStatus;
2242 	char *desc_ioc_state = NULL;
2243 	char *desc_scsi_status = NULL;
2244 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2245 
2246 	if (log_info == 0x31170000)
2247 		return;
2248 
2249 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2250 	     ioc_status);
2251 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2252 	    scsi_status);
2253 
2254 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2255 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2256 	if (targ->encl_level_valid) {
2257 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2258 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2259 		    targ->connector_name);
2260 	}
2261 
2262 	/*
2263 	 * We can add more detail about underflow data here
2264 	 * TO-DO
2265 	 */
2266 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2267 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2268 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2269 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2270 
2271 	if (sc->mpr_debug & MPR_XINFO &&
2272 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2273 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2274 		scsi_sense_print(csio);
2275 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2276 	}
2277 
2278 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2279 		response_info = le32toh(mpi_reply->ResponseInfo);
2280 		response_bytes = (u8 *)&response_info;
2281 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2282 		    response_bytes[0],
2283 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2284 		    response_bytes[0]));
2285 	}
2286 }
2287 
/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to
 * equivalent SCSI error status.
 *
 * Maps the NVMe Status Code Type (SCT) and Status Code (SC) extracted
 * from @nvme_status to a SCSI status byte plus fixed-format sense data
 * (sense key / ASC / ASCQ), which is written into the CCB attached to
 * @cm.  Unrecognized codes fall back to CHECK CONDITION with
 * ILLEGAL REQUEST sense.  CAM_AUTOSNS_VALID is always set on the CCB.
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any code not matched below:
	 * CHECK CONDITION / ILLEGAL REQUEST with no specific cause.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		/* Generic command status (SCT 0h). */
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			/* All abort variants map to TASK ABORTED. */
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		/* Command-specific status (SCT 1h). */
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		/* Media and data integrity errors (SCT 2h). */
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * Fixed-format sense data is always a full scsi_sense_data; report
	 * any shortfall relative to the requested sense length as residual.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2464 
2465 /** mprsas_complete_nvme_unmap
2466  *
2467  * Complete native NVMe command issued using NVMe Encapsulated
2468  * Request Message.
2469  */
2470 static u8
2471 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2472 {
2473 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2474 	struct nvme_completion *nvme_completion = NULL;
2475 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2476 
2477 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2478 	if (le16toh(mpi_reply->ErrorResponseCount)){
2479 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2480 		scsi_status = mprsas_nvme_trans_status_code(
2481 		    nvme_completion->status, cm);
2482 	}
2483 	return scsi_status;
2484 }
2485 
2486 static void
2487 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2488 {
2489 	MPI2_SCSI_IO_REPLY *rep;
2490 	union ccb *ccb;
2491 	struct ccb_scsiio *csio;
2492 	struct mprsas_softc *sassc;
2493 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2494 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2495 	int dir = 0, i;
2496 	u16 alloc_len;
2497 	struct mprsas_target *target;
2498 	target_id_t target_id;
2499 
2500 	MPR_FUNCTRACE(sc);
2501 	mpr_dprint(sc, MPR_TRACE,
2502 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2503 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2504 	    cm->cm_targ->outstanding);
2505 
2506 	callout_stop(&cm->cm_callout);
2507 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2508 
2509 	sassc = sc->sassc;
2510 	ccb = cm->cm_complete_data;
2511 	csio = &ccb->csio;
2512 	target_id = csio->ccb_h.target_id;
2513 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2514 	/*
2515 	 * XXX KDM if the chain allocation fails, does it matter if we do
2516 	 * the sync and unload here?  It is simpler to do it in every case,
2517 	 * assuming it doesn't cause problems.
2518 	 */
2519 	if (cm->cm_data != NULL) {
2520 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2521 			dir = BUS_DMASYNC_POSTREAD;
2522 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2523 			dir = BUS_DMASYNC_POSTWRITE;
2524 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2525 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2526 	}
2527 
2528 	cm->cm_targ->completed++;
2529 	cm->cm_targ->outstanding--;
2530 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2531 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2532 
2533 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2534 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2535 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2536 		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2537 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2538 		if (cm->cm_reply != NULL)
2539 			mprsas_log_command(cm, MPR_RECOVERY,
2540 			    "completed timedout cm %p ccb %p during recovery "
2541 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2542 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2543 			    rep->SCSIState, le32toh(rep->TransferCount));
2544 		else
2545 			mprsas_log_command(cm, MPR_RECOVERY,
2546 			    "completed timedout cm %p ccb %p during recovery\n",
2547 			    cm, cm->cm_ccb);
2548 	} else if (cm->cm_targ->tm != NULL) {
2549 		if (cm->cm_reply != NULL)
2550 			mprsas_log_command(cm, MPR_RECOVERY,
2551 			    "completed cm %p ccb %p during recovery "
2552 			    "ioc %x scsi %x state %x xfer %u\n",
2553 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2554 			    rep->SCSIStatus, rep->SCSIState,
2555 			    le32toh(rep->TransferCount));
2556 		else
2557 			mprsas_log_command(cm, MPR_RECOVERY,
2558 			    "completed cm %p ccb %p during recovery\n",
2559 			    cm, cm->cm_ccb);
2560 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2561 		mprsas_log_command(cm, MPR_RECOVERY,
2562 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2563 	}
2564 
2565 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2566 		/*
2567 		 * We ran into an error after we tried to map the command,
2568 		 * so we're getting a callback without queueing the command
2569 		 * to the hardware.  So we set the status here, and it will
2570 		 * be retained below.  We'll go through the "fast path",
2571 		 * because there can be no reply when we haven't actually
2572 		 * gone out to the hardware.
2573 		 */
2574 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2575 
2576 		/*
2577 		 * Currently the only error included in the mask is
2578 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2579 		 * chain frames.  We need to freeze the queue until we get
2580 		 * a command that completed without this error, which will
2581 		 * hopefully have some chain frames attached that we can
2582 		 * use.  If we wanted to get smarter about it, we would
2583 		 * only unfreeze the queue in this condition when we're
2584 		 * sure that we're getting some chain frames back.  That's
2585 		 * probably unnecessary.
2586 		 */
2587 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2588 			xpt_freeze_simq(sassc->sim, 1);
2589 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2590 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2591 			    "freezing SIM queue\n");
2592 		}
2593 	}
2594 
2595 	/*
2596 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2597 	 * flag, and use it in a few places in the rest of this function for
2598 	 * convenience. Use the macro if available.
2599 	 */
2600 #if __FreeBSD_version >= 1100103
2601 	scsi_cdb = scsiio_cdb_ptr(csio);
2602 #else
2603 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2604 		scsi_cdb = csio->cdb_io.cdb_ptr;
2605 	else
2606 		scsi_cdb = csio->cdb_io.cdb_bytes;
2607 #endif
2608 
2609 	/*
2610 	 * If this is a Start Stop Unit command and it was issued by the driver
2611 	 * during shutdown, decrement the refcount to account for all of the
2612 	 * commands that were sent.  All SSU commands should be completed before
2613 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2614 	 * is TRUE.
2615 	 */
2616 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2617 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2618 		sc->SSU_refcount--;
2619 	}
2620 
2621 	/* Take the fast path to completion */
2622 	if (cm->cm_reply == NULL) {
2623 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2624 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2625 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2626 			else {
2627 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2628 				csio->scsi_status = SCSI_STATUS_OK;
2629 			}
2630 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2631 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2632 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2633 				mpr_dprint(sc, MPR_XINFO,
2634 				    "Unfreezing SIM queue\n");
2635 			}
2636 		}
2637 
2638 		/*
2639 		 * There are two scenarios where the status won't be
2640 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2641 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2642 		 */
2643 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2644 			/*
2645 			 * Freeze the dev queue so that commands are
2646 			 * executed in the correct order after error
2647 			 * recovery.
2648 			 */
2649 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2650 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2651 		}
2652 		mpr_free_command(sc, cm);
2653 		xpt_done(ccb);
2654 		return;
2655 	}
2656 
2657 	target = &sassc->targets[target_id];
2658 	if (scsi_cdb[0] == UNMAP &&
2659 	    target->is_nvme &&
2660 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2661 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2662 		csio->scsi_status = rep->SCSIStatus;
2663 	}
2664 
2665 	mprsas_log_command(cm, MPR_XINFO,
2666 	    "ioc %x scsi %x state %x xfer %u\n",
2667 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2668 	    le32toh(rep->TransferCount));
2669 
2670 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2671 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2672 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2673 		/* FALLTHROUGH */
2674 	case MPI2_IOCSTATUS_SUCCESS:
2675 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2676 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2677 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2678 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2679 
2680 		/* Completion failed at the transport level. */
2681 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2682 		    MPI2_SCSI_STATE_TERMINATED)) {
2683 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2684 			break;
2685 		}
2686 
2687 		/* In a modern packetized environment, an autosense failure
2688 		 * implies that there's not much else that can be done to
2689 		 * recover the command.
2690 		 */
2691 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2692 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2693 			break;
2694 		}
2695 
2696 		/*
2697 		 * CAM doesn't care about SAS Response Info data, but if this is
2698 		 * the state check if TLR should be done.  If not, clear the
2699 		 * TLR_bits for the target.
2700 		 */
2701 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2702 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2703 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2704 			sc->mapping_table[target_id].TLR_bits =
2705 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2706 		}
2707 
2708 		/*
2709 		 * Intentionally override the normal SCSI status reporting
2710 		 * for these two cases.  These are likely to happen in a
2711 		 * multi-initiator environment, and we want to make sure that
2712 		 * CAM retries these commands rather than fail them.
2713 		 */
2714 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2715 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2716 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2717 			break;
2718 		}
2719 
2720 		/* Handle normal status and sense */
2721 		csio->scsi_status = rep->SCSIStatus;
2722 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2723 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2724 		else
2725 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2726 
2727 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2728 			int sense_len, returned_sense_len;
2729 
2730 			returned_sense_len = min(le32toh(rep->SenseCount),
2731 			    sizeof(struct scsi_sense_data));
2732 			if (returned_sense_len < csio->sense_len)
2733 				csio->sense_resid = csio->sense_len -
2734 				    returned_sense_len;
2735 			else
2736 				csio->sense_resid = 0;
2737 
2738 			sense_len = min(returned_sense_len,
2739 			    csio->sense_len - csio->sense_resid);
2740 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2741 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2742 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2743 		}
2744 
2745 		/*
2746 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2747 		 * and it's page code 0 (Supported Page List), and there is
2748 		 * inquiry data, and this is for a sequential access device, and
2749 		 * the device is an SSP target, and TLR is supported by the
2750 		 * controller, turn the TLR_bits value ON if page 0x90 is
2751 		 * supported.
2752 		 */
2753 		if ((scsi_cdb[0] == INQUIRY) &&
2754 		    (scsi_cdb[1] & SI_EVPD) &&
2755 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2756 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2757 		    (csio->data_ptr != NULL) &&
2758 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2759 		    (sc->control_TLR) &&
2760 		    (sc->mapping_table[target_id].device_info &
2761 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2762 			vpd_list = (struct scsi_vpd_supported_page_list *)
2763 			    csio->data_ptr;
2764 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2765 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2766 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2767 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2768 			alloc_len -= csio->resid;
2769 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2770 				if (vpd_list->list[i] == 0x90) {
2771 					*TLR_bits = TLR_on;
2772 					break;
2773 				}
2774 			}
2775 		}
2776 
2777 		/*
2778 		 * If this is a SATA direct-access end device, mark it so that
2779 		 * a SCSI StartStopUnit command will be sent to it when the
2780 		 * driver is being shutdown.
2781 		 */
2782 		if ((scsi_cdb[0] == INQUIRY) &&
2783 		    (csio->data_ptr != NULL) &&
2784 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2785 		    (sc->mapping_table[target_id].device_info &
2786 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2787 		    ((sc->mapping_table[target_id].device_info &
2788 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2789 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2790 			target = &sassc->targets[target_id];
2791 			target->supports_SSU = TRUE;
2792 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2793 			    target_id);
2794 		}
2795 		break;
2796 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2797 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2798 		/*
2799 		 * If devinfo is 0 this will be a volume.  In that case don't
2800 		 * tell CAM that the volume is not there.  We want volumes to
2801 		 * be enumerated until they are deleted/removed, not just
2802 		 * failed.
2803 		 */
2804 		if (cm->cm_targ->devinfo == 0)
2805 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2806 		else
2807 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2808 		break;
2809 	case MPI2_IOCSTATUS_INVALID_SGL:
2810 		mpr_print_scsiio_cmd(sc, cm);
2811 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2812 		break;
2813 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2814 		/*
2815 		 * This is one of the responses that comes back when an I/O
2816 		 * has been aborted.  If it is because of a timeout that we
2817 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2818 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2819 		 * command is the same (it gets retried, subject to the
2820 		 * retry counter), the only difference is what gets printed
2821 		 * on the console.
2822 		 */
2823 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2824 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2825 		else
2826 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2827 		break;
2828 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2829 		/* resid is ignored for this condition */
2830 		csio->resid = 0;
2831 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2832 		break;
2833 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2834 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2835 		/*
2836 		 * These can sometimes be transient transport-related
2837 		 * errors, and sometimes persistent drive-related errors.
2838 		 * We used to retry these without decrementing the retry
2839 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2840 		 * we hit a persistent drive problem that returns one of
2841 		 * these error codes, we would retry indefinitely.  So,
2842 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2843 		 * count and avoid infinite retries.  We're taking the
2844 		 * potential risk of flagging false failures in the event
2845 		 * of a topology-related error (e.g. a SAS expander problem
2846 		 * causes a command addressed to a drive to fail), but
2847 		 * avoiding getting into an infinite retry loop.
2848 		 */
2849 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2850 		mpr_dprint(sc, MPR_INFO,
2851 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2852 		    mpr_describe_table(mpr_iocstatus_string,
2853 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2854 		    target_id, cm->cm_desc.Default.SMID,
2855 		    le32toh(rep->IOCLogInfo));
2856 		mpr_dprint(sc, MPR_XINFO,
2857 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2858 		    rep->SCSIStatus, rep->SCSIState,
2859 		    le32toh(rep->TransferCount));
2860 		break;
2861 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2862 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2863 	case MPI2_IOCSTATUS_INVALID_VPID:
2864 	case MPI2_IOCSTATUS_INVALID_FIELD:
2865 	case MPI2_IOCSTATUS_INVALID_STATE:
2866 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2867 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2868 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2869 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2870 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2871 	default:
2872 		mprsas_log_command(cm, MPR_XINFO,
2873 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2874 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2875 		    rep->SCSIStatus, rep->SCSIState,
2876 		    le32toh(rep->TransferCount));
2877 		csio->resid = cm->cm_length;
2878 
2879 		if (scsi_cdb[0] == UNMAP &&
2880 		    target->is_nvme &&
2881 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2882 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2883 		else
2884 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2885 
2886 		break;
2887 	}
2888 
2889 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2890 
2891 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2892 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2893 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2894 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2895 		    "queue\n");
2896 	}
2897 
2898 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2899 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2900 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2901 	}
2902 
2903 	mpr_free_command(sc, cm);
2904 	xpt_done(ccb);
2905 }
2906 
2907 #if __FreeBSD_version >= 900026
2908 static void
2909 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2910 {
2911 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2912 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2913 	uint64_t sasaddr;
2914 	union ccb *ccb;
2915 
2916 	ccb = cm->cm_complete_data;
2917 
2918 	/*
2919 	 * Currently there should be no way we can hit this case.  It only
2920 	 * happens when we have a failure to allocate chain frames, and SMP
2921 	 * commands require two S/G elements only.  That should be handled
2922 	 * in the standard request size.
2923 	 */
2924 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2925 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2926 		    "request!\n", __func__, cm->cm_flags);
2927 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2928 		goto bailout;
2929         }
2930 
2931 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2932 	if (rpl == NULL) {
2933 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2934 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2935 		goto bailout;
2936 	}
2937 
2938 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2939 	sasaddr = le32toh(req->SASAddress.Low);
2940 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2941 
2942 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2943 	    MPI2_IOCSTATUS_SUCCESS ||
2944 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2945 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2946 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2947 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2948 		goto bailout;
2949 	}
2950 
2951 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2952 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
2953 
2954 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2955 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2956 	else
2957 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2958 
2959 bailout:
2960 	/*
2961 	 * We sync in both directions because we had DMAs in the S/G list
2962 	 * in both directions.
2963 	 */
2964 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2965 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2966 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2967 	mpr_free_command(sc, cm);
2968 	xpt_done(ccb);
2969 }
2970 
/*
 * Build and dispatch an SMP passthrough request to the device at
 * 'sasaddr' on behalf of an XPT_SMP_IO CCB.  The request and response
 * buffers are described with a two-element uio so a single
 * mpr_map_command() call covers the bidirectional transfer.  Completion
 * is handled asynchronously by mprsas_smpio_complete().  On any failure
 * the CCB is completed immediately with an error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Extract virtual addresses for the request and response buffers
	 * from the CCB.  Newer FreeBSD versions encode the data layout in
	 * CAM_DATA_MASK; older ones use CAM_SCATTER_VALID and related
	 * flags.  Either way, only single-segment virtual buffers are
	 * supported.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outbound request, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3201 
/*
 * Handle an XPT_SMP_IO CCB: resolve which SAS address the SMP request
 * should be routed to -- the target itself if it contains an embedded
 * SMP target, otherwise its parent (normally the attached expander) --
 * and hand the CCB off to mprsas_send_smpcmd().  Completes the CCB with
 * an error status if no usable SAS address can be found.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe code: look the parent up in the target table
		 * and take its SAS address, after verifying it actually
		 * contains an SMP target.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * Current probe code caches the parent's devinfo and SAS
		 * address directly on the target, so no lookup is needed.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3329 #endif //__FreeBSD_version >= 900026
3330 
3331 static void
3332 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3333 {
3334 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3335 	struct mpr_softc *sc;
3336 	struct mpr_command *tm;
3337 	struct mprsas_target *targ;
3338 
3339 	MPR_FUNCTRACE(sassc->sc);
3340 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3341 
3342 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3343 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3344 	sc = sassc->sc;
3345 	tm = mprsas_alloc_tm(sc);
3346 	if (tm == NULL) {
3347 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3348 		    "mprsas_action_resetdev\n");
3349 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3350 		xpt_done(ccb);
3351 		return;
3352 	}
3353 
3354 	targ = &sassc->targets[ccb->ccb_h.target_id];
3355 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3356 	req->DevHandle = htole16(targ->handle);
3357 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3358 
3359 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3360 		/* SAS Hard Link Reset / SATA Link Reset */
3361 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3362 	} else {
3363 		/* PCIe Protocol Level Reset*/
3364 		req->MsgFlags =
3365 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3366 	}
3367 
3368 	tm->cm_data = NULL;
3369 	tm->cm_complete = mprsas_resetdev_complete;
3370 	tm->cm_complete_data = ccb;
3371 
3372 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3373 	    __func__, targ->tid);
3374 	tm->cm_targ = targ;
3375 
3376 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3377 	mpr_map_command(sc, tm);
3378 }
3379 
3380 static void
3381 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3382 {
3383 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3384 	union ccb *ccb;
3385 
3386 	MPR_FUNCTRACE(sc);
3387 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3388 
3389 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3390 	ccb = tm->cm_complete_data;
3391 
3392 	/*
3393 	 * Currently there should be no way we can hit this case.  It only
3394 	 * happens when we have a failure to allocate chain frames, and
3395 	 * task management commands don't have S/G lists.
3396 	 */
3397 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3398 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3399 
3400 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3401 
3402 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3403 		    "handle %#04x! This should not happen!\n", __func__,
3404 		    tm->cm_flags, req->DevHandle);
3405 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3406 		goto bailout;
3407 	}
3408 
3409 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3410 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3411 
3412 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3413 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3414 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3415 		    CAM_LUN_WILDCARD);
3416 	}
3417 	else
3418 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3419 
3420 bailout:
3421 
3422 	mprsas_free_tm(sc, tm);
3423 	xpt_done(ccb);
3424 }
3425 
3426 static void
3427 mprsas_poll(struct cam_sim *sim)
3428 {
3429 	struct mprsas_softc *sassc;
3430 
3431 	sassc = cam_sim_softc(sim);
3432 
3433 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3434 		/* frequent debug messages during a panic just slow
3435 		 * everything down too much.
3436 		 */
3437 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3438 		    __func__);
3439 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3440 	}
3441 
3442 	mpr_intr_locked(sassc->sc);
3443 }
3444 
/*
 * CAM async event callback.  Two events are of interest:
 *
 * AC_ADVINFO_CHANGED (on FreeBSD versions that support it): when the
 * long read-capacity data for a LUN changes, fetch it via an
 * XPT_DEV_ADVINFO CCB and cache whether the LUN is formatted for EEDP
 * (protection types 1 or 3) along with its block size.
 *
 * AC_FOUND_DEVICE: on versions without AC_ADVINFO_CHANGED, probe the
 * newly found device for EEDP support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this event refers to; create it if unknown. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data with an
		 * XPT_DEV_ADVINFO CCB issued inline on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * EEDP is only usable with protection types 1 and 3;
		 * type 2 (or anything else) is treated as unformatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3585 
3586 #if (__FreeBSD_version < 901503) || \
3587     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3588 static void
3589 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3590     struct ccb_getdev *cgd)
3591 {
3592 	struct mprsas_softc *sassc = sc->sassc;
3593 	struct ccb_scsiio *csio;
3594 	struct scsi_read_capacity_16 *scsi_cmd;
3595 	struct scsi_read_capacity_eedp *rcap_buf;
3596 	path_id_t pathid;
3597 	target_id_t targetid;
3598 	lun_id_t lunid;
3599 	union ccb *ccb;
3600 	struct cam_path *local_path;
3601 	struct mprsas_target *target;
3602 	struct mprsas_lun *lun;
3603 	uint8_t	found_lun;
3604 	char path_str[64];
3605 
3606 	pathid = cam_sim_path(sassc->sim);
3607 	targetid = xpt_path_target_id(path);
3608 	lunid = xpt_path_lun_id(path);
3609 
3610 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3611 	    "mprsas_check_eedp\n", targetid));
3612 	target = &sassc->targets[targetid];
3613 	if (target->handle == 0x0)
3614 		return;
3615 
3616 	/*
3617 	 * Determine if the device is EEDP capable.
3618 	 *
3619 	 * If this flag is set in the inquiry data, the device supports
3620 	 * protection information, and must support the 16 byte read capacity
3621 	 * command, otherwise continue without sending read cap 16.
3622 	 */
3623 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3624 		return;
3625 
3626 	/*
3627 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3628 	 * the LUN is formatted for EEDP support.
3629 	 */
3630 	ccb = xpt_alloc_ccb_nowait();
3631 	if (ccb == NULL) {
3632 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3633 		    "support.\n");
3634 		return;
3635 	}
3636 
3637 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3638 	    CAM_REQ_CMP) {
3639 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3640 		    "support.\n");
3641 		xpt_free_ccb(ccb);
3642 		return;
3643 	}
3644 
3645 	/*
3646 	 * If LUN is already in list, don't create a new one.
3647 	 */
3648 	found_lun = FALSE;
3649 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3650 		if (lun->lun_id == lunid) {
3651 			found_lun = TRUE;
3652 			break;
3653 		}
3654 	}
3655 	if (!found_lun) {
3656 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3657 		    M_NOWAIT | M_ZERO);
3658 		if (lun == NULL) {
3659 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3660 			    "EEDP support.\n");
3661 			xpt_free_path(local_path);
3662 			xpt_free_ccb(ccb);
3663 			return;
3664 		}
3665 		lun->lun_id = lunid;
3666 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3667 	}
3668 
3669 	xpt_path_string(local_path, path_str, sizeof(path_str));
3670 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3671 	    path_str, target->handle);
3672 
3673 	/*
3674 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3675 	 * mprsas_read_cap_done function will load the read cap info into the
3676 	 * LUN struct.
3677 	 */
3678 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3679 	    M_NOWAIT | M_ZERO);
3680 	if (rcap_buf == NULL) {
3681 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3682 		    "buffer for EEDP support.\n");
3683 		xpt_free_path(ccb->ccb_h.path);
3684 		xpt_free_ccb(ccb);
3685 		return;
3686 	}
3687 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3688 	csio = &ccb->csio;
3689 	csio->ccb_h.func_code = XPT_SCSI_IO;
3690 	csio->ccb_h.flags = CAM_DIR_IN;
3691 	csio->ccb_h.retry_count = 4;
3692 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3693 	csio->ccb_h.timeout = 60000;
3694 	csio->data_ptr = (uint8_t *)rcap_buf;
3695 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3696 	csio->sense_len = MPR_SENSE_LEN;
3697 	csio->cdb_len = sizeof(*scsi_cmd);
3698 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3699 
3700 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3701 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3702 	scsi_cmd->opcode = 0x9E;
3703 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3704 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3705 
3706 	ccb->ccb_h.ppriv_ptr1 = sassc;
3707 	xpt_action(ccb);
3708 }
3709 
/*
 * Completion handler for the driver-internal READ CAPACITY 16 command issued
 * by the EEDP discovery path above (via cbfcnp/ppriv_ptr1).  Looks up the
 * CCB's LUN in the target's LUN list and records whether the LUN is formatted
 * for EEDP (protection information) and its block size, then frees the data
 * buffer, path, and CCB that the submit path allocated.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq here because this SCSI command was
	 * generated internally by the driver rather than submitted through a
	 * CAM peripheral, so completion never passes back through cam_periph
	 * (which would normally perform the release).  This is currently the
	 * only place the driver issues a SCSI command internally; any future
	 * internal commands will need the same treatment, since they too will
	 * not return to cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	/* Data buffer allocated by the submit path; freed below. */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/*
		 * Low bit of the protect byte is PROT_EN: the LUN was
		 * formatted with protection information.
		 * NOTE(review): target_lun is lun_id_t; the %d conversions
		 * below may mismatch on platforms where lun_id_t is 64-bit --
		 * verify the format specifiers.
		 */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Done with the internally allocated buffer, path, and CCB. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3778 #endif /* (__FreeBSD_version < 901503) || \
3779           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3780 
3781 /*
3782  * Set the INRESET flag for this target so that no I/O will be sent to
3783  * the target until the reset has completed.  If an I/O request does
3784  * happen, the devq will be frozen.  The CCB holds the path which is
3785  * used to release the devq.  The devq is released and the CCB is freed
3786  * when the TM completes.
3787  */
3788 void
3789 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3790     struct mprsas_target *target, lun_id_t lun_id)
3791 {
3792 	union ccb *ccb;
3793 	path_id_t path_id;
3794 
3795 	ccb = xpt_alloc_ccb_nowait();
3796 	if (ccb) {
3797 		path_id = cam_sim_path(sc->sassc->sim);
3798 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3799 		    target->tid, lun_id) != CAM_REQ_CMP) {
3800 			xpt_free_ccb(ccb);
3801 		} else {
3802 			tm->cm_ccb = ccb;
3803 			tm->cm_targ = target;
3804 			target->flags |= MPRSAS_TARGET_INRESET;
3805 		}
3806 	}
3807 }
3808 
3809 int
3810 mprsas_startup(struct mpr_softc *sc)
3811 {
3812 	/*
3813 	 * Send the port enable message and set the wait_for_port_enable flag.
3814 	 * This flag helps to keep the simq frozen until all discovery events
3815 	 * are processed.
3816 	 */
3817 	sc->wait_for_port_enable = 1;
3818 	mprsas_send_portenable(sc);
3819 	return (0);
3820 }
3821 
3822 static int
3823 mprsas_send_portenable(struct mpr_softc *sc)
3824 {
3825 	MPI2_PORT_ENABLE_REQUEST *request;
3826 	struct mpr_command *cm;
3827 
3828 	MPR_FUNCTRACE(sc);
3829 
3830 	if ((cm = mpr_alloc_command(sc)) == NULL)
3831 		return (EBUSY);
3832 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3833 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3834 	request->MsgFlags = 0;
3835 	request->VP_ID = 0;
3836 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3837 	cm->cm_complete = mprsas_portenable_complete;
3838 	cm->cm_data = NULL;
3839 	cm->cm_sge = NULL;
3840 
3841 	mpr_map_command(sc, cm);
3842 	mpr_dprint(sc, MPR_XINFO,
3843 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3844 	    cm, cm->cm_req, cm->cm_complete);
3845 	return (0);
3846 }
3847 
3848 static void
3849 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3850 {
3851 	MPI2_PORT_ENABLE_REPLY *reply;
3852 	struct mprsas_softc *sassc;
3853 
3854 	MPR_FUNCTRACE(sc);
3855 	sassc = sc->sassc;
3856 
3857 	/*
3858 	 * Currently there should be no way we can hit this case.  It only
3859 	 * happens when we have a failure to allocate chain frames, and
3860 	 * port enable commands don't have S/G lists.
3861 	 */
3862 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3863 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3864 		    "This should not happen!\n", __func__, cm->cm_flags);
3865 	}
3866 
3867 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3868 	if (reply == NULL)
3869 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3870 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3871 	    MPI2_IOCSTATUS_SUCCESS)
3872 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3873 
3874 	mpr_free_command(sc, cm);
3875 	/*
3876 	 * Done waiting for port enable to complete.  Decrement the refcount.
3877 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3878 	 * take place.
3879 	 */
3880 	sc->wait_for_port_enable = 0;
3881 	sc->port_enable_complete = 1;
3882 	wakeup(&sc->port_enable_complete);
3883 	mprsas_startup_decrement(sassc);
3884 }
3885 
3886 int
3887 mprsas_check_id(struct mprsas_softc *sassc, int id)
3888 {
3889 	struct mpr_softc *sc = sassc->sc;
3890 	char *ids;
3891 	char *name;
3892 
3893 	ids = &sc->exclude_ids[0];
3894 	while((name = strsep(&ids, ",")) != NULL) {
3895 		if (name[0] == '\0')
3896 			continue;
3897 		if (strtol(name, NULL, 0) == (long)id)
3898 			return (1);
3899 	}
3900 
3901 	return (0);
3902 }
3903 
3904 void
3905 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3906 {
3907 	struct mprsas_softc *sassc;
3908 	struct mprsas_lun *lun, *lun_tmp;
3909 	struct mprsas_target *targ;
3910 	int i;
3911 
3912 	sassc = sc->sassc;
3913 	/*
3914 	 * The number of targets is based on IOC Facts, so free all of
3915 	 * the allocated LUNs for each target and then the target buffer
3916 	 * itself.
3917 	 */
3918 	for (i=0; i< maxtargets; i++) {
3919 		targ = &sassc->targets[i];
3920 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3921 			free(lun, M_MPR);
3922 		}
3923 	}
3924 	free(sassc->targets, M_MPR);
3925 
3926 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3927 	    M_MPR, M_WAITOK|M_ZERO);
3928 	if (!sassc->targets) {
3929 		panic("%s failed to alloc targets with error %d\n",
3930 		    __func__, ENOMEM);
3931 	}
3932 }
3933