xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 7029da5c36f2d3cf6bb6c81bf551229f416399e8)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT3 */
37 
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/nvme/nvme.h>
77 
78 #include <dev/mpr/mpi/mpi2_type.h>
79 #include <dev/mpr/mpi/mpi2.h>
80 #include <dev/mpr/mpi/mpi2_ioc.h>
81 #include <dev/mpr/mpi/mpi2_sas.h>
82 #include <dev/mpr/mpi/mpi2_pci.h>
83 #include <dev/mpr/mpi/mpi2_cnfg.h>
84 #include <dev/mpr/mpi/mpi2_init.h>
85 #include <dev/mpr/mpi/mpi2_tool.h>
86 #include <dev/mpr/mpr_ioctl.h>
87 #include <dev/mpr/mprvar.h>
88 #include <dev/mpr/mpr_table.h>
89 #include <dev/mpr/mpr_sas.h>
90 
91 #define MPRSAS_DISCOVERY_TIMEOUT	20
92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93 
94 /*
95  * static array to check SCSI OpCode for EEDP protection bits
96  */
97 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/*
	 * Indexed by SCSI CDB opcode, 16 opcodes per row.  Nonzero entries
	 * mark data-transfer commands for which EEDP protection-information
	 * handling applies; the value is the EEDP op to use.
	 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00-0x0F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10-0x1F */
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30-0x3F */
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50-0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60-0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70-0x7F */
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0-0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0-0xCF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0-0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0-0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xF0-0xFF */
};
118 
119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
120 
121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mprsas_poll(struct cam_sim *sim);
125 static void mprsas_scsiio_timeout(void *data);
126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
132     struct mpr_command *cm);
133 static void mprsas_async(void *callback_arg, uint32_t code,
134     struct cam_path *path, void *arg);
135 #if (__FreeBSD_version < 901503) || \
136     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
137 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
138     struct ccb_getdev *cgd);
139 static void mprsas_read_cap_done(struct cam_periph *periph,
140     union ccb *done_ccb);
141 #endif
142 static int mprsas_send_portenable(struct mpr_softc *sc);
143 static void mprsas_portenable_complete(struct mpr_softc *sc,
144     struct mpr_command *cm);
145 
146 #if __FreeBSD_version >= 900026
147 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
148 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149     uint64_t sasaddr);
150 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif //FreeBSD_version >= 900026
152 
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155     uint16_t handle)
156 {
157 	struct mprsas_target *target;
158 	int i;
159 
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
163 			return (target);
164 	}
165 
166 	return (NULL);
167 }
168 
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery.  Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
175  */
/*
 * Take one reference on in-progress discovery work.  The first reference
 * freezes the SIM queue (and holds boot, where supported) so that no I/O
 * is dispatched before discovery populates the device handles.  Pairs
 * with mprsas_startup_decrement().  No-op once startup has completed.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		/* Post-increment: act only on the 0 -> 1 transition. */
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
196 
197 void
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 {
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204 	}
205 }
206 
/*
 * Drop one reference on in-progress discovery work.  When the last
 * reference is released, startup is declared finished: the SIM queue is
 * released and either boot is released (newer CAM) or a full-topology
 * rescan is scheduled (older CAM).  Pairs with mprsas_startup_increment().
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		/* Pre-decrement: act only on the 1 -> 0 transition. */
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
232 
233 /*
234  * The firmware requires us to stop sending commands when we're doing task
235  * management.
236  * use.
237  * XXX The logic for serializing the device has been made lazy and moved to
238  * mprsas_prepare_for_tm().
239  */
240 struct mpr_command *
241 mprsas_alloc_tm(struct mpr_softc *sc)
242 {
243 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244 	struct mpr_command *tm;
245 
246 	MPR_FUNCTRACE(sc);
247 	tm = mpr_alloc_high_priority_command(sc);
248 	if (tm == NULL)
249 		return (NULL);
250 
251 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
253 	return tm;
254 }
255 
/*
 * Release a task management command allocated by mprsas_alloc_tm().
 * Undoes the per-device freeze set up for the TM (clears INRESET and
 * releases the devq) and frees the path/CCB used for that freeze, then
 * returns the command to the high-priority free list.  NULL is a no-op.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/* Only used for the unfreeze log message below. */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		/* Release devq before tearing down the path it refers to. */
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
284 
285 void
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
287 {
288 	struct mprsas_softc *sassc = sc->sassc;
289 	path_id_t pathid;
290 	target_id_t targetid;
291 	union ccb *ccb;
292 
293 	MPR_FUNCTRACE(sc);
294 	pathid = cam_sim_path(sassc->sim);
295 	if (targ == NULL)
296 		targetid = CAM_TARGET_WILDCARD;
297 	else
298 		targetid = targ - sassc->targets;
299 
300 	/*
301 	 * Allocate a CCB and schedule a rescan.
302 	 */
303 	ccb = xpt_alloc_ccb_nowait();
304 	if (ccb == NULL) {
305 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
306 		return;
307 	}
308 
309 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
310 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
312 		xpt_free_ccb(ccb);
313 		return;
314 	}
315 
316 	if (targetid == CAM_TARGET_WILDCARD)
317 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
318 	else
319 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
320 
321 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
322 	xpt_rescan(ccb);
323 }
324 
/*
 * printf-style debug logging for a command.  Prefixes the message with
 * the command's CAM path (and SCSI CDB, for SCSI I/O), or a synthetic
 * "(noperiph:...)" path when no CCB is attached, plus the SMID.  Emits
 * nothing unless one of the 'level' bits is enabled in mpr_debug.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-length sbuf over the on-stack buffer; long output truncates. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: synthesize a path-like prefix from the SIM/target. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
369 
370 static void
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
372 {
373 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 	struct mprsas_target *targ;
375 	uint16_t handle;
376 
377 	MPR_FUNCTRACE(sc);
378 
379 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
381 	targ = tm->cm_targ;
382 
383 	if (reply == NULL) {
384 		/* XXX retry the remove after the diag reset completes? */
385 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 		    "0x%04x\n", __func__, handle);
387 		mprsas_free_tm(sc, tm);
388 		return;
389 	}
390 
391 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 	    MPI2_IOCSTATUS_SUCCESS) {
393 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
395 	}
396 
397 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 	    le32toh(reply->TerminationCount));
399 	mpr_free_reply(sc, tm->cm_reply_data);
400 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
401 
402 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
403 	    targ->tid, handle);
404 
405 	/*
406 	 * Don't clear target if remove fails because things will get confusing.
407 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 	 * this target id if possible, and so we can assign the same target id
409 	 * to this device if it comes back in the future.
410 	 */
411 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 	    MPI2_IOCSTATUS_SUCCESS) {
413 		targ = tm->cm_targ;
414 		targ->handle = 0x0;
415 		targ->encl_handle = 0x0;
416 		targ->encl_level_valid = 0x0;
417 		targ->encl_level = 0x0;
418 		targ->connector_name[0] = ' ';
419 		targ->connector_name[1] = ' ';
420 		targ->connector_name[2] = ' ';
421 		targ->connector_name[3] = ' ';
422 		targ->encl_slot = 0x0;
423 		targ->exp_dev_handle = 0x0;
424 		targ->phy_num = 0x0;
425 		targ->linkrate = 0x0;
426 		targ->devinfo = 0x0;
427 		targ->flags = 0x0;
428 		targ->scsi_req_desc_type = 0;
429 	}
430 
431 	mprsas_free_tm(sc, tm);
432 }
433 
434 
435 /*
436  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437  * Otherwise Volume Delete is same as Bare Drive Removal.
438  */
439 void
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
441 {
442 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 	struct mpr_softc *sc;
444 	struct mpr_command *cm;
445 	struct mprsas_target *targ = NULL;
446 
447 	MPR_FUNCTRACE(sassc->sc);
448 	sc = sassc->sc;
449 
450 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
451 	if (targ == NULL) {
452 		/* FIXME: what is the action? */
453 		/* We don't know about this device? */
454 		mpr_dprint(sc, MPR_ERROR,
455 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456 		return;
457 	}
458 
459 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
460 
461 	cm = mprsas_alloc_tm(sc);
462 	if (cm == NULL) {
463 		mpr_dprint(sc, MPR_ERROR,
464 		    "%s: command alloc failure\n", __func__);
465 		return;
466 	}
467 
468 	mprsas_rescan_target(sc, targ);
469 
470 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 	req->DevHandle = targ->handle;
472 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
473 
474 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 		/* SAS Hard Link Reset / SATA Link Reset */
476 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477 	} else {
478 		/* PCIe Protocol Level Reset*/
479 		req->MsgFlags =
480 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
481 	}
482 
483 	cm->cm_targ = targ;
484 	cm->cm_data = NULL;
485 	cm->cm_complete = mprsas_remove_volume;
486 	cm->cm_complete_data = (void *)(uintptr_t)handle;
487 
488 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 	    __func__, targ->tid);
490 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491 
492 	mpr_map_command(sc, cm);
493 }
494 
495 /*
496  * The firmware performs debounce on the link to avoid transient link errors
497  * and false removals.  When it does decide that link has been lost and a
498  * device needs to go away, it expects that the host will perform a target reset
499  * and then an op remove.  The reset has the side-effect of aborting any
500  * outstanding requests for the device, which is required for the op-remove to
501  * succeed.  It's not clear if the host should check for the device coming back
502  * alive after the reset.
503  */
504 void
505 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
506 {
507 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
508 	struct mpr_softc *sc;
509 	struct mpr_command *tm;
510 	struct mprsas_target *targ = NULL;
511 
512 	MPR_FUNCTRACE(sassc->sc);
513 
514 	sc = sassc->sc;
515 
516 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
517 	if (targ == NULL) {
518 		/* FIXME: what is the action? */
519 		/* We don't know about this device? */
520 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
521 		    __func__, handle);
522 		return;
523 	}
524 
525 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
526 
527 	tm = mprsas_alloc_tm(sc);
528 	if (tm == NULL) {
529 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
530 		    __func__);
531 		return;
532 	}
533 
534 	mprsas_rescan_target(sc, targ);
535 
536 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
537 	memset(req, 0, sizeof(*req));
538 	req->DevHandle = htole16(targ->handle);
539 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
540 
541 	/* SAS Hard Link Reset / SATA Link Reset */
542 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
543 
544 	tm->cm_targ = targ;
545 	tm->cm_data = NULL;
546 	tm->cm_complete = mprsas_remove_device;
547 	tm->cm_complete_data = (void *)(uintptr_t)handle;
548 
549 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
550 	    __func__, targ->tid);
551 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
552 
553 	mpr_map_command(sc, tm);
554 }
555 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  Rebuilds the same command in place as a
 * SAS_IO_UNIT_CONTROL / REMOVE_DEVICE request and either sends it
 * immediately (no commands pending on the target) or parks it in
 * targ->pending_remove_tm to be sent once the last command clears.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		/* Log but continue: the op-remove is still attempted. */
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
632 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE issued by
 * mprsas_remove_device().  On IOC success, wipes the target's identity
 * fields and frees its LUN list so the slot can be reused; on failure
 * the target is left intact (see comment below).  Always frees the TM.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the per-target LUN list built up during probing. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
712 
713 static int
714 mprsas_register_events(struct mpr_softc *sc)
715 {
716 	uint8_t events[16];
717 
718 	bzero(events, 16);
719 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
720 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
721 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
722 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
723 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
724 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
725 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
726 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
727 	setbit(events, MPI2_EVENT_IR_VOLUME);
728 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
729 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
730 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
731 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
732 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
733 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
734 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
735 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
736 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
737 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
738 		}
739 	}
740 
741 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
742 	    &sc->sassc->mprsas_eh);
743 
744 	return (0);
745 }
746 
/*
 * Attach the SAS/CAM subsystem: allocate the sassc and target array,
 * create the SIM and its queue, start the event taskqueue, register the
 * SCSI bus with CAM, freeze the SIM queue until discovery completes,
 * and register async and firmware event handlers.  On any error the
 * partially-built state is torn down via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* M_WAITOK allocation; the NULL check below is belt-and-suspenders. */
	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the SIM queue to the non-high-priority request pool. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
921 
/*
 * Tear down the SAS/CAM subsystem; the inverse of mpr_attach_sas().
 * Safe to call on a partially-attached sassc (used as the error path of
 * attach), so every resource is checked before being released.  Always
 * returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Drop any discovery references so the simq/boot hold is released. */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN structures still attached to the targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
984 
985 void
986 mprsas_discovery_end(struct mprsas_softc *sassc)
987 {
988 	struct mpr_softc *sc = sassc->sc;
989 
990 	MPR_FUNCTRACE(sc);
991 
992 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
993 		callout_stop(&sassc->discovery_callout);
994 
995 	/*
996 	 * After discovery has completed, check the mapping table for any
997 	 * missing devices and update their missing counts. Only do this once
998 	 * whenever the driver is initialized so that missing counts aren't
999 	 * updated unnecessarily. Note that just because discovery has
1000 	 * completed doesn't mean that events have been processed yet. The
1001 	 * check_devices function is a callout timer that checks if ALL devices
1002 	 * are missing. If so, it will wait a little longer for events to
1003 	 * complete and keep resetting itself until some device in the mapping
1004 	 * table is not missing, meaning that event processing has started.
1005 	 */
1006 	if (sc->track_mapping_events) {
1007 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
1008 		    "completed. Check for missing devices in the mapping "
1009 		    "table.\n");
1010 		callout_reset(&sc->device_check_callout,
1011 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
1012 		    sc);
1013 	}
1014 }
1015 
/*
 * CAM action entry point for this SIM.  Dispatches on the CCB function
 * code; called with the per-softc mutex held.  CCBs handled
 * synchronously fall through to xpt_done() at the bottom; CCBs handed
 * off to helper routines (reset/scsiio/smpio) return early and are
 * completed later by those paths.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link rate, TQ). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device present at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link rate code into kbit/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate code: report speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completed asynchronously by the resetdev path. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These operations are not performed; fake success. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously once the I/O finishes. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1150 
1151 static void
1152 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1153     target_id_t target_id, lun_id_t lun_id)
1154 {
1155 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1156 	struct cam_path *path;
1157 
1158 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1159 	    ac_code, target_id, (uintmax_t)lun_id);
1160 
1161 	if (xpt_create_path(&path, NULL,
1162 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1163 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1164 		    "notification\n");
1165 		return;
1166 	}
1167 
1168 	xpt_async(ac_code, path, NULL);
1169 	xpt_free_path(path);
1170 }
1171 
/*
 * Force completion of every non-free command with a NULL reply.  Used
 * during diag reset handling, when the hardware will never deliver the
 * real completions.  Must be called with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Data for a timed-out SATA identify must be freed here,
		 * since no completion handler will run for it.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Polled waiters test the COMPLETE flag rather than sleep. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			/* Run the command's completion callback directly. */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			/* Wake the thread sleeping on this command. */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1226 
/*
 * Re-initialize SAS/CAM state after a controller diag reset: re-enter
 * startup mode (freezing the simq), announce a bus reset to CAM,
 * force-complete all outstanding commands, and invalidate every cached
 * device handle so targets are rediscovered with fresh handles.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1269 static void
1270 mprsas_tm_timeout(void *data)
1271 {
1272 	struct mpr_command *tm = data;
1273 	struct mpr_softc *sc = tm->cm_sc;
1274 
1275 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1276 
1277 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1278 	    "out\n", tm);
1279 
1280 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1281 	    ("command not inqueue\n"));
1282 
1283 	tm->cm_state = MPR_CM_STATE_BUSY;
1284 	mpr_reinit(sc);
1285 }
1286 
/*
 * Completion handler for a LUN reset task management command.  If no
 * commands remain outstanding on the LUN, recovery succeeded: announce
 * the reset to CAM and either move on to the next timed-out command or
 * free the TM.  If commands remain, the reset effectively failed and
 * recovery escalates to a target reset (or a controller reset for NVMe
 * devices without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a BDR was delivered to this target/LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Re-use this TM to abort the next timed-out cm. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1387 
/*
 * Completion handler for a target reset task management command.  If
 * the target has no outstanding commands, recovery is complete: the
 * reset is announced to CAM and the TM is freed.  Otherwise the reset
 * effectively failed and the last escalation step is a full controller
 * reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1464 
1465 #define MPR_RESET_TIMEOUT 30
1466 
/*
 * Issue a LUN reset or target reset task management command for the
 * target associated with 'tm'.  Sets up the completion handler and an
 * escalation timeout (mprsas_tm_timeout), then maps the command to the
 * hardware.  Returns the mpr_map_command() result, or -1 on an invalid
 * device handle or unexpected reset type.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	/* A zero handle means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe devices may specify their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Log enclosure location when known, to help hardware triage. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Escalate to a diag reset if the TM itself never completes. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1541 
1542 
/*
 * Completion handler for an ABORT_TASK task management command.  If no
 * timed-out commands remain for the target, recovery is done.  If the
 * aborted command is no longer at the head of the timed-out list, the
 * abort worked and the next one is aborted.  Otherwise the abort failed
 * and recovery escalates to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1623 
1624 #define MPR_ABORT_TIMEOUT 5
1625 
/*
 * Issue an ABORT_TASK task management command for the timed-out
 * command 'cm', using 'tm' as the TM frame.  Sets up the completion
 * handler and an escalation timeout, then maps the command.  Returns
 * the mpr_map_command() result, or -1 on an invalid device handle.
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	/* A zero handle means the device is gone; nothing to abort. */
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies the command the firmware should abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* NVMe devices may use a different abort timeout. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	/* Escalate to a diag reset if the TM itself never completes. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1679 
/*
 * Callout handler for a timed-out SCSI I/O.  After ruling out a
 * just-completed command, the command is queued on the target's
 * timed-out list and recovery is started (or joined, if a TM is
 * already outstanding for the target) by sending an ABORT_TASK.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		/* Completed (or already under recovery) while we checked. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with the submit time. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1768 
1769 /**
1770  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1771  *			     to SCSI Unmap.
1772  * Return 0 - for success,
1773  *	  1 - to immediately return back the command with success status to CAM
1774  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1775  *			   to FW without any translation.
1776  */
1777 static int
1778 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1779     union ccb *ccb, struct mprsas_target *targ)
1780 {
1781 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1782 	struct ccb_scsiio *csio;
1783 	struct unmap_parm_list *plist;
1784 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1785 	struct nvme_command *c;
1786 	int i, res;
1787 	uint16_t ndesc, list_len, data_length;
1788 	struct mpr_prp_page *prp_page_info;
1789 	uint64_t nvme_dsm_ranges_dma_handle;
1790 
1791 	csio = &ccb->csio;
1792 #if __FreeBSD_version >= 1100103
1793 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1794 #else
1795 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1796 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1797 		    ccb->csio.cdb_io.cdb_ptr[8]);
1798 	} else {
1799 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1800 		    ccb->csio.cdb_io.cdb_bytes[8]);
1801 	}
1802 #endif
1803 	if (!list_len) {
1804 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1805 		return -EINVAL;
1806 	}
1807 
1808 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1809 	if (!plist) {
1810 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1811 		    "save UNMAP data\n");
1812 		return -ENOMEM;
1813 	}
1814 
1815 	/* Copy SCSI unmap data to a local buffer */
1816 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1817 
1818 	/* return back the unmap command to CAM with success status,
1819 	 * if number of descripts is zero.
1820 	 */
1821 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1822 	if (!ndesc) {
1823 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1824 		    "UNMAP cmd is Zero\n");
1825 		res = 1;
1826 		goto out;
1827 	}
1828 
1829 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1830 	if (data_length > targ->MDTS) {
1831 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1832 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1833 		res = -EINVAL;
1834 		goto out;
1835 	}
1836 
1837 	prp_page_info = mpr_alloc_prp_page(sc);
1838 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1839 	    "UNMAP command.\n", __func__));
1840 
1841 	/*
1842 	 * Insert the allocated PRP page into the command's PRP page list. This
1843 	 * will be freed when the command is freed.
1844 	 */
1845 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1846 
1847 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1848 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1849 
1850 	bzero(nvme_dsm_ranges, data_length);
1851 
1852 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1853 	 * for each descriptors contained in SCSI UNMAP data.
1854 	 */
1855 	for (i = 0; i < ndesc; i++) {
1856 		nvme_dsm_ranges[i].length =
1857 		    htole32(be32toh(plist->desc[i].nlb));
1858 		nvme_dsm_ranges[i].starting_lba =
1859 		    htole64(be64toh(plist->desc[i].slba));
1860 		nvme_dsm_ranges[i].attributes = 0;
1861 	}
1862 
1863 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1864 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1865 	bzero(req, sizeof(*req));
1866 	req->DevHandle = htole16(targ->handle);
1867 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1868 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1869 	req->ErrorResponseBaseAddress.High =
1870 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1871 	req->ErrorResponseBaseAddress.Low =
1872 	    htole32(cm->cm_sense_busaddr);
1873 	req->ErrorResponseAllocationLength =
1874 	    htole16(sizeof(struct nvme_completion));
1875 	req->EncapsulatedCommandLength =
1876 	    htole16(sizeof(struct nvme_command));
1877 	req->DataLength = htole32(data_length);
1878 
1879 	/* Build NVMe DSM command */
1880 	c = (struct nvme_command *) req->NVMe_Command;
1881 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1882 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1883 	c->cdw10 = htole32(ndesc - 1);
1884 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1885 
1886 	cm->cm_length = data_length;
1887 	cm->cm_data = NULL;
1888 
1889 	cm->cm_complete = mprsas_scsiio_complete;
1890 	cm->cm_complete_data = ccb;
1891 	cm->cm_targ = targ;
1892 	cm->cm_lun = csio->ccb_h.target_lun;
1893 	cm->cm_ccb = ccb;
1894 
1895 	cm->cm_desc.Default.RequestFlags =
1896 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1897 
1898 	csio->ccb_h.qos.sim_data = sbinuptime();
1899 #if __FreeBSD_version >= 1000029
1900 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1901 	    mprsas_scsiio_timeout, cm, 0);
1902 #else //__FreeBSD_version < 1000029
1903 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1904 	    mprsas_scsiio_timeout, cm);
1905 #endif //__FreeBSD_version >= 1000029
1906 
1907 	targ->issued++;
1908 	targ->outstanding++;
1909 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1910 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1911 
1912 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1913 	    __func__, cm, ccb, targ->outstanding);
1914 
1915 	mpr_build_nvme_prp(sc, cm, req,
1916 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1917 	mpr_map_command(sc, cm);
1918 
1919 out:
1920 	free(plist, M_MPR);
1921 	return 0;
1922 }
1923 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command frame, build the MPI SCSI IO request (or divert an
 * UNMAP aimed at an NVMe target to the native encapsulated path), arm
 * the per-command timeout, and queue the command to the controller.
 *
 * Called with the softc mutex held.  Completion is reported
 * asynchronously through mprsas_scsiio_complete().
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A device handle of zero means no device is mapped at this ID. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members may not be addressed with plain SCSI I/O. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Driver shutdown in progress: fail new I/O immediately. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of command frames, or a diag reset just started: freeze the
	 * SIM queue and ask CAM to requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVMe devices, issue the UNMAP command directly to the drive by
	 * constructing the equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* NOTE(review): a negative rc falls through and sends the
		 * UNMAP as a regular SCSI IO request below - confirm that is
		 * the intended fallback for an NVMe target. */
	}

	/* Build the MPI SCSI IO request frame in place. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* Flag the additional CDB length for 32-byte CDBs. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM put it (pointer or inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/* NOTE(review): IoFlags was already assigned above; this repeated
	 * store is redundant but harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the addressed LUN; lun is NULL if there is no match
		 * (SLIST_FOREACH terminates with a NULL iterator). */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data mapping is driven from the CCB itself. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;	/* SGL area left in the frame */
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Record submission time and arm the I/O timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2241 
/**
 * mpr_sc_failed_io_info - log details of a non-successful SCSI_IO request
 */
2245 static void
2246 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2247     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2248 {
2249 	u32 response_info;
2250 	u8 *response_bytes;
2251 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2252 	    MPI2_IOCSTATUS_MASK;
2253 	u8 scsi_state = mpi_reply->SCSIState;
2254 	u8 scsi_status = mpi_reply->SCSIStatus;
2255 	char *desc_ioc_state = NULL;
2256 	char *desc_scsi_status = NULL;
2257 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2258 
2259 	if (log_info == 0x31170000)
2260 		return;
2261 
2262 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2263 	     ioc_status);
2264 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2265 	    scsi_status);
2266 
2267 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2268 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2269 	if (targ->encl_level_valid) {
2270 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2271 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2272 		    targ->connector_name);
2273 	}
2274 
2275 	/*
2276 	 * We can add more detail about underflow data here
2277 	 * TO-DO
2278 	 */
2279 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2280 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2281 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2282 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2283 
2284 	if (sc->mpr_debug & MPR_XINFO &&
2285 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2286 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2287 		scsi_sense_print(csio);
2288 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2289 	}
2290 
2291 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2292 		response_info = le32toh(mpi_reply->ResponseInfo);
2293 		response_bytes = (u8 *)&response_info;
2294 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2295 		    response_bytes[0],
2296 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2297 		    response_bytes[0]));
2298 	}
2299 }
2300 
2301 /** mprsas_nvme_trans_status_code
2302  *
2303  * Convert Native NVMe command error status to
2304  * equivalent SCSI error status.
2305  *
2306  * Returns appropriate scsi_status
2307  */
2308 static u8
2309 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2310     struct mpr_command *cm)
2311 {
2312 	u8 status = MPI2_SCSI_STATUS_GOOD;
2313 	int skey, asc, ascq;
2314 	union ccb *ccb = cm->cm_complete_data;
2315 	int returned_sense_len;
2316 	uint8_t sct, sc;
2317 
2318 	sct = NVME_STATUS_GET_SCT(nvme_status);
2319 	sc = NVME_STATUS_GET_SC(nvme_status);
2320 
2321 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2322 	skey = SSD_KEY_ILLEGAL_REQUEST;
2323 	asc = SCSI_ASC_NO_SENSE;
2324 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2325 
2326 	switch (sct) {
2327 	case NVME_SCT_GENERIC:
2328 		switch (sc) {
2329 		case NVME_SC_SUCCESS:
2330 			status = MPI2_SCSI_STATUS_GOOD;
2331 			skey = SSD_KEY_NO_SENSE;
2332 			asc = SCSI_ASC_NO_SENSE;
2333 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2334 			break;
2335 		case NVME_SC_INVALID_OPCODE:
2336 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2337 			skey = SSD_KEY_ILLEGAL_REQUEST;
2338 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2339 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2340 			break;
2341 		case NVME_SC_INVALID_FIELD:
2342 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2343 			skey = SSD_KEY_ILLEGAL_REQUEST;
2344 			asc = SCSI_ASC_INVALID_CDB;
2345 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2346 			break;
2347 		case NVME_SC_DATA_TRANSFER_ERROR:
2348 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2349 			skey = SSD_KEY_MEDIUM_ERROR;
2350 			asc = SCSI_ASC_NO_SENSE;
2351 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2352 			break;
2353 		case NVME_SC_ABORTED_POWER_LOSS:
2354 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2355 			skey = SSD_KEY_ABORTED_COMMAND;
2356 			asc = SCSI_ASC_WARNING;
2357 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2358 			break;
2359 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2360 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2361 			skey = SSD_KEY_HARDWARE_ERROR;
2362 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2363 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2364 			break;
2365 		case NVME_SC_ABORTED_BY_REQUEST:
2366 		case NVME_SC_ABORTED_SQ_DELETION:
2367 		case NVME_SC_ABORTED_FAILED_FUSED:
2368 		case NVME_SC_ABORTED_MISSING_FUSED:
2369 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2370 			skey = SSD_KEY_ABORTED_COMMAND;
2371 			asc = SCSI_ASC_NO_SENSE;
2372 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2373 			break;
2374 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2375 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2376 			skey = SSD_KEY_ILLEGAL_REQUEST;
2377 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2378 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2379 			break;
2380 		case NVME_SC_LBA_OUT_OF_RANGE:
2381 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2382 			skey = SSD_KEY_ILLEGAL_REQUEST;
2383 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2384 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2385 			break;
2386 		case NVME_SC_CAPACITY_EXCEEDED:
2387 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2388 			skey = SSD_KEY_MEDIUM_ERROR;
2389 			asc = SCSI_ASC_NO_SENSE;
2390 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2391 			break;
2392 		case NVME_SC_NAMESPACE_NOT_READY:
2393 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2394 			skey = SSD_KEY_NOT_READY;
2395 			asc = SCSI_ASC_LUN_NOT_READY;
2396 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2397 			break;
2398 		}
2399 		break;
2400 	case NVME_SCT_COMMAND_SPECIFIC:
2401 		switch (sc) {
2402 		case NVME_SC_INVALID_FORMAT:
2403 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2404 			skey = SSD_KEY_ILLEGAL_REQUEST;
2405 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2406 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2407 			break;
2408 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2409 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2410 			skey = SSD_KEY_ILLEGAL_REQUEST;
2411 			asc = SCSI_ASC_INVALID_CDB;
2412 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2413 			break;
2414 		}
2415 		break;
2416 	case NVME_SCT_MEDIA_ERROR:
2417 		switch (sc) {
2418 		case NVME_SC_WRITE_FAULTS:
2419 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2420 			skey = SSD_KEY_MEDIUM_ERROR;
2421 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2422 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2423 			break;
2424 		case NVME_SC_UNRECOVERED_READ_ERROR:
2425 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2426 			skey = SSD_KEY_MEDIUM_ERROR;
2427 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2428 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2429 			break;
2430 		case NVME_SC_GUARD_CHECK_ERROR:
2431 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2432 			skey = SSD_KEY_MEDIUM_ERROR;
2433 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2434 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2435 			break;
2436 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2437 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2438 			skey = SSD_KEY_MEDIUM_ERROR;
2439 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2440 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2441 			break;
2442 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2443 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2444 			skey = SSD_KEY_MEDIUM_ERROR;
2445 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2446 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2447 			break;
2448 		case NVME_SC_COMPARE_FAILURE:
2449 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2450 			skey = SSD_KEY_MISCOMPARE;
2451 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2452 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2453 			break;
2454 		case NVME_SC_ACCESS_DENIED:
2455 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2456 			skey = SSD_KEY_ILLEGAL_REQUEST;
2457 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2458 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2459 			break;
2460 		}
2461 		break;
2462 	}
2463 
2464 	returned_sense_len = sizeof(struct scsi_sense_data);
2465 	if (returned_sense_len < ccb->csio.sense_len)
2466 		ccb->csio.sense_resid = ccb->csio.sense_len -
2467 		    returned_sense_len;
2468 	else
2469 		ccb->csio.sense_resid = 0;
2470 
2471 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2472 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2473 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2474 
2475 	return status;
2476 }
2477 
2478 /** mprsas_complete_nvme_unmap
2479  *
2480  * Complete native NVMe command issued using NVMe Encapsulated
2481  * Request Message.
2482  */
2483 static u8
2484 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2485 {
2486 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2487 	struct nvme_completion *nvme_completion = NULL;
2488 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2489 
2490 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2491 	if (le16toh(mpi_reply->ErrorResponseCount)){
2492 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2493 		scsi_status = mprsas_nvme_trans_status_code(
2494 		    nvme_completion->status, cm);
2495 	}
2496 	return scsi_status;
2497 }
2498 
2499 static void
2500 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2501 {
2502 	MPI2_SCSI_IO_REPLY *rep;
2503 	union ccb *ccb;
2504 	struct ccb_scsiio *csio;
2505 	struct mprsas_softc *sassc;
2506 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2507 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2508 	int dir = 0, i;
2509 	u16 alloc_len;
2510 	struct mprsas_target *target;
2511 	target_id_t target_id;
2512 
2513 	MPR_FUNCTRACE(sc);
2514 	mpr_dprint(sc, MPR_TRACE,
2515 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2516 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2517 	    cm->cm_targ->outstanding);
2518 
2519 	callout_stop(&cm->cm_callout);
2520 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2521 
2522 	sassc = sc->sassc;
2523 	ccb = cm->cm_complete_data;
2524 	csio = &ccb->csio;
2525 	target_id = csio->ccb_h.target_id;
2526 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2527 	/*
2528 	 * XXX KDM if the chain allocation fails, does it matter if we do
2529 	 * the sync and unload here?  It is simpler to do it in every case,
2530 	 * assuming it doesn't cause problems.
2531 	 */
2532 	if (cm->cm_data != NULL) {
2533 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2534 			dir = BUS_DMASYNC_POSTREAD;
2535 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2536 			dir = BUS_DMASYNC_POSTWRITE;
2537 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2538 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2539 	}
2540 
2541 	cm->cm_targ->completed++;
2542 	cm->cm_targ->outstanding--;
2543 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2544 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2545 
2546 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2547 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2548 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2549 		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2550 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2551 		if (cm->cm_reply != NULL)
2552 			mprsas_log_command(cm, MPR_RECOVERY,
2553 			    "completed timedout cm %p ccb %p during recovery "
2554 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2555 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2556 			    rep->SCSIState, le32toh(rep->TransferCount));
2557 		else
2558 			mprsas_log_command(cm, MPR_RECOVERY,
2559 			    "completed timedout cm %p ccb %p during recovery\n",
2560 			    cm, cm->cm_ccb);
2561 	} else if (cm->cm_targ->tm != NULL) {
2562 		if (cm->cm_reply != NULL)
2563 			mprsas_log_command(cm, MPR_RECOVERY,
2564 			    "completed cm %p ccb %p during recovery "
2565 			    "ioc %x scsi %x state %x xfer %u\n",
2566 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2567 			    rep->SCSIStatus, rep->SCSIState,
2568 			    le32toh(rep->TransferCount));
2569 		else
2570 			mprsas_log_command(cm, MPR_RECOVERY,
2571 			    "completed cm %p ccb %p during recovery\n",
2572 			    cm, cm->cm_ccb);
2573 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2574 		mprsas_log_command(cm, MPR_RECOVERY,
2575 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2576 	}
2577 
2578 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2579 		/*
2580 		 * We ran into an error after we tried to map the command,
2581 		 * so we're getting a callback without queueing the command
2582 		 * to the hardware.  So we set the status here, and it will
2583 		 * be retained below.  We'll go through the "fast path",
2584 		 * because there can be no reply when we haven't actually
2585 		 * gone out to the hardware.
2586 		 */
2587 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2588 
2589 		/*
2590 		 * Currently the only error included in the mask is
2591 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2592 		 * chain frames.  We need to freeze the queue until we get
2593 		 * a command that completed without this error, which will
2594 		 * hopefully have some chain frames attached that we can
2595 		 * use.  If we wanted to get smarter about it, we would
2596 		 * only unfreeze the queue in this condition when we're
2597 		 * sure that we're getting some chain frames back.  That's
2598 		 * probably unnecessary.
2599 		 */
2600 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2601 			xpt_freeze_simq(sassc->sim, 1);
2602 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2603 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2604 			    "freezing SIM queue\n");
2605 		}
2606 	}
2607 
2608 	/*
2609 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2610 	 * flag, and use it in a few places in the rest of this function for
2611 	 * convenience. Use the macro if available.
2612 	 */
2613 #if __FreeBSD_version >= 1100103
2614 	scsi_cdb = scsiio_cdb_ptr(csio);
2615 #else
2616 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2617 		scsi_cdb = csio->cdb_io.cdb_ptr;
2618 	else
2619 		scsi_cdb = csio->cdb_io.cdb_bytes;
2620 #endif
2621 
2622 	/*
2623 	 * If this is a Start Stop Unit command and it was issued by the driver
2624 	 * during shutdown, decrement the refcount to account for all of the
2625 	 * commands that were sent.  All SSU commands should be completed before
2626 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2627 	 * is TRUE.
2628 	 */
2629 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2630 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2631 		sc->SSU_refcount--;
2632 	}
2633 
2634 	/* Take the fast path to completion */
2635 	if (cm->cm_reply == NULL) {
2636 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2637 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2638 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2639 			else {
2640 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2641 				csio->scsi_status = SCSI_STATUS_OK;
2642 			}
2643 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2644 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2645 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2646 				mpr_dprint(sc, MPR_XINFO,
2647 				    "Unfreezing SIM queue\n");
2648 			}
2649 		}
2650 
2651 		/*
2652 		 * There are two scenarios where the status won't be
2653 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2654 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2655 		 */
2656 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2657 			/*
2658 			 * Freeze the dev queue so that commands are
2659 			 * executed in the correct order after error
2660 			 * recovery.
2661 			 */
2662 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2663 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2664 		}
2665 		mpr_free_command(sc, cm);
2666 		xpt_done(ccb);
2667 		return;
2668 	}
2669 
2670 	target = &sassc->targets[target_id];
2671 	if (scsi_cdb[0] == UNMAP &&
2672 	    target->is_nvme &&
2673 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2674 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2675 		csio->scsi_status = rep->SCSIStatus;
2676 	}
2677 
2678 	mprsas_log_command(cm, MPR_XINFO,
2679 	    "ioc %x scsi %x state %x xfer %u\n",
2680 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2681 	    le32toh(rep->TransferCount));
2682 
2683 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2684 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2685 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2686 		/* FALLTHROUGH */
2687 	case MPI2_IOCSTATUS_SUCCESS:
2688 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2689 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2690 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2691 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2692 
2693 		/* Completion failed at the transport level. */
2694 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2695 		    MPI2_SCSI_STATE_TERMINATED)) {
2696 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2697 			break;
2698 		}
2699 
2700 		/* In a modern packetized environment, an autosense failure
2701 		 * implies that there's not much else that can be done to
2702 		 * recover the command.
2703 		 */
2704 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2705 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2706 			break;
2707 		}
2708 
2709 		/*
2710 		 * CAM doesn't care about SAS Response Info data, but if this is
2711 		 * the state check if TLR should be done.  If not, clear the
2712 		 * TLR_bits for the target.
2713 		 */
2714 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2715 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2716 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2717 			sc->mapping_table[target_id].TLR_bits =
2718 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2719 		}
2720 
2721 		/*
2722 		 * Intentionally override the normal SCSI status reporting
2723 		 * for these two cases.  These are likely to happen in a
2724 		 * multi-initiator environment, and we want to make sure that
2725 		 * CAM retries these commands rather than fail them.
2726 		 */
2727 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2728 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2729 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2730 			break;
2731 		}
2732 
2733 		/* Handle normal status and sense */
2734 		csio->scsi_status = rep->SCSIStatus;
2735 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2736 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2737 		else
2738 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2739 
2740 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2741 			int sense_len, returned_sense_len;
2742 
2743 			returned_sense_len = min(le32toh(rep->SenseCount),
2744 			    sizeof(struct scsi_sense_data));
2745 			if (returned_sense_len < csio->sense_len)
2746 				csio->sense_resid = csio->sense_len -
2747 				    returned_sense_len;
2748 			else
2749 				csio->sense_resid = 0;
2750 
2751 			sense_len = min(returned_sense_len,
2752 			    csio->sense_len - csio->sense_resid);
2753 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2754 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2755 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2756 		}
2757 
2758 		/*
2759 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2760 		 * and it's page code 0 (Supported Page List), and there is
2761 		 * inquiry data, and this is for a sequential access device, and
2762 		 * the device is an SSP target, and TLR is supported by the
2763 		 * controller, turn the TLR_bits value ON if page 0x90 is
2764 		 * supported.
2765 		 */
2766 		if ((scsi_cdb[0] == INQUIRY) &&
2767 		    (scsi_cdb[1] & SI_EVPD) &&
2768 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2769 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2770 		    (csio->data_ptr != NULL) &&
2771 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2772 		    (sc->control_TLR) &&
2773 		    (sc->mapping_table[target_id].device_info &
2774 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2775 			vpd_list = (struct scsi_vpd_supported_page_list *)
2776 			    csio->data_ptr;
2777 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2778 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2779 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2780 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2781 			alloc_len -= csio->resid;
2782 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2783 				if (vpd_list->list[i] == 0x90) {
2784 					*TLR_bits = TLR_on;
2785 					break;
2786 				}
2787 			}
2788 		}
2789 
2790 		/*
2791 		 * If this is a SATA direct-access end device, mark it so that
2792 		 * a SCSI StartStopUnit command will be sent to it when the
2793 		 * driver is being shutdown.
2794 		 */
2795 		if ((scsi_cdb[0] == INQUIRY) &&
2796 		    (csio->data_ptr != NULL) &&
2797 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2798 		    (sc->mapping_table[target_id].device_info &
2799 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2800 		    ((sc->mapping_table[target_id].device_info &
2801 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2802 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2803 			target = &sassc->targets[target_id];
2804 			target->supports_SSU = TRUE;
2805 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2806 			    target_id);
2807 		}
2808 		break;
2809 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2810 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2811 		/*
2812 		 * If devinfo is 0 this will be a volume.  In that case don't
2813 		 * tell CAM that the volume is not there.  We want volumes to
2814 		 * be enumerated until they are deleted/removed, not just
2815 		 * failed.
2816 		 */
2817 		if (cm->cm_targ->devinfo == 0)
2818 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2819 		else
2820 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2821 		break;
2822 	case MPI2_IOCSTATUS_INVALID_SGL:
2823 		mpr_print_scsiio_cmd(sc, cm);
2824 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2825 		break;
2826 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2827 		/*
2828 		 * This is one of the responses that comes back when an I/O
2829 		 * has been aborted.  If it is because of a timeout that we
2830 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2831 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2832 		 * command is the same (it gets retried, subject to the
2833 		 * retry counter), the only difference is what gets printed
2834 		 * on the console.
2835 		 */
2836 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2837 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2838 		else
2839 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2840 		break;
2841 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2842 		/* resid is ignored for this condition */
2843 		csio->resid = 0;
2844 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2845 		break;
2846 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2847 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2848 		/*
2849 		 * These can sometimes be transient transport-related
2850 		 * errors, and sometimes persistent drive-related errors.
2851 		 * We used to retry these without decrementing the retry
2852 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2853 		 * we hit a persistent drive problem that returns one of
2854 		 * these error codes, we would retry indefinitely.  So,
2855 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2856 		 * count and avoid infinite retries.  We're taking the
2857 		 * potential risk of flagging false failures in the event
2858 		 * of a topology-related error (e.g. a SAS expander problem
2859 		 * causes a command addressed to a drive to fail), but
2860 		 * avoiding getting into an infinite retry loop. However,
2861 		 * if we get them while were moving a device, we should
2862 		 * fail the request as 'not there' because the device
2863 		 * is effectively gone.
2864 		 */
2865 		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2866 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2867 		else
2868 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2869 		mpr_dprint(sc, MPR_INFO,
2870 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2871 		    mpr_describe_table(mpr_iocstatus_string,
2872 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2873 		    target_id, cm->cm_desc.Default.SMID,
2874 		    le32toh(rep->IOCLogInfo),
2875 		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2876 		mpr_dprint(sc, MPR_XINFO,
2877 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2878 		    rep->SCSIStatus, rep->SCSIState,
2879 		    le32toh(rep->TransferCount));
2880 		break;
2881 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2882 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2883 	case MPI2_IOCSTATUS_INVALID_VPID:
2884 	case MPI2_IOCSTATUS_INVALID_FIELD:
2885 	case MPI2_IOCSTATUS_INVALID_STATE:
2886 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2887 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2888 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2889 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2890 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2891 	default:
2892 		mprsas_log_command(cm, MPR_XINFO,
2893 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2894 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2895 		    rep->SCSIStatus, rep->SCSIState,
2896 		    le32toh(rep->TransferCount));
2897 		csio->resid = cm->cm_length;
2898 
2899 		if (scsi_cdb[0] == UNMAP &&
2900 		    target->is_nvme &&
2901 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2902 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2903 		else
2904 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2905 
2906 		break;
2907 	}
2908 
2909 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2910 
2911 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2912 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2913 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2914 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2915 		    "queue\n");
2916 	}
2917 
2918 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2919 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2920 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2921 	}
2922 
2923 	/*
2924 	 * Check to see if we're removing the device. If so, and this is the
2925 	 * last command on the queue, proceed with the deferred removal of the
2926 	 * device.  Note, for removing a volume, this won't trigger because
2927 	 * pending_remove_tm will be NULL.
2928 	 */
2929 	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2930 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2931 		    cm->cm_targ->pending_remove_tm != NULL) {
2932 			mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
2933 			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2934 			cm->cm_targ->pending_remove_tm = NULL;
2935 		}
2936 	}
2937 
2938 	mpr_free_command(sc, cm);
2939 	xpt_done(ccb);
2940 }
2941 
2942 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request issued by
 * mprsas_send_smpcmd().  Translates the firmware reply into a CAM status
 * on the CCB, then tears down the DMA mapping, frees the command, and
 * completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* A NULL reply means the request was terminated before completion. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request, for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame holds the SMP function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
3005 
/*
 * Build and issue an SMP passthrough request to the device at 'sasaddr'.
 * The request and response buffers come from the XPT_SMP_IO CCB; they are
 * mapped as a two-element uio (one iovec for the request, one for the
 * response) so that a single mpr_map_command() call handles both DMA
 * directions.  Completion is handled by mprsas_smpio_complete().
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Locate the virtual addresses of the request and response buffers.
	 * Only virtual addressing is supported; physical addresses and
	 * multi-segment S/G lists are rejected.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Fill out the SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3235 }
3236 
/*
 * Handle an XPT_SMP_IO CCB from CAM: resolve the SAS address of the SMP
 * target (either the addressed device itself, if it contains an embedded
 * SMP target, or its parent expander) and hand the request off to
 * mprsas_send_smpcmd().  On any resolution failure the CCB is completed
 * immediately with an appropriate error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe: look the parent up by its device handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe: parent info is cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3364 #endif //__FreeBSD_version >= 900026
3365 
3366 static void
3367 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3368 {
3369 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3370 	struct mpr_softc *sc;
3371 	struct mpr_command *tm;
3372 	struct mprsas_target *targ;
3373 
3374 	MPR_FUNCTRACE(sassc->sc);
3375 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3376 
3377 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3378 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3379 	sc = sassc->sc;
3380 	tm = mprsas_alloc_tm(sc);
3381 	if (tm == NULL) {
3382 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3383 		    "mprsas_action_resetdev\n");
3384 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3385 		xpt_done(ccb);
3386 		return;
3387 	}
3388 
3389 	targ = &sassc->targets[ccb->ccb_h.target_id];
3390 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3391 	req->DevHandle = htole16(targ->handle);
3392 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3393 
3394 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3395 		/* SAS Hard Link Reset / SATA Link Reset */
3396 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3397 	} else {
3398 		/* PCIe Protocol Level Reset*/
3399 		req->MsgFlags =
3400 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3401 	}
3402 
3403 	tm->cm_data = NULL;
3404 	tm->cm_complete = mprsas_resetdev_complete;
3405 	tm->cm_complete_data = ccb;
3406 
3407 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3408 	    __func__, targ->tid);
3409 	tm->cm_targ = targ;
3410 
3411 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3412 	mpr_map_command(sc, tm);
3413 }
3414 
3415 static void
3416 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3417 {
3418 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3419 	union ccb *ccb;
3420 
3421 	MPR_FUNCTRACE(sc);
3422 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3423 
3424 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3425 	ccb = tm->cm_complete_data;
3426 
3427 	/*
3428 	 * Currently there should be no way we can hit this case.  It only
3429 	 * happens when we have a failure to allocate chain frames, and
3430 	 * task management commands don't have S/G lists.
3431 	 */
3432 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3433 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3434 
3435 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3436 
3437 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3438 		    "handle %#04x! This should not happen!\n", __func__,
3439 		    tm->cm_flags, req->DevHandle);
3440 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3441 		goto bailout;
3442 	}
3443 
3444 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3445 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3446 
3447 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3448 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3449 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3450 		    CAM_LUN_WILDCARD);
3451 	}
3452 	else
3453 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3454 
3455 bailout:
3456 
3457 	mprsas_free_tm(sc, tm);
3458 	xpt_done(ccb);
3459 }
3460 
3461 static void
3462 mprsas_poll(struct cam_sim *sim)
3463 {
3464 	struct mprsas_softc *sassc;
3465 
3466 	sassc = cam_sim_softc(sim);
3467 
3468 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3469 		/* frequent debug messages during a panic just slow
3470 		 * everything down too much.
3471 		 */
3472 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3473 		    __func__);
3474 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3475 	}
3476 
3477 	mpr_intr_locked(sassc->sc);
3478 }
3479 
/*
 * CAM asynchronous event callback.  Two events are of interest:
 *
 * AC_ADVINFO_CHANGED (on FreeBSD versions with advanced-info support):
 * re-read the long READ CAPACITY data for the affected LUN and cache
 * whether it is formatted for EEDP (protection information) along with
 * its block size.
 *
 * AC_FOUND_DEVICE (on older versions without advinfo): probe the new
 * device for EEDP support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path; create one if missing. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data for this LUN via
		 * an XPT_DEV_ADVINFO CCB issued inline on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Cache the LUN's EEDP state: only protection types 1 and 3
		 * are treated as EEDP-formatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3620 
3621 #if (__FreeBSD_version < 901503) || \
3622     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3623 static void
3624 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3625     struct ccb_getdev *cgd)
3626 {
3627 	struct mprsas_softc *sassc = sc->sassc;
3628 	struct ccb_scsiio *csio;
3629 	struct scsi_read_capacity_16 *scsi_cmd;
3630 	struct scsi_read_capacity_eedp *rcap_buf;
3631 	path_id_t pathid;
3632 	target_id_t targetid;
3633 	lun_id_t lunid;
3634 	union ccb *ccb;
3635 	struct cam_path *local_path;
3636 	struct mprsas_target *target;
3637 	struct mprsas_lun *lun;
3638 	uint8_t	found_lun;
3639 	char path_str[64];
3640 
3641 	pathid = cam_sim_path(sassc->sim);
3642 	targetid = xpt_path_target_id(path);
3643 	lunid = xpt_path_lun_id(path);
3644 
3645 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3646 	    "mprsas_check_eedp\n", targetid));
3647 	target = &sassc->targets[targetid];
3648 	if (target->handle == 0x0)
3649 		return;
3650 
3651 	/*
3652 	 * Determine if the device is EEDP capable.
3653 	 *
3654 	 * If this flag is set in the inquiry data, the device supports
3655 	 * protection information, and must support the 16 byte read capacity
3656 	 * command, otherwise continue without sending read cap 16.
3657 	 */
3658 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3659 		return;
3660 
3661 	/*
3662 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3663 	 * the LUN is formatted for EEDP support.
3664 	 */
3665 	ccb = xpt_alloc_ccb_nowait();
3666 	if (ccb == NULL) {
3667 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3668 		    "support.\n");
3669 		return;
3670 	}
3671 
3672 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3673 	    CAM_REQ_CMP) {
3674 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3675 		    "support.\n");
3676 		xpt_free_ccb(ccb);
3677 		return;
3678 	}
3679 
3680 	/*
3681 	 * If LUN is already in list, don't create a new one.
3682 	 */
3683 	found_lun = FALSE;
3684 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3685 		if (lun->lun_id == lunid) {
3686 			found_lun = TRUE;
3687 			break;
3688 		}
3689 	}
3690 	if (!found_lun) {
3691 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3692 		    M_NOWAIT | M_ZERO);
3693 		if (lun == NULL) {
3694 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3695 			    "EEDP support.\n");
3696 			xpt_free_path(local_path);
3697 			xpt_free_ccb(ccb);
3698 			return;
3699 		}
3700 		lun->lun_id = lunid;
3701 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3702 	}
3703 
3704 	xpt_path_string(local_path, path_str, sizeof(path_str));
3705 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3706 	    path_str, target->handle);
3707 
3708 	/*
3709 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3710 	 * mprsas_read_cap_done function will load the read cap info into the
3711 	 * LUN struct.
3712 	 */
3713 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3714 	    M_NOWAIT | M_ZERO);
3715 	if (rcap_buf == NULL) {
3716 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3717 		    "buffer for EEDP support.\n");
3718 		xpt_free_path(ccb->ccb_h.path);
3719 		xpt_free_ccb(ccb);
3720 		return;
3721 	}
3722 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3723 	csio = &ccb->csio;
3724 	csio->ccb_h.func_code = XPT_SCSI_IO;
3725 	csio->ccb_h.flags = CAM_DIR_IN;
3726 	csio->ccb_h.retry_count = 4;
3727 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3728 	csio->ccb_h.timeout = 60000;
3729 	csio->data_ptr = (uint8_t *)rcap_buf;
3730 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3731 	csio->sense_len = MPR_SENSE_LEN;
3732 	csio->cdb_len = sizeof(*scsi_cmd);
3733 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3734 
3735 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3736 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3737 	scsi_cmd->opcode = 0x9E;
3738 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3739 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3740 
3741 	ccb->ccb_h.ppriv_ptr1 = sassc;
3742 	xpt_action(ccb);
3743 }
3744 
/*
 * Completion handler for the internally-generated READ CAPACITY(16)
 * command issued by mprsas_check_eedp().  Records whether the LUN is
 * formatted with protection information, then frees the data buffer,
 * path, and CCB allocated by the submit path.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself when a SCSI command
	 * was generated internally by the driver, since such a command
	 * never goes back through cam_periph.  This is currently the only
	 * place where the driver issues a SCSI command internally; if more
	 * are added in the future, they must release the devq the same
	 * way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	/* Buffer allocated in mprsas_check_eedp(); freed at the bottom. */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/*
		 * Bit 0 of the protect byte is PROT_EN: the LUN was
		 * formatted with protection information (SBC READ
		 * CAPACITY(16) parameter data).
		 */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3813 #endif /* (__FreeBSD_version < 901503) || \
3814           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3815 
3816 /*
3817  * Set the INRESET flag for this target so that no I/O will be sent to
3818  * the target until the reset has completed.  If an I/O request does
3819  * happen, the devq will be frozen.  The CCB holds the path which is
3820  * used to release the devq.  The devq is released and the CCB is freed
3821  * when the TM completes.
3822  */
3823 void
3824 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3825     struct mprsas_target *target, lun_id_t lun_id)
3826 {
3827 	union ccb *ccb;
3828 	path_id_t path_id;
3829 
3830 	ccb = xpt_alloc_ccb_nowait();
3831 	if (ccb) {
3832 		path_id = cam_sim_path(sc->sassc->sim);
3833 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3834 		    target->tid, lun_id) != CAM_REQ_CMP) {
3835 			xpt_free_ccb(ccb);
3836 		} else {
3837 			tm->cm_ccb = ccb;
3838 			tm->cm_targ = target;
3839 			target->flags |= MPRSAS_TARGET_INRESET;
3840 		}
3841 	}
3842 }
3843 
3844 int
3845 mprsas_startup(struct mpr_softc *sc)
3846 {
3847 	/*
3848 	 * Send the port enable message and set the wait_for_port_enable flag.
3849 	 * This flag helps to keep the simq frozen until all discovery events
3850 	 * are processed.
3851 	 */
3852 	sc->wait_for_port_enable = 1;
3853 	mprsas_send_portenable(sc);
3854 	return (0);
3855 }
3856 
3857 static int
3858 mprsas_send_portenable(struct mpr_softc *sc)
3859 {
3860 	MPI2_PORT_ENABLE_REQUEST *request;
3861 	struct mpr_command *cm;
3862 
3863 	MPR_FUNCTRACE(sc);
3864 
3865 	if ((cm = mpr_alloc_command(sc)) == NULL)
3866 		return (EBUSY);
3867 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3868 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3869 	request->MsgFlags = 0;
3870 	request->VP_ID = 0;
3871 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3872 	cm->cm_complete = mprsas_portenable_complete;
3873 	cm->cm_data = NULL;
3874 	cm->cm_sge = NULL;
3875 
3876 	mpr_map_command(sc, cm);
3877 	mpr_dprint(sc, MPR_XINFO,
3878 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3879 	    cm, cm->cm_req, cm->cm_complete);
3880 	return (0);
3881 }
3882 
3883 static void
3884 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3885 {
3886 	MPI2_PORT_ENABLE_REPLY *reply;
3887 	struct mprsas_softc *sassc;
3888 
3889 	MPR_FUNCTRACE(sc);
3890 	sassc = sc->sassc;
3891 
3892 	/*
3893 	 * Currently there should be no way we can hit this case.  It only
3894 	 * happens when we have a failure to allocate chain frames, and
3895 	 * port enable commands don't have S/G lists.
3896 	 */
3897 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3898 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3899 		    "This should not happen!\n", __func__, cm->cm_flags);
3900 	}
3901 
3902 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3903 	if (reply == NULL)
3904 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3905 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3906 	    MPI2_IOCSTATUS_SUCCESS)
3907 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3908 
3909 	mpr_free_command(sc, cm);
3910 	/*
3911 	 * Done waiting for port enable to complete.  Decrement the refcount.
3912 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3913 	 * take place.
3914 	 */
3915 	sc->wait_for_port_enable = 0;
3916 	sc->port_enable_complete = 1;
3917 	wakeup(&sc->port_enable_complete);
3918 	mprsas_startup_decrement(sassc);
3919 }
3920 
3921 int
3922 mprsas_check_id(struct mprsas_softc *sassc, int id)
3923 {
3924 	struct mpr_softc *sc = sassc->sc;
3925 	char *ids;
3926 	char *name;
3927 
3928 	ids = &sc->exclude_ids[0];
3929 	while((name = strsep(&ids, ",")) != NULL) {
3930 		if (name[0] == '\0')
3931 			continue;
3932 		if (strtol(name, NULL, 0) == (long)id)
3933 			return (1);
3934 	}
3935 
3936 	return (0);
3937 }
3938 
3939 void
3940 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3941 {
3942 	struct mprsas_softc *sassc;
3943 	struct mprsas_lun *lun, *lun_tmp;
3944 	struct mprsas_target *targ;
3945 	int i;
3946 
3947 	sassc = sc->sassc;
3948 	/*
3949 	 * The number of targets is based on IOC Facts, so free all of
3950 	 * the allocated LUNs for each target and then the target buffer
3951 	 * itself.
3952 	 */
3953 	for (i=0; i< maxtargets; i++) {
3954 		targ = &sassc->targets[i];
3955 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3956 			free(lun, M_MPR);
3957 		}
3958 	}
3959 	free(sassc->targets, M_MPR);
3960 
3961 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3962 	    M_MPR, M_WAITOK|M_ZERO);
3963 	if (!sassc->targets) {
3964 		panic("%s failed to alloc targets with error %d\n",
3965 		    __func__, ENOMEM);
3966 	}
3967 }
3968