xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 3ea909cc7605dfabe34e3b01b07c038cda0afb86)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
93 /*
94  * static array to check SCSI OpCode for EEDP protection bits
95  */
96 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * op_code_prot[opcode] is non-zero when the SCSI opcode can carry
 * protection information (EEDP); the value is the EEDP operation flag
 * to use (PRO_R for reads, PRO_W for writes/write-same, PRO_V for
 * verifies -- see the macros above).  Each row covers 16 opcodes; the
 * non-zero rows cover the 10-, 16-, and 12-byte READ/WRITE/
 * WRITE-AND-VERIFY/VERIFY CDBs plus WRITE SAME (0x41, 0x93).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x20 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x40 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x80 */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x90 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0xa0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0 */
};
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
175 void
176 mprsas_startup_increment(struct mprsas_softc *sassc)
177 {
178 	MPR_FUNCTRACE(sassc->sc);
179 
180 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 		if (sassc->startup_refcount++ == 0) {
182 			/* just starting, freeze the simq */
183 			mpr_dprint(sassc->sc, MPR_INIT,
184 			    "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
187 			xpt_hold_boot();
188 #endif
189 			xpt_freeze_simq(sassc->sim, 1);
190 		}
191 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 		    sassc->startup_refcount);
193 	}
194 }
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop one discovery reference.  When the last reference is released,
 * leave startup mode and thaw the simq; on CAM versions with
 * xpt_release_boot() let boot continue, otherwise force a full-bus
 * rescan to pick up the discovered topology.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a high-priority command for use as a task management (TM)
 * request.  Returns NULL when none are available.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
246 void
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
248 {
249 	int target_id = 0xFFFFFFFF;
250 
251 	MPR_FUNCTRACE(sc);
252 	if (tm == NULL)
253 		return;
254 
255 	/*
256 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
257 	 * free the resources used for freezing the devq.  Must clear the
258 	 * INRESET flag as well or scsi I/O will not work.
259 	 */
260 	if (tm->cm_targ != NULL) {
261 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 		target_id = tm->cm_targ->tid;
263 	}
264 	if (tm->cm_ccb) {
265 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
266 		    target_id);
267 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
268 		xpt_free_path(tm->cm_ccb->ccb_h.path);
269 		xpt_free_ccb(tm->cm_ccb);
270 	}
271 
272 	mpr_free_high_priority_command(sc, tm);
273 }
274 
275 void
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
277 {
278 	struct mprsas_softc *sassc = sc->sassc;
279 	path_id_t pathid;
280 	target_id_t targetid;
281 	union ccb *ccb;
282 
283 	MPR_FUNCTRACE(sc);
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = xpt_alloc_ccb_nowait();
294 	if (ccb == NULL) {
295 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 		return;
297 	}
298 
299 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
302 		xpt_free_ccb(ccb);
303 		return;
304 	}
305 
306 	if (targetid == CAM_TARGET_WILDCARD)
307 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
308 	else
309 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
310 
311 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
312 	xpt_rescan(ccb);
313 }
314 
/*
 * printf-style debug logging for a command.  Prefixes the message with
 * the command's CAM path (or a "noperiph" sim/target/lun tuple when no
 * CCB is attached) and its request SMID, then emits the whole line via
 * mpr_print_field().  No-op when the requested debug level is not
 * enabled in sc->mpr_debug.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-size sbuf backed by the on-stack 'str' buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, also include the CDB and length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: fall back to sim name/unit/bus plus target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
360 static void
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
362 {
363 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 	struct mprsas_target *targ;
365 	uint16_t handle;
366 
367 	MPR_FUNCTRACE(sc);
368 
369 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
371 	targ = tm->cm_targ;
372 
373 	if (reply == NULL) {
374 		/* XXX retry the remove after the diag reset completes? */
375 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 		    "0x%04x\n", __func__, handle);
377 		mprsas_free_tm(sc, tm);
378 		return;
379 	}
380 
381 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
382 	    MPI2_IOCSTATUS_SUCCESS) {
383 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
384 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
385 	}
386 
387 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
388 	    le32toh(reply->TerminationCount));
389 	mpr_free_reply(sc, tm->cm_reply_data);
390 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391 
392 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
393 	    targ->tid, handle);
394 
395 	/*
396 	 * Don't clear target if remove fails because things will get confusing.
397 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 	 * this target id if possible, and so we can assign the same target id
399 	 * to this device if it comes back in the future.
400 	 */
401 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 	    MPI2_IOCSTATUS_SUCCESS) {
403 		targ = tm->cm_targ;
404 		targ->handle = 0x0;
405 		targ->encl_handle = 0x0;
406 		targ->encl_level_valid = 0x0;
407 		targ->encl_level = 0x0;
408 		targ->connector_name[0] = ' ';
409 		targ->connector_name[1] = ' ';
410 		targ->connector_name[2] = ' ';
411 		targ->connector_name[3] = ' ';
412 		targ->encl_slot = 0x0;
413 		targ->exp_dev_handle = 0x0;
414 		targ->phy_num = 0x0;
415 		targ->linkrate = 0x0;
416 		targ->devinfo = 0x0;
417 		targ->flags = 0x0;
418 		targ->scsi_req_desc_type = 0;
419 	}
420 
421 	mprsas_free_tm(sc, tm);
422 }
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
/*
 * Begin removal of a bare (non-volume) device: mark the target as being
 * removed, trigger a CAM rescan so peripherals notice, and send a
 * target-reset TM.  The reset aborts outstanding I/O for the device;
 * its completion handler (mprsas_remove_device) then issues the
 * SAS_OP_REMOVE_DEVICE that actually retires the handle.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		/* NOTE(review): no retry path -- the removal is dropped when
		 * no high-priority command is available. */
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mprsas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
545 
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * Logs the reset result, then reuses the same command structure to send
 * the SAS IO-unit-control OP_REMOVE_DEVICE that retires the handle
 * (completed in mprsas_remove_complete).  Finally, any commands still
 * queued on the target are failed back to CAM as CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * Complete any commands the reset orphaned on the target's queue.
	 * Note that 'tm' is reused as the loop iterator here -- the removal
	 * command itself was already handed off via mpr_map_command() above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE issued in
 * mprsas_remove_device().  On success, wipes the target's identifying
 * state (keeping devname/sasaddr so the same target id can be re-matched
 * if the device returns) and frees its LUN list.  The command is freed
 * in all cases.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Release all per-LUN records hanging off this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 		}
718 	}
719 
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
722 
723 	return (0);
724 }
725 
/*
 * Attach the SAS/CAM subsystem: allocate the sassc and target array,
 * create the devq/SIM and the firmware-event taskqueue, register the
 * CAM bus, freeze the simq until discovery completes, and register the
 * async callbacks used for EEDP capability detection.  Returns 0 on
 * success or an errno; on error, partially-created state is torn down
 * via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* NOTE(review): M_WAITOK malloc does not fail per malloc(9); the
	 * NULL checks below are defensive only. */
	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
899 
/*
 * Tear down everything mpr_attach_sas() created: deregister firmware
 * events and the CAM async handler, free the event taskqueue, release
 * the simq if still frozen, deregister the bus/SIM, and free the
 * per-target LUN lists, target array, and sassc.  Safe to call on a
 * partially-attached instance.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	/* Nothing to do if the SAS subsystem never attached. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Thaw the simq if attach left it frozen for discovery. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN state hanging off each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
959 
960 void
961 mprsas_discovery_end(struct mprsas_softc *sassc)
962 {
963 	struct mpr_softc *sc = sassc->sc;
964 
965 	MPR_FUNCTRACE(sc);
966 
967 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
968 		callout_stop(&sassc->discovery_callout);
969 
970 	/*
971 	 * After discovery has completed, check the mapping table for any
972 	 * missing devices and update their missing counts. Only do this once
973 	 * whenever the driver is initialized so that missing counts aren't
974 	 * updated unnecessarily. Note that just because discovery has
975 	 * completed doesn't mean that events have been processed yet. The
976 	 * check_devices function is a callout timer that checks if ALL devices
977 	 * are missing. If so, it will wait a little longer for events to
978 	 * complete and keep resetting itself until some device in the mapping
979 	 * table is not missing, meaning that event processing has started.
980 	 */
981 	if (sc->track_mapping_events) {
982 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
983 		    "completed. Check for missing devices in the mapping "
984 		    "table.\n");
985 		callout_reset(&sc->device_check_callout,
986 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
987 		    sc);
988 	}
989 }
990 
/*
 * CAM action entry point for the mpr(4) SIM.  Dispatches on the CCB
 * function code; handlers that complete asynchronously (SCSI I/O, SMP
 * I/O, device reset) return directly, all others fall through to
 * xpt_done() at the bottom.  Called with the softc mutex held.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = (sc->chain_frame_size /
		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		sc->maxio = cpi->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report negotiated link rate for the addressed target. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device is present at this target. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Link rate codes per the MPI spec (1.5/3/6/12 Gbps). */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completes asynchronously; xpt_done() happens later. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completes asynchronously via mprsas_scsiio_complete(). */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1143 
1144 static void
1145 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1146     target_id_t target_id, lun_id_t lun_id)
1147 {
1148 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1149 	struct cam_path *path;
1150 
1151 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1152 	    ac_code, target_id, (uintmax_t)lun_id);
1153 
1154 	if (xpt_create_path(&path, NULL,
1155 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1156 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1157 		    "notification\n");
1158 		return;
1159 	}
1160 
1161 	xpt_async(ac_code, path, NULL);
1162 	xpt_free_path(path);
1163 }
1164 
/*
 * Complete every outstanding command with a NULL reply.  Called during a
 * diag reset, after which no firmware completions will ever arrive for
 * in-flight requests.  Each command's completion handler (or sleeper) is
 * responsible for noticing the NULL reply and cleaning up.  Called with
 * the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on COMPLETE; set it so they exit. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Wake any thread sleeping on this command. */
		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1212 
/*
 * Re-initialize driver/CAM state after a controller diag reset: freeze
 * the simq, flush all outstanding commands, and invalidate every cached
 * device handle so targets get rediscovered with fresh handles.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* INDIAGRESET is cleared as each target is rediscovered. */
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1255 static void
1256 mprsas_tm_timeout(void *data)
1257 {
1258 	struct mpr_command *tm = data;
1259 	struct mpr_softc *sc = tm->cm_sc;
1260 
1261 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1262 
1263 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1264 	    "out\n", tm);
1265 	mpr_reinit(sc);
1266 }
1267 
1268 static void
1269 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1270 {
1271 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1272 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1273 	unsigned int cm_count = 0;
1274 	struct mpr_command *cm;
1275 	struct mprsas_target *targ;
1276 
1277 	callout_stop(&tm->cm_callout);
1278 
1279 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1280 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1281 	targ = tm->cm_targ;
1282 
1283 	/*
1284 	 * Currently there should be no way we can hit this case.  It only
1285 	 * happens when we have a failure to allocate chain frames, and
1286 	 * task management commands don't have S/G lists.
1287 	 */
1288 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1289 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1290 		    "This should not happen!\n", __func__, tm->cm_flags);
1291 		mprsas_free_tm(sc, tm);
1292 		return;
1293 	}
1294 
1295 	if (reply == NULL) {
1296 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1297 		    "%p\n", tm);
1298 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1299 			/* this completion was due to a reset, just cleanup */
1300 			targ->tm = NULL;
1301 			mprsas_free_tm(sc, tm);
1302 		}
1303 		else {
1304 			/* we should have gotten a reply. */
1305 			mpr_reinit(sc);
1306 		}
1307 		return;
1308 	}
1309 
1310 	mprsas_log_command(tm, MPR_RECOVERY,
1311 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1312 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1313 	    le32toh(reply->TerminationCount));
1314 
1315 	/* See if there are any outstanding commands for this LUN.
1316 	 * This could be made more efficient by using a per-LU data
1317 	 * structure of some sort.
1318 	 */
1319 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1320 		if (cm->cm_lun == tm->cm_lun)
1321 			cm_count++;
1322 	}
1323 
1324 	if (cm_count == 0) {
1325 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1326 		    "logical unit %u finished recovery after reset\n",
1327 		    tm->cm_lun, tm);
1328 
1329 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1330 		    tm->cm_lun);
1331 
1332 		/* we've finished recovery for this logical unit.  check and
1333 		 * see if some other logical unit has a timedout command
1334 		 * that needs to be processed.
1335 		 */
1336 		cm = TAILQ_FIRST(&targ->timedout_commands);
1337 		if (cm) {
1338 			mprsas_send_abort(sc, tm, cm);
1339 		}
1340 		else {
1341 			targ->tm = NULL;
1342 			mprsas_free_tm(sc, tm);
1343 		}
1344 	}
1345 	else {
1346 		/* if we still have commands for this LUN, the reset
1347 		 * effectively failed, regardless of the status reported.
1348 		 * Escalate to a target reset.
1349 		 */
1350 		mprsas_log_command(tm, MPR_RECOVERY,
1351 		    "logical unit reset complete for tm %p, but still have %u "
1352 		    "command(s)\n", tm, cm_count);
1353 		mprsas_send_reset(sc, tm,
1354 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1355 	}
1356 }
1357 
/*
 * Completion handler for a TARGET_RESET task management request.  If the
 * target has no outstanding commands left, recovery is finished:
 * announce AC_SENT_BDR for all LUNs and free the TM.  If commands remain
 * the reset effectively failed and the driver escalates to a full diag
 * reset.  A NULL reply outside of a diag reset also escalates.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm the escalation watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
		    "%p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1427 
1428 #define MPR_RESET_TIMEOUT 30
1429 
/*
 * Issue a task management reset ('type' is either LOGICAL_UNIT_RESET or
 * TARGET_RESET) for the target associated with TM command 'tm'.  Sets up
 * the appropriate completion handler, arms a timeout that escalates to a
 * diag reset, and maps the command to the hardware.
 *
 * Returns the result of mpr_map_command(), or -1 for an invalid device
 * handle or an unrecognized reset type.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* handle == 0 means the target is gone (or was never present). */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* If the reset never completes, escalate to a diag reset. */
	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1498 
1499 
/*
 * Completion handler for an ABORT_TASK task management request.  Decides
 * the next recovery step: done (no more timed-out commands), continue
 * (abort the next timed-out command), or escalate to a logical unit
 * reset (the aborted command itself never completed).  A NULL reply
 * outside of a diag reset escalates to mpr_reinit().
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm the escalation watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1581 
1582 #define MPR_ABORT_TIMEOUT 5
1583 
1584 static int
1585 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1586     struct mpr_command *cm)
1587 {
1588 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1589 	struct mprsas_target *targ;
1590 	int err;
1591 
1592 	targ = cm->cm_targ;
1593 	if (targ->handle == 0) {
1594 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1595 		    __func__, cm->cm_ccb->ccb_h.target_id);
1596 		return -1;
1597 	}
1598 
1599 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1600 	    "Aborting command %p\n", cm);
1601 
1602 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1603 	req->DevHandle = htole16(targ->handle);
1604 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1605 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1606 
1607 	/* XXX Need to handle invalid LUNs */
1608 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1609 
1610 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1611 
1612 	tm->cm_data = NULL;
1613 	tm->cm_desc.HighPriority.RequestFlags =
1614 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1615 	tm->cm_complete = mprsas_abort_complete;
1616 	tm->cm_complete_data = (void *)tm;
1617 	tm->cm_targ = cm->cm_targ;
1618 	tm->cm_lun = cm->cm_lun;
1619 
1620 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1621 	    mprsas_tm_timeout, tm);
1622 
1623 	targ->aborts++;
1624 
1625 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1626 	    __func__, targ->tid);
1627 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1628 
1629 	err = mpr_map_command(sc, tm);
1630 	if (err)
1631 		mpr_dprint(sc, MPR_RECOVERY,
1632 		    "error %d sending abort for cm %p SMID %u\n",
1633 		    err, cm, req->TaskMID);
1634 	return err;
1635 }
1636 
/*
 * Callout handler for a SCSI I/O command that exceeded its CAM timeout.
 * After ruling out a pending-but-undelivered completion, marks the
 * command timed out, queues it for recovery, and kicks off recovery by
 * sending an abort (if a TM command is available and the target isn't
 * already in recovery).  Runs with the softc mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* The interrupt pass completed it; nothing more to do. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout %d cm %p target "
	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
		    "allocate a tm\n", cm);
	}
}
1716 
1717 /**
1718  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1719  *			     to SCSI Unmap.
1720  * Return 0 - for success,
1721  *	  1 - to immediately return back the command with success status to CAM
1722  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1723  *			   to FW without any translation.
1724  */
1725 static int
1726 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1727     union ccb *ccb, struct mprsas_target *targ)
1728 {
1729 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1730 	struct ccb_scsiio *csio;
1731 	struct unmap_parm_list *plist;
1732 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1733 	struct nvme_command *c;
1734 	int i, res;
1735 	uint16_t ndesc, list_len, data_length;
1736 	struct mpr_prp_page *prp_page_info;
1737 	uint64_t nvme_dsm_ranges_dma_handle;
1738 
1739 	csio = &ccb->csio;
1740 #if __FreeBSD_version >= 1100103
1741 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1742 #else
1743 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1744 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1745 		    ccb->csio.cdb_io.cdb_ptr[8]);
1746 	} else {
1747 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1748 		    ccb->csio.cdb_io.cdb_bytes[8]);
1749 	}
1750 #endif
1751 	if (!list_len) {
1752 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1753 		return -EINVAL;
1754 	}
1755 
1756 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1757 	if (!plist) {
1758 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1759 		    "save UNMAP data\n");
1760 		return -ENOMEM;
1761 	}
1762 
1763 	/* Copy SCSI unmap data to a local buffer */
1764 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1765 
1766 	/* return back the unmap command to CAM with success status,
1767 	 * if number of descripts is zero.
1768 	 */
1769 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1770 	if (!ndesc) {
1771 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1772 		    "UNMAP cmd is Zero\n");
1773 		res = 1;
1774 		goto out;
1775 	}
1776 
1777 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1778 	if (data_length > targ->MDTS) {
1779 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1780 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1781 		res = -EINVAL;
1782 		goto out;
1783 	}
1784 
1785 	prp_page_info = mpr_alloc_prp_page(sc);
1786 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1787 	    "UNMAP command.\n", __func__));
1788 
1789 	/*
1790 	 * Insert the allocated PRP page into the command's PRP page list. This
1791 	 * will be freed when the command is freed.
1792 	 */
1793 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1794 
1795 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1796 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1797 
1798 	bzero(nvme_dsm_ranges, data_length);
1799 
1800 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1801 	 * for each descriptors contained in SCSI UNMAP data.
1802 	 */
1803 	for (i = 0; i < ndesc; i++) {
1804 		nvme_dsm_ranges[i].length =
1805 		    htole32(be32toh(plist->desc[i].nlb));
1806 		nvme_dsm_ranges[i].starting_lba =
1807 		    htole64(be64toh(plist->desc[i].slba));
1808 		nvme_dsm_ranges[i].attributes = 0;
1809 	}
1810 
1811 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1812 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1813 	bzero(req, sizeof(*req));
1814 	req->DevHandle = htole16(targ->handle);
1815 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1816 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1817 	req->ErrorResponseBaseAddress.High =
1818 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1819 	req->ErrorResponseBaseAddress.Low =
1820 	    htole32(cm->cm_sense_busaddr);
1821 	req->ErrorResponseAllocationLength =
1822 	    htole16(sizeof(struct nvme_completion));
1823 	req->EncapsulatedCommandLength =
1824 	    htole16(sizeof(struct nvme_command));
1825 	req->DataLength = htole32(data_length);
1826 
1827 	/* Build NVMe DSM command */
1828 	c = (struct nvme_command *) req->NVMe_Command;
1829 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1830 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1831 	c->cdw10 = htole32(ndesc - 1);
1832 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1833 
1834 	cm->cm_length = data_length;
1835 	cm->cm_data = NULL;
1836 
1837 	cm->cm_complete = mprsas_scsiio_complete;
1838 	cm->cm_complete_data = ccb;
1839 	cm->cm_targ = targ;
1840 	cm->cm_lun = csio->ccb_h.target_lun;
1841 	cm->cm_ccb = ccb;
1842 
1843 	cm->cm_desc.Default.RequestFlags =
1844 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1845 
1846 #if __FreeBSD_version >= 1000029
1847 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1848 	    mprsas_scsiio_timeout, cm, 0);
1849 #else //__FreeBSD_version < 1000029
1850 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1851 	    mprsas_scsiio_timeout, cm);
1852 #endif //__FreeBSD_version >= 1000029
1853 
1854 	targ->issued++;
1855 	targ->outstanding++;
1856 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1857 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1858 
1859 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1860 	    __func__, cm, ccb, targ->outstanding);
1861 
1862 	mpr_build_nvme_prp(sc, cm, req,
1863 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1864 	mpr_map_command(sc, cm);
1865 
1866 out:
1867 	free(plist, M_MPR);
1868 	return 0;
1869 }
1870 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, translate the
 * request into an MPI2 SCSI IO request (or, for UNMAP on an NVMe drive, a
 * native NVMe DSM built by mprsas_build_nvme_unmap()), arm the command
 * timeout, and queue the command to the controller.  The CCB is completed
 * immediately (xpt_done) on any validation failure.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is gone or was never added. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID member disks are owned by the IR firmware; no direct SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free commands (or a diag reset in flight) means the controller
	 * cannot accept work right now: freeze the SIM queue and ask CAM to
	 * requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* rc < 0: fall through and send a regular SCSI IO request */
	}

	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra 4-byte words beyond the base 16. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/*
	 * NOTE(review): IoFlags was already set to cdb_len above; this
	 * re-assignment is redundant but harmless.
	 */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout; ccb_h.timeout is in milliseconds. */
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2183 
2184 static void
2185 mpr_response_code(struct mpr_softc *sc, u8 response_code)
2186 {
2187         char *desc;
2188 
2189         switch (response_code) {
2190         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2191                 desc = "task management request completed";
2192                 break;
2193         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2194                 desc = "invalid frame";
2195                 break;
2196         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2197                 desc = "task management request not supported";
2198                 break;
2199         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2200                 desc = "task management request failed";
2201                 break;
2202         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2203                 desc = "task management request succeeded";
2204                 break;
2205         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2206                 desc = "invalid lun";
2207                 break;
2208         case 0xA:
2209                 desc = "overlapped tag attempted";
2210                 break;
2211         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2212                 desc = "task queued, however not sent to target";
2213                 break;
2214         default:
2215                 desc = "unknown";
2216                 break;
2217         }
2218 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
2219 	    desc);
2220 }
2221 
2222 /**
2223  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2224  */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	/* desc_scsi_state is built up with strcat() in sc->tmp_string below. */
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * Suppress logging for this firmware log-info code entirely.
	 * NOTE(review): the meaning of 0x31170000 is not documented here;
	 * presumably it is a noisy, uninteresting event — confirm against
	 * the firmware log-info code list.
	 */
	if (log_info == 0x31170000)
		return;

	/* Map the MPI2 IOCStatus value to a description string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Map the SCSI status byte to a description string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build the SCSI state string one flag at a time.  NOTE(review):
	 * the strcat()s into sc->tmp_string are not bounds-checked; this
	 * assumes tmp_string is large enough to hold all five phrases
	 * (~80 bytes worst case) — verify against its declaration.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* Include enclosure/slot location when the target reported it. */
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump decoded sense data only when extended-info debugging is on. */
	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Byte 0 of the response info is the TM response code. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}
2383 
/** mprsas_nvme_trans_status_code
 *
 * Convert a native NVMe completion status (status code type + status
 * code) into an equivalent SCSI status byte, and fill in the CCB's
 * sense data (fixed format) with a matching sense key / ASC / ASCQ.
 *
 * Side effects: writes ccb->csio.sense_data and sense_resid, and sets
 * CAM_AUTOSNS_VALID in the CCB status — this happens even when the
 * translated status is GOOD.
 *
 * Returns the translated SCSI status (MPI2_SCSI_STATUS_*).
 */
static u8
mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;

	/*
	 * Default translation: any status code not matched below maps to
	 * CHECK CONDITION / ILLEGAL REQUEST with no additional sense.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (nvme_status.sct) {
	case NVME_SCT_GENERIC:
		switch (nvme_status.sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (nvme_status.sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (nvme_status.sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/* Report how much of the sense buffer was left unused, if any. */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2556 
2557 /** mprsas_complete_nvme_unmap
2558  *
2559  * Complete native NVMe command issued using NVMe Encapsulated
2560  * Request Message.
2561  */
2562 static u8
2563 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2564 {
2565 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2566 	struct nvme_completion *nvme_completion = NULL;
2567 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2568 
2569 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2570 	if (le16toh(mpi_reply->ErrorResponseCount)){
2571 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2572 		scsi_status = mprsas_nvme_trans_status_code(
2573 		    nvme_completion->status, cm);
2574 	}
2575 	return scsi_status;
2576 }
2577 
2578 static void
2579 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2580 {
2581 	MPI2_SCSI_IO_REPLY *rep;
2582 	union ccb *ccb;
2583 	struct ccb_scsiio *csio;
2584 	struct mprsas_softc *sassc;
2585 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2586 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2587 	int dir = 0, i;
2588 	u16 alloc_len;
2589 	struct mprsas_target *target;
2590 	target_id_t target_id;
2591 
2592 	MPR_FUNCTRACE(sc);
2593 	mpr_dprint(sc, MPR_TRACE,
2594 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2595 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2596 	    cm->cm_targ->outstanding);
2597 
2598 	callout_stop(&cm->cm_callout);
2599 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2600 
2601 	sassc = sc->sassc;
2602 	ccb = cm->cm_complete_data;
2603 	csio = &ccb->csio;
2604 	target_id = csio->ccb_h.target_id;
2605 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2606 	/*
2607 	 * XXX KDM if the chain allocation fails, does it matter if we do
2608 	 * the sync and unload here?  It is simpler to do it in every case,
2609 	 * assuming it doesn't cause problems.
2610 	 */
2611 	if (cm->cm_data != NULL) {
2612 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2613 			dir = BUS_DMASYNC_POSTREAD;
2614 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2615 			dir = BUS_DMASYNC_POSTWRITE;
2616 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2617 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2618 	}
2619 
2620 	cm->cm_targ->completed++;
2621 	cm->cm_targ->outstanding--;
2622 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2623 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2624 
2625 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2626 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2627 		if (cm->cm_reply != NULL)
2628 			mprsas_log_command(cm, MPR_RECOVERY,
2629 			    "completed timedout cm %p ccb %p during recovery "
2630 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2631 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2632 			    rep->SCSIState, le32toh(rep->TransferCount));
2633 		else
2634 			mprsas_log_command(cm, MPR_RECOVERY,
2635 			    "completed timedout cm %p ccb %p during recovery\n",
2636 			    cm, cm->cm_ccb);
2637 	} else if (cm->cm_targ->tm != NULL) {
2638 		if (cm->cm_reply != NULL)
2639 			mprsas_log_command(cm, MPR_RECOVERY,
2640 			    "completed cm %p ccb %p during recovery "
2641 			    "ioc %x scsi %x state %x xfer %u\n",
2642 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2643 			    rep->SCSIStatus, rep->SCSIState,
2644 			    le32toh(rep->TransferCount));
2645 		else
2646 			mprsas_log_command(cm, MPR_RECOVERY,
2647 			    "completed cm %p ccb %p during recovery\n",
2648 			    cm, cm->cm_ccb);
2649 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2650 		mprsas_log_command(cm, MPR_RECOVERY,
2651 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2652 	}
2653 
2654 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2655 		/*
2656 		 * We ran into an error after we tried to map the command,
2657 		 * so we're getting a callback without queueing the command
2658 		 * to the hardware.  So we set the status here, and it will
2659 		 * be retained below.  We'll go through the "fast path",
2660 		 * because there can be no reply when we haven't actually
2661 		 * gone out to the hardware.
2662 		 */
2663 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2664 
2665 		/*
2666 		 * Currently the only error included in the mask is
2667 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2668 		 * chain frames.  We need to freeze the queue until we get
2669 		 * a command that completed without this error, which will
2670 		 * hopefully have some chain frames attached that we can
2671 		 * use.  If we wanted to get smarter about it, we would
2672 		 * only unfreeze the queue in this condition when we're
2673 		 * sure that we're getting some chain frames back.  That's
2674 		 * probably unnecessary.
2675 		 */
2676 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2677 			xpt_freeze_simq(sassc->sim, 1);
2678 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2679 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2680 			    "freezing SIM queue\n");
2681 		}
2682 	}
2683 
2684 	/*
2685 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2686 	 * flag, and use it in a few places in the rest of this function for
2687 	 * convenience. Use the macro if available.
2688 	 */
2689 #if __FreeBSD_version >= 1100103
2690 	scsi_cdb = scsiio_cdb_ptr(csio);
2691 #else
2692 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2693 		scsi_cdb = csio->cdb_io.cdb_ptr;
2694 	else
2695 		scsi_cdb = csio->cdb_io.cdb_bytes;
2696 #endif
2697 
2698 	/*
2699 	 * If this is a Start Stop Unit command and it was issued by the driver
2700 	 * during shutdown, decrement the refcount to account for all of the
2701 	 * commands that were sent.  All SSU commands should be completed before
2702 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2703 	 * is TRUE.
2704 	 */
2705 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2706 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2707 		sc->SSU_refcount--;
2708 	}
2709 
2710 	/* Take the fast path to completion */
2711 	if (cm->cm_reply == NULL) {
2712 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2713 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2714 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2715 			else {
2716 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2717 				csio->scsi_status = SCSI_STATUS_OK;
2718 			}
2719 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2720 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2721 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2722 				mpr_dprint(sc, MPR_XINFO,
2723 				    "Unfreezing SIM queue\n");
2724 			}
2725 		}
2726 
2727 		/*
2728 		 * There are two scenarios where the status won't be
2729 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2730 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2731 		 */
2732 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2733 			/*
2734 			 * Freeze the dev queue so that commands are
2735 			 * executed in the correct order after error
2736 			 * recovery.
2737 			 */
2738 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2739 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2740 		}
2741 		mpr_free_command(sc, cm);
2742 		xpt_done(ccb);
2743 		return;
2744 	}
2745 
2746 	target = &sassc->targets[target_id];
2747 	if (scsi_cdb[0] == UNMAP &&
2748 	    target->is_nvme &&
2749 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2750 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2751 		csio->scsi_status = rep->SCSIStatus;
2752 	}
2753 
2754 	mprsas_log_command(cm, MPR_XINFO,
2755 	    "ioc %x scsi %x state %x xfer %u\n",
2756 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2757 	    le32toh(rep->TransferCount));
2758 
2759 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2760 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2761 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2762 		/* FALLTHROUGH */
2763 	case MPI2_IOCSTATUS_SUCCESS:
2764 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2765 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2766 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2767 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2768 
2769 		/* Completion failed at the transport level. */
2770 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2771 		    MPI2_SCSI_STATE_TERMINATED)) {
2772 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2773 			break;
2774 		}
2775 
2776 		/* In a modern packetized environment, an autosense failure
2777 		 * implies that there's not much else that can be done to
2778 		 * recover the command.
2779 		 */
2780 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2781 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2782 			break;
2783 		}
2784 
2785 		/*
2786 		 * CAM doesn't care about SAS Response Info data, but if this is
2787 		 * the state check if TLR should be done.  If not, clear the
2788 		 * TLR_bits for the target.
2789 		 */
2790 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2791 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2792 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2793 			sc->mapping_table[target_id].TLR_bits =
2794 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2795 		}
2796 
2797 		/*
2798 		 * Intentionally override the normal SCSI status reporting
2799 		 * for these two cases.  These are likely to happen in a
2800 		 * multi-initiator environment, and we want to make sure that
2801 		 * CAM retries these commands rather than fail them.
2802 		 */
2803 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2804 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2805 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2806 			break;
2807 		}
2808 
2809 		/* Handle normal status and sense */
2810 		csio->scsi_status = rep->SCSIStatus;
2811 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2812 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2813 		else
2814 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2815 
2816 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2817 			int sense_len, returned_sense_len;
2818 
2819 			returned_sense_len = min(le32toh(rep->SenseCount),
2820 			    sizeof(struct scsi_sense_data));
2821 			if (returned_sense_len < csio->sense_len)
2822 				csio->sense_resid = csio->sense_len -
2823 				    returned_sense_len;
2824 			else
2825 				csio->sense_resid = 0;
2826 
2827 			sense_len = min(returned_sense_len,
2828 			    csio->sense_len - csio->sense_resid);
2829 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2830 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2831 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2832 		}
2833 
2834 		/*
2835 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2836 		 * and it's page code 0 (Supported Page List), and there is
2837 		 * inquiry data, and this is for a sequential access device, and
2838 		 * the device is an SSP target, and TLR is supported by the
2839 		 * controller, turn the TLR_bits value ON if page 0x90 is
2840 		 * supported.
2841 		 */
2842 		if ((scsi_cdb[0] == INQUIRY) &&
2843 		    (scsi_cdb[1] & SI_EVPD) &&
2844 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2845 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2846 		    (csio->data_ptr != NULL) &&
2847 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2848 		    (sc->control_TLR) &&
2849 		    (sc->mapping_table[target_id].device_info &
2850 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2851 			vpd_list = (struct scsi_vpd_supported_page_list *)
2852 			    csio->data_ptr;
2853 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2854 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2855 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2856 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2857 			alloc_len -= csio->resid;
2858 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2859 				if (vpd_list->list[i] == 0x90) {
2860 					*TLR_bits = TLR_on;
2861 					break;
2862 				}
2863 			}
2864 		}
2865 
2866 		/*
2867 		 * If this is a SATA direct-access end device, mark it so that
2868 		 * a SCSI StartStopUnit command will be sent to it when the
2869 		 * driver is being shutdown.
2870 		 */
2871 		if ((scsi_cdb[0] == INQUIRY) &&
2872 		    (csio->data_ptr != NULL) &&
2873 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2874 		    (sc->mapping_table[target_id].device_info &
2875 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2876 		    ((sc->mapping_table[target_id].device_info &
2877 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2878 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2879 			target = &sassc->targets[target_id];
2880 			target->supports_SSU = TRUE;
2881 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2882 			    target_id);
2883 		}
2884 		break;
2885 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2886 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2887 		/*
2888 		 * If devinfo is 0 this will be a volume.  In that case don't
2889 		 * tell CAM that the volume is not there.  We want volumes to
2890 		 * be enumerated until they are deleted/removed, not just
2891 		 * failed.
2892 		 */
2893 		if (cm->cm_targ->devinfo == 0)
2894 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2895 		else
2896 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2897 		break;
2898 	case MPI2_IOCSTATUS_INVALID_SGL:
2899 		mpr_print_scsiio_cmd(sc, cm);
2900 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2901 		break;
2902 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2903 		/*
2904 		 * This is one of the responses that comes back when an I/O
2905 		 * has been aborted.  If it is because of a timeout that we
2906 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2907 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2908 		 * command is the same (it gets retried, subject to the
2909 		 * retry counter), the only difference is what gets printed
2910 		 * on the console.
2911 		 */
2912 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2913 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2914 		else
2915 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2916 		break;
2917 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2918 		/* resid is ignored for this condition */
2919 		csio->resid = 0;
2920 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2921 		break;
2922 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2923 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2924 		/*
2925 		 * These can sometimes be transient transport-related
2926 		 * errors, and sometimes persistent drive-related errors.
2927 		 * We used to retry these without decrementing the retry
2928 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2929 		 * we hit a persistent drive problem that returns one of
2930 		 * these error codes, we would retry indefinitely.  So,
2931 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2932 		 * count and avoid infinite retries.  We're taking the
2933 		 * potential risk of flagging false failures in the event
2934 		 * of a topology-related error (e.g. a SAS expander problem
2935 		 * causes a command addressed to a drive to fail), but
2936 		 * avoiding getting into an infinite retry loop.
2937 		 */
2938 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2939 		mprsas_log_command(cm, MPR_INFO,
2940 		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2941 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2942 		    rep->SCSIStatus, rep->SCSIState,
2943 		    le32toh(rep->TransferCount));
2944 		break;
2945 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2946 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2947 	case MPI2_IOCSTATUS_INVALID_VPID:
2948 	case MPI2_IOCSTATUS_INVALID_FIELD:
2949 	case MPI2_IOCSTATUS_INVALID_STATE:
2950 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2951 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2952 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2953 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2954 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2955 	default:
2956 		mprsas_log_command(cm, MPR_XINFO,
2957 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2958 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2959 		    rep->SCSIStatus, rep->SCSIState,
2960 		    le32toh(rep->TransferCount));
2961 		csio->resid = cm->cm_length;
2962 
2963 		if (scsi_cdb[0] == UNMAP &&
2964 		    target->is_nvme &&
2965 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2966 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2967 		else
2968 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2969 
2970 		break;
2971 	}
2972 
2973 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2974 
2975 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2976 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2977 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2978 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2979 		    "queue\n");
2980 	}
2981 
2982 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2983 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2984 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2985 	}
2986 
2987 	mpr_free_command(sc, cm);
2988 	xpt_done(ccb);
2989 }
2990 
2991 #if __FreeBSD_version >= 900026
2992 static void
2993 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2994 {
2995 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2996 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2997 	uint64_t sasaddr;
2998 	union ccb *ccb;
2999 
3000 	ccb = cm->cm_complete_data;
3001 
3002 	/*
3003 	 * Currently there should be no way we can hit this case.  It only
3004 	 * happens when we have a failure to allocate chain frames, and SMP
3005 	 * commands require two S/G elements only.  That should be handled
3006 	 * in the standard request size.
3007 	 */
3008 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3009 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
3010 		    "request!\n", __func__, cm->cm_flags);
3011 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3012 		goto bailout;
3013         }
3014 
3015 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
3016 	if (rpl == NULL) {
3017 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
3018 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3019 		goto bailout;
3020 	}
3021 
3022 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3023 	sasaddr = le32toh(req->SASAddress.Low);
3024 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
3025 
3026 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3027 	    MPI2_IOCSTATUS_SUCCESS ||
3028 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
3029 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
3030 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
3031 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3032 		goto bailout;
3033 	}
3034 
3035 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
3036 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
3037 
3038 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
3039 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3040 	else
3041 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
3042 
3043 bailout:
3044 	/*
3045 	 * We sync in both directions because we had DMAs in the S/G list
3046 	 * in both directions.
3047 	 */
3048 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3049 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3050 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3051 	mpr_free_command(sc, cm);
3052 	xpt_done(ccb);
3053 }
3054 
/*
 * Build and dispatch an MPI SMP passthrough request for the XPT_SMP_IO
 * CCB 'ccb' to the SMP target at 'sasaddr'.  The CCB's request and
 * response buffers are packed into a two-element uio so that a single
 * mpr_map_command() call covers both directions of the transfer;
 * completion is handled asynchronously by mprsas_smpio_complete().
 * On any setup failure the CCB is completed immediately with an error
 * status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	/*
	 * On these FreeBSD versions the data addressing mode is encoded in
	 * CAM_DATA_MASK.  Extract kernel-virtual pointers for the request
	 * and response buffers, rejecting modes the driver can't handle.
	 */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outbound request, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3285 
/*
 * Handle an XPT_SMP_IO CCB: locate the SAS address of the SMP target the
 * request should be routed to (either the target itself, if it contains
 * an embedded SMP target, or its parent expander) and hand the CCB off
 * to mprsas_send_smpcmd().  Any lookup failure completes the CCB with
 * an error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe method: look the parent up in the target table
		 * by handle and take its SAS address.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * Current probe method: the parent's devinfo and SAS
		 * address are cached directly on the target.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3413 #endif //__FreeBSD_version >= 900026
3414 
3415 static void
3416 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3417 {
3418 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3419 	struct mpr_softc *sc;
3420 	struct mpr_command *tm;
3421 	struct mprsas_target *targ;
3422 
3423 	MPR_FUNCTRACE(sassc->sc);
3424 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3425 
3426 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3427 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3428 	sc = sassc->sc;
3429 	tm = mpr_alloc_command(sc);
3430 	if (tm == NULL) {
3431 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3432 		    "mprsas_action_resetdev\n");
3433 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3434 		xpt_done(ccb);
3435 		return;
3436 	}
3437 
3438 	targ = &sassc->targets[ccb->ccb_h.target_id];
3439 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3440 	req->DevHandle = htole16(targ->handle);
3441 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3442 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3443 
3444 	/* SAS Hard Link Reset / SATA Link Reset */
3445 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3446 
3447 	tm->cm_data = NULL;
3448 	tm->cm_desc.HighPriority.RequestFlags =
3449 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3450 	tm->cm_complete = mprsas_resetdev_complete;
3451 	tm->cm_complete_data = ccb;
3452 
3453 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3454 	    __func__, targ->tid);
3455 	tm->cm_targ = targ;
3456 	targ->flags |= MPRSAS_TARGET_INRESET;
3457 
3458 	mpr_map_command(sc, tm);
3459 }
3460 
3461 static void
3462 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3463 {
3464 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3465 	union ccb *ccb;
3466 
3467 	MPR_FUNCTRACE(sc);
3468 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3469 
3470 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3471 	ccb = tm->cm_complete_data;
3472 
3473 	/*
3474 	 * Currently there should be no way we can hit this case.  It only
3475 	 * happens when we have a failure to allocate chain frames, and
3476 	 * task management commands don't have S/G lists.
3477 	 */
3478 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3479 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3480 
3481 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3482 
3483 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3484 		    "handle %#04x! This should not happen!\n", __func__,
3485 		    tm->cm_flags, req->DevHandle);
3486 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3487 		goto bailout;
3488 	}
3489 
3490 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3491 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3492 
3493 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3494 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3495 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3496 		    CAM_LUN_WILDCARD);
3497 	}
3498 	else
3499 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3500 
3501 bailout:
3502 
3503 	mprsas_free_tm(sc, tm);
3504 	xpt_done(ccb);
3505 }
3506 
3507 static void
3508 mprsas_poll(struct cam_sim *sim)
3509 {
3510 	struct mprsas_softc *sassc;
3511 
3512 	sassc = cam_sim_softc(sim);
3513 
3514 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3515 		/* frequent debug messages during a panic just slow
3516 		 * everything down too much.
3517 		 */
3518 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3519 		    __func__);
3520 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3521 	}
3522 
3523 	mpr_intr_locked(sassc->sc);
3524 }
3525 
/*
 * CAM async event callback.  Depending on FreeBSD version the driver
 * either registers only for the events it needs or for all events and
 * filters here (see the version checks below).  Handles:
 *  - AC_ADVINFO_CHANGED (newer kernels): on a read-capacity change,
 *    issue an XPT_DEV_ADVINFO read of the long read-capacity data and
 *    record per-LUN EEDP (protection information) state.
 *  - AC_FOUND_DEVICE (older kernels): probe the new device for EEDP
 *    support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the existing LUN record, if any, for this path. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* No record yet; create one so EEDP state can be tracked. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data from the
		 * transport layer via an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN is formatted with protection
		 * information enabled, and its block size if so.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3655 
3656 #if (__FreeBSD_version < 901503) || \
3657     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3658 static void
3659 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3660     struct ccb_getdev *cgd)
3661 {
3662 	struct mprsas_softc *sassc = sc->sassc;
3663 	struct ccb_scsiio *csio;
3664 	struct scsi_read_capacity_16 *scsi_cmd;
3665 	struct scsi_read_capacity_eedp *rcap_buf;
3666 	path_id_t pathid;
3667 	target_id_t targetid;
3668 	lun_id_t lunid;
3669 	union ccb *ccb;
3670 	struct cam_path *local_path;
3671 	struct mprsas_target *target;
3672 	struct mprsas_lun *lun;
3673 	uint8_t	found_lun;
3674 	char path_str[64];
3675 
3676 	pathid = cam_sim_path(sassc->sim);
3677 	targetid = xpt_path_target_id(path);
3678 	lunid = xpt_path_lun_id(path);
3679 
3680 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3681 	    "mprsas_check_eedp\n", targetid));
3682 	target = &sassc->targets[targetid];
3683 	if (target->handle == 0x0)
3684 		return;
3685 
3686 	/*
3687 	 * Determine if the device is EEDP capable.
3688 	 *
3689 	 * If this flag is set in the inquiry data, the device supports
3690 	 * protection information, and must support the 16 byte read capacity
3691 	 * command, otherwise continue without sending read cap 16.
3692 	 */
3693 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3694 		return;
3695 
3696 	/*
3697 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3698 	 * the LUN is formatted for EEDP support.
3699 	 */
3700 	ccb = xpt_alloc_ccb_nowait();
3701 	if (ccb == NULL) {
3702 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3703 		    "support.\n");
3704 		return;
3705 	}
3706 
3707 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3708 	    CAM_REQ_CMP) {
3709 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3710 		    "support.\n");
3711 		xpt_free_ccb(ccb);
3712 		return;
3713 	}
3714 
3715 	/*
3716 	 * If LUN is already in list, don't create a new one.
3717 	 */
3718 	found_lun = FALSE;
3719 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3720 		if (lun->lun_id == lunid) {
3721 			found_lun = TRUE;
3722 			break;
3723 		}
3724 	}
3725 	if (!found_lun) {
3726 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3727 		    M_NOWAIT | M_ZERO);
3728 		if (lun == NULL) {
3729 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3730 			    "EEDP support.\n");
3731 			xpt_free_path(local_path);
3732 			xpt_free_ccb(ccb);
3733 			return;
3734 		}
3735 		lun->lun_id = lunid;
3736 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3737 	}
3738 
3739 	xpt_path_string(local_path, path_str, sizeof(path_str));
3740 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3741 	    path_str, target->handle);
3742 
3743 	/*
3744 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3745 	 * mprsas_read_cap_done function will load the read cap info into the
3746 	 * LUN struct.
3747 	 */
3748 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3749 	    M_NOWAIT | M_ZERO);
3750 	if (rcap_buf == NULL) {
3751 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3752 		    "buffer for EEDP support.\n");
3753 		xpt_free_path(ccb->ccb_h.path);
3754 		xpt_free_ccb(ccb);
3755 		return;
3756 	}
3757 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3758 	csio = &ccb->csio;
3759 	csio->ccb_h.func_code = XPT_SCSI_IO;
3760 	csio->ccb_h.flags = CAM_DIR_IN;
3761 	csio->ccb_h.retry_count = 4;
3762 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3763 	csio->ccb_h.timeout = 60000;
3764 	csio->data_ptr = (uint8_t *)rcap_buf;
3765 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3766 	csio->sense_len = MPR_SENSE_LEN;
3767 	csio->cdb_len = sizeof(*scsi_cmd);
3768 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3769 
3770 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3771 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3772 	scsi_cmd->opcode = 0x9E;
3773 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3774 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3775 
3776 	ccb->ccb_h.ppriv_ptr1 = sassc;
3777 	xpt_action(ccb);
3778 }
3779 
3780 static void
3781 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3782 {
3783 	struct mprsas_softc *sassc;
3784 	struct mprsas_target *target;
3785 	struct mprsas_lun *lun;
3786 	struct scsi_read_capacity_eedp *rcap_buf;
3787 
3788 	if (done_ccb == NULL)
3789 		return;
3790 
3791 	/* Driver need to release devq, it Scsi command is
3792 	 * generated by driver internally.
3793 	 * Currently there is a single place where driver
3794 	 * calls scsi command internally. In future if driver
3795 	 * calls more scsi command internally, it needs to release
3796 	 * devq internally, since those command will not go back to
3797 	 * cam_periph.
3798 	 */
3799 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3800         	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3801 		xpt_release_devq(done_ccb->ccb_h.path,
3802 			       	/*count*/ 1, /*run_queue*/TRUE);
3803 	}
3804 
3805 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3806 
3807 	/*
3808 	 * Get the LUN ID for the path and look it up in the LUN list for the
3809 	 * target.
3810 	 */
3811 	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3812 	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3813 	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3814 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3815 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3816 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3817 			continue;
3818 
3819 		/*
3820 		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
3821 		 * info. If the READ CAP 16 command had some SCSI error (common
3822 		 * if command is not supported), mark the lun as not supporting
3823 		 * EEDP and set the block size to 0.
3824 		 */
3825 		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3826 		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3827 			lun->eedp_formatted = FALSE;
3828 			lun->eedp_block_size = 0;
3829 			break;
3830 		}
3831 
3832 		if (rcap_buf->protect & 0x01) {
3833 			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3834 			    "%d is formatted for EEDP support.\n",
3835 			    done_ccb->ccb_h.target_lun,
3836 			    done_ccb->ccb_h.target_id);
3837 			lun->eedp_formatted = TRUE;
3838 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3839 		}
3840 		break;
3841 	}
3842 
3843 	// Finished with this CCB and path.
3844 	free(rcap_buf, M_MPR);
3845 	xpt_free_path(done_ccb->ccb_h.path);
3846 	xpt_free_ccb(done_ccb);
3847 }
3848 #endif /* (__FreeBSD_version < 901503) || \
3849           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3850 
3851 void
3852 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3853     struct mprsas_target *target, lun_id_t lun_id)
3854 {
3855 	union ccb *ccb;
3856 	path_id_t path_id;
3857 
3858 	/*
3859 	 * Set the INRESET flag for this target so that no I/O will be sent to
3860 	 * the target until the reset has completed.  If an I/O request does
3861 	 * happen, the devq will be frozen.  The CCB holds the path which is
3862 	 * used to release the devq.  The devq is released and the CCB is freed
3863 	 * when the TM completes.
3864 	 */
3865 	ccb = xpt_alloc_ccb_nowait();
3866 	if (ccb) {
3867 		path_id = cam_sim_path(sc->sassc->sim);
3868 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3869 		    target->tid, lun_id) != CAM_REQ_CMP) {
3870 			xpt_free_ccb(ccb);
3871 		} else {
3872 			tm->cm_ccb = ccb;
3873 			tm->cm_targ = target;
3874 			target->flags |= MPRSAS_TARGET_INRESET;
3875 		}
3876 	}
3877 }
3878 
3879 int
3880 mprsas_startup(struct mpr_softc *sc)
3881 {
3882 	/*
3883 	 * Send the port enable message and set the wait_for_port_enable flag.
3884 	 * This flag helps to keep the simq frozen until all discovery events
3885 	 * are processed.
3886 	 */
3887 	sc->wait_for_port_enable = 1;
3888 	mprsas_send_portenable(sc);
3889 	return (0);
3890 }
3891 
3892 static int
3893 mprsas_send_portenable(struct mpr_softc *sc)
3894 {
3895 	MPI2_PORT_ENABLE_REQUEST *request;
3896 	struct mpr_command *cm;
3897 
3898 	MPR_FUNCTRACE(sc);
3899 
3900 	if ((cm = mpr_alloc_command(sc)) == NULL)
3901 		return (EBUSY);
3902 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3903 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3904 	request->MsgFlags = 0;
3905 	request->VP_ID = 0;
3906 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3907 	cm->cm_complete = mprsas_portenable_complete;
3908 	cm->cm_data = NULL;
3909 	cm->cm_sge = NULL;
3910 
3911 	mpr_map_command(sc, cm);
3912 	mpr_dprint(sc, MPR_XINFO,
3913 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3914 	    cm, cm->cm_req, cm->cm_complete);
3915 	return (0);
3916 }
3917 
3918 static void
3919 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3920 {
3921 	MPI2_PORT_ENABLE_REPLY *reply;
3922 	struct mprsas_softc *sassc;
3923 
3924 	MPR_FUNCTRACE(sc);
3925 	sassc = sc->sassc;
3926 
3927 	/*
3928 	 * Currently there should be no way we can hit this case.  It only
3929 	 * happens when we have a failure to allocate chain frames, and
3930 	 * port enable commands don't have S/G lists.
3931 	 */
3932 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3933 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3934 		    "This should not happen!\n", __func__, cm->cm_flags);
3935 	}
3936 
3937 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3938 	if (reply == NULL)
3939 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3940 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3941 	    MPI2_IOCSTATUS_SUCCESS)
3942 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3943 
3944 	mpr_free_command(sc, cm);
3945 	if (sc->mpr_ich.ich_arg != NULL) {
3946 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3947 		config_intrhook_disestablish(&sc->mpr_ich);
3948 		sc->mpr_ich.ich_arg = NULL;
3949 	}
3950 
3951 	/*
3952 	 * Done waiting for port enable to complete.  Decrement the refcount.
3953 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3954 	 * take place.
3955 	 */
3956 	sc->wait_for_port_enable = 0;
3957 	sc->port_enable_complete = 1;
3958 	wakeup(&sc->port_enable_complete);
3959 	mprsas_startup_decrement(sassc);
3960 }
3961 
3962 int
3963 mprsas_check_id(struct mprsas_softc *sassc, int id)
3964 {
3965 	struct mpr_softc *sc = sassc->sc;
3966 	char *ids;
3967 	char *name;
3968 
3969 	ids = &sc->exclude_ids[0];
3970 	while((name = strsep(&ids, ",")) != NULL) {
3971 		if (name[0] == '\0')
3972 			continue;
3973 		if (strtol(name, NULL, 0) == (long)id)
3974 			return (1);
3975 	}
3976 
3977 	return (0);
3978 }
3979 
3980 void
3981 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3982 {
3983 	struct mprsas_softc *sassc;
3984 	struct mprsas_lun *lun, *lun_tmp;
3985 	struct mprsas_target *targ;
3986 	int i;
3987 
3988 	sassc = sc->sassc;
3989 	/*
3990 	 * The number of targets is based on IOC Facts, so free all of
3991 	 * the allocated LUNs for each target and then the target buffer
3992 	 * itself.
3993 	 */
3994 	for (i=0; i< maxtargets; i++) {
3995 		targ = &sassc->targets[i];
3996 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3997 			free(lun, M_MPR);
3998 		}
3999 	}
4000 	free(sassc->targets, M_MPR);
4001 
4002 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
4003 	    M_MPR, M_WAITOK|M_ZERO);
4004 	if (!sassc->targets) {
4005 		panic("%s failed to alloc targets with error %d\n",
4006 		    __func__, ENOMEM);
4007 	}
4008 }
4009