xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 4f52dfbb8d6c4d446500c5b097e3806ec219fbd4)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by CDB opcode (0x00-0xFF).  A non-zero entry gives the EEDP
 * flag word to apply for that opcode:
 *   PRO_R (check/remove) at 0x28, 0x88, 0xA8
 *   PRO_W (insert)       at 0x2A, 0x2E, 0x41, 0x8A, 0x8E, 0x93, 0xAA, 0xAE
 *   PRO_V (insert)       at 0x2F, 0x8F, 0xAF
 * All other opcodes get 0 (no EEDP handling).
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
/*
 * Bump the discovery refcount.  On the 0 -> 1 transition, freeze the SIM
 * queue (and hold boot where the CAM version supports it) so no I/O is
 * dispatched before discovery has established device handles.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/* Refcounting only applies while (re)startup/discovery is active. */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop the discovery refcount.  On the 1 -> 0 transition, discovery is
 * done: clear MPRSAS_IN_STARTUP, release the SIM queue, and either release
 * boot (newer CAM) or trigger a full-bus rescan (older CAM).
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a task-management command from the dedicated high-priority
 * pool.  Returns NULL when the pool is exhausted; callers must check.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
/*
 * Release a task-management command: unfreeze the per-device queue that
 * was frozen for the TM, clear the target's INRESET flag, free the path
 * and CCB used for the freeze, and return the command to the high-priority
 * pool.  Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/* Sentinel prints as -1 via %d when no target is attached to the TM. */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
274 
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the whole
 * bus when targ == NULL.  Allocation failures are logged and the rescan is
 * silently dropped (best-effort).
 */
void
mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
{
	struct mprsas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target ID is the index of the entry in the targets[] array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target -> full bus scan; otherwise scan just that target. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and its path. */
	xpt_rescan(ccb);
}
314 
/*
 * printf-style debug logger for a command.  The message is prefixed with
 * the CAM path string (or a "noperiph" sim:bus:target:lun tuple when no
 * CCB is attached) plus the request SMID, then emitted through
 * mpr_print_field().  No-op when 'level' isn't enabled in mpr_debug.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed external buffer; output beyond sizeof(str) is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O also decode the CDB and data length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: identify the command by sim unit/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
360 static void
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
362 {
363 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 	struct mprsas_target *targ;
365 	uint16_t handle;
366 
367 	MPR_FUNCTRACE(sc);
368 
369 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
371 	targ = tm->cm_targ;
372 
373 	if (reply == NULL) {
374 		/* XXX retry the remove after the diag reset completes? */
375 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 		    "0x%04x\n", __func__, handle);
377 		mprsas_free_tm(sc, tm);
378 		return;
379 	}
380 
381 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
382 	    MPI2_IOCSTATUS_SUCCESS) {
383 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
384 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
385 	}
386 
387 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
388 	    le32toh(reply->TerminationCount));
389 	mpr_free_reply(sc, tm->cm_reply_data);
390 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391 
392 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
393 	    targ->tid, handle);
394 
395 	/*
396 	 * Don't clear target if remove fails because things will get confusing.
397 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 	 * this target id if possible, and so we can assign the same target id
399 	 * to this device if it comes back in the future.
400 	 */
401 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 	    MPI2_IOCSTATUS_SUCCESS) {
403 		targ = tm->cm_targ;
404 		targ->handle = 0x0;
405 		targ->encl_handle = 0x0;
406 		targ->encl_level_valid = 0x0;
407 		targ->encl_level = 0x0;
408 		targ->connector_name[0] = ' ';
409 		targ->connector_name[1] = ' ';
410 		targ->connector_name[2] = ' ';
411 		targ->connector_name[3] = ' ';
412 		targ->encl_slot = 0x0;
413 		targ->exp_dev_handle = 0x0;
414 		targ->phy_num = 0x0;
415 		targ->linkrate = 0x0;
416 		targ->devinfo = 0x0;
417 		targ->flags = 0x0;
418 		targ->scsi_req_desc_type = 0;
419 	}
420 
421 	mprsas_free_tm(sc, tm);
422 }
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
/*
 * Begin removal of a bare device: mark the target INREMOVAL, kick off a
 * CAM rescan, and send a target-reset TM.  The reset aborts all
 * outstanding I/O for the handle (required before the op-remove that
 * mprsas_remove_device() will send on completion).
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		/* NOTE(review): removal is dropped if the TM pool is empty. */
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* Completion chains into the SAS_IO_UNIT_CONTROL op-remove step. */
	cm->cm_complete = mprsas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
545 
/*
 * Completion handler for the target-reset TM sent by mprsas_prepare_remove().
 * Reuses the same command to issue the SAS_IO_UNIT_CONTROL "remove device"
 * operation, then completes any commands still queued on the target with
 * CAM_DEV_NOT_THERE.  Note: the 'tm' variable is reused as the loop cursor
 * in the final TAILQ walk — after mpr_map_command() it no longer refers to
 * the task-management command.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	/* Final cleanup of the target entry happens in remove_complete. */
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* Fail back any I/O still queued on the now-gone target. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL "remove device" request.
 * On IOC success, clears the target table entry (except devname/sasaddr,
 * kept to avoid target-id reuse) and frees all per-LUN records; always
 * frees the TM command.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Drop every LUN record attached to this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
712 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
713 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
714 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
715 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
716 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
717 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
718 		}
719 	}
720 
721 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
722 	    &sc->sassc->mprsas_eh);
723 
724 	return (0);
725 }
726 
/*
 * Attach the CAM/SAS layer: allocate the sassc and target table, create
 * the SIM queue, SIM, and event taskqueue, register the bus with CAM,
 * freeze the SIM queue for discovery, and register async/firmware event
 * handlers.  On any failure, mpr_detach_sas() unwinds partial state.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc doesn't return NULL; check is dead. */
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): same dead NULL check as above (M_WAITOK). */
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Leave room for the high-priority (TM) commands and one spare. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	/* NOTE(review): M_NOWAIT create may fail; ev_tq isn't NULL-checked. */
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
901 
/*
 * Tear down the CAM/SAS layer.  Also serves as the error-unwind path for
 * mpr_attach_sas(), so every resource is NULL-checked before release.
 * Returns 0; no-op when the SAS layer was never attached.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Releases the SIM queue frozen by each outstanding increment. */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* event mask 0 removes the previously-registered callback */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free per-LUN records, then the target table and sassc itself. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
964 
965 void
966 mprsas_discovery_end(struct mprsas_softc *sassc)
967 {
968 	struct mpr_softc *sc = sassc->sc;
969 
970 	MPR_FUNCTRACE(sc);
971 
972 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
973 		callout_stop(&sassc->discovery_callout);
974 
975 	/*
976 	 * After discovery has completed, check the mapping table for any
977 	 * missing devices and update their missing counts. Only do this once
978 	 * whenever the driver is initialized so that missing counts aren't
979 	 * updated unnecessarily. Note that just because discovery has
980 	 * completed doesn't mean that events have been processed yet. The
981 	 * check_devices function is a callout timer that checks if ALL devices
982 	 * are missing. If so, it will wait a little longer for events to
983 	 * complete and keep resetting itself until some device in the mapping
984 	 * table is not missing, meaning that event processing has started.
985 	 */
986 	if (sc->track_mapping_events) {
987 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
988 		    "completed. Check for missing devices in the mapping "
989 		    "table.\n");
990 		callout_reset(&sc->device_check_callout,
991 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
992 		    sc);
993 	}
994 }
995 
/*
 * CAM action dispatch routine for this SIM.  Called by CAM with the
 * driver mutex held.  Each CCB is either completed here via the shared
 * xpt_done() at the bottom, or handed to a helper that takes ownership
 * of the CCB and returns directly.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and addressing limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link rate, TQ). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/*
		 * Map the firmware link-rate code to a CAM bitrate in
		 * kbit/s (0x08..0x0b correspond to 1.5/3/6/12 Gbps).
		 */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown code: report no valid speed. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Helper owns the CCB from here on. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; report success so CAM doesn't stall. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper owns the CCB from here on. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Helper owns the CCB from here on. */
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1130 
1131 static void
1132 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1133     target_id_t target_id, lun_id_t lun_id)
1134 {
1135 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1136 	struct cam_path *path;
1137 
1138 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1139 	    ac_code, target_id, (uintmax_t)lun_id);
1140 
1141 	if (xpt_create_path(&path, NULL,
1142 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1143 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1144 		    "notification\n");
1145 		return;
1146 	}
1147 
1148 	xpt_async(ac_code, path, NULL);
1149 	xpt_free_path(path);
1150 }
1151 
/*
 * Locally complete every in-use command after a diagnostic reset.  The
 * hardware will never reply to these requests, so each one is finished
 * here with a NULL reply: completion callbacks are invoked, sleeping
 * waiters are woken, and polled commands are flagged complete.  Called
 * with the driver mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Let pollers observe the command as done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing remains outstanding at the firmware after a diag reset. */
	sc->io_cmds_active = 0;
}
1200 
/*
 * Prepare the SAS layer for a controller reinitialization (diag reset):
 * re-enter startup/discovery mode, announce a bus reset to CAM, locally
 * complete all outstanding commands, and invalidate every target's
 * device handles so they get rediscovered with their post-reset values.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1243 static void
1244 mprsas_tm_timeout(void *data)
1245 {
1246 	struct mpr_command *tm = data;
1247 	struct mpr_softc *sc = tm->cm_sc;
1248 
1249 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1250 
1251 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1252 	    "out\n", tm);
1253 
1254 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1255 	    ("command not inqueue\n"));
1256 
1257 	tm->cm_state = MPR_CM_STATE_BUSY;
1258 	mpr_reinit(sc);
1259 }
1260 
/*
 * Completion handler for a LOGICAL UNIT RESET task management request.
 * If no commands remain outstanding on the LUN, recovery for it is
 * finished and the next timed-out command on the target (if any) is
 * aborted; otherwise the reset is considered to have failed and recovery
 * escalates to a target reset.
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a device reset was delivered to this LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1358 
/*
 * Completion handler for a TARGET RESET task management request.  If no
 * commands remain outstanding on the target, recovery is complete; if
 * any remain, the reset effectively failed and the controller itself is
 * reinitialized as the last escalation step.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		/* Tell CAM a device reset was delivered to the target. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1435 
1436 #define MPR_RESET_TIMEOUT 30
1437 
1438 int
1439 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1440 {
1441 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1442 	struct mprsas_target *target;
1443 	int err;
1444 
1445 	target = tm->cm_targ;
1446 	if (target->handle == 0) {
1447 		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1448 		    "%d\n", __func__, target->tid);
1449 		return -1;
1450 	}
1451 
1452 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1453 	req->DevHandle = htole16(target->handle);
1454 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1455 	req->TaskType = type;
1456 
1457 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1458 		/* XXX Need to handle invalid LUNs */
1459 		MPR_SET_LUN(req->LUN, tm->cm_lun);
1460 		tm->cm_targ->logical_unit_resets++;
1461 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1462 		    "Sending logical unit reset to target %u lun %d\n",
1463 		    target->tid, tm->cm_lun);
1464 		tm->cm_complete = mprsas_logical_unit_reset_complete;
1465 		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1466 	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1467 		/*
1468 		 * Target reset method =
1469 		 *     SAS Hard Link Reset / SATA Link Reset
1470 		 */
1471 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1472 		tm->cm_targ->target_resets++;
1473 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1474 		    "Sending target reset to target %u\n", target->tid);
1475 		tm->cm_complete = mprsas_target_reset_complete;
1476 		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1477 	}
1478 	else {
1479 		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1480 		return -1;
1481 	}
1482 
1483 	if (target->encl_level_valid) {
1484 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1485 		    "At enclosure level %d, slot %d, connector name (%4s)\n",
1486 		    target->encl_level, target->encl_slot,
1487 		    target->connector_name);
1488 	}
1489 
1490 	tm->cm_data = NULL;
1491 	tm->cm_desc.HighPriority.RequestFlags =
1492 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1493 	tm->cm_complete_data = (void *)tm;
1494 
1495 	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1496 	    mprsas_tm_timeout, tm);
1497 
1498 	err = mpr_map_command(sc, tm);
1499 	if (err)
1500 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1501 		    "error %d sending reset type %u\n", err, type);
1502 
1503 	return err;
1504 }
1505 
1506 
/*
 * Completion handler for an ABORT TASK task management request.  On
 * success, recovery continues with the target's next timed-out command
 * (if any); if the aborted command is still at the head of the timed-out
 * list, the abort failed and recovery escalates to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1587 
1588 #define MPR_ABORT_TIMEOUT 5
1589 
1590 static int
1591 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1592     struct mpr_command *cm)
1593 {
1594 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1595 	struct mprsas_target *targ;
1596 	int err;
1597 
1598 	targ = cm->cm_targ;
1599 	if (targ->handle == 0) {
1600 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1601 		   "%s null devhandle for target_id %d\n",
1602 		    __func__, cm->cm_ccb->ccb_h.target_id);
1603 		return -1;
1604 	}
1605 
1606 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1607 	    "Aborting command %p\n", cm);
1608 
1609 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1610 	req->DevHandle = htole16(targ->handle);
1611 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1612 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1613 
1614 	/* XXX Need to handle invalid LUNs */
1615 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1616 
1617 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1618 
1619 	tm->cm_data = NULL;
1620 	tm->cm_desc.HighPriority.RequestFlags =
1621 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1622 	tm->cm_complete = mprsas_abort_complete;
1623 	tm->cm_complete_data = (void *)tm;
1624 	tm->cm_targ = cm->cm_targ;
1625 	tm->cm_lun = cm->cm_lun;
1626 
1627 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1628 	    mprsas_tm_timeout, tm);
1629 
1630 	targ->aborts++;
1631 
1632 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1633 
1634 	err = mpr_map_command(sc, tm);
1635 	if (err)
1636 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1637 		    "error %d sending abort for cm %p SMID %u\n",
1638 		    err, cm, req->TaskMID);
1639 	return err;
1640 }
1641 
/*
 * Callout handler for a timed-out SCSI I/O command.  If the command is
 * genuinely still queued at the firmware, it is marked timed out, moved
 * to the target's timed-out list, and recovery starts by sending an
 * ABORT TASK TM.  Only one TM is used per target; while one is active,
 * later timeouts simply queue behind it.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
		/* The command completed while we were checking: no-op. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1730 
1731 /**
1732  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1733  *			     to SCSI Unmap.
1734  * Return 0 - for success,
1735  *	  1 - to immediately return back the command with success status to CAM
1736  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1737  *			   to FW without any translation.
1738  */
1739 static int
1740 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1741     union ccb *ccb, struct mprsas_target *targ)
1742 {
1743 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1744 	struct ccb_scsiio *csio;
1745 	struct unmap_parm_list *plist;
1746 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1747 	struct nvme_command *c;
1748 	int i, res;
1749 	uint16_t ndesc, list_len, data_length;
1750 	struct mpr_prp_page *prp_page_info;
1751 	uint64_t nvme_dsm_ranges_dma_handle;
1752 
1753 	csio = &ccb->csio;
1754 #if __FreeBSD_version >= 1100103
1755 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1756 #else
1757 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1758 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1759 		    ccb->csio.cdb_io.cdb_ptr[8]);
1760 	} else {
1761 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1762 		    ccb->csio.cdb_io.cdb_bytes[8]);
1763 	}
1764 #endif
1765 	if (!list_len) {
1766 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1767 		return -EINVAL;
1768 	}
1769 
1770 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1771 	if (!plist) {
1772 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1773 		    "save UNMAP data\n");
1774 		return -ENOMEM;
1775 	}
1776 
1777 	/* Copy SCSI unmap data to a local buffer */
1778 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1779 
1780 	/* return back the unmap command to CAM with success status,
1781 	 * if number of descripts is zero.
1782 	 */
1783 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1784 	if (!ndesc) {
1785 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1786 		    "UNMAP cmd is Zero\n");
1787 		res = 1;
1788 		goto out;
1789 	}
1790 
1791 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1792 	if (data_length > targ->MDTS) {
1793 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1794 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1795 		res = -EINVAL;
1796 		goto out;
1797 	}
1798 
1799 	prp_page_info = mpr_alloc_prp_page(sc);
1800 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1801 	    "UNMAP command.\n", __func__));
1802 
1803 	/*
1804 	 * Insert the allocated PRP page into the command's PRP page list. This
1805 	 * will be freed when the command is freed.
1806 	 */
1807 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1808 
1809 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1810 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1811 
1812 	bzero(nvme_dsm_ranges, data_length);
1813 
1814 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1815 	 * for each descriptors contained in SCSI UNMAP data.
1816 	 */
1817 	for (i = 0; i < ndesc; i++) {
1818 		nvme_dsm_ranges[i].length =
1819 		    htole32(be32toh(plist->desc[i].nlb));
1820 		nvme_dsm_ranges[i].starting_lba =
1821 		    htole64(be64toh(plist->desc[i].slba));
1822 		nvme_dsm_ranges[i].attributes = 0;
1823 	}
1824 
1825 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1826 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1827 	bzero(req, sizeof(*req));
1828 	req->DevHandle = htole16(targ->handle);
1829 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1830 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1831 	req->ErrorResponseBaseAddress.High =
1832 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1833 	req->ErrorResponseBaseAddress.Low =
1834 	    htole32(cm->cm_sense_busaddr);
1835 	req->ErrorResponseAllocationLength =
1836 	    htole16(sizeof(struct nvme_completion));
1837 	req->EncapsulatedCommandLength =
1838 	    htole16(sizeof(struct nvme_command));
1839 	req->DataLength = htole32(data_length);
1840 
1841 	/* Build NVMe DSM command */
1842 	c = (struct nvme_command *) req->NVMe_Command;
1843 	c->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT);
1844 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1845 	c->cdw10 = htole32(ndesc - 1);
1846 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1847 
1848 	cm->cm_length = data_length;
1849 	cm->cm_data = NULL;
1850 
1851 	cm->cm_complete = mprsas_scsiio_complete;
1852 	cm->cm_complete_data = ccb;
1853 	cm->cm_targ = targ;
1854 	cm->cm_lun = csio->ccb_h.target_lun;
1855 	cm->cm_ccb = ccb;
1856 
1857 	cm->cm_desc.Default.RequestFlags =
1858 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1859 
1860 	csio->ccb_h.qos.sim_data = sbinuptime();
1861 #if __FreeBSD_version >= 1000029
1862 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1863 	    mprsas_scsiio_timeout, cm, 0);
1864 #else //__FreeBSD_version < 1000029
1865 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1866 	    mprsas_scsiio_timeout, cm);
1867 #endif //__FreeBSD_version >= 1000029
1868 
1869 	targ->issued++;
1870 	targ->outstanding++;
1871 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1872 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1873 
1874 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1875 	    __func__, cm, ccb, targ->outstanding);
1876 
1877 	mpr_build_nvme_prp(sc, cm, req,
1878 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1879 	mpr_map_command(sc, cm);
1880 
1881 out:
1882 	free(plist, M_MPR);
1883 	return 0;
1884 }
1885 
/*
 * CAM XPT_SCSI_IO action handler.  Validates the target's state, allocates
 * a firmware command, translates the CCB into an MPI2 SCSI IO request (or
 * diverts UNMAP to a native NVMe DataSetManagement command for NVMe
 * devices), applies tagging, TLR and EEDP settings, then arms the I/O
 * timeout and maps/queues the command to the hardware.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is gone (or never existed). */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members may not be addressed with direct SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Refuse new I/O once the driver has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command available, or a diag reset is in flight: freeze
	 * the SIM queue (once) and ask CAM to requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	/*
	 * rc == 1: trivial UNMAP, complete the CCB with success here.
	 * rc == 0: an NVMe Encapsulated Request was issued with this cm.
	 * Any other rc falls through and the UNMAP is sent as a regular
	 * SCSI IO using the already-allocated command.
	 */
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
	}

	/* Fill out the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* A 32-byte CDB carries 4 extra dwords beyond the 16-byte CDB area. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Apply the per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	/*
	 * NOTE(review): IoFlags was already set to cdb_len above; this
	 * second assignment is redundant but harmless.
	 */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* SLIST_FOREACH leaves lun == NULL if the LUN isn't found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of a 16-byte CDB,
				 * byte 2 of a 10/12-byte CDB. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;	/* dwords 24-31 of the frame */
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Record the submit time for I/O statistics and arm the timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	/* DMA-map the data buffer (if any) and post the request. */
	mpr_map_command(sc, cm);
	return;
}
2199 
2200 /**
2201  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2202  */
2203 static void
2204 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2205     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2206 {
2207 	u32 response_info;
2208 	u8 *response_bytes;
2209 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2210 	    MPI2_IOCSTATUS_MASK;
2211 	u8 scsi_state = mpi_reply->SCSIState;
2212 	u8 scsi_status = mpi_reply->SCSIStatus;
2213 	char *desc_ioc_state = NULL;
2214 	char *desc_scsi_status = NULL;
2215 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2216 
2217 	if (log_info == 0x31170000)
2218 		return;
2219 
2220 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2221 	     ioc_status);
2222 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2223 	    scsi_status);
2224 
2225 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2226 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2227 	if (targ->encl_level_valid) {
2228 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2229 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2230 		    targ->connector_name);
2231 	}
2232 
2233 	/*
2234 	 * We can add more detail about underflow data here
2235 	 * TO-DO
2236 	 */
2237 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2238 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2239 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2240 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2241 
2242 	if (sc->mpr_debug & MPR_XINFO &&
2243 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2244 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2245 		scsi_sense_print(csio);
2246 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2247 	}
2248 
2249 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2250 		response_info = le32toh(mpi_reply->ResponseInfo);
2251 		response_bytes = (u8 *)&response_info;
2252 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2253 		    response_bytes[0],
2254 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2255 		    response_bytes[0]));
2256 	}
2257 }
2258 
/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to
 * equivalent SCSI error status.  The NVMe status is split into its
 * Status Code Type (SCT) and Status Code (SC) fields, mapped to an
 * MPI2 SCSI status plus sense key/ASC/ASCQ, and fixed-format
 * autosense data is written into the CCB so CAM processes the
 * failure like a normal SCSI CHECK CONDITION.
 *
 * Returns appropriate scsi_status; NVMe codes with no explicit
 * mapping below default to CHECK CONDITION / ILLEGAL REQUEST.
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/* Default mapping for any SCT/SC pair not handled below. */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All four abort variants map to the same SCSI abort. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * The generated sense data is always a full fixed-format
	 * structure; compute how much of the caller's sense buffer
	 * is left unfilled.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Fixed-format sense, "current error" (third argument == 1). */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2435 
2436 /** mprsas_complete_nvme_unmap
2437  *
2438  * Complete native NVMe command issued using NVMe Encapsulated
2439  * Request Message.
2440  */
2441 static u8
2442 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2443 {
2444 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2445 	struct nvme_completion *nvme_completion = NULL;
2446 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2447 
2448 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2449 	if (le16toh(mpi_reply->ErrorResponseCount)){
2450 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2451 		scsi_status = mprsas_nvme_trans_status_code(
2452 		    nvme_completion->status, cm);
2453 	}
2454 	return scsi_status;
2455 }
2456 
2457 static void
2458 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2459 {
2460 	MPI2_SCSI_IO_REPLY *rep;
2461 	union ccb *ccb;
2462 	struct ccb_scsiio *csio;
2463 	struct mprsas_softc *sassc;
2464 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2465 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2466 	int dir = 0, i;
2467 	u16 alloc_len;
2468 	struct mprsas_target *target;
2469 	target_id_t target_id;
2470 
2471 	MPR_FUNCTRACE(sc);
2472 	mpr_dprint(sc, MPR_TRACE,
2473 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2474 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2475 	    cm->cm_targ->outstanding);
2476 
2477 	callout_stop(&cm->cm_callout);
2478 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2479 
2480 	sassc = sc->sassc;
2481 	ccb = cm->cm_complete_data;
2482 	csio = &ccb->csio;
2483 	target_id = csio->ccb_h.target_id;
2484 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2485 	/*
2486 	 * XXX KDM if the chain allocation fails, does it matter if we do
2487 	 * the sync and unload here?  It is simpler to do it in every case,
2488 	 * assuming it doesn't cause problems.
2489 	 */
2490 	if (cm->cm_data != NULL) {
2491 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2492 			dir = BUS_DMASYNC_POSTREAD;
2493 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2494 			dir = BUS_DMASYNC_POSTWRITE;
2495 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2496 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2497 	}
2498 
2499 	cm->cm_targ->completed++;
2500 	cm->cm_targ->outstanding--;
2501 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2502 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2503 
2504 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2505 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2506 		cm->cm_state = MPR_CM_STATE_BUSY;
2507 		if (cm->cm_reply != NULL)
2508 			mprsas_log_command(cm, MPR_RECOVERY,
2509 			    "completed timedout cm %p ccb %p during recovery "
2510 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2511 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2512 			    rep->SCSIState, le32toh(rep->TransferCount));
2513 		else
2514 			mprsas_log_command(cm, MPR_RECOVERY,
2515 			    "completed timedout cm %p ccb %p during recovery\n",
2516 			    cm, cm->cm_ccb);
2517 	} else if (cm->cm_targ->tm != NULL) {
2518 		if (cm->cm_reply != NULL)
2519 			mprsas_log_command(cm, MPR_RECOVERY,
2520 			    "completed cm %p ccb %p during recovery "
2521 			    "ioc %x scsi %x state %x xfer %u\n",
2522 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2523 			    rep->SCSIStatus, rep->SCSIState,
2524 			    le32toh(rep->TransferCount));
2525 		else
2526 			mprsas_log_command(cm, MPR_RECOVERY,
2527 			    "completed cm %p ccb %p during recovery\n",
2528 			    cm, cm->cm_ccb);
2529 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2530 		mprsas_log_command(cm, MPR_RECOVERY,
2531 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2532 	}
2533 
2534 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2535 		/*
2536 		 * We ran into an error after we tried to map the command,
2537 		 * so we're getting a callback without queueing the command
2538 		 * to the hardware.  So we set the status here, and it will
2539 		 * be retained below.  We'll go through the "fast path",
2540 		 * because there can be no reply when we haven't actually
2541 		 * gone out to the hardware.
2542 		 */
2543 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2544 
2545 		/*
2546 		 * Currently the only error included in the mask is
2547 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2548 		 * chain frames.  We need to freeze the queue until we get
2549 		 * a command that completed without this error, which will
2550 		 * hopefully have some chain frames attached that we can
2551 		 * use.  If we wanted to get smarter about it, we would
2552 		 * only unfreeze the queue in this condition when we're
2553 		 * sure that we're getting some chain frames back.  That's
2554 		 * probably unnecessary.
2555 		 */
2556 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2557 			xpt_freeze_simq(sassc->sim, 1);
2558 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2559 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2560 			    "freezing SIM queue\n");
2561 		}
2562 	}
2563 
2564 	/*
2565 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2566 	 * flag, and use it in a few places in the rest of this function for
2567 	 * convenience. Use the macro if available.
2568 	 */
2569 #if __FreeBSD_version >= 1100103
2570 	scsi_cdb = scsiio_cdb_ptr(csio);
2571 #else
2572 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2573 		scsi_cdb = csio->cdb_io.cdb_ptr;
2574 	else
2575 		scsi_cdb = csio->cdb_io.cdb_bytes;
2576 #endif
2577 
2578 	/*
2579 	 * If this is a Start Stop Unit command and it was issued by the driver
2580 	 * during shutdown, decrement the refcount to account for all of the
2581 	 * commands that were sent.  All SSU commands should be completed before
2582 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2583 	 * is TRUE.
2584 	 */
2585 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2586 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2587 		sc->SSU_refcount--;
2588 	}
2589 
2590 	/* Take the fast path to completion */
2591 	if (cm->cm_reply == NULL) {
2592 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2593 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2594 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2595 			else {
2596 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2597 				csio->scsi_status = SCSI_STATUS_OK;
2598 			}
2599 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2600 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2601 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2602 				mpr_dprint(sc, MPR_XINFO,
2603 				    "Unfreezing SIM queue\n");
2604 			}
2605 		}
2606 
2607 		/*
2608 		 * There are two scenarios where the status won't be
2609 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2610 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2611 		 */
2612 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2613 			/*
2614 			 * Freeze the dev queue so that commands are
2615 			 * executed in the correct order after error
2616 			 * recovery.
2617 			 */
2618 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2619 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2620 		}
2621 		mpr_free_command(sc, cm);
2622 		xpt_done(ccb);
2623 		return;
2624 	}
2625 
2626 	target = &sassc->targets[target_id];
2627 	if (scsi_cdb[0] == UNMAP &&
2628 	    target->is_nvme &&
2629 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2630 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2631 		csio->scsi_status = rep->SCSIStatus;
2632 	}
2633 
2634 	mprsas_log_command(cm, MPR_XINFO,
2635 	    "ioc %x scsi %x state %x xfer %u\n",
2636 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2637 	    le32toh(rep->TransferCount));
2638 
2639 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2640 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2641 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2642 		/* FALLTHROUGH */
2643 	case MPI2_IOCSTATUS_SUCCESS:
2644 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2645 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2646 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2647 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2648 
2649 		/* Completion failed at the transport level. */
2650 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2651 		    MPI2_SCSI_STATE_TERMINATED)) {
2652 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2653 			break;
2654 		}
2655 
2656 		/* In a modern packetized environment, an autosense failure
2657 		 * implies that there's not much else that can be done to
2658 		 * recover the command.
2659 		 */
2660 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2661 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2662 			break;
2663 		}
2664 
2665 		/*
2666 		 * CAM doesn't care about SAS Response Info data, but if this is
2667 		 * the state check if TLR should be done.  If not, clear the
2668 		 * TLR_bits for the target.
2669 		 */
2670 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2671 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2672 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2673 			sc->mapping_table[target_id].TLR_bits =
2674 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2675 		}
2676 
2677 		/*
2678 		 * Intentionally override the normal SCSI status reporting
2679 		 * for these two cases.  These are likely to happen in a
2680 		 * multi-initiator environment, and we want to make sure that
2681 		 * CAM retries these commands rather than fail them.
2682 		 */
2683 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2684 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2685 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2686 			break;
2687 		}
2688 
2689 		/* Handle normal status and sense */
2690 		csio->scsi_status = rep->SCSIStatus;
2691 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2692 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2693 		else
2694 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2695 
2696 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2697 			int sense_len, returned_sense_len;
2698 
2699 			returned_sense_len = min(le32toh(rep->SenseCount),
2700 			    sizeof(struct scsi_sense_data));
2701 			if (returned_sense_len < csio->sense_len)
2702 				csio->sense_resid = csio->sense_len -
2703 				    returned_sense_len;
2704 			else
2705 				csio->sense_resid = 0;
2706 
2707 			sense_len = min(returned_sense_len,
2708 			    csio->sense_len - csio->sense_resid);
2709 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2710 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2711 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2712 		}
2713 
2714 		/*
2715 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2716 		 * and it's page code 0 (Supported Page List), and there is
2717 		 * inquiry data, and this is for a sequential access device, and
2718 		 * the device is an SSP target, and TLR is supported by the
2719 		 * controller, turn the TLR_bits value ON if page 0x90 is
2720 		 * supported.
2721 		 */
2722 		if ((scsi_cdb[0] == INQUIRY) &&
2723 		    (scsi_cdb[1] & SI_EVPD) &&
2724 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2725 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2726 		    (csio->data_ptr != NULL) &&
2727 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2728 		    (sc->control_TLR) &&
2729 		    (sc->mapping_table[target_id].device_info &
2730 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2731 			vpd_list = (struct scsi_vpd_supported_page_list *)
2732 			    csio->data_ptr;
2733 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2734 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2735 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2736 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2737 			alloc_len -= csio->resid;
2738 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2739 				if (vpd_list->list[i] == 0x90) {
2740 					*TLR_bits = TLR_on;
2741 					break;
2742 				}
2743 			}
2744 		}
2745 
2746 		/*
2747 		 * If this is a SATA direct-access end device, mark it so that
2748 		 * a SCSI StartStopUnit command will be sent to it when the
2749 		 * driver is being shutdown.
2750 		 */
2751 		if ((scsi_cdb[0] == INQUIRY) &&
2752 		    (csio->data_ptr != NULL) &&
2753 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2754 		    (sc->mapping_table[target_id].device_info &
2755 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2756 		    ((sc->mapping_table[target_id].device_info &
2757 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2758 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2759 			target = &sassc->targets[target_id];
2760 			target->supports_SSU = TRUE;
2761 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2762 			    target_id);
2763 		}
2764 		break;
2765 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2766 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2767 		/*
2768 		 * If devinfo is 0 this will be a volume.  In that case don't
2769 		 * tell CAM that the volume is not there.  We want volumes to
2770 		 * be enumerated until they are deleted/removed, not just
2771 		 * failed.
2772 		 */
2773 		if (cm->cm_targ->devinfo == 0)
2774 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2775 		else
2776 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2777 		break;
2778 	case MPI2_IOCSTATUS_INVALID_SGL:
2779 		mpr_print_scsiio_cmd(sc, cm);
2780 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2781 		break;
2782 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2783 		/*
2784 		 * This is one of the responses that comes back when an I/O
2785 		 * has been aborted.  If it is because of a timeout that we
2786 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2787 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2788 		 * command is the same (it gets retried, subject to the
2789 		 * retry counter), the only difference is what gets printed
2790 		 * on the console.
2791 		 */
2792 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2793 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2794 		else
2795 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2796 		break;
2797 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2798 		/* resid is ignored for this condition */
2799 		csio->resid = 0;
2800 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2801 		break;
2802 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2803 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2804 		/*
2805 		 * These can sometimes be transient transport-related
2806 		 * errors, and sometimes persistent drive-related errors.
2807 		 * We used to retry these without decrementing the retry
2808 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2809 		 * we hit a persistent drive problem that returns one of
2810 		 * these error codes, we would retry indefinitely.  So,
2811 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2812 		 * count and avoid infinite retries.  We're taking the
2813 		 * potential risk of flagging false failures in the event
2814 		 * of a topology-related error (e.g. a SAS expander problem
2815 		 * causes a command addressed to a drive to fail), but
2816 		 * avoiding getting into an infinite retry loop.
2817 		 */
2818 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2819 		mpr_dprint(sc, MPR_INFO,
2820 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2821 		    mpr_describe_table(mpr_iocstatus_string,
2822 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2823 		    target_id, cm->cm_desc.Default.SMID,
2824 		    le32toh(rep->IOCLogInfo));
2825 		mpr_dprint(sc, MPR_XINFO,
2826 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2827 		    rep->SCSIStatus, rep->SCSIState,
2828 		    le32toh(rep->TransferCount));
2829 		break;
2830 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2831 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2832 	case MPI2_IOCSTATUS_INVALID_VPID:
2833 	case MPI2_IOCSTATUS_INVALID_FIELD:
2834 	case MPI2_IOCSTATUS_INVALID_STATE:
2835 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2836 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2837 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2838 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2839 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2840 	default:
2841 		mprsas_log_command(cm, MPR_XINFO,
2842 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2843 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2844 		    rep->SCSIStatus, rep->SCSIState,
2845 		    le32toh(rep->TransferCount));
2846 		csio->resid = cm->cm_length;
2847 
2848 		if (scsi_cdb[0] == UNMAP &&
2849 		    target->is_nvme &&
2850 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2851 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2852 		else
2853 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2854 
2855 		break;
2856 	}
2857 
2858 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2859 
2860 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2861 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2862 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2863 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2864 		    "queue\n");
2865 	}
2866 
2867 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2868 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2869 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2870 	}
2871 
2872 	mpr_free_command(sc, cm);
2873 	xpt_done(ccb);
2874 }
2875 
2876 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands issued by
 * mprsas_send_smpcmd().  Translates the MPI reply (or its absence) into a
 * CAM status on the originating CCB, tears down the DMA mapping for the
 * request/response buffers, frees the command, and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* No reply frame at all (e.g. command aborted by a reset). */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request; used only for
	 * the success debug message below. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2939 
/*
 * Build and dispatch an MPI SMP passthrough request for the given XPT_SMP_IO
 * CCB to the device at 'sasaddr'.  The request and response buffers are
 * described as a two-element uio (one iovec each direction) and mapped with a
 * single mpr_map_command() call; completion is handled asynchronously in
 * mprsas_smpio_complete().  On any failure the CCB is completed here with
 * CAM_REQ_INVALID or CAM_RESRC_UNAVAIL.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * First, locate the virtual addresses of the request and response
	 * buffers.  The CCB encoding of those differs across FreeBSD
	 * versions, hence the two preprocessor variants below.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3170 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target to
 * address (either the device itself, or the parent expander it is attached
 * to) and hand the CCB to mprsas_send_smpcmd().  If no SAS address can be
 * found the CCB is completed here with an error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the address of
	 * its parent expander is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe: look the parent up in the target table. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe: parent info is cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3298 #endif //__FreeBSD_version >= 900026
3299 
3300 static void
3301 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3302 {
3303 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3304 	struct mpr_softc *sc;
3305 	struct mpr_command *tm;
3306 	struct mprsas_target *targ;
3307 
3308 	MPR_FUNCTRACE(sassc->sc);
3309 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3310 
3311 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3312 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3313 	sc = sassc->sc;
3314 	tm = mpr_alloc_command(sc);
3315 	if (tm == NULL) {
3316 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3317 		    "mprsas_action_resetdev\n");
3318 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3319 		xpt_done(ccb);
3320 		return;
3321 	}
3322 
3323 	targ = &sassc->targets[ccb->ccb_h.target_id];
3324 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3325 	req->DevHandle = htole16(targ->handle);
3326 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3327 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3328 
3329 	/* SAS Hard Link Reset / SATA Link Reset */
3330 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3331 
3332 	tm->cm_data = NULL;
3333 	tm->cm_desc.HighPriority.RequestFlags =
3334 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3335 	tm->cm_complete = mprsas_resetdev_complete;
3336 	tm->cm_complete_data = ccb;
3337 
3338 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3339 	    __func__, targ->tid);
3340 	tm->cm_targ = targ;
3341 	targ->flags |= MPRSAS_TARGET_INRESET;
3342 
3343 	mpr_map_command(sc, tm);
3344 }
3345 
3346 static void
3347 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3348 {
3349 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3350 	union ccb *ccb;
3351 
3352 	MPR_FUNCTRACE(sc);
3353 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3354 
3355 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3356 	ccb = tm->cm_complete_data;
3357 
3358 	/*
3359 	 * Currently there should be no way we can hit this case.  It only
3360 	 * happens when we have a failure to allocate chain frames, and
3361 	 * task management commands don't have S/G lists.
3362 	 */
3363 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3364 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3365 
3366 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3367 
3368 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3369 		    "handle %#04x! This should not happen!\n", __func__,
3370 		    tm->cm_flags, req->DevHandle);
3371 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3372 		goto bailout;
3373 	}
3374 
3375 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3376 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3377 
3378 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3379 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3380 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3381 		    CAM_LUN_WILDCARD);
3382 	}
3383 	else
3384 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3385 
3386 bailout:
3387 
3388 	mprsas_free_tm(sc, tm);
3389 	xpt_done(ccb);
3390 }
3391 
3392 static void
3393 mprsas_poll(struct cam_sim *sim)
3394 {
3395 	struct mprsas_softc *sassc;
3396 
3397 	sassc = cam_sim_softc(sim);
3398 
3399 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3400 		/* frequent debug messages during a panic just slow
3401 		 * everything down too much.
3402 		 */
3403 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3404 		    __func__);
3405 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3406 	}
3407 
3408 	mpr_intr_locked(sassc->sc);
3409 }
3410 
/*
 * CAM async event callback.  Two events are of interest:
 *
 *  AC_ADVINFO_CHANGED (newer FreeBSD): a device's advanced info changed.
 *	For read-capacity (RCAPLONG) changes, re-read the long read
 *	capacity data via XPT_DEV_ADVINFO and record the LUN's EEDP
 *	(protection information) state and block size.
 *
 *  AC_FOUND_DEVICE (older FreeBSD): a new device arrived; probe it for
 *	EEDP support via mprsas_check_eedp().
 *
 * Depending on the FreeBSD version, the driver may be registered for all
 * async events and must filter out those for other SIMs here.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the per-LUN record, creating it on first sight. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data for this LUN
		 * through the transport layer (XPT_DEV_ADVINFO).
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record the LUN's EEDP state: only protection types 1 and
		 * 3 are supported; type 2 (and anything unknown) is treated
		 * as unformatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3551 
3552 #if (__FreeBSD_version < 901503) || \
3553     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3554 static void
3555 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3556     struct ccb_getdev *cgd)
3557 {
3558 	struct mprsas_softc *sassc = sc->sassc;
3559 	struct ccb_scsiio *csio;
3560 	struct scsi_read_capacity_16 *scsi_cmd;
3561 	struct scsi_read_capacity_eedp *rcap_buf;
3562 	path_id_t pathid;
3563 	target_id_t targetid;
3564 	lun_id_t lunid;
3565 	union ccb *ccb;
3566 	struct cam_path *local_path;
3567 	struct mprsas_target *target;
3568 	struct mprsas_lun *lun;
3569 	uint8_t	found_lun;
3570 	char path_str[64];
3571 
3572 	pathid = cam_sim_path(sassc->sim);
3573 	targetid = xpt_path_target_id(path);
3574 	lunid = xpt_path_lun_id(path);
3575 
3576 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3577 	    "mprsas_check_eedp\n", targetid));
3578 	target = &sassc->targets[targetid];
3579 	if (target->handle == 0x0)
3580 		return;
3581 
3582 	/*
3583 	 * Determine if the device is EEDP capable.
3584 	 *
3585 	 * If this flag is set in the inquiry data, the device supports
3586 	 * protection information, and must support the 16 byte read capacity
3587 	 * command, otherwise continue without sending read cap 16.
3588 	 */
3589 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3590 		return;
3591 
3592 	/*
3593 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3594 	 * the LUN is formatted for EEDP support.
3595 	 */
3596 	ccb = xpt_alloc_ccb_nowait();
3597 	if (ccb == NULL) {
3598 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3599 		    "support.\n");
3600 		return;
3601 	}
3602 
3603 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3604 	    CAM_REQ_CMP) {
3605 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3606 		    "support.\n");
3607 		xpt_free_ccb(ccb);
3608 		return;
3609 	}
3610 
3611 	/*
3612 	 * If LUN is already in list, don't create a new one.
3613 	 */
3614 	found_lun = FALSE;
3615 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3616 		if (lun->lun_id == lunid) {
3617 			found_lun = TRUE;
3618 			break;
3619 		}
3620 	}
3621 	if (!found_lun) {
3622 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3623 		    M_NOWAIT | M_ZERO);
3624 		if (lun == NULL) {
3625 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3626 			    "EEDP support.\n");
3627 			xpt_free_path(local_path);
3628 			xpt_free_ccb(ccb);
3629 			return;
3630 		}
3631 		lun->lun_id = lunid;
3632 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3633 	}
3634 
3635 	xpt_path_string(local_path, path_str, sizeof(path_str));
3636 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3637 	    path_str, target->handle);
3638 
3639 	/*
3640 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3641 	 * mprsas_read_cap_done function will load the read cap info into the
3642 	 * LUN struct.
3643 	 */
3644 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3645 	    M_NOWAIT | M_ZERO);
3646 	if (rcap_buf == NULL) {
3647 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3648 		    "buffer for EEDP support.\n");
3649 		xpt_free_path(ccb->ccb_h.path);
3650 		xpt_free_ccb(ccb);
3651 		return;
3652 	}
3653 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3654 	csio = &ccb->csio;
3655 	csio->ccb_h.func_code = XPT_SCSI_IO;
3656 	csio->ccb_h.flags = CAM_DIR_IN;
3657 	csio->ccb_h.retry_count = 4;
3658 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3659 	csio->ccb_h.timeout = 60000;
3660 	csio->data_ptr = (uint8_t *)rcap_buf;
3661 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3662 	csio->sense_len = MPR_SENSE_LEN;
3663 	csio->cdb_len = sizeof(*scsi_cmd);
3664 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3665 
3666 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3667 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3668 	scsi_cmd->opcode = 0x9E;
3669 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3670 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3671 
3672 	ccb->ccb_h.ppriv_ptr1 = sassc;
3673 	xpt_action(ccb);
3674 }
3675 
/*
 * Completion handler for the internally-generated READ CAPACITY(16) command
 * issued for EEDP discovery.  Looks up the LUN on the target addressed by the
 * CCB and records whether the LUN is formatted for EEDP (protection
 * information) and, if so, its logical block size.  Frees the data buffer,
 * path, and CCB allocated by the submitter.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself here because this SCSI
	 * command was generated internally by the driver, so its completion
	 * does not go back through cam_periph (which would normally do the
	 * release).  This is currently the only place the driver issues a
	 * SCSI command internally; any future internal commands will need to
	 * release the devq the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.  The submitter stashed the softc pointer in ppriv_ptr1.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is PROT_EN (EEDP formatted). */
		if (rcap_buf->protect & 0x01) {
			/*
			 * NOTE(review): target_lun is a lun_id_t; if that type
			 * is 64-bit here, the %d specifier below is too
			 * narrow — confirm and cast if needed.
			 */
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path: free everything the submitter
	// allocated for this internal command.
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3744 #endif /* (__FreeBSD_version < 901503) || \
3745           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3746 
3747 void
3748 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3749     struct mprsas_target *target, lun_id_t lun_id)
3750 {
3751 	union ccb *ccb;
3752 	path_id_t path_id;
3753 
3754 	/*
3755 	 * Set the INRESET flag for this target so that no I/O will be sent to
3756 	 * the target until the reset has completed.  If an I/O request does
3757 	 * happen, the devq will be frozen.  The CCB holds the path which is
3758 	 * used to release the devq.  The devq is released and the CCB is freed
3759 	 * when the TM completes.
3760 	 */
3761 	ccb = xpt_alloc_ccb_nowait();
3762 	if (ccb) {
3763 		path_id = cam_sim_path(sc->sassc->sim);
3764 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3765 		    target->tid, lun_id) != CAM_REQ_CMP) {
3766 			xpt_free_ccb(ccb);
3767 		} else {
3768 			tm->cm_ccb = ccb;
3769 			tm->cm_targ = target;
3770 			target->flags |= MPRSAS_TARGET_INRESET;
3771 		}
3772 	}
3773 }
3774 
3775 int
3776 mprsas_startup(struct mpr_softc *sc)
3777 {
3778 	/*
3779 	 * Send the port enable message and set the wait_for_port_enable flag.
3780 	 * This flag helps to keep the simq frozen until all discovery events
3781 	 * are processed.
3782 	 */
3783 	sc->wait_for_port_enable = 1;
3784 	mprsas_send_portenable(sc);
3785 	return (0);
3786 }
3787 
3788 static int
3789 mprsas_send_portenable(struct mpr_softc *sc)
3790 {
3791 	MPI2_PORT_ENABLE_REQUEST *request;
3792 	struct mpr_command *cm;
3793 
3794 	MPR_FUNCTRACE(sc);
3795 
3796 	if ((cm = mpr_alloc_command(sc)) == NULL)
3797 		return (EBUSY);
3798 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3799 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3800 	request->MsgFlags = 0;
3801 	request->VP_ID = 0;
3802 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3803 	cm->cm_complete = mprsas_portenable_complete;
3804 	cm->cm_data = NULL;
3805 	cm->cm_sge = NULL;
3806 
3807 	mpr_map_command(sc, cm);
3808 	mpr_dprint(sc, MPR_XINFO,
3809 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3810 	    cm, cm->cm_req, cm->cm_complete);
3811 	return (0);
3812 }
3813 
3814 static void
3815 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3816 {
3817 	MPI2_PORT_ENABLE_REPLY *reply;
3818 	struct mprsas_softc *sassc;
3819 
3820 	MPR_FUNCTRACE(sc);
3821 	sassc = sc->sassc;
3822 
3823 	/*
3824 	 * Currently there should be no way we can hit this case.  It only
3825 	 * happens when we have a failure to allocate chain frames, and
3826 	 * port enable commands don't have S/G lists.
3827 	 */
3828 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3829 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3830 		    "This should not happen!\n", __func__, cm->cm_flags);
3831 	}
3832 
3833 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3834 	if (reply == NULL)
3835 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3836 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3837 	    MPI2_IOCSTATUS_SUCCESS)
3838 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3839 
3840 	mpr_free_command(sc, cm);
3841 	/*
3842 	 * Done waiting for port enable to complete.  Decrement the refcount.
3843 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3844 	 * take place.
3845 	 */
3846 	sc->wait_for_port_enable = 0;
3847 	sc->port_enable_complete = 1;
3848 	wakeup(&sc->port_enable_complete);
3849 	mprsas_startup_decrement(sassc);
3850 }
3851 
3852 int
3853 mprsas_check_id(struct mprsas_softc *sassc, int id)
3854 {
3855 	struct mpr_softc *sc = sassc->sc;
3856 	char *ids;
3857 	char *name;
3858 
3859 	ids = &sc->exclude_ids[0];
3860 	while((name = strsep(&ids, ",")) != NULL) {
3861 		if (name[0] == '\0')
3862 			continue;
3863 		if (strtol(name, NULL, 0) == (long)id)
3864 			return (1);
3865 	}
3866 
3867 	return (0);
3868 }
3869 
3870 void
3871 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3872 {
3873 	struct mprsas_softc *sassc;
3874 	struct mprsas_lun *lun, *lun_tmp;
3875 	struct mprsas_target *targ;
3876 	int i;
3877 
3878 	sassc = sc->sassc;
3879 	/*
3880 	 * The number of targets is based on IOC Facts, so free all of
3881 	 * the allocated LUNs for each target and then the target buffer
3882 	 * itself.
3883 	 */
3884 	for (i=0; i< maxtargets; i++) {
3885 		targ = &sassc->targets[i];
3886 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3887 			free(lun, M_MPR);
3888 		}
3889 	}
3890 	free(sassc->targets, M_MPR);
3891 
3892 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3893 	    M_MPR, M_WAITOK|M_ZERO);
3894 	if (!sassc->targets) {
3895 		panic("%s failed to alloc targets with error %d\n",
3896 		    __func__, ENOMEM);
3897 	}
3898 }
3899