xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision a07d59d1daafdaae0d1b1ad1f977f9eda92dc83b)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
#define MPRSAS_DISCOVERY_TIMEOUT	20
#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI opcode (0x00-0xFF).  A non-zero entry gives the
 * EEDP flags to apply when the target has protection enabled; reads get
 * CHECK_REMOVE, writes/verifies get INSERT.  Rows cover 16 opcodes each.
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
117 
MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");

/*
 * Forward declarations for the CAM SIM entry points and command
 * completion handlers implemented later in this file.
 */
static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
static void mprsas_poll(struct cam_sim *sim);
static void mprsas_scsiio_timeout(void *data);
static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm);
static void mprsas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
/* Manual EEDP probing is only needed where CAM lacks AC_ADVINFO_CHANGED. */
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mprsas_read_cap_done(struct cam_periph *periph,
    union ccb *done_ccb);
#endif
static int mprsas_send_portenable(struct mpr_softc *sc);
static void mprsas_portenable_complete(struct mpr_softc *sc,
    struct mpr_command *cm);

/* SMP passthrough requires the CAM SMP support added in 9.0. */
#if __FreeBSD_version >= 900026
static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr);
static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
/*
 * Bump the discovery-in-progress refcount.  On the 0 -> 1 transition,
 * freeze the SIM queue (and hold boot on CAM versions that support it)
 * so no SCSI I/O is started before discovery has found device handles.
 * Only effective while MPRSAS_IN_STARTUP is set.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop the discovery-in-progress refcount.  On the transition to zero,
 * clear MPRSAS_IN_STARTUP, release the SIM queue, and either release
 * the boot hold (newer CAM) or trigger a full-bus rescan (older CAM)
 * so the latest topology is picked up.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a high-priority command to use as a task management request.
 * Returns NULL if none are available.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
/*
 * Release a task management command.  Clears the target's INRESET flag,
 * releases the per-device queue frozen for the TM, and frees the path
 * and CCB used to freeze it before returning the command to the
 * high-priority free list.  NULL is tolerated and ignored.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/*
	 * 0xFFFFFFFF stored in an int is the "no target" sentinel; it is
	 * only used in the %d printf below, where it shows as -1.
	 */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
274 
275 void
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
277 {
278 	struct mprsas_softc *sassc = sc->sassc;
279 	path_id_t pathid;
280 	target_id_t targetid;
281 	union ccb *ccb;
282 
283 	MPR_FUNCTRACE(sc);
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = xpt_alloc_ccb_nowait();
294 	if (ccb == NULL) {
295 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 		return;
297 	}
298 
299 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
302 		xpt_free_ccb(ccb);
303 		return;
304 	}
305 
306 	if (targetid == CAM_TARGET_WILDCARD)
307 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
308 	else
309 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
310 
311 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
312 	xpt_rescan(ccb);
313 }
314 
/*
 * printf-style logging helper for a command.  Prefixes the message with
 * the command's CAM path (and CDB/length for SCSI I/O) when a CCB is
 * attached, or a synthesized "noperiph" path otherwise, plus the SMID.
 * Does nothing unless 'level' is enabled in the softc's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-buffer sbuf; output beyond sizeof(str) is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: fall back to sim/bus/target/lun identification. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Unlike bare-drive removal, volume
 * removal does not follow up with a SAS_OP_REMOVE_DEVICE; on success
 * the target slot is simply cleared for reuse.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		/* NOTE(review): redundant — targ was already set above. */
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
/*
 * Begin removal of a bare drive identified by firmware device handle.
 * The firmware expects a target reset (which aborts outstanding I/O)
 * followed by an op-remove; the reset is issued here and the op-remove
 * is sent from the completion handler, mprsas_remove_device().
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* Completion chains to the op-remove; handle is stashed for it. */
	cm->cm_complete = mprsas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
545 
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * Reuses the same command to send the SAS_OP_REMOVE_DEVICE IO-unit
 * control request, then completes any commands still queued on the
 * target with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* The reply frame is freed now; clear cm_reply so it isn't re-freed. */
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * NOTE: 'tm' is reused as the loop cursor here; the TM itself has
	 * already been handed to mpr_map_command() above.  Any commands the
	 * reset aborted but we never saw complete are finished off here.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mprsas_remove_device().  On success, clears the target slot (keeping
 * devname/sasaddr so the same target id can be reused if the device
 * returns) and frees any LUN records, then frees the TM command.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free all LUN records attached to the cleared target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
/*
 * Build the firmware event mask and register mprsas_evt_handler() for
 * it.  PCIe-related events are only requested on MPI 2.6+ firmware with
 * a Gen3.5 IOC.  Always returns 0.
 */
static int
mprsas_register_events(struct mpr_softc *sc)
{
	uint8_t events[16];

	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	setbit(events, MPI2_EVENT_IR_VOLUME);
	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}

	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
	    &sc->sassc->mprsas_eh);

	return (0);
}
725 
/*
 * Attach the SAS/CAM subsystem: allocate the sassc and target array,
 * create the SIM and devq, register the bus with CAM, freeze the SIM
 * queue until discovery completes, hook up async notifications for
 * EEDP detection, and register for firmware events.  Returns 0 on
 * success or an errno; on failure mpr_detach_sas() unwinds.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc does not fail; check is vestigial. */
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): same — M_WAITOK cannot return NULL. */
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Leave room for the high-priority commands and one spare. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
900 
/*
 * Tear down the SAS/CAM subsystem: deregister events and async
 * callbacks, drain the event taskqueue, unwind any startup freeze,
 * deregister the bus, free the SIM/devq, and release all target and
 * LUN memory.  Safe to call on a partially attached sassc (used as the
 * error-unwind path of mpr_attach_sas()).  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* An empty event mask deregisters the callback. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
963 
/*
 * Called when device discovery completes: cancel any pending discovery
 * timeout and, on the first initialization, schedule the missing-device
 * check against the mapping table.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	/* Discovery finished in time; cancel the watchdog callout. */
	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}
994 
/*
 * CAM sim action entry point.  Dispatches on the CCB function code.
 * Must be called with the controller mutex held.  CCBs handled
 * synchronously are completed with xpt_done() at the bottom; the SCSI I/O,
 * SMP I/O, and device-reset paths return early and complete asynchronously.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller/bus capabilities back to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (SAS link rate). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the MPI link rate code into a bit rate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completed asynchronously; do not fall through to xpt_done. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not implemented; report success to keep CAM going. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by the I/O completion path. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1129 
1130 static void
1131 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1132     target_id_t target_id, lun_id_t lun_id)
1133 {
1134 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1135 	struct cam_path *path;
1136 
1137 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1138 	    ac_code, target_id, (uintmax_t)lun_id);
1139 
1140 	if (xpt_create_path(&path, NULL,
1141 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1142 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1143 		    "notification\n");
1144 		return;
1145 	}
1146 
1147 	xpt_async(ac_code, path, NULL);
1148 	xpt_free_path(path);
1149 }
1150 
/*
 * Force-complete every in-flight command after a diag reset.  Each command
 * is completed with a NULL reply so completion handlers can detect the
 * reset case.  Must be called with the controller mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on COMPLETE; set it so they stop. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Wake any thread sleeping on this command. */
		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1198 
/*
 * Recover SAS-layer state after a controller reinit (diag reset): freeze
 * CAM, announce a bus reset, force-complete outstanding commands, and
 * invalidate all target handles so they are rediscovered.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
/*
 * Callout fired when a task-management command itself times out.  A TM
 * that never completes means the firmware is wedged, so escalate straight
 * to a controller reinit.  Runs with the controller mutex held.
 */
static void
mprsas_tm_timeout(void *data)
{
	struct mpr_command *tm = data;
	struct mpr_softc *sc = tm->cm_sc;

	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
	    "out\n", tm);
	mpr_reinit(sc);
}
1253 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task-management command.
 * If no commands remain for the LUN, recovery is done (continue aborting
 * for other LUNs if any have timed-out commands); otherwise escalate to a
 * target reset.  A NULL reply outside of a diag reset also escalates, to a
 * full controller reinit.
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed, so disarm the TM timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			/* Reuse the same TM command for the next abort. */
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1351 
/*
 * Completion handler for a TARGET_RESET task-management command.  If the
 * target has no outstanding commands, recovery for the target is finished;
 * otherwise the reset failed and we escalate to a controller reinit.  A
 * NULL reply outside of a diag reset also triggers a reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so disarm the TM timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1428 
1429 #define MPR_RESET_TIMEOUT 30
1430 
/*
 * Issue an MPI SCSI task-management reset of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated TM command
 * 'tm'.  Arms a MPR_RESET_TIMEOUT-second callout that escalates to a
 * controller reinit if the TM never completes.
 *
 * Returns 0 on success, -1 for a missing device handle or unknown reset
 * type, or the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Include enclosure location info in the log when available. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	/* TM requests carry no data and use the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1498 
1499 
/*
 * Completion handler for an ABORT_TASK task-management command.  If the
 * target has no more timed-out commands, recovery is done; if the aborted
 * command completed but others remain, abort the next one; otherwise the
 * abort failed and we escalate to a logical unit reset.  A NULL reply
 * outside of a diag reset triggers a controller reinit.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so disarm the TM timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1580 
1581 #define MPR_ABORT_TIMEOUT 5
1582 
1583 static int
1584 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1585     struct mpr_command *cm)
1586 {
1587 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1588 	struct mprsas_target *targ;
1589 	int err;
1590 
1591 	targ = cm->cm_targ;
1592 	if (targ->handle == 0) {
1593 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1594 		   "%s null devhandle for target_id %d\n",
1595 		    __func__, cm->cm_ccb->ccb_h.target_id);
1596 		return -1;
1597 	}
1598 
1599 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1600 	    "Aborting command %p\n", cm);
1601 
1602 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1603 	req->DevHandle = htole16(targ->handle);
1604 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1605 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1606 
1607 	/* XXX Need to handle invalid LUNs */
1608 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1609 
1610 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1611 
1612 	tm->cm_data = NULL;
1613 	tm->cm_desc.HighPriority.RequestFlags =
1614 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1615 	tm->cm_complete = mprsas_abort_complete;
1616 	tm->cm_complete_data = (void *)tm;
1617 	tm->cm_targ = cm->cm_targ;
1618 	tm->cm_lun = cm->cm_lun;
1619 
1620 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1621 	    mprsas_tm_timeout, tm);
1622 
1623 	targ->aborts++;
1624 
1625 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1626 
1627 	err = mpr_map_command(sc, tm);
1628 	if (err)
1629 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1630 		    "error %d sending abort for cm %p SMID %u\n",
1631 		    err, cm, req->TaskMID);
1632 	return err;
1633 }
1634 
/*
 * Callout fired when a SCSI I/O command times out.  Marks the command
 * TIMEDOUT, queues it on the target's recovery list, and starts (or joins)
 * abort-based recovery for the target.  Runs with the controller mutex
 * held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* Completion raced the timeout; nothing to recover. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data holds the submit timestamp (set at dispatch). */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1723 
1724 /**
1725  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1726  *			     to SCSI Unmap.
1727  * Return 0 - for success,
1728  *	  1 - to immediately return back the command with success status to CAM
1729  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1730  *			   to FW without any translation.
1731  */
1732 static int
1733 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1734     union ccb *ccb, struct mprsas_target *targ)
1735 {
1736 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1737 	struct ccb_scsiio *csio;
1738 	struct unmap_parm_list *plist;
1739 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1740 	struct nvme_command *c;
1741 	int i, res;
1742 	uint16_t ndesc, list_len, data_length;
1743 	struct mpr_prp_page *prp_page_info;
1744 	uint64_t nvme_dsm_ranges_dma_handle;
1745 
1746 	csio = &ccb->csio;
1747 #if __FreeBSD_version >= 1100103
1748 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1749 #else
1750 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1751 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1752 		    ccb->csio.cdb_io.cdb_ptr[8]);
1753 	} else {
1754 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1755 		    ccb->csio.cdb_io.cdb_bytes[8]);
1756 	}
1757 #endif
1758 	if (!list_len) {
1759 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1760 		return -EINVAL;
1761 	}
1762 
1763 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1764 	if (!plist) {
1765 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1766 		    "save UNMAP data\n");
1767 		return -ENOMEM;
1768 	}
1769 
1770 	/* Copy SCSI unmap data to a local buffer */
1771 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1772 
1773 	/* return back the unmap command to CAM with success status,
1774 	 * if number of descripts is zero.
1775 	 */
1776 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1777 	if (!ndesc) {
1778 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1779 		    "UNMAP cmd is Zero\n");
1780 		res = 1;
1781 		goto out;
1782 	}
1783 
1784 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1785 	if (data_length > targ->MDTS) {
1786 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1787 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1788 		res = -EINVAL;
1789 		goto out;
1790 	}
1791 
1792 	prp_page_info = mpr_alloc_prp_page(sc);
1793 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1794 	    "UNMAP command.\n", __func__));
1795 
1796 	/*
1797 	 * Insert the allocated PRP page into the command's PRP page list. This
1798 	 * will be freed when the command is freed.
1799 	 */
1800 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1801 
1802 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1803 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1804 
1805 	bzero(nvme_dsm_ranges, data_length);
1806 
1807 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1808 	 * for each descriptors contained in SCSI UNMAP data.
1809 	 */
1810 	for (i = 0; i < ndesc; i++) {
1811 		nvme_dsm_ranges[i].length =
1812 		    htole32(be32toh(plist->desc[i].nlb));
1813 		nvme_dsm_ranges[i].starting_lba =
1814 		    htole64(be64toh(plist->desc[i].slba));
1815 		nvme_dsm_ranges[i].attributes = 0;
1816 	}
1817 
1818 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1819 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1820 	bzero(req, sizeof(*req));
1821 	req->DevHandle = htole16(targ->handle);
1822 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1823 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1824 	req->ErrorResponseBaseAddress.High =
1825 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1826 	req->ErrorResponseBaseAddress.Low =
1827 	    htole32(cm->cm_sense_busaddr);
1828 	req->ErrorResponseAllocationLength =
1829 	    htole16(sizeof(struct nvme_completion));
1830 	req->EncapsulatedCommandLength =
1831 	    htole16(sizeof(struct nvme_command));
1832 	req->DataLength = htole32(data_length);
1833 
1834 	/* Build NVMe DSM command */
1835 	c = (struct nvme_command *) req->NVMe_Command;
1836 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1837 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1838 	c->cdw10 = htole32(ndesc - 1);
1839 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1840 
1841 	cm->cm_length = data_length;
1842 	cm->cm_data = NULL;
1843 
1844 	cm->cm_complete = mprsas_scsiio_complete;
1845 	cm->cm_complete_data = ccb;
1846 	cm->cm_targ = targ;
1847 	cm->cm_lun = csio->ccb_h.target_lun;
1848 	cm->cm_ccb = ccb;
1849 
1850 	cm->cm_desc.Default.RequestFlags =
1851 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1852 
1853 	csio->ccb_h.qos.sim_data = sbinuptime();
1854 #if __FreeBSD_version >= 1000029
1855 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1856 	    mprsas_scsiio_timeout, cm, 0);
1857 #else //__FreeBSD_version < 1000029
1858 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1859 	    mprsas_scsiio_timeout, cm);
1860 #endif //__FreeBSD_version >= 1000029
1861 
1862 	targ->issued++;
1863 	targ->outstanding++;
1864 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1865 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1866 
1867 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1868 	    __func__, cm, ccb, targ->outstanding);
1869 
1870 	mpr_build_nvme_prp(sc, cm, req,
1871 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1872 	mpr_map_command(sc, cm);
1873 
1874 out:
1875 	free(plist, M_MPR);
1876 	return 0;
1877 }
1878 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, translate the
 * CCB into an MPI2 SCSI IO request (or divert NVMe UNMAP to the native DSM
 * translation path), arm the per-command timeout, and queue the request to
 * the controller.  Called with the softc mutex held; completion is reported
 * asynchronously via mprsas_scsiio_complete().
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is gone (or never existed). */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are owned by the firmware; no direct I/O. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Driver shutdown in progress; fail new I/O immediately. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * If no command frame is available, or a diagnostic reset is under
	 * way, freeze the SIM queue and ask CAM to requeue the CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* rc < 0: fall through and send a normal SCSI IO request */
	}

	/* Build the MPI2 SCSI IO request frame in the command's buffer. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set (4 dwords). */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Apply the per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM put it (pointer vs. inline bytes). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* NB: lun is NULL if the loop finds no matching LUN id. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping is deferred; the CCB carries the data pointer. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Record submission time for I/O latency accounting, arm timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2192 
2193 /**
2194  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2195  */
2196 static void
2197 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2198     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2199 {
2200 	u32 response_info;
2201 	u8 *response_bytes;
2202 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2203 	    MPI2_IOCSTATUS_MASK;
2204 	u8 scsi_state = mpi_reply->SCSIState;
2205 	u8 scsi_status = mpi_reply->SCSIStatus;
2206 	char *desc_ioc_state = NULL;
2207 	char *desc_scsi_status = NULL;
2208 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2209 
2210 	if (log_info == 0x31170000)
2211 		return;
2212 
2213 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2214 	     ioc_status);
2215 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2216 	    scsi_status);
2217 
2218 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2219 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2220 	if (targ->encl_level_valid) {
2221 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2222 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2223 		    targ->connector_name);
2224 	}
2225 
2226 	/*
2227 	 * We can add more detail about underflow data here
2228 	 * TO-DO
2229 	 */
2230 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2231 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2232 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2233 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2234 
2235 	if (sc->mpr_debug & MPR_XINFO &&
2236 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2237 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2238 		scsi_sense_print(csio);
2239 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2240 	}
2241 
2242 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2243 		response_info = le32toh(mpi_reply->ResponseInfo);
2244 		response_bytes = (u8 *)&response_info;
2245 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2246 		    response_bytes[0],
2247 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2248 		    response_bytes[0]));
2249 	}
2250 }
2251 
2252 /** mprsas_nvme_trans_status_code
2253  *
2254  * Convert Native NVMe command error status to
2255  * equivalent SCSI error status.
2256  *
2257  * Returns appropriate scsi_status
2258  */
2259 static u8
2260 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2261     struct mpr_command *cm)
2262 {
2263 	u8 status = MPI2_SCSI_STATUS_GOOD;
2264 	int skey, asc, ascq;
2265 	union ccb *ccb = cm->cm_complete_data;
2266 	int returned_sense_len;
2267 
2268 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2269 	skey = SSD_KEY_ILLEGAL_REQUEST;
2270 	asc = SCSI_ASC_NO_SENSE;
2271 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2272 
2273 	switch (nvme_status.sct) {
2274 	case NVME_SCT_GENERIC:
2275 		switch (nvme_status.sc) {
2276 		case NVME_SC_SUCCESS:
2277 			status = MPI2_SCSI_STATUS_GOOD;
2278 			skey = SSD_KEY_NO_SENSE;
2279 			asc = SCSI_ASC_NO_SENSE;
2280 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2281 			break;
2282 		case NVME_SC_INVALID_OPCODE:
2283 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2284 			skey = SSD_KEY_ILLEGAL_REQUEST;
2285 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2286 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2287 			break;
2288 		case NVME_SC_INVALID_FIELD:
2289 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2290 			skey = SSD_KEY_ILLEGAL_REQUEST;
2291 			asc = SCSI_ASC_INVALID_CDB;
2292 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2293 			break;
2294 		case NVME_SC_DATA_TRANSFER_ERROR:
2295 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2296 			skey = SSD_KEY_MEDIUM_ERROR;
2297 			asc = SCSI_ASC_NO_SENSE;
2298 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2299 			break;
2300 		case NVME_SC_ABORTED_POWER_LOSS:
2301 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2302 			skey = SSD_KEY_ABORTED_COMMAND;
2303 			asc = SCSI_ASC_WARNING;
2304 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2305 			break;
2306 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2307 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2308 			skey = SSD_KEY_HARDWARE_ERROR;
2309 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2310 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2311 			break;
2312 		case NVME_SC_ABORTED_BY_REQUEST:
2313 		case NVME_SC_ABORTED_SQ_DELETION:
2314 		case NVME_SC_ABORTED_FAILED_FUSED:
2315 		case NVME_SC_ABORTED_MISSING_FUSED:
2316 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2317 			skey = SSD_KEY_ABORTED_COMMAND;
2318 			asc = SCSI_ASC_NO_SENSE;
2319 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2320 			break;
2321 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2322 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2323 			skey = SSD_KEY_ILLEGAL_REQUEST;
2324 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2325 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2326 			break;
2327 		case NVME_SC_LBA_OUT_OF_RANGE:
2328 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2329 			skey = SSD_KEY_ILLEGAL_REQUEST;
2330 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2331 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2332 			break;
2333 		case NVME_SC_CAPACITY_EXCEEDED:
2334 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2335 			skey = SSD_KEY_MEDIUM_ERROR;
2336 			asc = SCSI_ASC_NO_SENSE;
2337 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2338 			break;
2339 		case NVME_SC_NAMESPACE_NOT_READY:
2340 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2341 			skey = SSD_KEY_NOT_READY;
2342 			asc = SCSI_ASC_LUN_NOT_READY;
2343 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2344 			break;
2345 		}
2346 		break;
2347 	case NVME_SCT_COMMAND_SPECIFIC:
2348 		switch (nvme_status.sc) {
2349 		case NVME_SC_INVALID_FORMAT:
2350 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2351 			skey = SSD_KEY_ILLEGAL_REQUEST;
2352 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2353 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2354 			break;
2355 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2356 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2357 			skey = SSD_KEY_ILLEGAL_REQUEST;
2358 			asc = SCSI_ASC_INVALID_CDB;
2359 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2360 			break;
2361 		}
2362 		break;
2363 	case NVME_SCT_MEDIA_ERROR:
2364 		switch (nvme_status.sc) {
2365 		case NVME_SC_WRITE_FAULTS:
2366 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2367 			skey = SSD_KEY_MEDIUM_ERROR;
2368 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2369 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2370 			break;
2371 		case NVME_SC_UNRECOVERED_READ_ERROR:
2372 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2373 			skey = SSD_KEY_MEDIUM_ERROR;
2374 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2375 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2376 			break;
2377 		case NVME_SC_GUARD_CHECK_ERROR:
2378 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2379 			skey = SSD_KEY_MEDIUM_ERROR;
2380 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2381 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2382 			break;
2383 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2384 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2385 			skey = SSD_KEY_MEDIUM_ERROR;
2386 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2387 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2388 			break;
2389 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2390 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2391 			skey = SSD_KEY_MEDIUM_ERROR;
2392 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2393 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2394 			break;
2395 		case NVME_SC_COMPARE_FAILURE:
2396 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2397 			skey = SSD_KEY_MISCOMPARE;
2398 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2399 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2400 			break;
2401 		case NVME_SC_ACCESS_DENIED:
2402 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2403 			skey = SSD_KEY_ILLEGAL_REQUEST;
2404 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2405 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2406 			break;
2407 		}
2408 		break;
2409 	}
2410 
2411 	returned_sense_len = sizeof(struct scsi_sense_data);
2412 	if (returned_sense_len < ccb->csio.sense_len)
2413 		ccb->csio.sense_resid = ccb->csio.sense_len -
2414 		    returned_sense_len;
2415 	else
2416 		ccb->csio.sense_resid = 0;
2417 
2418 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2419 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2420 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2421 
2422 	return status;
2423 }
2424 
2425 /** mprsas_complete_nvme_unmap
2426  *
2427  * Complete native NVMe command issued using NVMe Encapsulated
2428  * Request Message.
2429  */
2430 static u8
2431 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2432 {
2433 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2434 	struct nvme_completion *nvme_completion = NULL;
2435 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2436 
2437 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2438 	if (le16toh(mpi_reply->ErrorResponseCount)){
2439 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2440 		scsi_status = mprsas_nvme_trans_status_code(
2441 		    nvme_completion->status, cm);
2442 	}
2443 	return scsi_status;
2444 }
2445 
2446 static void
2447 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2448 {
2449 	MPI2_SCSI_IO_REPLY *rep;
2450 	union ccb *ccb;
2451 	struct ccb_scsiio *csio;
2452 	struct mprsas_softc *sassc;
2453 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2454 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2455 	int dir = 0, i;
2456 	u16 alloc_len;
2457 	struct mprsas_target *target;
2458 	target_id_t target_id;
2459 
2460 	MPR_FUNCTRACE(sc);
2461 	mpr_dprint(sc, MPR_TRACE,
2462 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2463 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2464 	    cm->cm_targ->outstanding);
2465 
2466 	callout_stop(&cm->cm_callout);
2467 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2468 
2469 	sassc = sc->sassc;
2470 	ccb = cm->cm_complete_data;
2471 	csio = &ccb->csio;
2472 	target_id = csio->ccb_h.target_id;
2473 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2474 	/*
2475 	 * XXX KDM if the chain allocation fails, does it matter if we do
2476 	 * the sync and unload here?  It is simpler to do it in every case,
2477 	 * assuming it doesn't cause problems.
2478 	 */
2479 	if (cm->cm_data != NULL) {
2480 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2481 			dir = BUS_DMASYNC_POSTREAD;
2482 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2483 			dir = BUS_DMASYNC_POSTWRITE;
2484 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2485 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2486 	}
2487 
2488 	cm->cm_targ->completed++;
2489 	cm->cm_targ->outstanding--;
2490 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2491 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2492 
2493 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2494 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2495 		if (cm->cm_reply != NULL)
2496 			mprsas_log_command(cm, MPR_RECOVERY,
2497 			    "completed timedout cm %p ccb %p during recovery "
2498 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2499 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2500 			    rep->SCSIState, le32toh(rep->TransferCount));
2501 		else
2502 			mprsas_log_command(cm, MPR_RECOVERY,
2503 			    "completed timedout cm %p ccb %p during recovery\n",
2504 			    cm, cm->cm_ccb);
2505 	} else if (cm->cm_targ->tm != NULL) {
2506 		if (cm->cm_reply != NULL)
2507 			mprsas_log_command(cm, MPR_RECOVERY,
2508 			    "completed cm %p ccb %p during recovery "
2509 			    "ioc %x scsi %x state %x xfer %u\n",
2510 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2511 			    rep->SCSIStatus, rep->SCSIState,
2512 			    le32toh(rep->TransferCount));
2513 		else
2514 			mprsas_log_command(cm, MPR_RECOVERY,
2515 			    "completed cm %p ccb %p during recovery\n",
2516 			    cm, cm->cm_ccb);
2517 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2518 		mprsas_log_command(cm, MPR_RECOVERY,
2519 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2520 	}
2521 
2522 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2523 		/*
2524 		 * We ran into an error after we tried to map the command,
2525 		 * so we're getting a callback without queueing the command
2526 		 * to the hardware.  So we set the status here, and it will
2527 		 * be retained below.  We'll go through the "fast path",
2528 		 * because there can be no reply when we haven't actually
2529 		 * gone out to the hardware.
2530 		 */
2531 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2532 
2533 		/*
2534 		 * Currently the only error included in the mask is
2535 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2536 		 * chain frames.  We need to freeze the queue until we get
2537 		 * a command that completed without this error, which will
2538 		 * hopefully have some chain frames attached that we can
2539 		 * use.  If we wanted to get smarter about it, we would
2540 		 * only unfreeze the queue in this condition when we're
2541 		 * sure that we're getting some chain frames back.  That's
2542 		 * probably unnecessary.
2543 		 */
2544 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2545 			xpt_freeze_simq(sassc->sim, 1);
2546 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2547 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2548 			    "freezing SIM queue\n");
2549 		}
2550 	}
2551 
2552 	/*
2553 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2554 	 * flag, and use it in a few places in the rest of this function for
2555 	 * convenience. Use the macro if available.
2556 	 */
2557 #if __FreeBSD_version >= 1100103
2558 	scsi_cdb = scsiio_cdb_ptr(csio);
2559 #else
2560 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2561 		scsi_cdb = csio->cdb_io.cdb_ptr;
2562 	else
2563 		scsi_cdb = csio->cdb_io.cdb_bytes;
2564 #endif
2565 
2566 	/*
2567 	 * If this is a Start Stop Unit command and it was issued by the driver
2568 	 * during shutdown, decrement the refcount to account for all of the
2569 	 * commands that were sent.  All SSU commands should be completed before
2570 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2571 	 * is TRUE.
2572 	 */
2573 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2574 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2575 		sc->SSU_refcount--;
2576 	}
2577 
2578 	/* Take the fast path to completion */
2579 	if (cm->cm_reply == NULL) {
2580 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2581 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2582 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2583 			else {
2584 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2585 				csio->scsi_status = SCSI_STATUS_OK;
2586 			}
2587 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2588 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2589 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2590 				mpr_dprint(sc, MPR_XINFO,
2591 				    "Unfreezing SIM queue\n");
2592 			}
2593 		}
2594 
2595 		/*
2596 		 * There are two scenarios where the status won't be
2597 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2598 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2599 		 */
2600 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2601 			/*
2602 			 * Freeze the dev queue so that commands are
2603 			 * executed in the correct order after error
2604 			 * recovery.
2605 			 */
2606 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2607 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2608 		}
2609 		mpr_free_command(sc, cm);
2610 		xpt_done(ccb);
2611 		return;
2612 	}
2613 
2614 	target = &sassc->targets[target_id];
2615 	if (scsi_cdb[0] == UNMAP &&
2616 	    target->is_nvme &&
2617 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2618 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2619 		csio->scsi_status = rep->SCSIStatus;
2620 	}
2621 
2622 	mprsas_log_command(cm, MPR_XINFO,
2623 	    "ioc %x scsi %x state %x xfer %u\n",
2624 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2625 	    le32toh(rep->TransferCount));
2626 
2627 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2628 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2629 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2630 		/* FALLTHROUGH */
2631 	case MPI2_IOCSTATUS_SUCCESS:
2632 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2633 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2634 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2635 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2636 
2637 		/* Completion failed at the transport level. */
2638 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2639 		    MPI2_SCSI_STATE_TERMINATED)) {
2640 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2641 			break;
2642 		}
2643 
2644 		/* In a modern packetized environment, an autosense failure
2645 		 * implies that there's not much else that can be done to
2646 		 * recover the command.
2647 		 */
2648 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2649 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2650 			break;
2651 		}
2652 
2653 		/*
2654 		 * CAM doesn't care about SAS Response Info data, but if this is
2655 		 * the state check if TLR should be done.  If not, clear the
2656 		 * TLR_bits for the target.
2657 		 */
2658 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2659 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2660 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2661 			sc->mapping_table[target_id].TLR_bits =
2662 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2663 		}
2664 
2665 		/*
2666 		 * Intentionally override the normal SCSI status reporting
2667 		 * for these two cases.  These are likely to happen in a
2668 		 * multi-initiator environment, and we want to make sure that
2669 		 * CAM retries these commands rather than fail them.
2670 		 */
2671 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2672 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2673 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2674 			break;
2675 		}
2676 
2677 		/* Handle normal status and sense */
2678 		csio->scsi_status = rep->SCSIStatus;
2679 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2680 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2681 		else
2682 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2683 
2684 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2685 			int sense_len, returned_sense_len;
2686 
2687 			returned_sense_len = min(le32toh(rep->SenseCount),
2688 			    sizeof(struct scsi_sense_data));
2689 			if (returned_sense_len < csio->sense_len)
2690 				csio->sense_resid = csio->sense_len -
2691 				    returned_sense_len;
2692 			else
2693 				csio->sense_resid = 0;
2694 
2695 			sense_len = min(returned_sense_len,
2696 			    csio->sense_len - csio->sense_resid);
2697 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2698 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2699 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2700 		}
2701 
2702 		/*
2703 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2704 		 * and it's page code 0 (Supported Page List), and there is
2705 		 * inquiry data, and this is for a sequential access device, and
2706 		 * the device is an SSP target, and TLR is supported by the
2707 		 * controller, turn the TLR_bits value ON if page 0x90 is
2708 		 * supported.
2709 		 */
2710 		if ((scsi_cdb[0] == INQUIRY) &&
2711 		    (scsi_cdb[1] & SI_EVPD) &&
2712 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2713 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2714 		    (csio->data_ptr != NULL) &&
2715 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2716 		    (sc->control_TLR) &&
2717 		    (sc->mapping_table[target_id].device_info &
2718 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2719 			vpd_list = (struct scsi_vpd_supported_page_list *)
2720 			    csio->data_ptr;
2721 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2722 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2723 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2724 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2725 			alloc_len -= csio->resid;
2726 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2727 				if (vpd_list->list[i] == 0x90) {
2728 					*TLR_bits = TLR_on;
2729 					break;
2730 				}
2731 			}
2732 		}
2733 
2734 		/*
2735 		 * If this is a SATA direct-access end device, mark it so that
2736 		 * a SCSI StartStopUnit command will be sent to it when the
2737 		 * driver is being shutdown.
2738 		 */
2739 		if ((scsi_cdb[0] == INQUIRY) &&
2740 		    (csio->data_ptr != NULL) &&
2741 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2742 		    (sc->mapping_table[target_id].device_info &
2743 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2744 		    ((sc->mapping_table[target_id].device_info &
2745 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2746 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2747 			target = &sassc->targets[target_id];
2748 			target->supports_SSU = TRUE;
2749 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2750 			    target_id);
2751 		}
2752 		break;
2753 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2754 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2755 		/*
2756 		 * If devinfo is 0 this will be a volume.  In that case don't
2757 		 * tell CAM that the volume is not there.  We want volumes to
2758 		 * be enumerated until they are deleted/removed, not just
2759 		 * failed.
2760 		 */
2761 		if (cm->cm_targ->devinfo == 0)
2762 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2763 		else
2764 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2765 		break;
2766 	case MPI2_IOCSTATUS_INVALID_SGL:
2767 		mpr_print_scsiio_cmd(sc, cm);
2768 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2769 		break;
2770 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2771 		/*
2772 		 * This is one of the responses that comes back when an I/O
2773 		 * has been aborted.  If it is because of a timeout that we
2774 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2775 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2776 		 * command is the same (it gets retried, subject to the
2777 		 * retry counter), the only difference is what gets printed
2778 		 * on the console.
2779 		 */
2780 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2781 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2782 		else
2783 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2784 		break;
2785 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2786 		/* resid is ignored for this condition */
2787 		csio->resid = 0;
2788 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2789 		break;
2790 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2791 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2792 		/*
2793 		 * These can sometimes be transient transport-related
2794 		 * errors, and sometimes persistent drive-related errors.
2795 		 * We used to retry these without decrementing the retry
2796 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2797 		 * we hit a persistent drive problem that returns one of
2798 		 * these error codes, we would retry indefinitely.  So,
2799 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2800 		 * count and avoid infinite retries.  We're taking the
2801 		 * potential risk of flagging false failures in the event
2802 		 * of a topology-related error (e.g. a SAS expander problem
2803 		 * causes a command addressed to a drive to fail), but
2804 		 * avoiding getting into an infinite retry loop.
2805 		 */
2806 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2807 		mpr_dprint(sc, MPR_INFO,
2808 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2809 		    mpr_describe_table(mpr_iocstatus_string,
2810 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2811 		    target_id, cm->cm_desc.Default.SMID,
2812 		    le32toh(rep->IOCLogInfo));
2813 		mpr_dprint(sc, MPR_XINFO,
2814 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2815 		    rep->SCSIStatus, rep->SCSIState,
2816 		    le32toh(rep->TransferCount));
2817 		break;
2818 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2819 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2820 	case MPI2_IOCSTATUS_INVALID_VPID:
2821 	case MPI2_IOCSTATUS_INVALID_FIELD:
2822 	case MPI2_IOCSTATUS_INVALID_STATE:
2823 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2824 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2825 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2826 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2827 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2828 	default:
2829 		mprsas_log_command(cm, MPR_XINFO,
2830 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2831 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2832 		    rep->SCSIStatus, rep->SCSIState,
2833 		    le32toh(rep->TransferCount));
2834 		csio->resid = cm->cm_length;
2835 
2836 		if (scsi_cdb[0] == UNMAP &&
2837 		    target->is_nvme &&
2838 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2839 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2840 		else
2841 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2842 
2843 		break;
2844 	}
2845 
2846 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2847 
2848 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2849 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2850 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2851 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2852 		    "queue\n");
2853 	}
2854 
2855 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2856 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2857 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2858 	}
2859 
2860 	mpr_free_command(sc, cm);
2861 	xpt_done(ccb);
2862 }
2863 
2864 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request issued by
 * mprsas_send_smpcmd().  Translates the IOC's SMP passthrough reply into
 * a CAM CCB status, tears down the DMA mapping that covered both the
 * request and response buffers, frees the command, and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	/* The originating CCB was stashed here by mprsas_send_smpcmd(). */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Recover the destination SAS address from the request for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Both the IOC status and the SAS status must indicate success. */
	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/*
	 * Byte 2 of the SMP response frame is the function result; anything
	 * other than ACCEPTED is reported to CAM as an SMP-level error.
	 */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2927 
/*
 * Translate an XPT_SMP_IO CCB into an MPI SMP passthrough request and
 * hand it to the controller.  The request and response buffers are
 * described to busdma as a two-element uio (the chip supports exactly
 * one S/G segment in each direction), so a single mpr_map_command()
 * covers the bidirectional transfer.  Completion is handled by
 * mprsas_smpio_complete().  Physical-address CCBs are rejected as
 * unsupported.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Extract the KVAs of the request and response buffers.  The logic
	 * differs between FreeBSD versions because the CAM data-addressing
	 * flags changed; both variants implement the same policy.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outgoing request, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3158 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be sent to, then dispatch it via mprsas_send_smpcmd().  If the
 * addressed device itself contains an SMP target, the request goes
 * straight to it; otherwise it is routed to the device's parent (which
 * is normally the attached expander).
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe code: look the parent up by handle and verify it
		 * is an SMP target before using its SAS address.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * Current probe code caches the parent's devinfo and SAS
		 * address on the target itself, so no lookup is needed.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3286 #endif //__FreeBSD_version >= 900026
3287 
3288 static void
3289 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3290 {
3291 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3292 	struct mpr_softc *sc;
3293 	struct mpr_command *tm;
3294 	struct mprsas_target *targ;
3295 
3296 	MPR_FUNCTRACE(sassc->sc);
3297 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3298 
3299 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3300 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3301 	sc = sassc->sc;
3302 	tm = mpr_alloc_command(sc);
3303 	if (tm == NULL) {
3304 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3305 		    "mprsas_action_resetdev\n");
3306 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3307 		xpt_done(ccb);
3308 		return;
3309 	}
3310 
3311 	targ = &sassc->targets[ccb->ccb_h.target_id];
3312 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3313 	req->DevHandle = htole16(targ->handle);
3314 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3315 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3316 
3317 	/* SAS Hard Link Reset / SATA Link Reset */
3318 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3319 
3320 	tm->cm_data = NULL;
3321 	tm->cm_desc.HighPriority.RequestFlags =
3322 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3323 	tm->cm_complete = mprsas_resetdev_complete;
3324 	tm->cm_complete_data = ccb;
3325 
3326 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3327 	    __func__, targ->tid);
3328 	tm->cm_targ = targ;
3329 	targ->flags |= MPRSAS_TARGET_INRESET;
3330 
3331 	mpr_map_command(sc, tm);
3332 }
3333 
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Reports success to CAM (and announces a
 * sent bus device reset for the target) only when the firmware indicates
 * the task management function completed; any other outcome is reported
 * as a completed-with-error CCB.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): resp is dereferenced below without a NULL check;
	 * this assumes task management requests always complete with a full
	 * reply frame -- confirm against the driver's reply-handling path.
	 */
	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}
3379 
3380 static void
3381 mprsas_poll(struct cam_sim *sim)
3382 {
3383 	struct mprsas_softc *sassc;
3384 
3385 	sassc = cam_sim_softc(sim);
3386 
3387 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3388 		/* frequent debug messages during a panic just slow
3389 		 * everything down too much.
3390 		 */
3391 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3392 		    __func__);
3393 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3394 	}
3395 
3396 	mpr_intr_locked(sassc->sc);
3397 }
3398 
/*
 * CAM async event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED: the long READ CAPACITY data changed; re-read it
 *    so the per-LUN EEDP state stays current (only on FreeBSD versions
 *    that support advanced-info events).
 *  - AC_FOUND_DEVICE: probe a newly found device for EEDP support (only
 *    on FreeBSD versions that lack advanced-info events).
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find (or create) the per-LUN record for this path. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long READ CAPACITY data for this LUN via
		 * an XPT_DEV_ADVINFO CCB issued on the caller's path.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* Record protection-enable state and block size on the LUN. */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3528 
3529 #if (__FreeBSD_version < 901503) || \
3530     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3531 static void
3532 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3533     struct ccb_getdev *cgd)
3534 {
3535 	struct mprsas_softc *sassc = sc->sassc;
3536 	struct ccb_scsiio *csio;
3537 	struct scsi_read_capacity_16 *scsi_cmd;
3538 	struct scsi_read_capacity_eedp *rcap_buf;
3539 	path_id_t pathid;
3540 	target_id_t targetid;
3541 	lun_id_t lunid;
3542 	union ccb *ccb;
3543 	struct cam_path *local_path;
3544 	struct mprsas_target *target;
3545 	struct mprsas_lun *lun;
3546 	uint8_t	found_lun;
3547 	char path_str[64];
3548 
3549 	pathid = cam_sim_path(sassc->sim);
3550 	targetid = xpt_path_target_id(path);
3551 	lunid = xpt_path_lun_id(path);
3552 
3553 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3554 	    "mprsas_check_eedp\n", targetid));
3555 	target = &sassc->targets[targetid];
3556 	if (target->handle == 0x0)
3557 		return;
3558 
3559 	/*
3560 	 * Determine if the device is EEDP capable.
3561 	 *
3562 	 * If this flag is set in the inquiry data, the device supports
3563 	 * protection information, and must support the 16 byte read capacity
3564 	 * command, otherwise continue without sending read cap 16.
3565 	 */
3566 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3567 		return;
3568 
3569 	/*
3570 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3571 	 * the LUN is formatted for EEDP support.
3572 	 */
3573 	ccb = xpt_alloc_ccb_nowait();
3574 	if (ccb == NULL) {
3575 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3576 		    "support.\n");
3577 		return;
3578 	}
3579 
3580 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3581 	    CAM_REQ_CMP) {
3582 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3583 		    "support.\n");
3584 		xpt_free_ccb(ccb);
3585 		return;
3586 	}
3587 
3588 	/*
3589 	 * If LUN is already in list, don't create a new one.
3590 	 */
3591 	found_lun = FALSE;
3592 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3593 		if (lun->lun_id == lunid) {
3594 			found_lun = TRUE;
3595 			break;
3596 		}
3597 	}
3598 	if (!found_lun) {
3599 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3600 		    M_NOWAIT | M_ZERO);
3601 		if (lun == NULL) {
3602 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3603 			    "EEDP support.\n");
3604 			xpt_free_path(local_path);
3605 			xpt_free_ccb(ccb);
3606 			return;
3607 		}
3608 		lun->lun_id = lunid;
3609 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3610 	}
3611 
3612 	xpt_path_string(local_path, path_str, sizeof(path_str));
3613 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3614 	    path_str, target->handle);
3615 
3616 	/*
3617 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3618 	 * mprsas_read_cap_done function will load the read cap info into the
3619 	 * LUN struct.
3620 	 */
3621 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3622 	    M_NOWAIT | M_ZERO);
3623 	if (rcap_buf == NULL) {
3624 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3625 		    "buffer for EEDP support.\n");
3626 		xpt_free_path(ccb->ccb_h.path);
3627 		xpt_free_ccb(ccb);
3628 		return;
3629 	}
3630 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3631 	csio = &ccb->csio;
3632 	csio->ccb_h.func_code = XPT_SCSI_IO;
3633 	csio->ccb_h.flags = CAM_DIR_IN;
3634 	csio->ccb_h.retry_count = 4;
3635 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3636 	csio->ccb_h.timeout = 60000;
3637 	csio->data_ptr = (uint8_t *)rcap_buf;
3638 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3639 	csio->sense_len = MPR_SENSE_LEN;
3640 	csio->cdb_len = sizeof(*scsi_cmd);
3641 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3642 
3643 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3644 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3645 	scsi_cmd->opcode = 0x9E;
3646 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3647 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3648 
3649 	ccb->ccb_h.ppriv_ptr1 = sassc;
3650 	xpt_action(ccb);
3651 }
3652 
/*
 * Completion callback for the internally generated READ CAPACITY(16)
 * issued by mprsas_check_eedp().  Records whether the addressed LUN is
 * formatted with protection information (EEDP), along with its logical
 * block size, then frees the data buffer, path, and CCB.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself because this SCSI command
	 * was generated internally rather than by a CAM peripheral.  This
	 * is currently the only place the driver issues a SCSI command
	 * internally; if more internal commands are added in the future,
	 * they must also release the devq themselves, since such commands
	 * never go back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the returned PROT byte is the PROT_EN flag. */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3721 #endif /* (__FreeBSD_version < 901503) || \
3722           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3723 
3724 void
3725 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3726     struct mprsas_target *target, lun_id_t lun_id)
3727 {
3728 	union ccb *ccb;
3729 	path_id_t path_id;
3730 
3731 	/*
3732 	 * Set the INRESET flag for this target so that no I/O will be sent to
3733 	 * the target until the reset has completed.  If an I/O request does
3734 	 * happen, the devq will be frozen.  The CCB holds the path which is
3735 	 * used to release the devq.  The devq is released and the CCB is freed
3736 	 * when the TM completes.
3737 	 */
3738 	ccb = xpt_alloc_ccb_nowait();
3739 	if (ccb) {
3740 		path_id = cam_sim_path(sc->sassc->sim);
3741 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3742 		    target->tid, lun_id) != CAM_REQ_CMP) {
3743 			xpt_free_ccb(ccb);
3744 		} else {
3745 			tm->cm_ccb = ccb;
3746 			tm->cm_targ = target;
3747 			target->flags |= MPRSAS_TARGET_INRESET;
3748 		}
3749 	}
3750 }
3751 
3752 int
3753 mprsas_startup(struct mpr_softc *sc)
3754 {
3755 	/*
3756 	 * Send the port enable message and set the wait_for_port_enable flag.
3757 	 * This flag helps to keep the simq frozen until all discovery events
3758 	 * are processed.
3759 	 */
3760 	sc->wait_for_port_enable = 1;
3761 	mprsas_send_portenable(sc);
3762 	return (0);
3763 }
3764 
3765 static int
3766 mprsas_send_portenable(struct mpr_softc *sc)
3767 {
3768 	MPI2_PORT_ENABLE_REQUEST *request;
3769 	struct mpr_command *cm;
3770 
3771 	MPR_FUNCTRACE(sc);
3772 
3773 	if ((cm = mpr_alloc_command(sc)) == NULL)
3774 		return (EBUSY);
3775 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3776 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3777 	request->MsgFlags = 0;
3778 	request->VP_ID = 0;
3779 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3780 	cm->cm_complete = mprsas_portenable_complete;
3781 	cm->cm_data = NULL;
3782 	cm->cm_sge = NULL;
3783 
3784 	mpr_map_command(sc, cm);
3785 	mpr_dprint(sc, MPR_XINFO,
3786 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3787 	    cm, cm->cm_req, cm->cm_complete);
3788 	return (0);
3789 }
3790 
3791 static void
3792 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3793 {
3794 	MPI2_PORT_ENABLE_REPLY *reply;
3795 	struct mprsas_softc *sassc;
3796 
3797 	MPR_FUNCTRACE(sc);
3798 	sassc = sc->sassc;
3799 
3800 	/*
3801 	 * Currently there should be no way we can hit this case.  It only
3802 	 * happens when we have a failure to allocate chain frames, and
3803 	 * port enable commands don't have S/G lists.
3804 	 */
3805 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3806 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3807 		    "This should not happen!\n", __func__, cm->cm_flags);
3808 	}
3809 
3810 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3811 	if (reply == NULL)
3812 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3813 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3814 	    MPI2_IOCSTATUS_SUCCESS)
3815 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3816 
3817 	mpr_free_command(sc, cm);
3818 	/*
3819 	 * Done waiting for port enable to complete.  Decrement the refcount.
3820 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3821 	 * take place.
3822 	 */
3823 	sc->wait_for_port_enable = 0;
3824 	sc->port_enable_complete = 1;
3825 	wakeup(&sc->port_enable_complete);
3826 	mprsas_startup_decrement(sassc);
3827 }
3828 
3829 int
3830 mprsas_check_id(struct mprsas_softc *sassc, int id)
3831 {
3832 	struct mpr_softc *sc = sassc->sc;
3833 	char *ids;
3834 	char *name;
3835 
3836 	ids = &sc->exclude_ids[0];
3837 	while((name = strsep(&ids, ",")) != NULL) {
3838 		if (name[0] == '\0')
3839 			continue;
3840 		if (strtol(name, NULL, 0) == (long)id)
3841 			return (1);
3842 	}
3843 
3844 	return (0);
3845 }
3846 
3847 void
3848 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3849 {
3850 	struct mprsas_softc *sassc;
3851 	struct mprsas_lun *lun, *lun_tmp;
3852 	struct mprsas_target *targ;
3853 	int i;
3854 
3855 	sassc = sc->sassc;
3856 	/*
3857 	 * The number of targets is based on IOC Facts, so free all of
3858 	 * the allocated LUNs for each target and then the target buffer
3859 	 * itself.
3860 	 */
3861 	for (i=0; i< maxtargets; i++) {
3862 		targ = &sassc->targets[i];
3863 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3864 			free(lun, M_MPR);
3865 		}
3866 	}
3867 	free(sassc->targets, M_MPR);
3868 
3869 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3870 	    M_MPR, M_WAITOK|M_ZERO);
3871 	if (!sassc->targets) {
3872 		panic("%s failed to alloc targets with error %d\n",
3873 		    __func__, ENOMEM);
3874 	}
3875 }
3876