xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 99429157e8615dc3b7f11afbe3ed92de7476a5db)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
93 /*
94  * static array to check SCSI OpCode for EEDP protection bits
95  */
96 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/* 0x00-0x1f: no EEDP handling */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20-0x2f: 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE AND
	 * VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40-0x4f: 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80-0x8f: 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE AND
	 * VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90-0x9f: 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa0-0xaf: 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE AND
	 * VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
/*
 * Bump the discovery refcount.  On the 0 -> 1 transition freeze the SIM
 * queue (and hold boot, where CAM supports it) so no I/O is issued before
 * discovery has populated device handles.  Paired with
 * mprsas_startup_decrement(); only active while MPRSAS_IN_STARTUP is set.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* Delay the boot device selection until discovery
			 * completes (only on CAM versions that support it). */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop one discovery reference.  On the 1 -> 0 transition discovery is
 * finished: clear MPRSAS_IN_STARTUP, release the SIM queue frozen by
 * mprsas_startup_increment(), and either release boot (newer CAM) or force
 * a wildcard rescan (older CAM, which scanned the bus itself at attach).
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* Counterpart of xpt_hold_boot() in the increment. */
			xpt_release_boot();
#else
			/* Old CAM: explicitly rescan the whole bus. */
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a high-priority command frame for task management.  May return
 * NULL when no high-priority frames are free; callers must check.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);

	return (mpr_alloc_high_priority_command(sc));
}
245 
/*
 * Release a task-management command.  Clears the target's INRESET flag,
 * and if a CCB/path pair was attached for the TM, releases the device
 * queue that was frozen for the reset and frees the CCB resources before
 * returning the frame to the high-priority pool.  NULL tm is a no-op.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	int target_id = 0xFFFFFFFF;	/* sentinel: no target attached */

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		/* Release the freeze taken when the TM was prepared. */
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
274 
275 void
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
277 {
278 	struct mprsas_softc *sassc = sc->sassc;
279 	path_id_t pathid;
280 	target_id_t targetid;
281 	union ccb *ccb;
282 
283 	MPR_FUNCTRACE(sc);
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = xpt_alloc_ccb_nowait();
294 	if (ccb == NULL) {
295 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 		return;
297 	}
298 
299 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
302 		xpt_free_ccb(ccb);
303 		return;
304 	}
305 
306 	if (targetid == CAM_TARGET_WILDCARD)
307 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
308 	else
309 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
310 
311 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
312 	xpt_rescan(ccb);
313 }
314 
/*
 * Emit a debug message for a command, prefixed with its CAM path string
 * (or a "noperiph" sim/bus/target/lun tuple when no CCB is attached) and
 * its SMID.  Does nothing unless `level' is enabled in the softc debug
 * mask.  Output is built in a fixed 192-byte sbuf and truncated if longer.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Flags 0 => fixed-length sbuf backed by str[]. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, include the CDB and transfer length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: identify the command by sim unit/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Volume removal needs no follow-up
 * SAS_OP_REMOVE_DEVICE request, so on success the target entry is cleared
 * right here.  devname/sasaddr are deliberately left intact (see comment
 * below) so the same target id can be re-assigned if the volume returns.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	/* Turn away new I/O while the removal sequence runs. */
	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		/* No TM frame available; the removal is dropped here. */
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	/* Let CAM re-probe so peripherals notice the departing device. */
	mprsas_rescan_target(sc, targ);

	/*
	 * Build a target-reset TM; completion continues in
	 * mprsas_remove_device(), which issues the op-remove.
	 */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mprsas_remove_device;
	/* Stash the handle for the completion path. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
545 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_remove().  The TM command frame is reused in place to
 * issue the SAS_OP_REMOVE_DEVICE IO-unit control request (which completes
 * in mprsas_remove_complete()), and any commands still queued on the
 * target are returned to CAM with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * NOTE: `tm' is reused below as the loop iterator; the TM command
	 * itself was already handed back via mpr_map_command() above.  Any
	 * commands the reset left on the target are completed to CAM as
	 * "device not there".
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mprsas_remove_device().  On firmware success the target entry is
 * cleared (devname/sasaddr preserved for target-id reuse) and any per-LUN
 * state is freed; on failure the entry is left alone on purpose.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the per-LUN bookkeeping for the departed device. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 		}
718 	}
719 
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
722 
723 	return (0);
724 }
725 
726 int
727 mpr_attach_sas(struct mpr_softc *sc)
728 {
729 	struct mprsas_softc *sassc;
730 	cam_status status;
731 	int unit, error = 0;
732 
733 	MPR_FUNCTRACE(sc);
734 
735 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
736 	if (!sassc) {
737 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
738 		    __func__, __LINE__);
739 		return (ENOMEM);
740 	}
741 
742 	/*
743 	 * XXX MaxTargets could change during a reinit.  Since we don't
744 	 * resize the targets[] array during such an event, cache the value
745 	 * of MaxTargets here so that we don't get into trouble later.  This
746 	 * should move into the reinit logic.
747 	 */
748 	sassc->maxtargets = sc->facts->MaxTargets;
749 	sassc->targets = malloc(sizeof(struct mprsas_target) *
750 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
751 	if (!sassc->targets) {
752 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
753 		    __func__, __LINE__);
754 		free(sassc, M_MPR);
755 		return (ENOMEM);
756 	}
757 	sc->sassc = sassc;
758 	sassc->sc = sc;
759 
760 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
761 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
762 		error = ENOMEM;
763 		goto out;
764 	}
765 
766 	unit = device_get_unit(sc->mpr_dev);
767 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
768 	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
769 	if (sassc->sim == NULL) {
770 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
771 		error = EINVAL;
772 		goto out;
773 	}
774 
775 	TAILQ_INIT(&sassc->ev_queue);
776 
777 	/* Initialize taskqueue for Event Handling */
778 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
779 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
780 	    taskqueue_thread_enqueue, &sassc->ev_tq);
781 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
782 	    device_get_nameunit(sc->mpr_dev));
783 
784 	mpr_lock(sc);
785 
786 	/*
787 	 * XXX There should be a bus for every port on the adapter, but since
788 	 * we're just going to fake the topology for now, we'll pretend that
789 	 * everything is just a target on a single bus.
790 	 */
791 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
792 		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
793 		    error);
794 		mpr_unlock(sc);
795 		goto out;
796 	}
797 
798 	/*
799 	 * Assume that discovery events will start right away.
800 	 *
801 	 * Hold off boot until discovery is complete.
802 	 */
803 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
804 	sc->sassc->startup_refcount = 0;
805 	mprsas_startup_increment(sassc);
806 
807 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
808 
809 	/*
810 	 * Register for async events so we can determine the EEDP
811 	 * capabilities of devices.
812 	 */
813 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
814 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
815 	    CAM_LUN_WILDCARD);
816 	if (status != CAM_REQ_CMP) {
817 		mpr_printf(sc, "Error %#x creating sim path\n", status);
818 		sassc->path = NULL;
819 	} else {
820 		int event;
821 
822 #if (__FreeBSD_version >= 1000006) || \
823     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
824 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
825 #else
826 		event = AC_FOUND_DEVICE;
827 #endif
828 
829 		/*
830 		 * Prior to the CAM locking improvements, we can't call
831 		 * xpt_register_async() with a particular path specified.
832 		 *
833 		 * If a path isn't specified, xpt_register_async() will
834 		 * generate a wildcard path and acquire the XPT lock while
835 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
836 		 * It will then drop the XPT lock once that is done.
837 		 *
838 		 * If a path is specified for xpt_register_async(), it will
839 		 * not acquire and drop the XPT lock around the call to
840 		 * xpt_action().  xpt_action() asserts that the caller
841 		 * holds the SIM lock, so the SIM lock has to be held when
842 		 * calling xpt_register_async() when the path is specified.
843 		 *
844 		 * But xpt_register_async calls xpt_for_all_devices(),
845 		 * which calls xptbustraverse(), which will acquire each
846 		 * SIM lock.  When it traverses our particular bus, it will
847 		 * necessarily acquire the SIM lock, which will lead to a
848 		 * recursive lock acquisition.
849 		 *
850 		 * The CAM locking changes fix this problem by acquiring
851 		 * the XPT topology lock around bus traversal in
852 		 * xptbustraverse(), so the caller can hold the SIM lock
853 		 * and it does not cause a recursive lock acquisition.
854 		 *
855 		 * These __FreeBSD_version values are approximate, especially
856 		 * for stable/10, which is two months later than the actual
857 		 * change.
858 		 */
859 
860 #if (__FreeBSD_version < 1000703) || \
861     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
862 		mpr_unlock(sc);
863 		status = xpt_register_async(event, mprsas_async, sc,
864 					    NULL);
865 		mpr_lock(sc);
866 #else
867 		status = xpt_register_async(event, mprsas_async, sc,
868 					    sassc->path);
869 #endif
870 
871 		if (status != CAM_REQ_CMP) {
872 			mpr_dprint(sc, MPR_ERROR,
873 			    "Error %#x registering async handler for "
874 			    "AC_ADVINFO_CHANGED events\n", status);
875 			xpt_free_path(sassc->path);
876 			sassc->path = NULL;
877 		}
878 	}
879 	if (status != CAM_REQ_CMP) {
880 		/*
881 		 * EEDP use is the exception, not the rule.
882 		 * Warn the user, but do not fail to attach.
883 		 */
884 		mpr_printf(sc, "EEDP capabilities disabled.\n");
885 	}
886 
887 	mpr_unlock(sc);
888 
889 	mprsas_register_events(sc);
890 out:
891 	if (error)
892 		mpr_detach_sas(sc);
893 	return (error);
894 }
895 
/*
 * Tear down everything mpr_attach_sas() created: event registration, the
 * event taskqueue, async handlers, the CAM SIM/bus, per-target LUN lists,
 * and the sassc itself.  Safe to call on a partially-attached instance;
 * a no-op (returns 0) if SAS was never attached.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Event mask 0 deregisters the callback for this path. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Drop the startup freeze if attach/discovery never finished. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN state still hanging off the target entries. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
955 
956 void
957 mprsas_discovery_end(struct mprsas_softc *sassc)
958 {
959 	struct mpr_softc *sc = sassc->sc;
960 
961 	MPR_FUNCTRACE(sc);
962 
963 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
964 		callout_stop(&sassc->discovery_callout);
965 
966 }
967 
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code; called with the driver mutex held (asserted below).  CCBs that
 * are handled synchronously are completed with xpt_done() at the bottom
 * of the switch; the SCSI I/O, SMP, and device-reset cases hand the CCB
 * off to helpers that complete it later and therefore 'return' early.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The initiator occupies the highest target ID on the bus. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = (sc->chain_frame_size /
		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		/* Cache the result so the I/O path can clamp against it. */
		sc->maxio = cpi->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the firmware link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate code: report the speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the resetdev path. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously when the firmware replies. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1115 
1116 static void
1117 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1118     target_id_t target_id, lun_id_t lun_id)
1119 {
1120 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1121 	struct cam_path *path;
1122 
1123 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1124 	    ac_code, target_id, (uintmax_t)lun_id);
1125 
1126 	if (xpt_create_path(&path, NULL,
1127 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1128 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1129 		    "notification\n");
1130 		return;
1131 	}
1132 
1133 	xpt_async(ac_code, path, NULL);
1134 	xpt_free_path(path);
1135 }
1136 
/*
 * Force-complete every command slot after a diag reset.  Each command's
 * reply pointer is cleared (the firmware state is gone), and the normal
 * completion callback and/or sleeping waiter is invoked so the upper
 * layers can unwind.  Also resynchronizes the io_cmds_active counter.
 * Called with the driver mutex held (asserted below).
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands are reaped by their issuer; flag done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-command accounting from going negative. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1189 
/*
 * Called after a controller diag reset: put the SAS layer back into
 * startup/discovery mode, flush all outstanding commands, and clear all
 * target handles so the rediscovery that follows can repopulate them.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Any non-zero count here means the flush above missed I/O. */
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* Note: assignment (not OR) wipes all other target flags. */
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1232 static void
1233 mprsas_tm_timeout(void *data)
1234 {
1235 	struct mpr_command *tm = data;
1236 	struct mpr_softc *sc = tm->cm_sc;
1237 
1238 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1239 
1240 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1241 	    "out\n", tm);
1242 	mpr_reinit(sc);
1243 }
1244 
1245 static void
1246 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1247 {
1248 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1249 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1250 	unsigned int cm_count = 0;
1251 	struct mpr_command *cm;
1252 	struct mprsas_target *targ;
1253 
1254 	callout_stop(&tm->cm_callout);
1255 
1256 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1257 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1258 	targ = tm->cm_targ;
1259 
1260 	/*
1261 	 * Currently there should be no way we can hit this case.  It only
1262 	 * happens when we have a failure to allocate chain frames, and
1263 	 * task management commands don't have S/G lists.
1264 	 */
1265 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1266 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1267 		    "This should not happen!\n", __func__, tm->cm_flags);
1268 		mprsas_free_tm(sc, tm);
1269 		return;
1270 	}
1271 
1272 	if (reply == NULL) {
1273 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1274 		    "%p\n", tm);
1275 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1276 			/* this completion was due to a reset, just cleanup */
1277 			targ->tm = NULL;
1278 			mprsas_free_tm(sc, tm);
1279 		}
1280 		else {
1281 			/* we should have gotten a reply. */
1282 			mpr_reinit(sc);
1283 		}
1284 		return;
1285 	}
1286 
1287 	mprsas_log_command(tm, MPR_RECOVERY,
1288 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1289 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1290 	    le32toh(reply->TerminationCount));
1291 
1292 	/* See if there are any outstanding commands for this LUN.
1293 	 * This could be made more efficient by using a per-LU data
1294 	 * structure of some sort.
1295 	 */
1296 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1297 		if (cm->cm_lun == tm->cm_lun)
1298 			cm_count++;
1299 	}
1300 
1301 	if (cm_count == 0) {
1302 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1303 		    "logical unit %u finished recovery after reset\n",
1304 		    tm->cm_lun, tm);
1305 
1306 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1307 		    tm->cm_lun);
1308 
1309 		/* we've finished recovery for this logical unit.  check and
1310 		 * see if some other logical unit has a timedout command
1311 		 * that needs to be processed.
1312 		 */
1313 		cm = TAILQ_FIRST(&targ->timedout_commands);
1314 		if (cm) {
1315 			mprsas_send_abort(sc, tm, cm);
1316 		}
1317 		else {
1318 			targ->tm = NULL;
1319 			mprsas_free_tm(sc, tm);
1320 		}
1321 	}
1322 	else {
1323 		/* if we still have commands for this LUN, the reset
1324 		 * effectively failed, regardless of the status reported.
1325 		 * Escalate to a target reset.
1326 		 */
1327 		mprsas_log_command(tm, MPR_RECOVERY,
1328 		    "logical unit reset complete for tm %p, but still have %u "
1329 		    "command(s)\n", tm, cm_count);
1330 		mprsas_send_reset(sc, tm,
1331 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1332 	}
1333 }
1334 
/*
 * Completion handler for a TARGET RESET task management command.  If the
 * target has no outstanding commands left, recovery is finished; if it
 * still does, the reset failed and the only remaining escalation is a
 * full controller diag reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	/* NOTE(review): 'req' is assigned below but never read here. */
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
		    "%p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1404 
1405 #define MPR_RESET_TIMEOUT 30
1406 
/*
 * Fill in and issue a task management reset request of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated TM
 * command 'tm'.  Arms a timeout callout that escalates to a diag reset
 * if the firmware never answers.
 *
 * Returns the result of mpr_map_command(), or -1 if the target has no
 * device handle or the reset type is unrecognized.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		/* A target reset covers every LUN, hence the wildcard. */
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	/* TM requests carry no payload and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1475 
1476 
/*
 * Completion handler for an ABORT TASK task management command.  Decides
 * the next recovery step: done (no more timed-out commands), continue
 * (abort the next timed-out command), or escalate to a logical unit
 * reset (the aborted command never actually completed).
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1558 
1559 #define MPR_ABORT_TIMEOUT 5
1560 
1561 static int
1562 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1563     struct mpr_command *cm)
1564 {
1565 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1566 	struct mprsas_target *targ;
1567 	int err;
1568 
1569 	targ = cm->cm_targ;
1570 	if (targ->handle == 0) {
1571 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1572 		    __func__, cm->cm_ccb->ccb_h.target_id);
1573 		return -1;
1574 	}
1575 
1576 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1577 	    "Aborting command %p\n", cm);
1578 
1579 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1580 	req->DevHandle = htole16(targ->handle);
1581 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1582 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1583 
1584 	/* XXX Need to handle invalid LUNs */
1585 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1586 
1587 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1588 
1589 	tm->cm_data = NULL;
1590 	tm->cm_desc.HighPriority.RequestFlags =
1591 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1592 	tm->cm_complete = mprsas_abort_complete;
1593 	tm->cm_complete_data = (void *)tm;
1594 	tm->cm_targ = cm->cm_targ;
1595 	tm->cm_lun = cm->cm_lun;
1596 
1597 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1598 	    mprsas_tm_timeout, tm);
1599 
1600 	targ->aborts++;
1601 
1602 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1603 	    __func__, targ->tid);
1604 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1605 
1606 	err = mpr_map_command(sc, tm);
1607 	if (err)
1608 		mpr_dprint(sc, MPR_RECOVERY,
1609 		    "error %d sending abort for cm %p SMID %u\n",
1610 		    err, cm, req->TaskMID);
1611 	return err;
1612 }
1613 
/*
 * Callout handler for a SCSI I/O that did not complete in time.  First
 * runs the interrupt handler in case the completion is merely pending;
 * otherwise marks the command timed out, queues it for recovery, and
 * starts recovery by aborting it (one TM per target at a time).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout %d cm %p target "
	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
		    "allocate a tm\n", cm);
	}
}
1693 
1694 /**
1695  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1696  *			     to SCSI Unmap.
1697  * Return 0 - for success,
1698  *	  1 - to immediately return back the command with success status to CAM
1699  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1700  *			   to FW without any translation.
1701  */
1702 static int
1703 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1704     union ccb *ccb, struct mprsas_target *targ)
1705 {
1706 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1707 	struct ccb_scsiio *csio;
1708 	struct unmap_parm_list *plist;
1709 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1710 	struct nvme_command *c;
1711 	int i, res;
1712 	uint16_t ndesc, list_len, data_length;
1713 	struct mpr_prp_page *prp_page_info;
1714 	uint64_t nvme_dsm_ranges_dma_handle;
1715 
1716 	csio = &ccb->csio;
1717 #if __FreeBSD_version >= 1100103
1718 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1719 #else
1720 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1721 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1722 		    ccb->csio.cdb_io.cdb_ptr[8]);
1723 	} else {
1724 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1725 		    ccb->csio.cdb_io.cdb_bytes[8]);
1726 	}
1727 #endif
1728 	if (!list_len) {
1729 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1730 		return -EINVAL;
1731 	}
1732 
1733 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1734 	if (!plist) {
1735 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1736 		    "save UNMAP data\n");
1737 		return -ENOMEM;
1738 	}
1739 
1740 	/* Copy SCSI unmap data to a local buffer */
1741 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1742 
1743 	/* return back the unmap command to CAM with success status,
1744 	 * if number of descripts is zero.
1745 	 */
1746 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1747 	if (!ndesc) {
1748 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1749 		    "UNMAP cmd is Zero\n");
1750 		res = 1;
1751 		goto out;
1752 	}
1753 
1754 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1755 	if (data_length > targ->MDTS) {
1756 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1757 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1758 		res = -EINVAL;
1759 		goto out;
1760 	}
1761 
1762 	prp_page_info = mpr_alloc_prp_page(sc);
1763 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1764 	    "UNMAP command.\n", __func__));
1765 
1766 	/*
1767 	 * Insert the allocated PRP page into the command's PRP page list. This
1768 	 * will be freed when the command is freed.
1769 	 */
1770 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1771 
1772 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1773 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1774 
1775 	bzero(nvme_dsm_ranges, data_length);
1776 
1777 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1778 	 * for each descriptors contained in SCSI UNMAP data.
1779 	 */
1780 	for (i = 0; i < ndesc; i++) {
1781 		nvme_dsm_ranges[i].length =
1782 		    htole32(be32toh(plist->desc[i].nlb));
1783 		nvme_dsm_ranges[i].starting_lba =
1784 		    htole64(be64toh(plist->desc[i].slba));
1785 		nvme_dsm_ranges[i].attributes = 0;
1786 	}
1787 
1788 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1789 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1790 	bzero(req, sizeof(*req));
1791 	req->DevHandle = htole16(targ->handle);
1792 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1793 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1794 	req->ErrorResponseBaseAddress.High =
1795 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1796 	req->ErrorResponseBaseAddress.Low =
1797 	    htole32(cm->cm_sense_busaddr);
1798 	req->ErrorResponseAllocationLength =
1799 	    htole16(sizeof(struct nvme_completion));
1800 	req->EncapsulatedCommandLength =
1801 	    htole16(sizeof(struct nvme_command));
1802 	req->DataLength = htole32(data_length);
1803 
1804 	/* Build NVMe DSM command */
1805 	c = (struct nvme_command *) req->NVMe_Command;
1806 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1807 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1808 	c->cdw10 = htole32(ndesc - 1);
1809 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1810 
1811 	cm->cm_length = data_length;
1812 	cm->cm_data = NULL;
1813 
1814 	cm->cm_complete = mprsas_scsiio_complete;
1815 	cm->cm_complete_data = ccb;
1816 	cm->cm_targ = targ;
1817 	cm->cm_lun = csio->ccb_h.target_lun;
1818 	cm->cm_ccb = ccb;
1819 
1820 	cm->cm_desc.Default.RequestFlags =
1821 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1822 
1823 #if __FreeBSD_version >= 1000029
1824 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1825 	    mprsas_scsiio_timeout, cm, 0);
1826 #else //__FreeBSD_version < 1000029
1827 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1828 	    mprsas_scsiio_timeout, cm);
1829 #endif //__FreeBSD_version >= 1000029
1830 
1831 	targ->issued++;
1832 	targ->outstanding++;
1833 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1834 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1835 
1836 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1837 	    __func__, cm, ccb, targ->outstanding);
1838 
1839 	mpr_build_nvme_prp(sc, cm, req, (void *)nvme_dsm_ranges_dma_handle, 0,
1840 	    data_length);
1841 	mpr_map_command(sc, cm);
1842 
1843 out:
1844 	free(plist, M_MPR);
1845 	return 0;
1846 }
1847 
/*
 * mprsas_action_scsiio
 *
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate the CAM request into an MPI2 SCSI IO request
 * frame (data direction, tag/queue action, LUN, CDB, and optional EEDP
 * protection setup), arm the per-command timeout, and queue the command
 * to the controller via mpr_map_command().  Requests that cannot be
 * serviced are completed back to CAM immediately with a suitable status.
 *
 * Called with the controller mutex held (asserted below).
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A device handle of 0 means no device is attached at this target. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members may not be addressed directly via SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Reject new I/O once the driver has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of commands (or a diag reset is in flight): freeze the SIM
	 * queue and ask CAM to requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* Any other rc: fall through and issue UNMAP as SCSI IO. */
	}

	/* Build the MPI2 SCSI IO request frame in the command's request slot. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Fold in this target's current Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM stored it (pointer or inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN was found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* SGL occupies the request frame after the 24-word fixed header. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout before handing off to the hardware. */
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2160 
2161 static void
2162 mpr_response_code(struct mpr_softc *sc, u8 response_code)
2163 {
2164         char *desc;
2165 
2166         switch (response_code) {
2167         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2168                 desc = "task management request completed";
2169                 break;
2170         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2171                 desc = "invalid frame";
2172                 break;
2173         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2174                 desc = "task management request not supported";
2175                 break;
2176         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2177                 desc = "task management request failed";
2178                 break;
2179         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2180                 desc = "task management request succeeded";
2181                 break;
2182         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2183                 desc = "invalid lun";
2184                 break;
2185         case 0xA:
2186                 desc = "overlapped tag attempted";
2187                 break;
2188         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2189                 desc = "task queued, however not sent to target";
2190                 break;
2191         default:
2192                 desc = "unknown";
2193                 break;
2194         }
2195 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
2196 	    desc);
2197 }
2198 
2199 /**
2200  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2201  */
2202 static void
2203 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2204     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2205 {
2206 	u32 response_info;
2207 	u8 *response_bytes;
2208 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2209 	    MPI2_IOCSTATUS_MASK;
2210 	u8 scsi_state = mpi_reply->SCSIState;
2211 	u8 scsi_status = mpi_reply->SCSIStatus;
2212 	char *desc_ioc_state = NULL;
2213 	char *desc_scsi_status = NULL;
2214 	char *desc_scsi_state = sc->tmp_string;
2215 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2216 
2217 	if (log_info == 0x31170000)
2218 		return;
2219 
2220 	switch (ioc_status) {
2221 	case MPI2_IOCSTATUS_SUCCESS:
2222 		desc_ioc_state = "success";
2223 		break;
2224 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2225 		desc_ioc_state = "invalid function";
2226 		break;
2227 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2228 		desc_ioc_state = "scsi recovered error";
2229 		break;
2230 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2231 		desc_ioc_state = "scsi invalid dev handle";
2232 		break;
2233 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2234 		desc_ioc_state = "scsi device not there";
2235 		break;
2236 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2237 		desc_ioc_state = "scsi data overrun";
2238 		break;
2239 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2240 		desc_ioc_state = "scsi data underrun";
2241 		break;
2242 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2243 		desc_ioc_state = "scsi io data error";
2244 		break;
2245 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2246 		desc_ioc_state = "scsi protocol error";
2247 		break;
2248 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2249 		desc_ioc_state = "scsi task terminated";
2250 		break;
2251 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2252 		desc_ioc_state = "scsi residual mismatch";
2253 		break;
2254 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2255 		desc_ioc_state = "scsi task mgmt failed";
2256 		break;
2257 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2258 		desc_ioc_state = "scsi ioc terminated";
2259 		break;
2260 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2261 		desc_ioc_state = "scsi ext terminated";
2262 		break;
2263 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2264 		desc_ioc_state = "eedp guard error";
2265 		break;
2266 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2267 		desc_ioc_state = "eedp ref tag error";
2268 		break;
2269 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2270 		desc_ioc_state = "eedp app tag error";
2271 		break;
2272 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
2273 		desc_ioc_state = "insufficient power";
2274 		break;
2275 	default:
2276 		desc_ioc_state = "unknown";
2277 		break;
2278 	}
2279 
2280 	switch (scsi_status) {
2281 	case MPI2_SCSI_STATUS_GOOD:
2282 		desc_scsi_status = "good";
2283 		break;
2284 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2285 		desc_scsi_status = "check condition";
2286 		break;
2287 	case MPI2_SCSI_STATUS_CONDITION_MET:
2288 		desc_scsi_status = "condition met";
2289 		break;
2290 	case MPI2_SCSI_STATUS_BUSY:
2291 		desc_scsi_status = "busy";
2292 		break;
2293 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2294 		desc_scsi_status = "intermediate";
2295 		break;
2296 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2297 		desc_scsi_status = "intermediate condmet";
2298 		break;
2299 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2300 		desc_scsi_status = "reservation conflict";
2301 		break;
2302 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2303 		desc_scsi_status = "command terminated";
2304 		break;
2305 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2306 		desc_scsi_status = "task set full";
2307 		break;
2308 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2309 		desc_scsi_status = "aca active";
2310 		break;
2311 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2312 		desc_scsi_status = "task aborted";
2313 		break;
2314 	default:
2315 		desc_scsi_status = "unknown";
2316 		break;
2317 	}
2318 
2319 	desc_scsi_state[0] = '\0';
2320 	if (!scsi_state)
2321 		desc_scsi_state = " ";
2322 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2323 		strcat(desc_scsi_state, "response info ");
2324 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2325 		strcat(desc_scsi_state, "state terminated ");
2326 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2327 		strcat(desc_scsi_state, "no status ");
2328 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2329 		strcat(desc_scsi_state, "autosense failed ");
2330 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2331 		strcat(desc_scsi_state, "autosense valid ");
2332 
2333 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2334 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2335 	if (targ->encl_level_valid) {
2336 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2337 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2338 		    targ->connector_name);
2339 	}
2340 	/* We can add more detail about underflow data here
2341 	 * TO-DO
2342 	 * */
2343 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2344 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2345 	    desc_scsi_state, scsi_state);
2346 
2347 	if (sc->mpr_debug & MPR_XINFO &&
2348 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2349 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2350 		scsi_sense_print(csio);
2351 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2352 	}
2353 
2354 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2355 		response_info = le32toh(mpi_reply->ResponseInfo);
2356 		response_bytes = (u8 *)&response_info;
2357 		mpr_response_code(sc,response_bytes[0]);
2358 	}
2359 }
2360 
2361 /** mprsas_nvme_trans_status_code
2362  *
2363  * Convert Native NVMe command error status to
2364  * equivalent SCSI error status.
2365  *
2366  * Returns appropriate scsi_status
2367  */
2368 static u8
2369 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2370     struct mpr_command *cm)
2371 {
2372 	u8 status = MPI2_SCSI_STATUS_GOOD;
2373 	int skey, asc, ascq;
2374 	union ccb *ccb = cm->cm_complete_data;
2375 	int returned_sense_len;
2376 
2377 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2378 	skey = SSD_KEY_ILLEGAL_REQUEST;
2379 	asc = SCSI_ASC_NO_SENSE;
2380 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2381 
2382 	switch (nvme_status.sct) {
2383 	case NVME_SCT_GENERIC:
2384 		switch (nvme_status.sc) {
2385 		case NVME_SC_SUCCESS:
2386 			status = MPI2_SCSI_STATUS_GOOD;
2387 			skey = SSD_KEY_NO_SENSE;
2388 			asc = SCSI_ASC_NO_SENSE;
2389 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2390 			break;
2391 		case NVME_SC_INVALID_OPCODE:
2392 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2393 			skey = SSD_KEY_ILLEGAL_REQUEST;
2394 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2395 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2396 			break;
2397 		case NVME_SC_INVALID_FIELD:
2398 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2399 			skey = SSD_KEY_ILLEGAL_REQUEST;
2400 			asc = SCSI_ASC_INVALID_CDB;
2401 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2402 			break;
2403 		case NVME_SC_DATA_TRANSFER_ERROR:
2404 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2405 			skey = SSD_KEY_MEDIUM_ERROR;
2406 			asc = SCSI_ASC_NO_SENSE;
2407 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2408 			break;
2409 		case NVME_SC_ABORTED_POWER_LOSS:
2410 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2411 			skey = SSD_KEY_ABORTED_COMMAND;
2412 			asc = SCSI_ASC_WARNING;
2413 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2414 			break;
2415 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2416 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2417 			skey = SSD_KEY_HARDWARE_ERROR;
2418 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2419 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2420 			break;
2421 		case NVME_SC_ABORTED_BY_REQUEST:
2422 		case NVME_SC_ABORTED_SQ_DELETION:
2423 		case NVME_SC_ABORTED_FAILED_FUSED:
2424 		case NVME_SC_ABORTED_MISSING_FUSED:
2425 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2426 			skey = SSD_KEY_ABORTED_COMMAND;
2427 			asc = SCSI_ASC_NO_SENSE;
2428 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2429 			break;
2430 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2431 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2432 			skey = SSD_KEY_ILLEGAL_REQUEST;
2433 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2434 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2435 			break;
2436 		case NVME_SC_LBA_OUT_OF_RANGE:
2437 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2438 			skey = SSD_KEY_ILLEGAL_REQUEST;
2439 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2440 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2441 			break;
2442 		case NVME_SC_CAPACITY_EXCEEDED:
2443 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2444 			skey = SSD_KEY_MEDIUM_ERROR;
2445 			asc = SCSI_ASC_NO_SENSE;
2446 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2447 			break;
2448 		case NVME_SC_NAMESPACE_NOT_READY:
2449 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2450 			skey = SSD_KEY_NOT_READY;
2451 			asc = SCSI_ASC_LUN_NOT_READY;
2452 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2453 			break;
2454 		}
2455 		break;
2456 	case NVME_SCT_COMMAND_SPECIFIC:
2457 		switch (nvme_status.sc) {
2458 		case NVME_SC_INVALID_FORMAT:
2459 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2460 			skey = SSD_KEY_ILLEGAL_REQUEST;
2461 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2462 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2463 			break;
2464 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2465 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2466 			skey = SSD_KEY_ILLEGAL_REQUEST;
2467 			asc = SCSI_ASC_INVALID_CDB;
2468 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2469 			break;
2470 		}
2471 		break;
2472 	case NVME_SCT_MEDIA_ERROR:
2473 		switch (nvme_status.sc) {
2474 		case NVME_SC_WRITE_FAULTS:
2475 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2476 			skey = SSD_KEY_MEDIUM_ERROR;
2477 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2478 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2479 			break;
2480 		case NVME_SC_UNRECOVERED_READ_ERROR:
2481 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2482 			skey = SSD_KEY_MEDIUM_ERROR;
2483 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2484 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2485 			break;
2486 		case NVME_SC_GUARD_CHECK_ERROR:
2487 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2488 			skey = SSD_KEY_MEDIUM_ERROR;
2489 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2490 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2491 			break;
2492 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2493 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2494 			skey = SSD_KEY_MEDIUM_ERROR;
2495 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2496 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2497 			break;
2498 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2499 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2500 			skey = SSD_KEY_MEDIUM_ERROR;
2501 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2502 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2503 			break;
2504 		case NVME_SC_COMPARE_FAILURE:
2505 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2506 			skey = SSD_KEY_MISCOMPARE;
2507 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2508 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2509 			break;
2510 		case NVME_SC_ACCESS_DENIED:
2511 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2512 			skey = SSD_KEY_ILLEGAL_REQUEST;
2513 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2514 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2515 			break;
2516 		}
2517 		break;
2518 	}
2519 
2520 	returned_sense_len = sizeof(struct scsi_sense_data);
2521 	if (returned_sense_len < ccb->csio.sense_len)
2522 		ccb->csio.sense_resid = ccb->csio.sense_len -
2523 		    returned_sense_len;
2524 	else
2525 		ccb->csio.sense_resid = 0;
2526 
2527 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2528 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2529 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2530 
2531 	return status;
2532 }
2533 
2534 /** mprsas_complete_nvme_unmap
2535  *
2536  * Complete native NVMe command issued using NVMe Encapsulated
2537  * Request Message.
2538  */
2539 static u8
2540 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2541 {
2542 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2543 	struct nvme_completion *nvme_completion = NULL;
2544 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2545 
2546 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2547 	if (le16toh(mpi_reply->ErrorResponseCount)){
2548 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2549 		scsi_status = mprsas_nvme_trans_status_code(
2550 		    nvme_completion->status, cm);
2551 	}
2552 	return scsi_status;
2553 }
2554 
2555 static void
2556 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2557 {
2558 	MPI2_SCSI_IO_REPLY *rep;
2559 	union ccb *ccb;
2560 	struct ccb_scsiio *csio;
2561 	struct mprsas_softc *sassc;
2562 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2563 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2564 	int dir = 0, i;
2565 	u16 alloc_len;
2566 	struct mprsas_target *target;
2567 	target_id_t target_id;
2568 
2569 	MPR_FUNCTRACE(sc);
2570 	mpr_dprint(sc, MPR_TRACE,
2571 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2572 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2573 	    cm->cm_targ->outstanding);
2574 
2575 	callout_stop(&cm->cm_callout);
2576 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2577 
2578 	sassc = sc->sassc;
2579 	ccb = cm->cm_complete_data;
2580 	csio = &ccb->csio;
2581 	target_id = csio->ccb_h.target_id;
2582 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2583 	/*
2584 	 * XXX KDM if the chain allocation fails, does it matter if we do
2585 	 * the sync and unload here?  It is simpler to do it in every case,
2586 	 * assuming it doesn't cause problems.
2587 	 */
2588 	if (cm->cm_data != NULL) {
2589 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2590 			dir = BUS_DMASYNC_POSTREAD;
2591 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2592 			dir = BUS_DMASYNC_POSTWRITE;
2593 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2594 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2595 	}
2596 
2597 	cm->cm_targ->completed++;
2598 	cm->cm_targ->outstanding--;
2599 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2600 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2601 
2602 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2603 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2604 		if (cm->cm_reply != NULL)
2605 			mprsas_log_command(cm, MPR_RECOVERY,
2606 			    "completed timedout cm %p ccb %p during recovery "
2607 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2608 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2609 			    rep->SCSIState, le32toh(rep->TransferCount));
2610 		else
2611 			mprsas_log_command(cm, MPR_RECOVERY,
2612 			    "completed timedout cm %p ccb %p during recovery\n",
2613 			    cm, cm->cm_ccb);
2614 	} else if (cm->cm_targ->tm != NULL) {
2615 		if (cm->cm_reply != NULL)
2616 			mprsas_log_command(cm, MPR_RECOVERY,
2617 			    "completed cm %p ccb %p during recovery "
2618 			    "ioc %x scsi %x state %x xfer %u\n",
2619 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2620 			    rep->SCSIStatus, rep->SCSIState,
2621 			    le32toh(rep->TransferCount));
2622 		else
2623 			mprsas_log_command(cm, MPR_RECOVERY,
2624 			    "completed cm %p ccb %p during recovery\n",
2625 			    cm, cm->cm_ccb);
2626 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2627 		mprsas_log_command(cm, MPR_RECOVERY,
2628 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2629 	}
2630 
2631 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2632 		/*
2633 		 * We ran into an error after we tried to map the command,
2634 		 * so we're getting a callback without queueing the command
2635 		 * to the hardware.  So we set the status here, and it will
2636 		 * be retained below.  We'll go through the "fast path",
2637 		 * because there can be no reply when we haven't actually
2638 		 * gone out to the hardware.
2639 		 */
2640 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2641 
2642 		/*
2643 		 * Currently the only error included in the mask is
2644 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2645 		 * chain frames.  We need to freeze the queue until we get
2646 		 * a command that completed without this error, which will
2647 		 * hopefully have some chain frames attached that we can
2648 		 * use.  If we wanted to get smarter about it, we would
2649 		 * only unfreeze the queue in this condition when we're
2650 		 * sure that we're getting some chain frames back.  That's
2651 		 * probably unnecessary.
2652 		 */
2653 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2654 			xpt_freeze_simq(sassc->sim, 1);
2655 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2656 			mpr_dprint(sc, MPR_INFO, "Error sending command, "
2657 			    "freezing SIM queue\n");
2658 		}
2659 	}
2660 
2661 	/*
2662 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2663 	 * flag, and use it in a few places in the rest of this function for
2664 	 * convenience. Use the macro if available.
2665 	 */
2666 #if __FreeBSD_version >= 1100103
2667 	scsi_cdb = scsiio_cdb_ptr(csio);
2668 #else
2669 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2670 		scsi_cdb = csio->cdb_io.cdb_ptr;
2671 	else
2672 		scsi_cdb = csio->cdb_io.cdb_bytes;
2673 #endif
2674 
2675 	/*
2676 	 * If this is a Start Stop Unit command and it was issued by the driver
2677 	 * during shutdown, decrement the refcount to account for all of the
2678 	 * commands that were sent.  All SSU commands should be completed before
2679 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2680 	 * is TRUE.
2681 	 */
2682 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2683 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2684 		sc->SSU_refcount--;
2685 	}
2686 
2687 	/* Take the fast path to completion */
2688 	if (cm->cm_reply == NULL) {
2689 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2690 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2691 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2692 			else {
2693 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2694 				csio->scsi_status = SCSI_STATUS_OK;
2695 			}
2696 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2697 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2698 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2699 				mpr_dprint(sc, MPR_XINFO,
2700 				    "Unfreezing SIM queue\n");
2701 			}
2702 		}
2703 
2704 		/*
2705 		 * There are two scenarios where the status won't be
2706 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2707 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2708 		 */
2709 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2710 			/*
2711 			 * Freeze the dev queue so that commands are
2712 			 * executed in the correct order after error
2713 			 * recovery.
2714 			 */
2715 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2716 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2717 		}
2718 		mpr_free_command(sc, cm);
2719 		xpt_done(ccb);
2720 		return;
2721 	}
2722 
2723 	target = &sassc->targets[target_id];
2724 	if (scsi_cdb[0] == UNMAP &&
2725 	    target->is_nvme &&
2726 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2727 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2728 		csio->scsi_status = rep->SCSIStatus;
2729 	}
2730 
2731 	mprsas_log_command(cm, MPR_XINFO,
2732 	    "ioc %x scsi %x state %x xfer %u\n",
2733 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2734 	    le32toh(rep->TransferCount));
2735 
2736 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2737 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2738 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2739 		/* FALLTHROUGH */
2740 	case MPI2_IOCSTATUS_SUCCESS:
2741 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2742 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2743 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2744 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2745 
2746 		/* Completion failed at the transport level. */
2747 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2748 		    MPI2_SCSI_STATE_TERMINATED)) {
2749 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2750 			break;
2751 		}
2752 
2753 		/* In a modern packetized environment, an autosense failure
2754 		 * implies that there's not much else that can be done to
2755 		 * recover the command.
2756 		 */
2757 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2758 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2759 			break;
2760 		}
2761 
2762 		/*
2763 		 * CAM doesn't care about SAS Response Info data, but if this is
2764 		 * the state check if TLR should be done.  If not, clear the
2765 		 * TLR_bits for the target.
2766 		 */
2767 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2768 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2769 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2770 			sc->mapping_table[target_id].TLR_bits =
2771 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2772 		}
2773 
2774 		/*
2775 		 * Intentionally override the normal SCSI status reporting
2776 		 * for these two cases.  These are likely to happen in a
2777 		 * multi-initiator environment, and we want to make sure that
2778 		 * CAM retries these commands rather than fail them.
2779 		 */
2780 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2781 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2782 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2783 			break;
2784 		}
2785 
2786 		/* Handle normal status and sense */
2787 		csio->scsi_status = rep->SCSIStatus;
2788 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2789 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2790 		else
2791 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2792 
2793 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2794 			int sense_len, returned_sense_len;
2795 
2796 			returned_sense_len = min(le32toh(rep->SenseCount),
2797 			    sizeof(struct scsi_sense_data));
2798 			if (returned_sense_len < csio->sense_len)
2799 				csio->sense_resid = csio->sense_len -
2800 				    returned_sense_len;
2801 			else
2802 				csio->sense_resid = 0;
2803 
2804 			sense_len = min(returned_sense_len,
2805 			    csio->sense_len - csio->sense_resid);
2806 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2807 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2808 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2809 		}
2810 
2811 		/*
2812 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2813 		 * and it's page code 0 (Supported Page List), and there is
2814 		 * inquiry data, and this is for a sequential access device, and
2815 		 * the device is an SSP target, and TLR is supported by the
2816 		 * controller, turn the TLR_bits value ON if page 0x90 is
2817 		 * supported.
2818 		 */
2819 		if ((scsi_cdb[0] == INQUIRY) &&
2820 		    (scsi_cdb[1] & SI_EVPD) &&
2821 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2822 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2823 		    (csio->data_ptr != NULL) &&
2824 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2825 		    (sc->control_TLR) &&
2826 		    (sc->mapping_table[target_id].device_info &
2827 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2828 			vpd_list = (struct scsi_vpd_supported_page_list *)
2829 			    csio->data_ptr;
2830 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2831 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2832 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2833 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2834 			alloc_len -= csio->resid;
2835 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2836 				if (vpd_list->list[i] == 0x90) {
2837 					*TLR_bits = TLR_on;
2838 					break;
2839 				}
2840 			}
2841 		}
2842 
2843 		/*
2844 		 * If this is a SATA direct-access end device, mark it so that
2845 		 * a SCSI StartStopUnit command will be sent to it when the
2846 		 * driver is being shutdown.
2847 		 */
2848 		if ((scsi_cdb[0] == INQUIRY) &&
2849 		    (csio->data_ptr != NULL) &&
2850 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2851 		    (sc->mapping_table[target_id].device_info &
2852 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2853 		    ((sc->mapping_table[target_id].device_info &
2854 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2855 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2856 			target = &sassc->targets[target_id];
2857 			target->supports_SSU = TRUE;
2858 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2859 			    target_id);
2860 		}
2861 		break;
2862 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2863 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2864 		/*
2865 		 * If devinfo is 0 this will be a volume.  In that case don't
2866 		 * tell CAM that the volume is not there.  We want volumes to
2867 		 * be enumerated until they are deleted/removed, not just
2868 		 * failed.
2869 		 */
2870 		if (cm->cm_targ->devinfo == 0)
2871 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2872 		else
2873 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2874 		break;
2875 	case MPI2_IOCSTATUS_INVALID_SGL:
2876 		mpr_print_scsiio_cmd(sc, cm);
2877 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2878 		break;
2879 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2880 		/*
2881 		 * This is one of the responses that comes back when an I/O
2882 		 * has been aborted.  If it is because of a timeout that we
2883 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2884 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2885 		 * command is the same (it gets retried, subject to the
2886 		 * retry counter), the only difference is what gets printed
2887 		 * on the console.
2888 		 */
2889 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2890 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2891 		else
2892 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2893 		break;
2894 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2895 		/* resid is ignored for this condition */
2896 		csio->resid = 0;
2897 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2898 		break;
2899 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2900 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2901 		/*
2902 		 * These can sometimes be transient transport-related
2903 		 * errors, and sometimes persistent drive-related errors.
2904 		 * We used to retry these without decrementing the retry
2905 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2906 		 * we hit a persistent drive problem that returns one of
2907 		 * these error codes, we would retry indefinitely.  So,
2908 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2909 		 * count and avoid infinite retries.  We're taking the
2910 		 * potential risk of flagging false failures in the event
2911 		 * of a topology-related error (e.g. a SAS expander problem
2912 		 * causes a command addressed to a drive to fail), but
2913 		 * avoiding getting into an infinite retry loop.
2914 		 */
2915 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2916 		mprsas_log_command(cm, MPR_INFO,
2917 		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2918 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2919 		    rep->SCSIStatus, rep->SCSIState,
2920 		    le32toh(rep->TransferCount));
2921 		break;
2922 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2923 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2924 	case MPI2_IOCSTATUS_INVALID_VPID:
2925 	case MPI2_IOCSTATUS_INVALID_FIELD:
2926 	case MPI2_IOCSTATUS_INVALID_STATE:
2927 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2928 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2929 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2930 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2931 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2932 	default:
2933 		mprsas_log_command(cm, MPR_XINFO,
2934 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2935 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2936 		    rep->SCSIStatus, rep->SCSIState,
2937 		    le32toh(rep->TransferCount));
2938 		csio->resid = cm->cm_length;
2939 
2940 		if (scsi_cdb[0] == UNMAP &&
2941 		    target->is_nvme &&
2942 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2943 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2944 		else
2945 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2946 
2947 		break;
2948 	}
2949 
2950 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2951 
2952 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2953 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2954 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2955 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2956 		    "queue\n");
2957 	}
2958 
2959 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2960 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2961 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2962 	}
2963 
2964 	mpr_free_command(sc, cm);
2965 	xpt_done(ccb);
2966 }
2967 
2968 #if __FreeBSD_version >= 900026
2969 static void
2970 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2971 {
2972 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2973 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2974 	uint64_t sasaddr;
2975 	union ccb *ccb;
2976 
2977 	ccb = cm->cm_complete_data;
2978 
2979 	/*
2980 	 * Currently there should be no way we can hit this case.  It only
2981 	 * happens when we have a failure to allocate chain frames, and SMP
2982 	 * commands require two S/G elements only.  That should be handled
2983 	 * in the standard request size.
2984 	 */
2985 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2986 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2987 		    "request!\n", __func__, cm->cm_flags);
2988 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2989 		goto bailout;
2990         }
2991 
2992 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2993 	if (rpl == NULL) {
2994 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2995 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2996 		goto bailout;
2997 	}
2998 
2999 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3000 	sasaddr = le32toh(req->SASAddress.Low);
3001 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
3002 
3003 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3004 	    MPI2_IOCSTATUS_SUCCESS ||
3005 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
3006 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
3007 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
3008 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3009 		goto bailout;
3010 	}
3011 
3012 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
3013 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
3014 
3015 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
3016 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3017 	else
3018 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
3019 
3020 bailout:
3021 	/*
3022 	 * We sync in both directions because we had DMAs in the S/G list
3023 	 * in both directions.
3024 	 */
3025 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3026 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3027 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3028 	mpr_free_command(sc, cm);
3029 	xpt_done(ccb);
3030 }
3031 
3032 static void
3033 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
3034 {
3035 	struct mpr_command *cm;
3036 	uint8_t *request, *response;
3037 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
3038 	struct mpr_softc *sc;
3039 	struct sglist *sg;
3040 	int error;
3041 
3042 	sc = sassc->sc;
3043 	sg = NULL;
3044 	error = 0;
3045 
3046 #if (__FreeBSD_version >= 1000028) || \
3047     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
3048 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
3049 	case CAM_DATA_PADDR:
3050 	case CAM_DATA_SG_PADDR:
3051 		/*
3052 		 * XXX We don't yet support physical addresses here.
3053 		 */
3054 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3055 		    "supported\n", __func__);
3056 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3057 		xpt_done(ccb);
3058 		return;
3059 	case CAM_DATA_SG:
3060 		/*
3061 		 * The chip does not support more than one buffer for the
3062 		 * request or response.
3063 		 */
3064 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
3065 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3066 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3067 			    "response buffer segments not supported for SMP\n",
3068 			    __func__);
3069 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3070 			xpt_done(ccb);
3071 			return;
3072 		}
3073 
3074 		/*
3075 		 * The CAM_SCATTER_VALID flag was originally implemented
3076 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3077 		 * We have two.  So, just take that flag to mean that we
3078 		 * might have S/G lists, and look at the S/G segment count
3079 		 * to figure out whether that is the case for each individual
3080 		 * buffer.
3081 		 */
3082 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3083 			bus_dma_segment_t *req_sg;
3084 
3085 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3086 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3087 		} else
3088 			request = ccb->smpio.smp_request;
3089 
3090 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3091 			bus_dma_segment_t *rsp_sg;
3092 
3093 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3094 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3095 		} else
3096 			response = ccb->smpio.smp_response;
3097 		break;
3098 	case CAM_DATA_VADDR:
3099 		request = ccb->smpio.smp_request;
3100 		response = ccb->smpio.smp_response;
3101 		break;
3102 	default:
3103 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3104 		xpt_done(ccb);
3105 		return;
3106 	}
3107 #else /* __FreeBSD_version < 1000028 */
3108 	/*
3109 	 * XXX We don't yet support physical addresses here.
3110 	 */
3111 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3112 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3113 		    "supported\n", __func__);
3114 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3115 		xpt_done(ccb);
3116 		return;
3117 	}
3118 
3119 	/*
3120 	 * If the user wants to send an S/G list, check to make sure they
3121 	 * have single buffers.
3122 	 */
3123 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3124 		/*
3125 		 * The chip does not support more than one buffer for the
3126 		 * request or response.
3127 		 */
3128 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
3129 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3130 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3131 			    "response buffer segments not supported for SMP\n",
3132 			    __func__);
3133 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3134 			xpt_done(ccb);
3135 			return;
3136 		}
3137 
3138 		/*
3139 		 * The CAM_SCATTER_VALID flag was originally implemented
3140 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3141 		 * We have two.  So, just take that flag to mean that we
3142 		 * might have S/G lists, and look at the S/G segment count
3143 		 * to figure out whether that is the case for each individual
3144 		 * buffer.
3145 		 */
3146 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3147 			bus_dma_segment_t *req_sg;
3148 
3149 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3150 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3151 		} else
3152 			request = ccb->smpio.smp_request;
3153 
3154 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3155 			bus_dma_segment_t *rsp_sg;
3156 
3157 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3158 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3159 		} else
3160 			response = ccb->smpio.smp_response;
3161 	} else {
3162 		request = ccb->smpio.smp_request;
3163 		response = ccb->smpio.smp_response;
3164 	}
3165 #endif /* __FreeBSD_version < 1000028 */
3166 
3167 	cm = mpr_alloc_command(sc);
3168 	if (cm == NULL) {
3169 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3170 		    __func__);
3171 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3172 		xpt_done(ccb);
3173 		return;
3174 	}
3175 
3176 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3177 	bzero(req, sizeof(*req));
3178 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3179 
3180 	/* Allow the chip to use any route to this SAS address. */
3181 	req->PhysicalPort = 0xff;
3182 
3183 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3184 	req->SGLFlags =
3185 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3186 
3187 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3188 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
3189 
3190 	mpr_init_sge(cm, req, &req->SGL);
3191 
3192 	/*
3193 	 * Set up a uio to pass into mpr_map_command().  This allows us to
3194 	 * do one map command, and one busdma call in there.
3195 	 */
3196 	cm->cm_uio.uio_iov = cm->cm_iovec;
3197 	cm->cm_uio.uio_iovcnt = 2;
3198 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3199 
3200 	/*
3201 	 * The read/write flag isn't used by busdma, but set it just in
3202 	 * case.  This isn't exactly accurate, either, since we're going in
3203 	 * both directions.
3204 	 */
3205 	cm->cm_uio.uio_rw = UIO_WRITE;
3206 
3207 	cm->cm_iovec[0].iov_base = request;
3208 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3209 	cm->cm_iovec[1].iov_base = response;
3210 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3211 
3212 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3213 			       cm->cm_iovec[1].iov_len;
3214 
3215 	/*
3216 	 * Trigger a warning message in mpr_data_cb() for the user if we
3217 	 * wind up exceeding two S/G segments.  The chip expects one
3218 	 * segment for the request and another for the response.
3219 	 */
3220 	cm->cm_max_segs = 2;
3221 
3222 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3223 	cm->cm_complete = mprsas_smpio_complete;
3224 	cm->cm_complete_data = ccb;
3225 
3226 	/*
3227 	 * Tell the mapping code that we're using a uio, and that this is
3228 	 * an SMP passthrough request.  There is a little special-case
3229 	 * logic there (in mpr_data_cb()) to handle the bidirectional
3230 	 * transfer.
3231 	 */
3232 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3233 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3234 
3235 	/* The chip data format is little endian. */
3236 	req->SASAddress.High = htole32(sasaddr >> 32);
3237 	req->SASAddress.Low = htole32(sasaddr);
3238 
3239 	/*
3240 	 * XXX Note that we don't have a timeout/abort mechanism here.
3241 	 * From the manual, it looks like task management requests only
3242 	 * work for SCSI IO and SATA passthrough requests.  We may need to
3243 	 * have a mechanism to retry requests in the event of a chip reset
3244 	 * at least.  Hopefully the chip will insure that any errors short
3245 	 * of that are relayed back to the driver.
3246 	 */
3247 	error = mpr_map_command(sc, cm);
3248 	if ((error != 0) && (error != EINPROGRESS)) {
3249 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3250 		    "mpr_map_command()\n", __func__, error);
3251 		goto bailout_error;
3252 	}
3253 
3254 	return;
3255 
3256 bailout_error:
3257 	mpr_free_command(sc, cm);
3258 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3259 	xpt_done(ccb);
3260 	return;
3261 }
3262 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (the target itself if it contains an embedded SMP
 * target, otherwise its parent device, which is probably the expander)
 * and hand the CCB off to mprsas_send_smpcmd().  On any failure to find
 * an address the CCB is completed immediately with an error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* A zero handle means the target slot is not populated. */
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we need to figure out what the expander's
	 * address is below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe path: look the parent up in the target table
		 * and take its cached devinfo/SAS address.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * New probe path: the parent's devinfo and SAS address
		 * were cached on the target itself during discovery.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3390 #endif //__FreeBSD_version >= 900026
3391 
3392 static void
3393 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3394 {
3395 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3396 	struct mpr_softc *sc;
3397 	struct mpr_command *tm;
3398 	struct mprsas_target *targ;
3399 
3400 	MPR_FUNCTRACE(sassc->sc);
3401 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3402 
3403 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3404 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3405 	sc = sassc->sc;
3406 	tm = mpr_alloc_command(sc);
3407 	if (tm == NULL) {
3408 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3409 		    "mprsas_action_resetdev\n");
3410 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3411 		xpt_done(ccb);
3412 		return;
3413 	}
3414 
3415 	targ = &sassc->targets[ccb->ccb_h.target_id];
3416 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3417 	req->DevHandle = htole16(targ->handle);
3418 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3419 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3420 
3421 	/* SAS Hard Link Reset / SATA Link Reset */
3422 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3423 
3424 	tm->cm_data = NULL;
3425 	tm->cm_desc.HighPriority.RequestFlags =
3426 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3427 	tm->cm_complete = mprsas_resetdev_complete;
3428 	tm->cm_complete_data = ccb;
3429 
3430 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3431 	    __func__, targ->tid);
3432 	tm->cm_targ = targ;
3433 	targ->flags |= MPRSAS_TARGET_INRESET;
3434 
3435 	mpr_map_command(sc, tm);
3436 }
3437 
3438 static void
3439 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3440 {
3441 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3442 	union ccb *ccb;
3443 
3444 	MPR_FUNCTRACE(sc);
3445 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3446 
3447 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3448 	ccb = tm->cm_complete_data;
3449 
3450 	/*
3451 	 * Currently there should be no way we can hit this case.  It only
3452 	 * happens when we have a failure to allocate chain frames, and
3453 	 * task management commands don't have S/G lists.
3454 	 */
3455 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3456 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3457 
3458 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3459 
3460 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3461 		    "handle %#04x! This should not happen!\n", __func__,
3462 		    tm->cm_flags, req->DevHandle);
3463 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3464 		goto bailout;
3465 	}
3466 
3467 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3468 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3469 
3470 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3471 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3472 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3473 		    CAM_LUN_WILDCARD);
3474 	}
3475 	else
3476 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3477 
3478 bailout:
3479 
3480 	mprsas_free_tm(sc, tm);
3481 	xpt_done(ccb);
3482 }
3483 
3484 static void
3485 mprsas_poll(struct cam_sim *sim)
3486 {
3487 	struct mprsas_softc *sassc;
3488 
3489 	sassc = cam_sim_softc(sim);
3490 
3491 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3492 		/* frequent debug messages during a panic just slow
3493 		 * everything down too much.
3494 		 */
3495 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3496 		    __func__);
3497 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3498 	}
3499 
3500 	mpr_intr_locked(sassc->sc);
3501 }
3502 
/*
 * CAM asynchronous event callback.  Two events are handled here:
 *
 *  - AC_ADVINFO_CHANGED (newer FreeBSD versions): when cached READ
 *    CAPACITY(16) data changes for a LUN, re-fetch it via an
 *    XPT_DEV_ADVINFO CCB and record whether the LUN is EEDP
 *    (protection-information) formatted, and its block size.
 *
 *  - AC_FOUND_DEVICE (older FreeBSD versions): probe a newly found
 *    device for EEDP support via mprsas_check_eedp().
 *
 * On FreeBSD versions that do not support path-filtered async
 * registration we receive events for every SIM and must filter out
 * those not belonging to ours.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* The advinfo buffer type is passed in the arg pointer. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing record of this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/*
		 * First time we've seen this LUN: allocate a record for it
		 * so the EEDP state below has somewhere to live.
		 */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data for this LUN via
		 * an XPT_DEV_ADVINFO CCB issued on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record the LUN's EEDP formatting state from the
		 * protection-enable bit and logical block length.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3632 
3633 #if (__FreeBSD_version < 901503) || \
3634     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3635 static void
3636 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3637     struct ccb_getdev *cgd)
3638 {
3639 	struct mprsas_softc *sassc = sc->sassc;
3640 	struct ccb_scsiio *csio;
3641 	struct scsi_read_capacity_16 *scsi_cmd;
3642 	struct scsi_read_capacity_eedp *rcap_buf;
3643 	path_id_t pathid;
3644 	target_id_t targetid;
3645 	lun_id_t lunid;
3646 	union ccb *ccb;
3647 	struct cam_path *local_path;
3648 	struct mprsas_target *target;
3649 	struct mprsas_lun *lun;
3650 	uint8_t	found_lun;
3651 	char path_str[64];
3652 
3653 	pathid = cam_sim_path(sassc->sim);
3654 	targetid = xpt_path_target_id(path);
3655 	lunid = xpt_path_lun_id(path);
3656 
3657 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3658 	    "mprsas_check_eedp\n", targetid));
3659 	target = &sassc->targets[targetid];
3660 	if (target->handle == 0x0)
3661 		return;
3662 
3663 	/*
3664 	 * Determine if the device is EEDP capable.
3665 	 *
3666 	 * If this flag is set in the inquiry data, the device supports
3667 	 * protection information, and must support the 16 byte read capacity
3668 	 * command, otherwise continue without sending read cap 16.
3669 	 */
3670 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3671 		return;
3672 
3673 	/*
3674 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3675 	 * the LUN is formatted for EEDP support.
3676 	 */
3677 	ccb = xpt_alloc_ccb_nowait();
3678 	if (ccb == NULL) {
3679 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3680 		    "support.\n");
3681 		return;
3682 	}
3683 
3684 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3685 	    CAM_REQ_CMP) {
3686 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3687 		    "support.\n");
3688 		xpt_free_ccb(ccb);
3689 		return;
3690 	}
3691 
3692 	/*
3693 	 * If LUN is already in list, don't create a new one.
3694 	 */
3695 	found_lun = FALSE;
3696 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3697 		if (lun->lun_id == lunid) {
3698 			found_lun = TRUE;
3699 			break;
3700 		}
3701 	}
3702 	if (!found_lun) {
3703 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3704 		    M_NOWAIT | M_ZERO);
3705 		if (lun == NULL) {
3706 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3707 			    "EEDP support.\n");
3708 			xpt_free_path(local_path);
3709 			xpt_free_ccb(ccb);
3710 			return;
3711 		}
3712 		lun->lun_id = lunid;
3713 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3714 	}
3715 
3716 	xpt_path_string(local_path, path_str, sizeof(path_str));
3717 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3718 	    path_str, target->handle);
3719 
3720 	/*
3721 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3722 	 * mprsas_read_cap_done function will load the read cap info into the
3723 	 * LUN struct.
3724 	 */
3725 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3726 	    M_NOWAIT | M_ZERO);
3727 	if (rcap_buf == NULL) {
3728 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3729 		    "buffer for EEDP support.\n");
3730 		xpt_free_path(ccb->ccb_h.path);
3731 		xpt_free_ccb(ccb);
3732 		return;
3733 	}
3734 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3735 	csio = &ccb->csio;
3736 	csio->ccb_h.func_code = XPT_SCSI_IO;
3737 	csio->ccb_h.flags = CAM_DIR_IN;
3738 	csio->ccb_h.retry_count = 4;
3739 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3740 	csio->ccb_h.timeout = 60000;
3741 	csio->data_ptr = (uint8_t *)rcap_buf;
3742 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3743 	csio->sense_len = MPR_SENSE_LEN;
3744 	csio->cdb_len = sizeof(*scsi_cmd);
3745 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3746 
3747 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3748 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3749 	scsi_cmd->opcode = 0x9E;
3750 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3751 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3752 
3753 	ccb->ccb_h.ppriv_ptr1 = sassc;
3754 	xpt_action(ccb);
3755 }
3756 
/*
 * Completion handler for the driver-internal READ CAPACITY 16 command sent
 * by mprsas_check_eedp().  Records whether the LUN is formatted for EEDP
 * (and its block size) in the matching mprsas_lun entry, then frees the
 * data buffer, path, and CCB.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself here because this SCSI
	 * command was generated internally by the driver, so completion
	 * never passes through the cam_periph layer that would normally do
	 * the release.  This is currently the only internally generated
	 * command; any future internal commands must release the devq the
	 * same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	/* Buffer allocated (M_MPR) by mprsas_check_eedp(); freed below. */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.  The softc was stashed in ppriv_ptr1 before xpt_action().
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is PROT_EN (protection enabled). */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3825 #endif /* (__FreeBSD_version < 901503) || \
3826           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3827 
3828 void
3829 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3830     struct mprsas_target *target, lun_id_t lun_id)
3831 {
3832 	union ccb *ccb;
3833 	path_id_t path_id;
3834 
3835 	/*
3836 	 * Set the INRESET flag for this target so that no I/O will be sent to
3837 	 * the target until the reset has completed.  If an I/O request does
3838 	 * happen, the devq will be frozen.  The CCB holds the path which is
3839 	 * used to release the devq.  The devq is released and the CCB is freed
3840 	 * when the TM completes.
3841 	 */
3842 	ccb = xpt_alloc_ccb_nowait();
3843 	if (ccb) {
3844 		path_id = cam_sim_path(sc->sassc->sim);
3845 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3846 		    target->tid, lun_id) != CAM_REQ_CMP) {
3847 			xpt_free_ccb(ccb);
3848 		} else {
3849 			tm->cm_ccb = ccb;
3850 			tm->cm_targ = target;
3851 			target->flags |= MPRSAS_TARGET_INRESET;
3852 		}
3853 	}
3854 }
3855 
3856 int
3857 mprsas_startup(struct mpr_softc *sc)
3858 {
3859 	/*
3860 	 * Send the port enable message and set the wait_for_port_enable flag.
3861 	 * This flag helps to keep the simq frozen until all discovery events
3862 	 * are processed.
3863 	 */
3864 	sc->wait_for_port_enable = 1;
3865 	mprsas_send_portenable(sc);
3866 	return (0);
3867 }
3868 
3869 static int
3870 mprsas_send_portenable(struct mpr_softc *sc)
3871 {
3872 	MPI2_PORT_ENABLE_REQUEST *request;
3873 	struct mpr_command *cm;
3874 
3875 	MPR_FUNCTRACE(sc);
3876 
3877 	if ((cm = mpr_alloc_command(sc)) == NULL)
3878 		return (EBUSY);
3879 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3880 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3881 	request->MsgFlags = 0;
3882 	request->VP_ID = 0;
3883 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3884 	cm->cm_complete = mprsas_portenable_complete;
3885 	cm->cm_data = NULL;
3886 	cm->cm_sge = NULL;
3887 
3888 	mpr_map_command(sc, cm);
3889 	mpr_dprint(sc, MPR_XINFO,
3890 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3891 	    cm, cm->cm_req, cm->cm_complete);
3892 	return (0);
3893 }
3894 
3895 static void
3896 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3897 {
3898 	MPI2_PORT_ENABLE_REPLY *reply;
3899 	struct mprsas_softc *sassc;
3900 
3901 	MPR_FUNCTRACE(sc);
3902 	sassc = sc->sassc;
3903 
3904 	/*
3905 	 * Currently there should be no way we can hit this case.  It only
3906 	 * happens when we have a failure to allocate chain frames, and
3907 	 * port enable commands don't have S/G lists.
3908 	 */
3909 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3910 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3911 		    "This should not happen!\n", __func__, cm->cm_flags);
3912 	}
3913 
3914 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3915 	if (reply == NULL)
3916 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3917 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3918 	    MPI2_IOCSTATUS_SUCCESS)
3919 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3920 
3921 	mpr_free_command(sc, cm);
3922 	if (sc->mpr_ich.ich_arg != NULL) {
3923 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3924 		config_intrhook_disestablish(&sc->mpr_ich);
3925 		sc->mpr_ich.ich_arg = NULL;
3926 	}
3927 
3928 	/*
3929 	 * Done waiting for port enable to complete.  Decrement the refcount.
3930 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3931 	 * take place.
3932 	 */
3933 	sc->wait_for_port_enable = 0;
3934 	sc->port_enable_complete = 1;
3935 	wakeup(&sc->port_enable_complete);
3936 	mprsas_startup_decrement(sassc);
3937 }
3938 
3939 int
3940 mprsas_check_id(struct mprsas_softc *sassc, int id)
3941 {
3942 	struct mpr_softc *sc = sassc->sc;
3943 	char *ids;
3944 	char *name;
3945 
3946 	ids = &sc->exclude_ids[0];
3947 	while((name = strsep(&ids, ",")) != NULL) {
3948 		if (name[0] == '\0')
3949 			continue;
3950 		if (strtol(name, NULL, 0) == (long)id)
3951 			return (1);
3952 	}
3953 
3954 	return (0);
3955 }
3956 
3957 void
3958 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3959 {
3960 	struct mprsas_softc *sassc;
3961 	struct mprsas_lun *lun, *lun_tmp;
3962 	struct mprsas_target *targ;
3963 	int i;
3964 
3965 	sassc = sc->sassc;
3966 	/*
3967 	 * The number of targets is based on IOC Facts, so free all of
3968 	 * the allocated LUNs for each target and then the target buffer
3969 	 * itself.
3970 	 */
3971 	for (i=0; i< maxtargets; i++) {
3972 		targ = &sassc->targets[i];
3973 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3974 			free(lun, M_MPR);
3975 		}
3976 	}
3977 	free(sassc->targets, M_MPR);
3978 
3979 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3980 	    M_MPR, M_WAITOK|M_ZERO);
3981 	if (!sassc->targets) {
3982 		panic("%s failed to alloc targets with error %d\n",
3983 		    __func__, ENOMEM);
3984 	}
3985 }
3986